/kernel/linux/linux-5.10/lib/ |
D | list-test.c |
  list_test_list_cut_position():
    348  struct list_head entries[3], *cur;  [local]
    353  list_add_tail(&entries[0], &list1);
    354  list_add_tail(&entries[1], &list1);
    355  list_add_tail(&entries[2], &list1);
    357  /* before: [list1] -> entries[0] -> entries[1] -> entries[2] */
    358  list_cut_position(&list2, &list1, &entries[1]);
    359  /* after: [list2] -> entries[0] -> entries[1], [list1] -> entries[2] */
    362  KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
    369  KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
  list_test_list_cut_before():
    376  struct list_head entries[3], *cur;  [local]
  [all …]
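A note on the call under test: list_cut_position(dst, src, cut) moves the front of src, up to and including *cut, onto dst, leaving the remainder on src. A minimal sketch of the same sequence outside the KUnit harness (demo_list_cut and the nodes[] array are illustrative, not from the kernel tree):

    #include <linux/list.h>

    /* Sketch: reproduce the before/after diagram from the test above. */
    static void demo_list_cut(void)
    {
            LIST_HEAD(src);
            LIST_HEAD(dst);
            struct list_head nodes[3];
            int i;

            for (i = 0; i < 3; i++)
                    list_add_tail(&nodes[i], &src);

            /* src: nodes[0] -> nodes[1] -> nodes[2] */
            list_cut_position(&dst, &src, &nodes[1]);
            /* dst: nodes[0] -> nodes[1];  src: nodes[2] */
    }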
|
D | stackdepot.c |
    65   unsigned long entries[1];  /* Variable-sized array of entries. */  [member]
  depot_alloc_stack():
    104  static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,  [argument]
    107  int required_size = offsetof(struct stack_record, entries) +
    139  memcpy(stack->entries, entries, size * sizeof(unsigned long));
  hash_stack():
    155  static inline u32 hash_stack(unsigned long *entries, unsigned int size)  [argument]
    157  return jhash2((u32 *)entries,
  find_stack():
    177  /* Find a stack that is equal to the one stored in entries in the hash */
    179  unsigned long *entries, int size,  [argument]
    187  !stackdepot_memcmp(entries, found->entries, size))
    194  * stack_depot_fetch - Fetch stack entries from a depot
  [all …]
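The depot stores each distinct stack once: hash_stack() buckets the entries via jhash2(), find_stack() looks for an existing record, and depot_alloc_stack() copies the entries into a new variable-sized record only on a miss; stack_depot_fetch() later returns the saved entries for a handle. A hedged sketch of the caller side, assuming the stack_trace_save()/stack_depot_save() API exported in 5.10 (record_current_stack is a made-up wrapper):

    #include <linux/kernel.h>
    #include <linux/stackdepot.h>
    #include <linux/stacktrace.h>

    /* Capture the current call chain and deduplicate it in the depot. */
    static depot_stack_handle_t record_current_stack(gfp_t gfp)
    {
            unsigned long entries[16];
            unsigned int nr;

            nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
            return stack_depot_save(entries, nr, gfp);
    }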
|
D | test_rhashtable.c |
    31   MODULE_PARM_DESC(parm_entries, "Number of entries to add (default: 50000)");
    73   unsigned int entries;  [member]
  test_rht_lookup():
    138  unsigned int entries)  [argument]
    142  for (i = 0; i < entries; i++) {
  test_bucket_stats():
    175  static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)  [argument]
    201  pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d, table-jumps=%u\n",
    202  total, atomic_read(&ht->nelems), entries, chain_len);
    204  if (total != atomic_read(&ht->nelems) || total != entries)
  test_rhashtable():
    209  unsigned int entries)  [argument]
    218  * Insert entries into table with all keys even numbers
  [all …]
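The pattern the test drives is the ordinary rhashtable API: a constant params struct describing key and head offsets, then *_fast insert and lookup calls that take the params by value. A simplified sketch with assumed names (demo_obj/demo_params; the real module defines its own test_obj), after the table has been set up with rhashtable_init(ht, &demo_params):

    #include <linux/errno.h>
    #include <linux/rhashtable.h>

    struct demo_obj {
            u32 key;
            struct rhash_head node;
    };

    static const struct rhashtable_params demo_params = {
            .key_len     = sizeof(u32),
            .key_offset  = offsetof(struct demo_obj, key),
            .head_offset = offsetof(struct demo_obj, node),
    };

    /* Insert one object, then look it back up by key. */
    static int demo_insert_lookup(struct rhashtable *ht, struct demo_obj *obj)
    {
            int err = rhashtable_insert_fast(ht, &obj->node, demo_params);

            if (err)
                    return err;
            return rhashtable_lookup_fast(ht, &obj->key, demo_params) ? 0 : -ENOENT;
    }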
|
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ |
D | vega10_pptable.h |
    131  UCHAR ucNumEntries;  /* Number of entries. */
    132  ATOM_Vega10_State states[1];  /* Dynamically allocate entries. */
    165  UCHAR ucNumEntries;  /* Number of entries. */
    166  ATOM_Vega10_GFXCLK_Dependency_Record entries[1];  /* Dynamically allocate entries. */  [member]
    171  UCHAR ucNumEntries;  /* Number of entries. */
    172  ATOM_Vega10_MCLK_Dependency_Record entries[1];  /* Dynamically allocate entries. */  [member]
    177  UCHAR ucNumEntries;  /* Number of entries. */
    178  ATOM_Vega10_CLK_Dependency_Record entries[1];  /* Dynamically allocate entries. */  [member]
    183  UCHAR ucNumEntries;  /* Number of entries. */
    184  ATOM_Vega10_CLK_Dependency_Record entries[1];  /* Dynamically allocate entries. */  [member]
  [all …]
|
D | smu_helper.c |
  phm_trim_voltage_table():
    224  vvalue = vol_table->entries[i].value;
    228  if (vvalue == table->entries[j].value) {
    235  table->entries[table->count].value = vvalue;
    236  table->entries[table->count].smio_low =
    237  vol_table->entries[i].smio_low;
  phm_get_svi2_mvdd_voltage_table():
    265  vol_table->entries[i].value = dep_table->entries[i].mvdd;
    266  vol_table->entries[i].smio_low = 0;
  phm_get_svi2_vddci_voltage_table():
    293  vol_table->entries[i].value = dep_table->entries[i].vddci;
    294  vol_table->entries[i].smio_low = 0;
  phm_get_svi2_vdd_voltage_table():
    321  vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
  [all …]
|
D | pptable_v1_0.h |
    166  UCHAR ucNumEntries;  /* Number of entries. */
    167  ATOM_Tonga_State entries[1];  /* Dynamically allocate entries. */  [member]
    181  UCHAR ucNumEntries;  /* Number of entries. */
    182  ATOM_Tonga_MCLK_Dependency_Record entries[1];  /* Dynamically allocate entries. */  [member]
    196  UCHAR ucNumEntries;  /* Number of entries. */
    197  ATOM_Tonga_SCLK_Dependency_Record entries[1];  /* Dynamically allocate entries. */  [member]
    212  UCHAR ucNumEntries;  /* Number of entries. */
    213  ATOM_Polaris_SCLK_Dependency_Record entries[1];  /* Dynamically allocate entries. */  [member]
    224  UCHAR ucNumEntries;  /* Number of entries. */
    225  ATOM_Tonga_PCIE_Record entries[1];  /* Dynamically allocate entries. */  [member]
  [all …]
|
D | hwmgr_ppt.h |
    50   uint32_t count;  /* Number of entries. */
    51   …phm_ppt_v1_clock_voltage_dependency_record entries[1];  /* Dynamically allocate count entries. …  [member]
    73   uint32_t count;  /* Number of entries. */
    74   …phm_ppt_v1_mm_clock_voltage_dependency_record entries[1];  /* Dynamically allocate count entries. …  [member]
    89   phm_ppt_v1_voltage_lookup_record entries[1];  /* Dynamically allocate count entries. */  [member]
    105  uint32_t count;  /* Number of entries. */
    106  …phm_ppt_v1_pcie_record entries[1];  /* Dynamically allocate count entries. …  [member]
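All three powerplay headers use the same pre-C99 idiom: a one-element array as the last member stands in for a flexible array, and callers allocate room for the real count behind it ("Dynamically allocate count entries"). A generic sketch of how such a table is sized, with hypothetical demo_* names:

    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_table {
            u32 count;
            u32 entries[1];         /* really 'count' entries follow */
    };

    /* offsetof() gives the header size without the [1] placeholder, so
     * exactly 'count' real entries fit behind it. */
    static struct demo_table *demo_table_alloc(u32 count)
    {
            struct demo_table *t;

            t = kzalloc(offsetof(struct demo_table, entries) +
                        count * sizeof(t->entries[0]), GFP_KERNEL);
            if (t)
                    t->count = count;
            return t;
    }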
|
D | vega10_processpptables.c |
  init_over_drive_limits():
    320  (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
  get_mm_clock_voltage_table():
    365  mm_dependency_record = &mm_dependency_table->entries[i];
    366  mm_table->entries[i].vddcInd = mm_dependency_record->ucVddcInd;
    367  mm_table->entries[i].samclock =
    369  mm_table->entries[i].eclk = le32_to_cpu(mm_dependency_record->ulEClk);
    370  mm_table->entries[i].vclk = le32_to_cpu(mm_dependency_record->ulVClk);
    371  mm_table->entries[i].dclk = le32_to_cpu(mm_dependency_record->ulDClk);
  get_socclk_voltage_dependency_table():
    592  clk_table->entries[i].vddInd =
    593  clk_dep_table->entries[i].ucVddInd;
    594  clk_table->entries[i].clk =
  [all …]
|
/kernel/linux/linux-5.10/kernel/ |
D | stacktrace.c |
  stack_trace_print():
    18   * stack_trace_print - Print the entries in the stack trace
    19   * @entries: Pointer to storage array
    20   * @nr_entries: Number of entries in the storage array
    23   void stack_trace_print(const unsigned long *entries, unsigned int nr_entries,  [argument]
    28   if (WARN_ON(!entries))
    32   printk("%*c%pS\n", 1 + spaces, ' ', (void *)entries[i]);
  stack_trace_snprint():
    37   * stack_trace_snprint - Print the entries in the stack trace into a buffer
    40   * @entries: Pointer to storage array
    41   * @nr_entries: Number of entries in the storage array
    46   int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,  [argument]
  [all …]
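Both helpers consume an entries[] array that some producer filled in; in 5.10 the usual producer is stack_trace_save(). A small sketch pairing the two (dump_current_stack is illustrative):

    #include <linux/kernel.h>
    #include <linux/stacktrace.h>

    /* Capture the current call chain and print it, indented four spaces. */
    static void dump_current_stack(void)
    {
            unsigned long entries[32];
            unsigned int nr;

            nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
            stack_trace_print(entries, nr, 4);
    }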
|
/kernel/linux/linux-5.10/arch/x86/kernel/cpu/ |
D | intel.c |
  early_init_intel():
    299  * the TLB when any changes are made to any of the page table entries.
  intel_tlb_table[]:
    793  { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
  intel_tlb_lookup():
    823  if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
    824  tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
    825  if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
    826  tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
    829  if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
    830  tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
    831  if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
    832  tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
  [all …]
|
/kernel/linux/linux-5.10/tools/lib/api/fd/ |
D | array.c |
  fdarray__init():
    15   fda->entries = NULL;
  fdarray__grow():
    27   struct pollfd *entries = realloc(fda->entries, size);  [local]
    29   if (entries == NULL)
    34   free(entries);
    38   memset(&entries[fda->nr_alloc], 0, sizeof(struct pollfd) * nr);
    42   fda->entries = entries;
  fdarray__exit():
    65   free(fda->entries);
  fdarray__add():
    84   fda->entries[fda->nr].fd = fd;
    85   fda->entries[fda->nr].events = revents;
  fdarray__filter():
    101  if (!fda->entries[fd].events)
  [all …]
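fdarray__grow() is the standard grow-in-place pattern: realloc to the larger capacity, zero only the freshly added tail, and commit the new pointer and capacity only on success. A self-contained userspace sketch of the same pattern (the demo_* struct is reconstructed for illustration and is not the libapi type):

    #include <errno.h>
    #include <poll.h>
    #include <stdlib.h>
    #include <string.h>

    struct demo_fdarray {
            int nr;
            int nr_alloc;
            struct pollfd *entries;
    };

    static int demo_fdarray_grow(struct demo_fdarray *fda, int nr)
    {
            int nr_alloc = fda->nr_alloc + nr;
            struct pollfd *entries = realloc(fda->entries,
                                             sizeof(struct pollfd) * nr_alloc);

            if (entries == NULL)
                    return -ENOMEM;

            /* Zero only the new tail; the old entries are preserved. */
            memset(&entries[fda->nr_alloc], 0, sizeof(struct pollfd) * nr);

            fda->nr_alloc = nr_alloc;
            fda->entries = entries;
            return 0;
    }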
|
/kernel/linux/linux-5.10/arch/powerpc/mm/book3s64/ |
D | iommu_api.c |
    34   u64 entries;  /* number of entries in hpas/hpages[] */  [member]
  mm_iommu_do_alloc():
    57   unsigned long entries, unsigned long dev_hpa,  [argument]
    66   ret = account_locked_vm(mm, entries, true);
    70   locked_entries = entries;
    80   mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));
    88   * we use @ua and @entries natural alignment to allow IOMMU pages
    91   mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
    92   mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
    102  chunk = min(chunk, entries);
    103  for (entry = 0; entry < entries; entry += chunk) {
  [all …]
|
/kernel/linux/linux-5.10/kernel/events/ |
D | callchain.c |
  release_callchain_buffers_rcu():
    50   struct callchain_cpus_entries *entries;  [local]
    53   entries = container_of(head, struct callchain_cpus_entries, rcu_head);
    56   kfree(entries->cpu_entries[cpu]);
    58   kfree(entries);
  release_callchain_buffers():
    63   struct callchain_cpus_entries *entries;  [local]
    65   entries = callchain_cpus_entries;
    67   call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
  alloc_callchain_buffers():
    74   struct callchain_cpus_entries *entries;  [local]
    83   entries = kzalloc(size, GFP_KERNEL);
    84   if (!entries)
  [all …]
|
/kernel/linux/linux-5.10/tools/perf/util/ |
D | mem2node.c |
  mem2node__init():
    50   struct phys_entry *entries, *tmp_entries;  [local]
    62   entries = zalloc(sizeof(*entries) * max);
    63   if (!entries)
    84   struct phys_entry *prev = &entries[j - 1];
    93   phys_entry__init(&entries[j++], start, bsize, n->node);
    97   /* Cut unused entries, due to merging. */
    98   tmp_entries = realloc(entries, sizeof(*entries) * j);
    100  entries = tmp_entries;
    104  entries[i].node, entries[i].start, entries[i].end);
    106  phys_entry__insert(&entries[i], &map->root);
  [all …]
|
D | rb_resort.h |
    6    * a new sort criteria, that must be present in the entries of the source
    15   * fields to be present in each of the entries in the new, sorted, rb_tree.
    18   * pre-calculating them from multiple entries in the original 'entry' from
    19   * the rb_tree used as a source for the entries to be sorted:
    72   struct rb_root entries; \
    79   struct rb_node **p = &sorted->entries.rb_node, *parent = NULL; \
    88   rb_insert_color(sorted_nd, &sorted->entries); \
    92   struct rb_root *entries) \
    96   for (nd = rb_first(entries); nd; nd = rb_next(nd)) { \
    103  static struct __name##_sorted *__name##_sorted__new(struct rb_root *entries, \
  [all …]
|
/kernel/linux/linux-5.10/include/linux/ |
D | llist.h |
    35   * The list entries deleted via llist_del_all can be traversed with
    37   * entries can not be traversed safely before deleted from the list.
    38   * The order of deleted entries is from the newest to the oldest added
  llist_for_each():
    101  * llist_for_each - iterate over some deleted entries of a lock-less list
    103  * @node: the first entry of deleted list entries
    105  * In general, some entries of the lock-less list can be traversed
    109  * If being used on entries deleted from lock-less list directly, the
  llist_for_each_safe():
    118  * llist_for_each_safe - iterate over some deleted entries of a lock-less list
    122  * @node: the first entry of deleted list entries
    124  * In general, some entries of the lock-less list can be traversed
  [all …]
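The comments describe the intended consume side: detach the whole list atomically with llist_del_all(), then walk the detached chain, which runs newest-first. A sketch using the safe iterator so each node can be freed during traversal (demo_item and demo_consume are illustrative):

    #include <linux/llist.h>
    #include <linux/slab.h>

    struct demo_item {
            int val;
            struct llist_node node;
    };

    /* Drain every pending item and free it; safe against concurrent
     * llist_add() on the producer side. */
    static void demo_consume(struct llist_head *head)
    {
            struct llist_node *first = llist_del_all(head);
            struct llist_node *pos, *n;

            llist_for_each_safe(pos, n, first) {
                    struct demo_item *item =
                            llist_entry(pos, struct demo_item, node);

                    kfree(item);
            }
    }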
|
/kernel/linux/linux-5.10/drivers/net/ethernet/netronome/nfp/nfpcore/ |
D | nfp_nsp_eth.c |
  __nfp_eth_read_ports():
    247  union eth_table_entry *entries;  [local]
    251  entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);
    252  if (!entries)
    255  ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
    262  if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
    270  nfp_err(cpp, "table entry count reported (%d) does not match entries present (%d)\n",
    281  if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
    282  nfp_eth_port_translate(nsp, &entries[i], i,
    289  kfree(entries);
    294  kfree(entries);
  [all …]
|
/kernel/linux/linux-5.10/tools/perf/tests/ |
D | fdarray.c |
  fdarray__init_revents():
    14   fda->entries[fd].fd = fda->nr - fd;
    15   fda->entries[fd].events = revents;
    16   fda->entries[fd].revents = revents;
  test__fdarray__filter():
    58   fda->entries[2].revents = POLLIN;
    60   pr_debug("\nfiltering all but fda->entries[2]:");
    70   fda->entries[0].revents = POLLIN;
    71   fda->entries[3].revents = POLLIN;
    73   pr_debug("\nfiltering all but (fda->entries[0], fda->entries[3]):");
  test__fdarray__add():
    103  if (fda->entries[_idx].fd != _fd) { \
    104  pr_debug("\n%d: fda->entries[%d](%d) != %d!", \
  [all …]
|
/kernel/linux/linux-5.10/arch/x86/kernel/ |
D | e820.c |
    28   * the first 128 E820 memory entries in boot_params.e820_table and the remaining
    29   * (if any) entries of the SETUP_E820_EXT nodes. We use this to:
  _e820__mapped_any():
    83   struct e820_entry *entry = &table->entries[i];
  __e820__mapped_all():
    118  struct e820_entry *entry = &e820_table->entries[i];
  __e820__range_add():
    170  if (x >= ARRAY_SIZE(table->entries)) {
    171  pr_err("too many entries; ignoring [mem %#010llx-%#010llx]\n",
    176  table->entries[x].addr = start;
    177  table->entries[x].size = size;
    178  table->entries[x].type = type;
  e820__print_table():
    210  e820_table->entries[i].addr,
  [all …]
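__e820__range_add() is a plain bounded append into the fixed entries[] array from e820/types.h: refuse when the table is full, otherwise fill the next slot. A reduced sketch of that flow (the demo_* types stand in for the real e820 structures):

    #define DEMO_MAX_ENTRIES 128

    struct demo_entry {
            unsigned long long addr;
            unsigned long long size;
            int type;
    };

    struct demo_table {
            int nr_entries;
            struct demo_entry entries[DEMO_MAX_ENTRIES];
    };

    static void demo_range_add(struct demo_table *table,
                               unsigned long long start,
                               unsigned long long size, int type)
    {
            int x = table->nr_entries;

            if (x >= DEMO_MAX_ENTRIES)
                    return;         /* "too many entries; ignoring ..." */

            table->entries[x].addr = start;
            table->entries[x].size = size;
            table->entries[x].type = type;
            table->nr_entries++;
    }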
|
/kernel/linux/linux-5.10/Documentation/ABI/testing/ |
D | sysfs-firmware-efi-esrt |
    13   Description: The number of entries in the ESRT
    18   Description: The maximum number of entries that /could/ be registered
    27   What: /sys/firmware/efi/esrt/entries/entry$N/
    31   subdirectory under entries/ .
    32   example: /sys/firmware/efi/esrt/entries/entry0/
    34   What: /sys/firmware/efi/esrt/entries/entry$N/fw_type
    46   What: /sys/firmware/efi/esrt/entries/entry$N/fw_class
    51   What: /sys/firmware/efi/esrt/entries/entry$N/fw_version
    57   What: /sys/firmware/efi/esrt/entries/entry$N/lowest_supported_fw_version
    62   What: /sys/firmware/efi/esrt/entries/entry$N/capsule_flags
  [all …]
|
D | sysfs-firmware-dmi-entries |
    1    What: /sys/firmware/dmi/entries/
    15   DMI is structured as a large table of entries, where
    19   entries.
    21   Some entries are required by the specification, but many
    27   Multiple entries of the same type are allowed. In order
    31   to say, if there are 'N' multiple entries with the same type
    34   entries "T-0" through "T-(N-1)":
    38   /sys/firmware/dmi/entries/17-0
    39   /sys/firmware/dmi/entries/17-1
    40   /sys/firmware/dmi/entries/17-2
  [all …]
|
/kernel/linux/linux-5.10/drivers/gpu/drm/radeon/ |
D | r600_dpm.c |
  r600_parse_clk_voltage_dep_table():
    828  radeon_table->entries = kzalloc(size, GFP_KERNEL);
    829  if (!radeon_table->entries)
    832  entry = &atom_table->entries[0];
    834  radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
    836  radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
  r600_parse_extended_power_table():
    938  kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
    949  kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
    950  kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
    961  kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
    962  kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
  [all …]
|
/kernel/linux/linux-5.10/drivers/misc/vmw_vmci/ |
D | vmci_handle_array.c |
  vmci_handle_arr_append_entry():
    68   array->entries[array->size] = handle;
  vmci_handle_arr_remove_entry():
    84   if (vmci_handle_is_equal(array->entries[i], entry_handle)) {
    85   handle = array->entries[i];
    87   array->entries[i] = array->entries[array->size];
    88   array->entries[array->size] = VMCI_INVALID_HANDLE;
  vmci_handle_arr_remove_tail():
    105  handle = array->entries[array->size];
    106  array->entries[array->size] = VMCI_INVALID_HANDLE;
  vmci_handle_arr_get_entry():
    121  return array->entries[index];
  vmci_handle_arr_has_entry():
    130  if (vmci_handle_is_equal(array->entries[i], entry_handle))
  vmci_handle_arr_get_handles():
    143  return array->entries;
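vmci_handle_arr_remove_entry() uses the classic unordered-array removal: copy the last element over the victim, invalidate the vacated tail slot, and shrink the size, trading element order for O(1) removal. A generic sketch of the idiom with plain ints standing in for VMCI handles:

    /* Returns 0 if victim was found and removed, -1 otherwise. */
    static int demo_remove(int *entries, int *size, int victim)
    {
            int i;

            for (i = 0; i < *size; i++) {
                    if (entries[i] != victim)
                            continue;
                    (*size)--;
                    entries[i] = entries[*size];
                    entries[*size] = 0;     /* VMCI_INVALID_HANDLE analogue */
                    return 0;
            }
            return -1;
    }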
|
/kernel/linux/linux-5.10/arch/x86/include/asm/e820/ |
D | types.h |
    66   * which cannot be fit into so few entries - so we have a mechanism
    70   * ( Those extra entries are enumerated via the EFI memory map, not
    74   * entries, based on a heuristic calculation: up to three entries per
    78   * E820 entries that might need room in the same arrays, prior to the
    80   * of three memory map entries per node is "enough" entries for
    82   * use of additional EFI map entries. Future platforms may want
    83   * to allow more than three entries per node or otherwise refine
    92   * The whole array of E820 entries:
    96   struct e820_entry entries[E820_MAX_ENTRIES];  [member]
|
/kernel/linux/linux-5.10/Documentation/core-api/ |
D | xarray.rst |
    44   are used to distinguish value entries from normal pointers, so you must
    45   decide whether you want to store value entries or tagged pointers in
    49   conflict with value entries or internal entries.
    51   An unusual feature of the XArray is the ability to create entries which
    55   entries can be explicitly split into smaller entries, or storing ``NULL``
    66   You can then set entries using xa_store() and get entries
    84   You can copy entries out of the XArray into a plain array by calling
    85   xa_extract(). Or you can iterate over the present entries in the XArray
    107  If all entries in the array are ``NULL``, the xa_empty() function
    110  Finally, you can remove all entries from an XArray by calling
  [all …]
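A minimal sketch of the calls named above, assuming a statically defined XArray (demo_xa and demo_xarray are illustrative):

    #include <linux/printk.h>
    #include <linux/xarray.h>

    static DEFINE_XARRAY(demo_xa);

    static void demo_xarray(void)
    {
            static int a = 1, b = 2;    /* 4-byte aligned, so storable */
            unsigned long index;
            void *entry;

            xa_store(&demo_xa, 0, &a, GFP_KERNEL);
            xa_store(&demo_xa, 5, &b, GFP_KERNEL);

            entry = xa_load(&demo_xa, 5);           /* &b */

            xa_for_each(&demo_xa, index, entry)     /* present entries only */
                    pr_info("xa[%lu] = %d\n", index, *(int *)entry);

            xa_erase(&demo_xa, 0);                  /* remove one entry */
            xa_destroy(&demo_xa);                   /* remove all entries;
                                                       xa_empty() is now true */
    }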
|