/kernel/linux/linux-5.10/lib/
list-test.c:
    348  struct list_head entries[3], *cur;  in list_test_list_cut_position() local
    353  list_add_tail(&entries[0], &list1);  in list_test_list_cut_position()
    354  list_add_tail(&entries[1], &list1);  in list_test_list_cut_position()
    355  list_add_tail(&entries[2], &list1);  in list_test_list_cut_position()
    357  /* before: [list1] -> entries[0] -> entries[1] -> entries[2] */  in list_test_list_cut_position()
    358  list_cut_position(&list2, &list1, &entries[1]);  in list_test_list_cut_position()
    359  /* after: [list2] -> entries[0] -> entries[1], [list1] -> entries[2] */  in list_test_list_cut_position()
    362  KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);  in list_test_list_cut_position()
    369  KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);  in list_test_list_cut_position()
    376  struct list_head entries[3], *cur;  in list_test_list_cut_before() local
    [all …]
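The hits above come from the KUnit test for list_cut_position(), which splits one circular doubly-linked list into two at a given entry. As a hedged illustration of the before/after layout described in the test's comments, here is a minimal userspace re-creation; the names mirror <linux/list.h>, but this sketch omits the empty-list and entry-equals-head cases the real helper handles.

    #include <assert.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_init(struct list_head *h) { h->next = h->prev = h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    /* Move the segment [head->next .. entry] from 'head' onto 'list'. */
    static void list_cut_position(struct list_head *list,
                                  struct list_head *head,
                                  struct list_head *entry)
    {
        list->next = head->next;     /* new list starts at old first node */
        list->next->prev = list;
        list->prev = entry;          /* ...and ends at 'entry' */
        head->next = entry->next;    /* old list resumes after 'entry' */
        head->next->prev = head;
        entry->next = list;
    }

    int main(void)
    {
        struct list_head list1, list2, entries[3];

        list_init(&list1);
        list_add_tail(&entries[0], &list1);
        list_add_tail(&entries[1], &list1);
        list_add_tail(&entries[2], &list1);

        /* before: [list1] -> entries[0] -> entries[1] -> entries[2] */
        list_cut_position(&list2, &list1, &entries[1]);
        /* after:  [list2] -> entries[0] -> entries[1], [list1] -> entries[2] */
        assert(list2.next == &entries[0] && list2.prev == &entries[1]);
        assert(list1.next == &entries[2] && list1.prev == &entries[2]);
        return 0;
    }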
stackdepot.c:
    65   unsigned long entries[1]; /* Variable-sized array of entries. */  member
    104  static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,  in depot_alloc_stack() argument
    107  int required_size = offsetof(struct stack_record, entries) +  in depot_alloc_stack()
    139  memcpy(stack->entries, entries, size * sizeof(unsigned long));  in depot_alloc_stack()
    155  static inline u32 hash_stack(unsigned long *entries, unsigned int size)  in hash_stack() argument
    157  return jhash2((u32 *)entries,  in hash_stack()
    177  /* Find a stack that is equal to the one stored in entries in the hash */
    179  unsigned long *entries, int size,  in find_stack() argument
    187  !stackdepot_memcmp(entries, found->entries, size))  in find_stack()
    194  * stack_depot_fetch - Fetch stack entries from a depot
    [all …]
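The stack depot stores each unique stack trace in a record with a trailing variable-length entries[] array, sized via offsetof(), and deduplicates records with a hash plus a memcmp(). A rough standalone sketch of that pattern follows; a toy FNV-1a hash stands in for jhash2(), and the depot's pool/slab management and bucket locking are omitted.

    #include <stdlib.h>
    #include <string.h>
    #include <stddef.h>

    struct stack_record {
        struct stack_record *next;   /* hash-bucket chaining */
        unsigned int hash;
        unsigned int size;           /* number of entries */
        unsigned long entries[];     /* C99 flexible array member */
    };

    /* Toy FNV-1a stand-in for jhash2(); the kernel mixes much better. */
    static unsigned int hash_stack(const unsigned long *entries, unsigned int size)
    {
        unsigned int h = 2166136261u;
        for (unsigned int i = 0; i < size; i++)
            h = (h ^ (unsigned int)entries[i]) * 16777619u;
        return h;
    }

    static struct stack_record *depot_alloc_stack(const unsigned long *entries,
                                                  unsigned int size,
                                                  unsigned int hash)
    {
        /* Size the allocation for the header plus 'size' trailing slots. */
        size_t required_size = offsetof(struct stack_record, entries) +
                               size * sizeof(unsigned long);
        struct stack_record *stack = malloc(required_size);

        if (!stack)
            return NULL;
        stack->next = NULL;
        stack->hash = hash;
        stack->size = size;
        memcpy(stack->entries, entries, size * sizeof(unsigned long));
        return stack;
    }

    /* Find a previously stored stack equal to 'entries' in one bucket. */
    static struct stack_record *find_stack(struct stack_record *bucket,
                                           const unsigned long *entries,
                                           unsigned int size, unsigned int hash)
    {
        for (struct stack_record *found = bucket; found; found = found->next)
            if (found->hash == hash && found->size == size &&
                !memcmp(entries, found->entries, size * sizeof(unsigned long)))
                return found;
        return NULL;
    }

    int main(void)
    {
        unsigned long trace[3] = { 0x1000, 0x2000, 0x3000 };
        unsigned int h = hash_stack(trace, 3);
        struct stack_record *rec = depot_alloc_stack(trace, 3, h);

        if (!rec)
            return 1;
        return find_stack(rec, trace, 3, h) == rec ? 0 : 1;
    }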
test_rhashtable.c:
    31   MODULE_PARM_DESC(parm_entries, "Number of entries to add (default: 50000)");
    73   unsigned int entries;  member
    138  unsigned int entries)  in test_rht_lookup() argument
    142  for (i = 0; i < entries; i++) {  in test_rht_lookup()
    175  static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)  in test_bucket_stats() argument
    201  pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d, table-jumps=%u\n",  in test_bucket_stats()
    202  total, atomic_read(&ht->nelems), entries, chain_len);  in test_bucket_stats()
    204  if (total != atomic_read(&ht->nelems) || total != entries)  in test_bucket_stats()
    209  unsigned int entries)  in test_rhashtable() argument
    218  * Insert entries into table with all keys even numbers  in test_rhashtable()
    [all …]
/kernel/linux/linux-4.19/tools/perf/tests/
fdarray.c:
    14   fda->entries[fd].fd = fda->nr - fd;  in fdarray__init_revents()
    15   fda->entries[fd].revents = revents;  in fdarray__init_revents()
    57   fda->entries[2].revents = POLLIN;  in test__fdarray__filter()
    58   expected_fd[0] = fda->entries[2].fd;  in test__fdarray__filter()
    60   pr_debug("\nfiltering all but fda->entries[2]:");  in test__fdarray__filter()
    69   if (fda->entries[0].fd != expected_fd[0]) {  in test__fdarray__filter()
    70   pr_debug("\nfda->entries[0].fd=%d != %d\n",  in test__fdarray__filter()
    71   fda->entries[0].fd, expected_fd[0]);  in test__fdarray__filter()
    76   fda->entries[0].revents = POLLIN;  in test__fdarray__filter()
    77   expected_fd[0] = fda->entries[0].fd;  in test__fdarray__filter()
    [all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/pm/powerplay/hwmgr/
vega10_pptable.h:
    131  UCHAR ucNumEntries; /* Number of entries. */
    132  ATOM_Vega10_State states[1]; /* Dynamically allocate entries. */
    165  UCHAR ucNumEntries; /* Number of entries. */
    166  ATOM_Vega10_GFXCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */  member
    171  UCHAR ucNumEntries; /* Number of entries. */
    172  ATOM_Vega10_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */  member
    177  UCHAR ucNumEntries; /* Number of entries. */
    178  ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */  member
    183  UCHAR ucNumEntries; /* Number of entries. */
    184  ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */  member
    [all …]
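These headers use the pre-C99 one-element-array idiom: entries[1] is declared in the struct, the real count lives in ucNumEntries, and the remaining records are allocated past the end. A hedged sketch of how such a table is sized and allocated (the record layout and all names here are simplified stand-ins, not the actual ATOM structures):

    #include <stdlib.h>

    typedef unsigned char UCHAR;
    typedef unsigned short USHORT;

    /* Simplified stand-in for one Vega10-style dependency record. */
    typedef struct { USHORT usClk; USHORT usVddc; } Clk_Dependency_Record;

    /* Pre-C99 idiom: declare one element, allocate space for N. */
    typedef struct {
        UCHAR ucNumEntries;                 /* Number of entries. */
        Clk_Dependency_Record entries[1];   /* Dynamically allocated entries. */
    } Clk_Dependency_Table;

    static Clk_Dependency_Table *alloc_table(UCHAR n)
    {
        /* sizeof(*t) already covers entries[0]; add space for n - 1 more. */
        Clk_Dependency_Table *t = calloc(1, sizeof(*t) +
                                         (n - 1) * sizeof(t->entries[0]));
        if (t)
            t->ucNumEntries = n;
        return t;
    }

    int main(void)
    {
        Clk_Dependency_Table *t = alloc_table(8);

        if (!t)
            return 1;
        t->entries[7].usClk = 1500;   /* legal: the buffer covers 8 records */
        free(t);
        return 0;
    }

A C99 flexible array member (entries[]) expresses the same layout without the n - 1 adjustment and without the out-of-bounds indexing the one-element idiom technically relies on, which is why newer kernel code prefers it.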
smu_helper.c:
    224  vvalue = vol_table->entries[i].value;  in phm_trim_voltage_table()
    228  if (vvalue == table->entries[j].value) {  in phm_trim_voltage_table()
    235  table->entries[table->count].value = vvalue;  in phm_trim_voltage_table()
    236  table->entries[table->count].smio_low =  in phm_trim_voltage_table()
    237  vol_table->entries[i].smio_low;  in phm_trim_voltage_table()
    265  vol_table->entries[i].value = dep_table->entries[i].mvdd;  in phm_get_svi2_mvdd_voltage_table()
    266  vol_table->entries[i].smio_low = 0;  in phm_get_svi2_mvdd_voltage_table()
    293  vol_table->entries[i].value = dep_table->entries[i].vddci;  in phm_get_svi2_vddci_voltage_table()
    294  vol_table->entries[i].smio_low = 0;  in phm_get_svi2_vddci_voltage_table()
    321  vol_table->entries[i].value = lookup_table->entries[i].us_vdd;  in phm_get_svi2_vdd_voltage_table()
    [all …]
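phm_trim_voltage_table() above copies only the first occurrence of each voltage value into a destination table, preserving order. A self-contained approximation of that loop (struct layout simplified and bounds fixed; the real tables are allocated from the dependency tables):

    #include <stdio.h>

    struct voltage_entry { unsigned int value; unsigned int smio_low; };
    struct voltage_table {
        unsigned int count;
        struct voltage_entry entries[32];
    };

    /* Copy only the first occurrence of each voltage value, in order. */
    static void trim_voltage_table(struct voltage_table *dst,
                                   const struct voltage_table *src)
    {
        dst->count = 0;
        for (unsigned int i = 0; i < src->count; i++) {
            unsigned int vvalue = src->entries[i].value;
            int found = 0;

            for (unsigned int j = 0; j < dst->count; j++)
                if (vvalue == dst->entries[j].value) {
                    found = 1;
                    break;
                }
            if (!found) {
                dst->entries[dst->count].value = vvalue;
                dst->entries[dst->count].smio_low = src->entries[i].smio_low;
                dst->count++;
            }
        }
    }

    int main(void)
    {
        struct voltage_table src = { 4, { {900,0}, {950,1}, {900,0}, {1000,2} } };
        struct voltage_table dst;

        trim_voltage_table(&dst, &src);
        printf("%u unique voltages\n", dst.count);   /* prints 3 */
        return 0;
    }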
/kernel/linux/linux-4.19/drivers/gpu/drm/amd/powerplay/hwmgr/
vega10_pptable.h:
    131  UCHAR ucNumEntries; /* Number of entries. */
    132  ATOM_Vega10_State states[1]; /* Dynamically allocate entries. */
    165  UCHAR ucNumEntries; /* Number of entries. */
    166  ATOM_Vega10_GFXCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */  member
    171  UCHAR ucNumEntries; /* Number of entries. */
    172  ATOM_Vega10_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */  member
    177  UCHAR ucNumEntries; /* Number of entries. */
    178  ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */  member
    183  UCHAR ucNumEntries; /* Number of entries. */
    184  ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */  member
    [all …]
smu_helper.c:
    176  vvalue = vol_table->entries[i].value;  in phm_trim_voltage_table()
    180  if (vvalue == table->entries[j].value) {  in phm_trim_voltage_table()
    187  table->entries[table->count].value = vvalue;  in phm_trim_voltage_table()
    188  table->entries[table->count].smio_low =  in phm_trim_voltage_table()
    189  vol_table->entries[i].smio_low;  in phm_trim_voltage_table()
    217  vol_table->entries[i].value = dep_table->entries[i].mvdd;  in phm_get_svi2_mvdd_voltage_table()
    218  vol_table->entries[i].smio_low = 0;  in phm_get_svi2_mvdd_voltage_table()
    245  vol_table->entries[i].value = dep_table->entries[i].vddci;  in phm_get_svi2_vddci_voltage_table()
    246  vol_table->entries[i].smio_low = 0;  in phm_get_svi2_vddci_voltage_table()
    273  vol_table->entries[i].value = lookup_table->entries[i].us_vdd;  in phm_get_svi2_vdd_voltage_table()
    [all …]
/kernel/linux/linux-4.19/arch/x86/kernel/cpu/
intel.c:
    281  * the TLB when any changes are made to any of the page table entries.  in early_init_intel()
    845  { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
    875  if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
    876  tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
    877  if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
    878  tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
    881  if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
    882  tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
    883  if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
    884  tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
    [all …]
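intel_tlb_lookup() decodes a CPUID descriptor byte against a table of TLB properties and keeps the largest entry count seen for each TLB type. A standalone sketch of that descriptor-table pattern (the descriptor IDs and sizes below are invented for illustration, not Intel's actual values):

    #include <stdio.h>

    enum { TLB_INST_4K, TLB_INST_2M_4M, TLB_TYPES };

    struct tlb_desc { unsigned char id; int type; unsigned short entries; };

    /* Descriptor table in the style of intel_tlb_table[]; values invented. */
    static const struct tlb_desc tlb_table[] = {
        { 0x01, TLB_INST_4K,    32 },
        { 0xb1, TLB_INST_2M_4M,  8 },
    };

    static unsigned short tlb_sizes[TLB_TYPES];

    /* Keep the largest entry count reported for each TLB type. */
    static void tlb_lookup(unsigned char id)
    {
        for (size_t k = 0; k < sizeof(tlb_table) / sizeof(tlb_table[0]); k++)
            if (tlb_table[k].id == id &&
                tlb_sizes[tlb_table[k].type] < tlb_table[k].entries)
                tlb_sizes[tlb_table[k].type] = tlb_table[k].entries;
    }

    int main(void)
    {
        tlb_lookup(0x01);
        tlb_lookup(0xb1);
        printf("4K instruction TLB: %u entries\n", tlb_sizes[TLB_INST_4K]);
        return 0;
    }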
/kernel/linux/linux-5.10/kernel/
stacktrace.c:
    18  * stack_trace_print - Print the entries in the stack trace
    19  * @entries: Pointer to storage array
    20  * @nr_entries: Number of entries in the storage array
    23  void stack_trace_print(const unsigned long *entries, unsigned int nr_entries,  in stack_trace_print() argument
    28  if (WARN_ON(!entries))  in stack_trace_print()
    32  printk("%*c%pS\n", 1 + spaces, ' ', (void *)entries[i]);  in stack_trace_print()
    37  * stack_trace_snprint - Print the entries in the stack trace into a buffer
    40  * @entries: Pointer to storage array
    41  * @nr_entries: Number of entries in the storage array
    46  int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,  in stack_trace_snprint() argument
    [all …]
/kernel/linux/linux-5.10/arch/x86/kernel/cpu/
intel.c:
    299  * the TLB when any changes are made to any of the page table entries.  in early_init_intel()
    793  { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
    823  if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
    824  tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
    825  if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
    826  tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
    829  if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
    830  tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
    831  if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
    832  tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
    [all …]
/kernel/linux/linux-5.10/tools/lib/api/fd/
array.c:
    15   fda->entries = NULL;  in fdarray__init()
    27   struct pollfd *entries = realloc(fda->entries, size);  in fdarray__grow() local
    29   if (entries == NULL)  in fdarray__grow()
    34   free(entries);  in fdarray__grow()
    38   memset(&entries[fda->nr_alloc], 0, sizeof(struct pollfd) * nr);  in fdarray__grow()
    42   fda->entries = entries;  in fdarray__grow()
    65   free(fda->entries);  in fdarray__exit()
    84   fda->entries[fda->nr].fd = fd;  in fdarray__add()
    85   fda->entries[fda->nr].events = revents;  in fdarray__add()
    101  if (!fda->entries[fd].events)  in fdarray__filter()
    [all …]
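fdarray__grow() reallocates the pollfd array and zeroes only the newly added tail, so stale events/revents never leak into fresh slots. A userspace sketch of that grow step; the tree's version also grows a parallel private array (whose failure path explains the free(entries) hit above), which is omitted here:

    #include <poll.h>
    #include <stdlib.h>
    #include <string.h>

    struct fdarray {
        int nr, nr_alloc;
        struct pollfd *entries;
    };

    /* Grow by 'nr' slots; zero the new tail so events/revents start clean. */
    static int fdarray_grow(struct fdarray *fda, int nr)
    {
        size_t size = (size_t)(fda->nr_alloc + nr) * sizeof(struct pollfd);
        struct pollfd *entries = realloc(fda->entries, size);

        if (entries == NULL)
            return -1;

        memset(&entries[fda->nr_alloc], 0, sizeof(struct pollfd) * nr);
        fda->nr_alloc += nr;
        fda->entries = entries;
        return 0;
    }

    int main(void)
    {
        struct fdarray fda = { 0, 0, NULL };

        if (fdarray_grow(&fda, 4))
            return 1;
        fda.entries[fda.nr].fd = 0;           /* stdin */
        fda.entries[fda.nr].events = POLLIN;
        fda.nr++;
        free(fda.entries);
        return 0;
    }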
/kernel/linux/linux-4.19/tools/perf/util/
mem2node.c:
    47   struct phys_entry *entries, *tmp_entries;  in mem2node__init() local
    59   entries = zalloc(sizeof(*entries) * max);  in mem2node__init()
    60   if (!entries)  in mem2node__init()
    81   struct phys_entry *prev = &entries[j - 1];  in mem2node__init()
    90   phys_entry__init(&entries[j++], start, bsize, n->node);  in mem2node__init()
    94   /* Cut unused entries, due to merging. */  in mem2node__init()
    95   tmp_entries = realloc(entries, sizeof(*entries) * j);  in mem2node__init()
    97   entries = tmp_entries;  in mem2node__init()
    101  entries[i].node, entries[i].start, entries[i].end);  in mem2node__init()
    103  phys_entry__insert(&entries[i], &map->root);  in mem2node__init()
    [all …]
rb_resort.h:
    6    * a new sort criteria, that must be present in the entries of the source
    15   * fields to be present in each of the entries in the new, sorted, rb_tree.
    18   * pre-calculating them from multiple entries in the original 'entry' from
    19   * the rb_tree used as a source for the entries to be sorted:
    72   struct rb_root entries; \
    79   struct rb_node **p = &sorted->entries.rb_node, *parent = NULL; \
    88   rb_insert_color(sorted_nd, &sorted->entries); \
    92   struct rb_root *entries) \
    96   for (nd = rb_first(entries); nd; nd = rb_next(nd)) { \
    103  static struct __name##_sorted *__name##_sorted__new(struct rb_root *entries, \
    [all …]
/kernel/linux/linux-5.10/arch/powerpc/mm/book3s64/
iommu_api.c:
    34   u64 entries; /* number of entries in hpas/hpages[] */  member
    57   unsigned long entries, unsigned long dev_hpa,  in mm_iommu_do_alloc() argument
    66   ret = account_locked_vm(mm, entries, true);  in mm_iommu_do_alloc()
    70   locked_entries = entries;  in mm_iommu_do_alloc()
    80   mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));  in mm_iommu_do_alloc()
    88   * we use @ua and @entries natural alignment to allow IOMMU pages  in mm_iommu_do_alloc()
    91   mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));  in mm_iommu_do_alloc()
    92   mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));  in mm_iommu_do_alloc()
    102  chunk = min(chunk, entries);  in mm_iommu_do_alloc()
    103  for (entry = 0; entry < entries; entry += chunk) {  in mm_iommu_do_alloc()
    [all …]
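The mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT)) trick derives the largest usable IOMMU page size from natural alignment: the lowest set bit of the base address and of the total byte length each bound the page size, and OR-ing them lets one __ffs() take the smaller bound. A small demonstration, using GCC/Clang's __builtin_ctzl() in place of the kernel's __ffs() and made-up values:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Lowest set bit index, like the kernel's __ffs() (undefined for 0). */
    static unsigned int lowest_bit(unsigned long v)
    {
        return (unsigned int)__builtin_ctzl(v);
    }

    int main(void)
    {
        unsigned long ua = 0x40000000UL;    /* 1 GiB-aligned user address */
        unsigned long entries = 0x4000;     /* 16384 4K pages = 64 MiB */

        /* The region can only be mapped with pages no larger than what
         * the alignment of its start and its total size both allow. */
        unsigned int pageshift = lowest_bit(ua | (entries << PAGE_SHIFT));

        printf("max page shift: %u (%lu MiB pages)\n",
               pageshift, (1UL << pageshift) >> 20);   /* 26, 64 MiB */
        return 0;
    }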
/kernel/linux/linux-5.10/kernel/events/
callchain.c:
    50  struct callchain_cpus_entries *entries;  in release_callchain_buffers_rcu() local
    53  entries = container_of(head, struct callchain_cpus_entries, rcu_head);  in release_callchain_buffers_rcu()
    56  kfree(entries->cpu_entries[cpu]);  in release_callchain_buffers_rcu()
    58  kfree(entries);  in release_callchain_buffers_rcu()
    63  struct callchain_cpus_entries *entries;  in release_callchain_buffers() local
    65  entries = callchain_cpus_entries;  in release_callchain_buffers()
    67  call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);  in release_callchain_buffers()
    74  struct callchain_cpus_entries *entries;  in alloc_callchain_buffers() local
    83  entries = kzalloc(size, GFP_KERNEL);  in alloc_callchain_buffers()
    84  if (!entries)  in alloc_callchain_buffers()
    [all …]
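The release path recovers the outer callchain_cpus_entries from its embedded rcu_head with container_of(), then frees each per-CPU buffer and the struct itself. A userspace sketch of that pattern; the RCU grace period is elided (the callback is invoked directly rather than via call_rcu()), and nr_cpus is passed in where the kernel reads a global:

    #include <stddef.h>
    #include <stdlib.h>

    struct rcu_head { void (*func)(struct rcu_head *); };

    struct callchain_cpus_entries {
        struct rcu_head rcu_head;
        void *cpu_entries[];        /* one buffer per CPU */
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* RCU-style callback: recover the outer struct from its embedded
     * rcu_head, then free the per-CPU buffers and the struct itself. */
    static void release_entries(struct rcu_head *head, int nr_cpus)
    {
        struct callchain_cpus_entries *entries =
            container_of(head, struct callchain_cpus_entries, rcu_head);

        for (int cpu = 0; cpu < nr_cpus; cpu++)
            free(entries->cpu_entries[cpu]);
        free(entries);
    }

    int main(void)
    {
        int nr_cpus = 4;
        struct callchain_cpus_entries *entries =
            calloc(1, offsetof(struct callchain_cpus_entries, cpu_entries) +
                      nr_cpus * sizeof(void *));

        if (!entries)
            return 1;
        for (int cpu = 0; cpu < nr_cpus; cpu++)
            entries->cpu_entries[cpu] = malloc(256);
        release_entries(&entries->rcu_head, nr_cpus);
        return 0;
    }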
/kernel/linux/linux-4.19/kernel/events/
callchain.c:
    51  struct callchain_cpus_entries *entries;  in release_callchain_buffers_rcu() local
    54  entries = container_of(head, struct callchain_cpus_entries, rcu_head);  in release_callchain_buffers_rcu()
    57  kfree(entries->cpu_entries[cpu]);  in release_callchain_buffers_rcu()
    59  kfree(entries);  in release_callchain_buffers_rcu()
    64  struct callchain_cpus_entries *entries;  in release_callchain_buffers() local
    66  entries = callchain_cpus_entries;  in release_callchain_buffers()
    68  call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);  in release_callchain_buffers()
    75  struct callchain_cpus_entries *entries;  in alloc_callchain_buffers() local
    84  entries = kzalloc(size, GFP_KERNEL);  in alloc_callchain_buffers()
    85  if (!entries)  in alloc_callchain_buffers()
    [all …]
/kernel/linux/linux-5.10/tools/perf/util/
mem2node.c:
    50   struct phys_entry *entries, *tmp_entries;  in mem2node__init() local
    62   entries = zalloc(sizeof(*entries) * max);  in mem2node__init()
    63   if (!entries)  in mem2node__init()
    84   struct phys_entry *prev = &entries[j - 1];  in mem2node__init()
    93   phys_entry__init(&entries[j++], start, bsize, n->node);  in mem2node__init()
    97   /* Cut unused entries, due to merging. */  in mem2node__init()
    98   tmp_entries = realloc(entries, sizeof(*entries) * j);  in mem2node__init()
    100  entries = tmp_entries;  in mem2node__init()
    104  entries[i].node, entries[i].start, entries[i].end);  in mem2node__init()
    106  phys_entry__insert(&entries[i], &map->root);  in mem2node__init()
    [all …]
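After merging adjacent ranges, mem2node__init() shrinks the over-allocated array with realloc() and, notably, keeps the original block if the shrink fails, since the old, larger allocation is still valid. Sketched below (assuming a non-zero final count; the helper name is hypothetical):

    #include <stdlib.h>

    struct phys_entry { unsigned long start, end; unsigned int node; };

    /* Shrink 'entries' down to 'used' slots. A failed realloc() is not
     * fatal here: the original, larger block is still valid, so keep it. */
    static struct phys_entry *shrink_entries(struct phys_entry *entries,
                                             size_t used)
    {
        struct phys_entry *tmp = realloc(entries, sizeof(*entries) * used);

        return tmp ? tmp : entries;
    }

    int main(void)
    {
        struct phys_entry *e = calloc(128, sizeof(*e));

        if (!e)
            return 1;
        e = shrink_entries(e, 16);   /* merged down to 16 live ranges */
        free(e);
        return 0;
    }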
rb_resort.h:
    6    * a new sort criteria, that must be present in the entries of the source
    15   * fields to be present in each of the entries in the new, sorted, rb_tree.
    18   * pre-calculating them from multiple entries in the original 'entry' from
    19   * the rb_tree used as a source for the entries to be sorted:
    72   struct rb_root entries; \
    79   struct rb_node **p = &sorted->entries.rb_node, *parent = NULL; \
    88   rb_insert_color(sorted_nd, &sorted->entries); \
    92   struct rb_root *entries) \
    96   for (nd = rb_first(entries); nd; nd = rb_next(nd)) { \
    103  static struct __name##_sorted *__name##_sorted__new(struct rb_root *entries, \
    [all …]
/kernel/linux/linux-4.19/tools/lib/api/fd/
array.c:
    15   fda->entries = NULL;  in fdarray__init()
    27   struct pollfd *entries = realloc(fda->entries, size);  in fdarray__grow() local
    29   if (entries == NULL)  in fdarray__grow()
    34   free(entries);  in fdarray__grow()
    39   fda->entries = entries;  in fdarray__grow()
    62   free(fda->entries);  in fdarray__exit()
    81   fda->entries[fda->nr].fd = fd;  in fdarray__add()
    82   fda->entries[fda->nr].events = revents;  in fdarray__add()
    97   if (fda->entries[fd].revents & revents) {  in fdarray__filter()
    105  fda->entries[nr] = fda->entries[fd];  in fdarray__filter()
    [all …]
/kernel/linux/linux-5.10/include/linux/
llist.h:
    35   * The list entries deleted via llist_del_all can be traversed with
    37   * entries can not be traversed safely before deleted from the list.
    38   * The order of deleted entries is from the newest to the oldest added
    101  * llist_for_each - iterate over some deleted entries of a lock-less list
    103  * @node: the first entry of deleted list entries
    105  * In general, some entries of the lock-less list can be traversed
    109  * If being used on entries deleted from lock-less list directly, the
    118  * llist_for_each_safe - iterate over some deleted entries of a lock-less list
    122  * @node: the first entry of deleted list entries
    124  * In general, some entries of the lock-less list can be traversed
    [all …]
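The comments above describe the llist contract: lock-less add, del_all detaching the whole list in one shot, and traversal of the detached entries from newest to oldest. A hedged C11-atomics approximation of the two core operations; this is not the kernel implementation, and it omits llist's documentation of which combinations of concurrent operations need no lock:

    #include <stdatomic.h>
    #include <stdio.h>

    struct llist_node { struct llist_node *next; };
    struct llist_head { _Atomic(struct llist_node *) first; };

    /* Lock-less push: CAS the new node in as the list head. */
    static void llist_add(struct llist_node *node, struct llist_head *head)
    {
        struct llist_node *first = atomic_load(&head->first);

        do {
            node->next = first;
        } while (!atomic_compare_exchange_weak(&head->first, &first, node));
    }

    /* Detach the whole list; entries come back newest-to-oldest. */
    static struct llist_node *llist_del_all(struct llist_head *head)
    {
        return atomic_exchange(&head->first, NULL);
    }

    int main(void)
    {
        struct llist_head head = { NULL };
        struct llist_node nodes[3];

        for (int i = 0; i < 3; i++)
            llist_add(&nodes[i], &head);

        /* llist_for_each-style traversal over the deleted entries. */
        for (struct llist_node *n = llist_del_all(&head); n; n = n->next)
            printf("node %ld\n", (long)(n - nodes));   /* prints 2, 1, 0 */
        return 0;
    }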
/kernel/linux/linux-5.10/drivers/net/ethernet/netronome/nfp/nfpcore/
nfp_nsp_eth.c:
    247  union eth_table_entry *entries;  in __nfp_eth_read_ports() local
    251  entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);  in __nfp_eth_read_ports()
    252  if (!entries)  in __nfp_eth_read_ports()
    255  ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);  in __nfp_eth_read_ports()
    262  if (entries[i].port & NSP_ETH_PORT_LANES_MASK)  in __nfp_eth_read_ports()
    270  nfp_err(cpp, "table entry count reported (%d) does not match entries present (%d)\n",  in __nfp_eth_read_ports()
    281  if (entries[i].port & NSP_ETH_PORT_LANES_MASK)  in __nfp_eth_read_ports()
    282  nfp_eth_port_translate(nsp, &entries[i], i,  in __nfp_eth_read_ports()
    289  kfree(entries);  in __nfp_eth_read_ports()
    294  kfree(entries);  in __nfp_eth_read_ports()
    [all …]
/kernel/linux/linux-4.19/arch/x86/kernel/
e820.c:
    27   * the first 128 E820 memory entries in boot_params.e820_table and the remaining
    28   * (if any) entries of the SETUP_E820_EXT nodes. We use this to:
    81   struct e820_entry *entry = &e820_table->entries[i];  in e820__mapped_any()
    105  struct e820_entry *entry = &e820_table->entries[i];  in __e820__mapped_all()
    157  if (x >= ARRAY_SIZE(table->entries)) {  in __e820__range_add()
    158  pr_err("too many entries; ignoring [mem %#010llx-%#010llx]\n",  in __e820__range_add()
    163  table->entries[x].addr = start;  in __e820__range_add()
    164  table->entries[x].size = size;  in __e820__range_add()
    165  table->entries[x].type = type;  in __e820__range_add()
    196  e820_table->entries[i].addr,  in e820__print_table()
    [all …]
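__e820__range_add() appends to a fixed-size table, logging and dropping a range once ARRAY_SIZE(table->entries) is reached rather than overflowing. A standalone sketch of that bounds-checked append (the table size and the type encoding are simplified):

    #include <stdio.h>

    #define E820_MAX_ENTRIES 128

    struct e820_entry { unsigned long long addr, size; int type; };
    struct e820_table {
        unsigned int nr_entries;
        struct e820_entry entries[E820_MAX_ENTRIES];
    };

    /* Append a range, refusing (with a message) once the table is full. */
    static void e820_range_add(struct e820_table *table,
                               unsigned long long start,
                               unsigned long long size, int type)
    {
        unsigned int x = table->nr_entries;

        if (x >= E820_MAX_ENTRIES) {
            fprintf(stderr, "too many entries; ignoring [mem %#llx-%#llx]\n",
                    start, start + size - 1);
            return;
        }
        table->entries[x].addr = start;
        table->entries[x].size = size;
        table->entries[x].type = type;
        table->nr_entries++;
    }

    int main(void)
    {
        static struct e820_table t;

        e820_range_add(&t, 0x100000, 0x7ff00000, 1 /* usable RAM */);
        printf("%u entries\n", t.nr_entries);
        return 0;
    }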
/kernel/linux/linux-5.10/tools/perf/tests/
fdarray.c:
    14   fda->entries[fd].fd = fda->nr - fd;  in fdarray__init_revents()
    15   fda->entries[fd].events = revents;  in fdarray__init_revents()
    16   fda->entries[fd].revents = revents;  in fdarray__init_revents()
    58   fda->entries[2].revents = POLLIN;  in test__fdarray__filter()
    60   pr_debug("\nfiltering all but fda->entries[2]:");  in test__fdarray__filter()
    70   fda->entries[0].revents = POLLIN;  in test__fdarray__filter()
    71   fda->entries[3].revents = POLLIN;  in test__fdarray__filter()
    73   pr_debug("\nfiltering all but (fda->entries[0], fda->entries[3]):");  in test__fdarray__filter()
    103  if (fda->entries[_idx].fd != _fd) { \  in test__fdarray__add()
    104  pr_debug("\n%d: fda->entries[%d](%d) != %d!", \  in test__fdarray__add()
    [all …]
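The filter tests mark some entries' revents and expect the survivors to be compacted to the front of the array. A minimal sketch of the compaction semantics fdarray__filter() implements (entries matching the mask are dropped; the entry-destructor callback and the parallel private array of the real function are omitted):

    #include <poll.h>
    #include <stdio.h>

    /* Drop entries whose revents match the mask; compact the survivors to
     * the front and return the new count. */
    static int fdarray_filter(struct pollfd *entries, int nr, short revents)
    {
        int fd, kept = 0;

        for (fd = 0; fd < nr; fd++) {
            if (entries[fd].revents & revents)
                continue;                      /* filtered out */
            if (kept != fd)
                entries[kept] = entries[fd];
            kept++;
        }
        return kept;
    }

    int main(void)
    {
        struct pollfd fda[4] = {
            { .fd = 10, .revents = POLLHUP }, { .fd = 11, .revents = POLLHUP },
            { .fd = 12, .revents = POLLIN  }, { .fd = 13, .revents = POLLHUP },
        };
        int nr = fdarray_filter(fda, 4, POLLHUP);

        /* Mirrors "filtering all but fda->entries[2]": kept 1, first fd 12. */
        printf("kept %d, first fd %d\n", nr, fda[0].fd);
        return 0;
    }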
/kernel/linux/linux-4.19/lib/
test_rhashtable.c:
    34   MODULE_PARM_DESC(parm_entries, "Number of entries to add (default: 50000)");
    76   unsigned int entries;  member
    141  unsigned int entries)  in test_rht_lookup() argument
    145  for (i = 0; i < entries; i++) {  in test_rht_lookup()
    178  static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)  in test_bucket_stats() argument
    209  pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d, table-jumps=%u\n",  in test_bucket_stats()
    210  total, atomic_read(&ht->nelems), entries, chain_len);  in test_bucket_stats()
    212  if (total != atomic_read(&ht->nelems) || total != entries)  in test_bucket_stats()
    217  unsigned int entries)  in test_rhashtable() argument
    226  * Insert entries into table with all keys even numbers  in test_rhashtable()
    [all …]