Searched full:entries (Results 1 – 25 of 7124) sorted by relevance

/kernel/linux/linux-6.6/drivers/net/ethernet/engleder/
tsnep_selftests.c 357 qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL); in tsnep_test_taprio()
361 qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES; in tsnep_test_taprio()
367 qopt->entries[0].gate_mask = 0x02; in tsnep_test_taprio()
368 qopt->entries[0].interval = 200000; in tsnep_test_taprio()
369 qopt->entries[1].gate_mask = 0x03; in tsnep_test_taprio()
370 qopt->entries[1].interval = 800000; in tsnep_test_taprio()
371 qopt->entries[2].gate_mask = 0x07; in tsnep_test_taprio()
372 qopt->entries[2].interval = 240000; in tsnep_test_taprio()
373 qopt->entries[3].gate_mask = 0x01; in tsnep_test_taprio()
374 qopt->entries[3].interval = 80000; in tsnep_test_taprio()
[all …]
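
The tsnep selftest above uses the common kernel idiom of sizing a structure with a trailing flexible array of entries via struct_size(). A minimal standalone sketch of that allocation pattern follows; the struct and function names (gate_sched, gate_sched_alloc) are illustrative assumptions, not taken from the driver.

    #include <linux/slab.h>
    #include <linux/overflow.h>
    #include <linux/types.h>

    /* Hypothetical structure ending in a flexible array of entries. */
    struct gate_sched {
            unsigned int num_entries;
            struct {
                    u32 gate_mask;
                    u32 interval;
            } entries[];
    };

    static struct gate_sched *gate_sched_alloc(unsigned int n)
    {
            /* struct_size() computes sizeof(*s) + n * sizeof(s->entries[0])
             * with overflow checking before the allocation. */
            struct gate_sched *s = kzalloc(struct_size(s, entries, n), GFP_KERNEL);

            if (s)
                    s->num_entries = n;
            return s;
    }
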
/kernel/linux/linux-6.6/tools/testing/selftests/netfilter/
nft_audit.sh 28 lsplit='s/^\(.*\) entries=\([^ ]*\) \(.*\)$/pfx="\1"\nval="\2"\nsfx="\3"/'
38 echo "$tpfx entries=$sum $tsfx"
44 echo "$tpfx entries=$sum $tsfx"
65 "table=$table family=2 entries=1 op=nft_register_table"
68 "table=$table family=2 entries=1 op=nft_register_chain"
71 "table=$table family=2 entries=2 op=nft_register_chain"
76 "table=$table family=2 entries=1 op=nft_register_rule"
79 "table=$table family=2 entries=2 op=nft_register_rule"
90 "table=$table family=2 entries=6 op=nft_register_rule"
97 'table=t2 family=2 entries=500 op=nft_register_rule'
[all …]
/kernel/linux/linux-6.6/lib/
hashtable_test.c 90 /* Both entries should have been visited exactly once. */ in hashtable_test_hash_add()
125 struct hashtable_test_entry entries[3]; in hashtable_test_hash_for_each() local
130 /* Add three entries to the hashtable. */ in hashtable_test_hash_for_each()
132 entries[i].key = i; in hashtable_test_hash_for_each()
133 entries[i].data = i + 10; in hashtable_test_hash_for_each()
134 entries[i].visited = 0; in hashtable_test_hash_for_each()
135 hash_add(hash, &entries[i].node, entries[i].key); in hashtable_test_hash_for_each()
149 KUNIT_EXPECT_EQ(test, entries[j].visited, 1); in hashtable_test_hash_for_each()
154 struct hashtable_test_entry entries[3]; in hashtable_test_hash_for_each_safe() local
160 /* Add three entries to the hashtable. */ in hashtable_test_hash_for_each_safe()
[all …]
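
The hashtable_test.c hits above exercise the generic hashtable helpers from <linux/hashtable.h>. Below is a minimal sketch of the same add-then-iterate pattern; the entry type and names (my_entry, my_hash, add_and_visit) are illustrative assumptions.

    #include <linux/hashtable.h>
    #include <linux/types.h>

    struct my_entry {
            int key;
            int data;
            struct hlist_node node;   /* links the entry into its bucket */
    };

    static DEFINE_HASHTABLE(my_hash, 3);   /* 2^3 = 8 buckets */

    static void add_and_visit(struct my_entry *e, int n)
    {
            int i, bkt;
            struct my_entry *cur;

            for (i = 0; i < n; i++)
                    hash_add(my_hash, &e[i].node, e[i].key);

            /* Walk every bucket and every entry, as the test's
             * hash_for_each() loop does. */
            hash_for_each(my_hash, bkt, cur, node)
                    cur->data++;
    }
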
stackdepot.c 63 unsigned long entries[]; /* Variable-sized array of frames */ member
126 unsigned long entries = 0; in stack_depot_early_init() local
145 * If stack_bucket_number_order is not set, leave entries as 0 to rely in stack_depot_early_init()
149 entries = 1UL << stack_bucket_number_order; in stack_depot_early_init()
153 entries, in stack_depot_early_init()
173 unsigned long entries; in stack_depot_init() local
186 entries = 1UL << stack_bucket_number_order; in stack_depot_init()
190 entries = nr_free_buffer_pages(); in stack_depot_init()
191 entries = roundup_pow_of_two(entries); in stack_depot_init()
194 entries >>= (scale - PAGE_SHIFT); in stack_depot_init()
[all …]
list-test.c 387 struct list_head entries[3], *cur; in list_test_list_cut_position() local
392 list_add_tail(&entries[0], &list1); in list_test_list_cut_position()
393 list_add_tail(&entries[1], &list1); in list_test_list_cut_position()
394 list_add_tail(&entries[2], &list1); in list_test_list_cut_position()
396 /* before: [list1] -> entries[0] -> entries[1] -> entries[2] */ in list_test_list_cut_position()
397 list_cut_position(&list2, &list1, &entries[1]); in list_test_list_cut_position()
398 /* after: [list2] -> entries[0] -> entries[1], [list1] -> entries[2] */ in list_test_list_cut_position()
401 KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]); in list_test_list_cut_position()
408 KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]); in list_test_list_cut_position()
415 struct list_head entries[3], *cur; in list_test_list_cut_before() local
[all …]
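
The list-test.c excerpts (this one and the linux-5.10 copy below) check list_cut_position(), which moves the initial part of one list, up to and including the given entry, onto another list. A minimal sketch mirroring the test's before/after comments:

    #include <linux/list.h>

    static void list_split_example(void)
    {
            LIST_HEAD(list1);
            LIST_HEAD(list2);
            struct list_head entries[3];
            int i;

            for (i = 0; i < 3; i++)
                    list_add_tail(&entries[i], &list1);

            /* before: list1 -> entries[0] -> entries[1] -> entries[2] */
            list_cut_position(&list2, &list1, &entries[1]);
            /* after:  list2 -> entries[0] -> entries[1], list1 -> entries[2] */
    }
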
/kernel/linux/linux-5.10/lib/
list-test.c 348 struct list_head entries[3], *cur; in list_test_list_cut_position() local
353 list_add_tail(&entries[0], &list1); in list_test_list_cut_position()
354 list_add_tail(&entries[1], &list1); in list_test_list_cut_position()
355 list_add_tail(&entries[2], &list1); in list_test_list_cut_position()
357 /* before: [list1] -> entries[0] -> entries[1] -> entries[2] */ in list_test_list_cut_position()
358 list_cut_position(&list2, &list1, &entries[1]); in list_test_list_cut_position()
359 /* after: [list2] -> entries[0] -> entries[1], [list1] -> entries[2] */ in list_test_list_cut_position()
362 KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]); in list_test_list_cut_position()
369 KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]); in list_test_list_cut_position()
376 struct list_head entries[3], *cur; in list_test_list_cut_before() local
[all …]
stackdepot.c 65 unsigned long entries[1]; /* Variable-sized array of entries. */ member
104 static struct stack_record *depot_alloc_stack(unsigned long *entries, int size, in depot_alloc_stack() argument
107 int required_size = offsetof(struct stack_record, entries) + in depot_alloc_stack()
139 memcpy(stack->entries, entries, size * sizeof(unsigned long)); in depot_alloc_stack()
155 static inline u32 hash_stack(unsigned long *entries, unsigned int size) in hash_stack() argument
157 return jhash2((u32 *)entries, in hash_stack()
177 /* Find a stack that is equal to the one stored in entries in the hash */
179 unsigned long *entries, int size, in find_stack() argument
187 !stackdepot_memcmp(entries, found->entries, size)) in find_stack()
194 * stack_depot_fetch - Fetch stack entries from a depot
[all …]
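
stackdepot stores deduplicated stack traces and hands back a compact handle; stack_depot_fetch() later returns the saved entries. A hedged sketch of the usual save/fetch round trip (the 16-entry depth and GFP_KERNEL flag are illustrative choices):

    #include <linux/kernel.h>
    #include <linux/stacktrace.h>
    #include <linux/stackdepot.h>

    static depot_stack_handle_t record_current_stack(void)
    {
            unsigned long entries[16];
            unsigned int nr;

            /* Capture up to 16 frames of the current call chain. */
            nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

            /* Deduplicate and store; identical traces share one record. */
            return stack_depot_save(entries, nr, GFP_KERNEL);
    }

    static unsigned int fetch_recorded_stack(depot_stack_handle_t handle,
                                             unsigned long **entries)
    {
            /* Returns the number of saved entries and points *entries
             * at the depot's copy of the trace. */
            return stack_depot_fetch(handle, entries);
    }
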
/kernel/linux/linux-6.6/kernel/
stacktrace.c 19 * stack_trace_print - Print the entries in the stack trace
20 * @entries: Pointer to storage array
21 * @nr_entries: Number of entries in the storage array
24 void stack_trace_print(const unsigned long *entries, unsigned int nr_entries, in stack_trace_print() argument
29 if (WARN_ON(!entries)) in stack_trace_print()
33 printk("%*c%pS\n", 1 + spaces, ' ', (void *)entries[i]); in stack_trace_print()
38 * stack_trace_snprint - Print the entries in the stack trace into a buffer
41 * @entries: Pointer to storage array
42 * @nr_entries: Number of entries in the storage array
47 int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries, in stack_trace_snprint() argument
[all …]
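
The stacktrace.c kernel-doc above documents the printing helpers; callers typically pair them with stack_trace_save(). A minimal hedged sketch (the 32-entry depth is an illustrative choice):

    #include <linux/kernel.h>
    #include <linux/stacktrace.h>

    static void dump_current_stack(void)
    {
            unsigned long entries[32];
            unsigned int nr_entries;

            /* Save up to 32 return addresses of the current task. */
            nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

            /* Print one "%pS"-formatted line per entry, no extra indentation. */
            stack_trace_print(entries, nr_entries, 0);
    }
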
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/pm/powerplay/hwmgr/
vega10_pptable.h 131 UCHAR ucNumEntries; /* Number of entries. */
132 ATOM_Vega10_State states[1]; /* Dynamically allocate entries. */
165 UCHAR ucNumEntries; /* Number of entries. */
166 ATOM_Vega10_GFXCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ member
171 UCHAR ucNumEntries; /* Number of entries. */
172 ATOM_Vega10_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ member
177 UCHAR ucNumEntries; /* Number of entries. */
178 ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ member
183 UCHAR ucNumEntries; /* Number of entries. */
184 ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ member
[all …]
smu_helper.c 224 vvalue = vol_table->entries[i].value; in phm_trim_voltage_table()
228 if (vvalue == table->entries[j].value) { in phm_trim_voltage_table()
235 table->entries[table->count].value = vvalue; in phm_trim_voltage_table()
236 table->entries[table->count].smio_low = in phm_trim_voltage_table()
237 vol_table->entries[i].smio_low; in phm_trim_voltage_table()
265 vol_table->entries[i].value = dep_table->entries[i].mvdd; in phm_get_svi2_mvdd_voltage_table()
266 vol_table->entries[i].smio_low = 0; in phm_get_svi2_mvdd_voltage_table()
293 vol_table->entries[i].value = dep_table->entries[i].vddci; in phm_get_svi2_vddci_voltage_table()
294 vol_table->entries[i].smio_low = 0; in phm_get_svi2_vddci_voltage_table()
321 vol_table->entries[i].value = lookup_table->entries[i].us_vdd; in phm_get_svi2_vdd_voltage_table()
[all …]
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/pm/powerplay/hwmgr/
vega10_pptable.h 131 UCHAR ucNumEntries; /* Number of entries. */
132 ATOM_Vega10_State states[1]; /* Dynamically allocate entries. */
165 UCHAR ucNumEntries; /* Number of entries. */
166 ATOM_Vega10_GFXCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ member
171 UCHAR ucNumEntries; /* Number of entries. */
172 ATOM_Vega10_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ member
177 UCHAR ucNumEntries; /* Number of entries. */
178 ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ member
183 UCHAR ucNumEntries; /* Number of entries. */
184 ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ member
[all …]
smu_helper.c 224 vvalue = vol_table->entries[i].value; in phm_trim_voltage_table()
228 if (vvalue == table->entries[j].value) { in phm_trim_voltage_table()
235 table->entries[table->count].value = vvalue; in phm_trim_voltage_table()
236 table->entries[table->count].smio_low = in phm_trim_voltage_table()
237 vol_table->entries[i].smio_low; in phm_trim_voltage_table()
265 vol_table->entries[i].value = dep_table->entries[i].mvdd; in phm_get_svi2_mvdd_voltage_table()
266 vol_table->entries[i].smio_low = 0; in phm_get_svi2_mvdd_voltage_table()
293 vol_table->entries[i].value = dep_table->entries[i].vddci; in phm_get_svi2_vddci_voltage_table()
294 vol_table->entries[i].smio_low = 0; in phm_get_svi2_vddci_voltage_table()
321 vol_table->entries[i].value = lookup_table->entries[i].us_vdd; in phm_get_svi2_vdd_voltage_table()
[all …]
/kernel/linux/linux-5.10/kernel/
stacktrace.c 18 * stack_trace_print - Print the entries in the stack trace
19 * @entries: Pointer to storage array
20 * @nr_entries: Number of entries in the storage array
23 void stack_trace_print(const unsigned long *entries, unsigned int nr_entries, in stack_trace_print() argument
28 if (WARN_ON(!entries)) in stack_trace_print()
32 printk("%*c%pS\n", 1 + spaces, ' ', (void *)entries[i]); in stack_trace_print()
37 * stack_trace_snprint - Print the entries in the stack trace into a buffer
40 * @entries: Pointer to storage array
41 * @nr_entries: Number of entries in the storage array
46 int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries, in stack_trace_snprint() argument
[all …]
/kernel/linux/linux-6.6/arch/x86/kernel/cpu/
intel.c 382 * the TLB when any changes are made to any of the page table entries. in early_init_intel()
775 * entries for their respective TLB types. The 0x63 descriptor is an
776 * exception: it implies 4 dTLB entries for 1GB pages 32 dTLB entries
804 " (plus 32 entries TLB_DATA 2 MByte or 4 MByte pages, not encoded here)" },
810 { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
840 if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) in intel_tlb_lookup()
841 tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; in intel_tlb_lookup()
842 if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) in intel_tlb_lookup()
843 tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; in intel_tlb_lookup()
846 if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) in intel_tlb_lookup()
[all …]
/kernel/linux/linux-5.10/arch/x86/kernel/cpu/
intel.c 299 * the TLB when any changes are made to any of the page table entries. in early_init_intel()
793 { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
823 if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) in intel_tlb_lookup()
824 tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; in intel_tlb_lookup()
825 if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) in intel_tlb_lookup()
826 tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; in intel_tlb_lookup()
829 if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) in intel_tlb_lookup()
830 tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; in intel_tlb_lookup()
831 if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) in intel_tlb_lookup()
832 tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; in intel_tlb_lookup()
[all …]
/kernel/linux/linux-5.10/tools/lib/api/fd/
array.c 15 fda->entries = NULL; in fdarray__init()
27 struct pollfd *entries = realloc(fda->entries, size); in fdarray__grow() local
29 if (entries == NULL) in fdarray__grow()
34 free(entries); in fdarray__grow()
38 memset(&entries[fda->nr_alloc], 0, sizeof(struct pollfd) * nr); in fdarray__grow()
42 fda->entries = entries; in fdarray__grow()
65 free(fda->entries); in fdarray__exit()
84 fda->entries[fda->nr].fd = fd; in fdarray__add()
85 fda->entries[fda->nr].events = revents; in fdarray__add()
101 if (!fda->entries[fd].events) in fdarray__filter()
[all …]
/kernel/linux/linux-6.6/arch/powerpc/mm/book3s64/
iommu_api.c 34 u64 entries; /* number of entries in hpas/hpages[] */ member
57 unsigned long entries, unsigned long dev_hpa, in mm_iommu_do_alloc() argument
66 ret = account_locked_vm(mm, entries, true); in mm_iommu_do_alloc()
70 locked_entries = entries; in mm_iommu_do_alloc()
80 mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT)); in mm_iommu_do_alloc()
88 * we use @ua and @entries natural alignment to allow IOMMU pages in mm_iommu_do_alloc()
91 mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT)); in mm_iommu_do_alloc()
92 mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0]))); in mm_iommu_do_alloc()
102 chunk = min(chunk, entries); in mm_iommu_do_alloc()
103 for (entry = 0; entry < entries; entry += chunk) { in mm_iommu_do_alloc()
[all …]
/kernel/linux/linux-6.6/tools/lib/api/fd/
array.c 15 fda->entries = NULL; in fdarray__init()
27 struct pollfd *entries = realloc(fda->entries, size); in fdarray__grow() local
29 if (entries == NULL) in fdarray__grow()
34 free(entries); in fdarray__grow()
38 memset(&entries[fda->nr_alloc], 0, sizeof(struct pollfd) * nr); in fdarray__grow()
42 fda->entries = entries; in fdarray__grow()
65 free(fda->entries); in fdarray__exit()
84 fda->entries[fda->nr].fd = fd; in fdarray__add()
85 fda->entries[fda->nr].events = revents; in fdarray__add()
99 entry = &from->entries[pos]; in fdarray__dup_entry_from()
[all …]
/kernel/linux/linux-5.10/arch/powerpc/mm/book3s64/
iommu_api.c 34 u64 entries; /* number of entries in hpas/hpages[] */ member
57 unsigned long entries, unsigned long dev_hpa, in mm_iommu_do_alloc() argument
66 ret = account_locked_vm(mm, entries, true); in mm_iommu_do_alloc()
70 locked_entries = entries; in mm_iommu_do_alloc()
80 mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT)); in mm_iommu_do_alloc()
88 * we use @ua and @entries natural alignment to allow IOMMU pages in mm_iommu_do_alloc()
91 mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT)); in mm_iommu_do_alloc()
92 mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0]))); in mm_iommu_do_alloc()
102 chunk = min(chunk, entries); in mm_iommu_do_alloc()
103 for (entry = 0; entry < entries; entry += chunk) { in mm_iommu_do_alloc()
[all …]
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/
dcn315_clk_mgr.c 258 .entries = {
306 .entries = {
343 .entries = {
390 /* skip empty entries, the smu array has no holes*/ in dcn315_build_watermark_ranges()
391 if (!bw_params->wm_table.entries[i].valid) in dcn315_build_watermark_ranges()
394 table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst; in dcn315_build_watermark_ranges()
395 table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type; in dcn315_build_watermark_ranges()
406 bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1; in dcn315_build_watermark_ranges()
409 bw_params->clk_table.entries[i].dcfclk_mhz; in dcn315_build_watermark_ranges()
489 …struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entri… in dcn315_clk_mgr_helper_populate_bw_params()
[all …]
/kernel/linux/linux-5.10/kernel/events/
callchain.c 50 struct callchain_cpus_entries *entries; in release_callchain_buffers_rcu() local
53 entries = container_of(head, struct callchain_cpus_entries, rcu_head); in release_callchain_buffers_rcu()
56 kfree(entries->cpu_entries[cpu]); in release_callchain_buffers_rcu()
58 kfree(entries); in release_callchain_buffers_rcu()
63 struct callchain_cpus_entries *entries; in release_callchain_buffers() local
65 entries = callchain_cpus_entries; in release_callchain_buffers()
67 call_rcu(&entries->rcu_head, release_callchain_buffers_rcu); in release_callchain_buffers()
74 struct callchain_cpus_entries *entries; in alloc_callchain_buffers() local
83 entries = kzalloc(size, GFP_KERNEL); in alloc_callchain_buffers()
84 if (!entries) in alloc_callchain_buffers()
[all …]
/kernel/linux/linux-6.6/kernel/events/
callchain.c 50 struct callchain_cpus_entries *entries; in release_callchain_buffers_rcu() local
53 entries = container_of(head, struct callchain_cpus_entries, rcu_head); in release_callchain_buffers_rcu()
56 kfree(entries->cpu_entries[cpu]); in release_callchain_buffers_rcu()
58 kfree(entries); in release_callchain_buffers_rcu()
63 struct callchain_cpus_entries *entries; in release_callchain_buffers() local
65 entries = callchain_cpus_entries; in release_callchain_buffers()
67 call_rcu(&entries->rcu_head, release_callchain_buffers_rcu); in release_callchain_buffers()
74 struct callchain_cpus_entries *entries; in alloc_callchain_buffers() local
83 entries = kzalloc(size, GFP_KERNEL); in alloc_callchain_buffers()
84 if (!entries) in alloc_callchain_buffers()
[all …]
/kernel/linux/linux-6.6/tools/perf/util/
mem2node.c 50 struct phys_entry *entries, *tmp_entries; in mem2node__init() local
62 entries = zalloc(sizeof(*entries) * max); in mem2node__init()
63 if (!entries) in mem2node__init()
84 struct phys_entry *prev = &entries[j - 1]; in mem2node__init()
93 phys_entry__init(&entries[j++], start, bsize, n->node); in mem2node__init()
97 /* Cut unused entries, due to merging. */ in mem2node__init()
98 tmp_entries = realloc(entries, sizeof(*entries) * j); in mem2node__init()
101 entries = tmp_entries; in mem2node__init()
105 entries[i].node, entries[i].start, entries[i].end); in mem2node__init()
107 phys_entry__insert(&entries[i], &map->root); in mem2node__init()
[all …]
/kernel/linux/linux-5.10/tools/perf/util/
mem2node.c 50 struct phys_entry *entries, *tmp_entries; in mem2node__init() local
62 entries = zalloc(sizeof(*entries) * max); in mem2node__init()
63 if (!entries) in mem2node__init()
84 struct phys_entry *prev = &entries[j - 1]; in mem2node__init()
93 phys_entry__init(&entries[j++], start, bsize, n->node); in mem2node__init()
97 /* Cut unused entries, due to merging. */ in mem2node__init()
98 tmp_entries = realloc(entries, sizeof(*entries) * j); in mem2node__init()
100 entries = tmp_entries; in mem2node__init()
104 entries[i].node, entries[i].start, entries[i].end); in mem2node__init()
106 phys_entry__insert(&entries[i], &map->root); in mem2node__init()
[all …]
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/
dcn314_clk_mgr.c 359 .entries = {
396 .entries = {
443 /* skip empty entries, the smu array has no holes*/ in dcn314_build_watermark_ranges()
444 if (!bw_params->wm_table.entries[i].valid) in dcn314_build_watermark_ranges()
447 table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst; in dcn314_build_watermark_ranges()
448 table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type; in dcn314_build_watermark_ranges()
459 bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1; in dcn314_build_watermark_ranges()
462 bw_params->clk_table.entries[i].dcfclk_mhz; in dcn314_build_watermark_ranges()
572 …struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entri… in dcn314_clk_mgr_helper_populate_bw_params()
594 /* Invalid number of entries in the table from PMFW. */ in dcn314_clk_mgr_helper_populate_bw_params()
[all …]
