/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/steering/
D | dr_table.c |
      6  int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,  in mlx5dr_table_set_miss_action() argument
     17  mlx5dr_domain_lock(tbl->dmn);  in mlx5dr_table_set_miss_action()
     19  if (!list_empty(&tbl->matcher_list))  in mlx5dr_table_set_miss_action()
     20  last_matcher = list_last_entry(&tbl->matcher_list,  in mlx5dr_table_set_miss_action()
     24  if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX ||  in mlx5dr_table_set_miss_action()
     25  tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {  in mlx5dr_table_set_miss_action()
     29  last_htbl = tbl->rx.s_anchor;  in mlx5dr_table_set_miss_action()
     31  tbl->rx.default_icm_addr = action ?  in mlx5dr_table_set_miss_action()
     32  action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :  in mlx5dr_table_set_miss_action()
     33  tbl->rx.nic_dmn->default_icm_addr;  in mlx5dr_table_set_miss_action()
     [all …]
|
/kernel/linux/linux-5.10/fs/nfs/
D | nfs4session.c |
     27  static void nfs4_init_slot_table(struct nfs4_slot_table *tbl, const char *queue)  in nfs4_init_slot_table() argument
     29  tbl->highest_used_slotid = NFS4_NO_SLOT;  in nfs4_init_slot_table()
     30  spin_lock_init(&tbl->slot_tbl_lock);  in nfs4_init_slot_table()
     31  rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, queue);  in nfs4_init_slot_table()
     32  init_waitqueue_head(&tbl->slot_waitq);  in nfs4_init_slot_table()
     33  init_completion(&tbl->complete);  in nfs4_init_slot_table()
     39  static void nfs4_shrink_slot_table(struct nfs4_slot_table *tbl, u32 newsize)  in nfs4_shrink_slot_table() argument
     42  if (newsize >= tbl->max_slots)  in nfs4_shrink_slot_table()
     45  p = &tbl->slots;  in nfs4_shrink_slot_table()
     53  tbl->max_slots--;  in nfs4_shrink_slot_table()
     [all …]
|
D | nfs4session.h |
     82  extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
     84  extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
     85  extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
     86  extern struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid);
     87  extern int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
     90  extern bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
     91  extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
     92  extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
     93  bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
     95  void nfs41_wake_slot_table(struct nfs4_slot_table *tbl);
     [all …]
|
/kernel/linux/linux-5.10/arch/powerpc/kernel/
D | iommu.c |
    164  struct iommu_table *tbl,  in iommu_range_alloc() argument
    197  pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);  in iommu_range_alloc()
    200  pool = &(tbl->large_pool);  in iommu_range_alloc()
    202  pool = &(tbl->pools[pool_nr]);  in iommu_range_alloc()
    222  if (limit + tbl->it_offset > mask) {  in iommu_range_alloc()
    223  limit = mask - tbl->it_offset + 1;  in iommu_range_alloc()
    230  pool = &(tbl->pools[0]);  in iommu_range_alloc()
    238  n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,  in iommu_range_alloc()
    239  dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),  in iommu_range_alloc()
    248  } else if (pass <= tbl->nr_pools) {  in iommu_range_alloc()
    [all …]
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/vmw_pvrdma/
D | pvrdma_doorbell.c |
     56  struct pvrdma_id_table *tbl = &dev->uar_table.tbl;  in pvrdma_uar_table_init() local
     61  tbl->last = 0;  in pvrdma_uar_table_init()
     62  tbl->top = 0;  in pvrdma_uar_table_init()
     63  tbl->max = num;  in pvrdma_uar_table_init()
     64  tbl->mask = mask;  in pvrdma_uar_table_init()
     65  spin_lock_init(&tbl->lock);  in pvrdma_uar_table_init()
     66  tbl->table = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL);  in pvrdma_uar_table_init()
     67  if (!tbl->table)  in pvrdma_uar_table_init()
     71  set_bit(0, tbl->table);  in pvrdma_uar_table_init()
     78  struct pvrdma_id_table *tbl = &dev->uar_table.tbl;  in pvrdma_uar_table_cleanup() local
     [all …]
|
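Note: pvrdma_uar_table_init() is an instance of a common kernel pattern — an ID
allocator backed by a bitmap sized with BITS_TO_LONGS(), with bit 0 reserved so
ID 0 is never handed out. A minimal userspace sketch of that pattern (toy
names, not the driver's; BITS_TO_LONGS re-created locally):

    #include <limits.h>
    #include <stdlib.h>

    #define BITS_PER_LONG (CHAR_BIT * sizeof(long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    struct id_table {
        unsigned int max;        /* number of allocatable IDs */
        unsigned long *table;    /* one bit per ID */
    };

    static int id_table_init(struct id_table *tbl, unsigned int num)
    {
        tbl->max = num;
        tbl->table = calloc(BITS_TO_LONGS(num), sizeof(long));
        if (!tbl->table)
            return -1;
        tbl->table[0] |= 1UL;    /* reserve ID 0, as the driver does with set_bit(0, ...) */
        return 0;
    }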
/kernel/linux/linux-5.10/tools/perf/util/
D | syscalltbl.c |
     59  static int syscalltbl__init_native(struct syscalltbl *tbl)  in syscalltbl__init_native() argument
     68  entries = tbl->syscalls.entries = malloc(sizeof(struct syscall) * nr_entries);  in syscalltbl__init_native()
     69  if (tbl->syscalls.entries == NULL)  in syscalltbl__init_native()
     80  qsort(tbl->syscalls.entries, nr_entries, sizeof(struct syscall), syscallcmp);  in syscalltbl__init_native()
     81  tbl->syscalls.nr_entries = nr_entries;  in syscalltbl__init_native()
     82  tbl->syscalls.max_id = syscalltbl_native_max_id;  in syscalltbl__init_native()
     88  struct syscalltbl *tbl = malloc(sizeof(*tbl));  in syscalltbl__new() local
     89  if (tbl) {  in syscalltbl__new()
     90  if (syscalltbl__init_native(tbl)) {  in syscalltbl__new()
     91  free(tbl);  in syscalltbl__new()
     [all …]
|
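Note: syscalltbl__init_native() fills one flat array of (id, name) pairs and
sorts it with qsort(), which makes later name lookups binary searches. A
hedged sketch of that build-then-search pattern (the lookup helper and its
signature are illustrative, not perf's API):

    #include <stdlib.h>
    #include <string.h>

    struct syscall { int id; const char *name; };

    /* order entries by name so bsearch() can find them by name */
    static int syscallcmp(const void *a, const void *b)
    {
        return strcmp(((const struct syscall *)a)->name,
                      ((const struct syscall *)b)->name);
    }

    static int lookup_id(struct syscall *tbl, size_t nr, const char *name)
    {
        struct syscall key = { .name = name };
        struct syscall *sc = bsearch(&key, tbl, nr, sizeof(*tbl), syscallcmp);
        return sc ? sc->id : -1;
    }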
/kernel/linux/linux-5.10/include/linux/
D | rhashtable.h |
    119  static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,  in rht_bucket_index() argument
    122  return hash & (tbl->size - 1);  in rht_bucket_index()
    156  struct rhashtable *ht, const struct bucket_table *tbl,  in rht_key_hashfn() argument
    159  unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);  in rht_key_hashfn()
    161  return rht_bucket_index(tbl, hash);  in rht_key_hashfn()
    165  struct rhashtable *ht, const struct bucket_table *tbl,  in rht_head_hashfn() argument
    171  rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:  in rht_head_hashfn()
    173  tbl->hash_rnd)) :  in rht_head_hashfn()
    174  rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);  in rht_head_hashfn()
    183  const struct bucket_table *tbl)  in rht_grow_above_75() argument
    [all …]
|
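Note: rht_bucket_index() works only because rhashtable bucket counts are
powers of two, so `hash & (tbl->size - 1)` equals `hash % tbl->size` without a
division. A standalone sketch of just that masking step (toy types, not the
kernel's):

    #include <stdio.h>

    struct toy_bucket_table {
        unsigned int size;   /* bucket count; must be a power of two */
    };

    /* same result as hash % tbl->size, but a mask instead of a divide */
    static unsigned int toy_bucket_index(const struct toy_bucket_table *tbl,
                                         unsigned int hash)
    {
        return hash & (tbl->size - 1);
    }

    int main(void)
    {
        struct toy_bucket_table tbl = { .size = 64 };
        printf("%u\n", toy_bucket_index(&tbl, 0xdeadbeefu));  /* prints 47 */
        return 0;
    }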
/kernel/linux/linux-5.10/arch/powerpc/platforms/powernv/
D | pci-ioda-tce.c |
     48  void pnv_pci_setup_iommu_table(struct iommu_table *tbl,  in pnv_pci_setup_iommu_table() argument
     52  tbl->it_blocksize = 16;  in pnv_pci_setup_iommu_table()
     53  tbl->it_base = (unsigned long)tce_mem;  in pnv_pci_setup_iommu_table()
     54  tbl->it_page_shift = page_shift;  in pnv_pci_setup_iommu_table()
     55  tbl->it_offset = dma_offset >> tbl->it_page_shift;  in pnv_pci_setup_iommu_table()
     56  tbl->it_index = 0;  in pnv_pci_setup_iommu_table()
     57  tbl->it_size = tce_size >> 3;  in pnv_pci_setup_iommu_table()
     58  tbl->it_busno = 0;  in pnv_pci_setup_iommu_table()
     59  tbl->it_type = TCE_PCI;  in pnv_pci_setup_iommu_table()
     83  static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)  in pnv_tce() argument
     [all …]
|
/kernel/linux/linux-5.10/lib/
D | rhashtable.c |
     38  const struct bucket_table *tbl,  in head_hashfn() argument
     41  return rht_head_hashfn(ht, tbl, he, ht->p);  in head_hashfn()
     53  int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)  in lockdep_rht_bucket_is_held() argument
     57  if (unlikely(tbl->nest))  in lockdep_rht_bucket_is_held()
     59  return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);  in lockdep_rht_bucket_is_held()
     67  const struct bucket_table *tbl)  in nested_table_top() argument
     72  return (void *)rcu_dereference_protected(tbl->buckets[0], 1);  in nested_table_top()
     94  static void nested_bucket_table_free(const struct bucket_table *tbl)  in nested_bucket_table_free() argument
     96  unsigned int size = tbl->size >> tbl->nest;  in nested_bucket_table_free()
     97  unsigned int len = 1 << tbl->nest;  in nested_bucket_table_free()
     [all …]
|
/kernel/linux/linux-5.10/arch/powerpc/include/asm/
D | iommu.h |
     40  int (*set)(struct iommu_table *tbl,
     51  int (*xchg_no_kill)(struct iommu_table *tbl,
     57  void (*tce_kill)(struct iommu_table *tbl,
     62  __be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
     64  void (*clear)(struct iommu_table *tbl,
     67  unsigned long (*get)(struct iommu_table *tbl, long index);
     68  void (*flush)(struct iommu_table *tbl);
     69  void (*free)(struct iommu_table *tbl);
    119  #define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \  argument
    120  ((tbl)->it_ops->useraddrptr((tbl), (entry), false))
    [all …]
|
/kernel/linux/linux-5.10/net/netfilter/ipvs/
D | ip_vs_lblc.c |
    168  ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)  in ip_vs_lblc_hash() argument
    172  hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);  in ip_vs_lblc_hash()
    173  atomic_inc(&tbl->entries);  in ip_vs_lblc_hash()
    179  ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,  in ip_vs_lblc_get() argument
    185  hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)  in ip_vs_lblc_get()
    198  ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,  in ip_vs_lblc_new() argument
    203  en = ip_vs_lblc_get(af, tbl, daddr);  in ip_vs_lblc_new()
    220  ip_vs_lblc_hash(tbl, en);  in ip_vs_lblc_new()
    231  struct ip_vs_lblc_table *tbl = svc->sched_data;  in ip_vs_lblc_flush() local
    237  tbl->dead = true;  in ip_vs_lblc_flush()
    [all …]
|
D | ip_vs_lblcr.c |
    331  ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)  in ip_vs_lblcr_hash() argument
    335  hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);  in ip_vs_lblcr_hash()
    336  atomic_inc(&tbl->entries);  in ip_vs_lblcr_hash()
    342  ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,  in ip_vs_lblcr_get() argument
    348  hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)  in ip_vs_lblcr_get()
    361  ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,  in ip_vs_lblcr_new() argument
    366  en = ip_vs_lblcr_get(af, tbl, daddr);  in ip_vs_lblcr_new()
    382  ip_vs_lblcr_hash(tbl, en);  in ip_vs_lblcr_new()
    397  struct ip_vs_lblcr_table *tbl = svc->sched_data;  in ip_vs_lblcr_flush() local
    403  tbl->dead = true;  in ip_vs_lblcr_flush()
    [all …]
|
/kernel/linux/linux-5.10/drivers/net/wireless/intel/iwlwifi/dvm/
D | rs.c |
    436  static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)  in get_expected_tpt() argument
    438  if (tbl->expected_tpt)  in get_expected_tpt()
    439  return tbl->expected_tpt[rs_index];  in get_expected_tpt()
    450  static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,  in rs_collect_tx_data() argument
    461  window = &(tbl->win[scale_index]);  in rs_collect_tx_data()
    464  tpt = get_expected_tpt(tbl, scale_index);  in rs_collect_tx_data()
    529  struct iwl_scale_tbl_info *tbl,  in rate_n_flags_from_tbl() argument
    534  if (is_legacy(tbl->lq_type)) {  in rate_n_flags_from_tbl()
    539  } else if (is_Ht(tbl->lq_type)) {  in rate_n_flags_from_tbl()
    546  if (is_siso(tbl->lq_type))  in rate_n_flags_from_tbl()
    [all …]
|
/kernel/linux/linux-5.10/net/core/
D | neighbour.c |
     58  static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
    124  atomic_dec(&n->tbl->gc_entries);  in neigh_mark_dead()
    132  write_lock_bh(&n->tbl->lock);  in neigh_update_gc_list()
    147  atomic_dec(&n->tbl->gc_entries);  in neigh_update_gc_list()
    150  list_add_tail(&n->gc_list, &n->tbl->gc_list);  in neigh_update_gc_list()
    151  atomic_inc(&n->tbl->gc_entries);  in neigh_update_gc_list()
    156  write_unlock_bh(&n->tbl->lock);  in neigh_update_gc_list()
    182  struct neigh_table *tbl)  in neigh_del() argument
    191  lockdep_is_held(&tbl->lock));  in neigh_del()
    202  bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)  in neigh_remove_one() argument
    [all …]
|
/kernel/linux/linux-5.10/drivers/net/wireless/marvell/mwifiex/
D | 11n_rxreorder.c |
    110  struct mwifiex_rx_reorder_tbl *tbl,  in mwifiex_11n_dispatch_pkt_until_start_win() argument
    120  pkt_to_send = (start_win > tbl->start_win) ?  in mwifiex_11n_dispatch_pkt_until_start_win()
    121  min((start_win - tbl->start_win), tbl->win_size) :  in mwifiex_11n_dispatch_pkt_until_start_win()
    122  tbl->win_size;  in mwifiex_11n_dispatch_pkt_until_start_win()
    125  if (tbl->rx_reorder_ptr[i]) {  in mwifiex_11n_dispatch_pkt_until_start_win()
    126  skb = tbl->rx_reorder_ptr[i];  in mwifiex_11n_dispatch_pkt_until_start_win()
    128  tbl->rx_reorder_ptr[i] = NULL;  in mwifiex_11n_dispatch_pkt_until_start_win()
    136  for (i = 0; i < tbl->win_size - pkt_to_send; ++i) {  in mwifiex_11n_dispatch_pkt_until_start_win()
    137  tbl->rx_reorder_ptr[i] = tbl->rx_reorder_ptr[pkt_to_send + i];  in mwifiex_11n_dispatch_pkt_until_start_win()
    138  tbl->rx_reorder_ptr[pkt_to_send + i] = NULL;  in mwifiex_11n_dispatch_pkt_until_start_win()
    [all …]
|
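Note: mwifiex_11n_dispatch_pkt_until_start_win() flushes at most win_size
buffered frames and then slides the remaining buffer pointers to the front of
the window. A simplified userspace sketch of that slide (it assumes start_win
is ahead of tbl->start_win and ignores the sequence-number wraparound the
driver's comparison handles):

    #include <stddef.h>

    #define WIN_SIZE 64
    struct reorder_tbl {
        unsigned int start_win;
        void *ptr[WIN_SIZE];         /* buffered frames, indexed by offset */
    };

    static void dispatch(void *pkt) { (void)pkt; /* hand the frame up */ }

    static void dispatch_until(struct reorder_tbl *tbl, unsigned int start_win)
    {
        unsigned int delta = start_win - tbl->start_win;
        unsigned int pkt_to_send = delta < WIN_SIZE ? delta : WIN_SIZE;
        unsigned int i;

        for (i = 0; i < pkt_to_send; i++) {      /* flush slots now out of window */
            if (tbl->ptr[i])
                dispatch(tbl->ptr[i]);
            tbl->ptr[i] = NULL;
        }
        for (i = 0; i < WIN_SIZE - pkt_to_send; i++) {  /* slide survivors down */
            tbl->ptr[i] = tbl->ptr[pkt_to_send + i];
            tbl->ptr[pkt_to_send + i] = NULL;
        }
        tbl->start_win = start_win;
    }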
/kernel/linux/linux-5.10/drivers/net/wireless/intel/iwlegacy/
D | 4965-rs.c |
    389  il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx)  in il4965_get_expected_tpt() argument
    391  if (tbl->expected_tpt)  in il4965_get_expected_tpt()
    392  return tbl->expected_tpt[rs_idx];  in il4965_get_expected_tpt()
    404  il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx,  in il4965_rs_collect_tx_data() argument
    415  win = &(tbl->win[scale_idx]);  in il4965_rs_collect_tx_data()
    418  tpt = il4965_get_expected_tpt(tbl, scale_idx);  in il4965_rs_collect_tx_data()
    482  il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,  in il4965_rate_n_flags_from_tbl() argument
    487  if (is_legacy(tbl->lq_type)) {  in il4965_rate_n_flags_from_tbl()
    492  } else if (is_Ht(tbl->lq_type)) {  in il4965_rate_n_flags_from_tbl()
    499  if (is_siso(tbl->lq_type))  in il4965_rate_n_flags_from_tbl()
    [all …]
|
/kernel/linux/linux-5.10/net/mac80211/
D | mesh_pathtbl.c |
     18  static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
     45  struct mesh_table *tbl = tblptr;  in mesh_path_rht_free() local
     47  mesh_path_free_rcu(tbl, mpath);  in mesh_path_rht_free()
     71  static void mesh_table_free(struct mesh_table *tbl)  in mesh_table_free() argument
     73  rhashtable_free_and_destroy(&tbl->rhead,  in mesh_table_free()
     74  mesh_path_rht_free, tbl);  in mesh_table_free()
     75  kfree(tbl);  in mesh_table_free()
    214  static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,  in mpath_lookup() argument
    219  mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);  in mpath_lookup()
    251  __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)  in __mesh_path_lookup_by_idx() argument
    [all …]
|
/kernel/linux/linux-5.10/drivers/firmware/efi/
D | memattr.c |
     24  efi_memory_attributes_table_t *tbl;  in efi_memattr_init() local
     29  tbl = early_memremap(efi_mem_attr_table, sizeof(*tbl));  in efi_memattr_init()
     30  if (!tbl) {  in efi_memattr_init()
     36  if (tbl->version > 1) {  in efi_memattr_init()
     38  tbl->version);  in efi_memattr_init()
     42  tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;  in efi_memattr_init()
     47  early_memunmap(tbl, sizeof(*tbl));  in efi_memattr_init()
    131  efi_memory_attributes_table_t *tbl;  in efi_memattr_apply_permissions() local
    134  if (tbl_size <= sizeof(*tbl))  in efi_memattr_apply_permissions()
    146  tbl = memremap(efi_mem_attr_table, tbl_size, MEMREMAP_WB);  in efi_memattr_apply_permissions()
    [all …]
|
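Note: efi_memattr_init() sizes the table in two phases — first map only the
fixed header with early_memremap(), then compute
tbl_size = sizeof(*tbl) + num_entries * desc_size and later map that full
size. A userspace analogue of the two-phase sizing, reading from a file
instead of mapping firmware memory (the header layout here is an assumption
for illustration, not the exact EFI struct):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct memattr_header {        /* assumed layout: version, count, stride */
        uint32_t version;
        uint32_t num_entries;
        uint32_t desc_size;
        uint32_t reserved;
    };

    static unsigned char *read_table(FILE *f, size_t *out_size)
    {
        struct memattr_header hdr;
        unsigned char *tbl;
        size_t tbl_size;

        /* phase 1: read just the header and sanity-check the version */
        if (fread(&hdr, sizeof(hdr), 1, f) != 1 || hdr.version > 1)
            return NULL;

        /* phase 2: header plus num_entries descriptors of desc_size bytes */
        tbl_size = sizeof(hdr) + (size_t)hdr.num_entries * hdr.desc_size;
        tbl = malloc(tbl_size);
        if (!tbl)
            return NULL;
        rewind(f);
        if (fread(tbl, 1, tbl_size, f) != tbl_size) { free(tbl); return NULL; }
        *out_size = tbl_size;
        return tbl;
    }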
/kernel/linux/linux-5.10/drivers/vfio/
D | vfio_iommu_spapr_tce.c |
    207  struct iommu_table *tbl = container->tables[i];  in tce_iommu_find_table() local
    209  if (tbl) {  in tce_iommu_find_table()
    210  unsigned long entry = ioba >> tbl->it_page_shift;  in tce_iommu_find_table()
    211  unsigned long start = tbl->it_offset;  in tce_iommu_find_table()
    212  unsigned long end = start + tbl->it_size;  in tce_iommu_find_table()
    215  *ptbl = tbl;  in tce_iommu_find_table()
    337  struct iommu_table *tbl,
    340  struct iommu_table *tbl);
    360  struct iommu_table *tbl = container->tables[i];  in tce_iommu_release() local
    362  if (!tbl)  in tce_iommu_release()
    [all …]
|
/kernel/linux/linux-5.10/net/netfilter/
D | xt_repldata.h |
     24  } *tbl; \
     26  size_t term_offset = (offsetof(typeof(*tbl), entries[nhooks]) + \
     28  tbl = kzalloc(term_offset + sizeof(*term), GFP_KERNEL); \
     29  if (tbl == NULL) \
     31  term = (struct type##_error *)&(((char *)tbl)[term_offset]); \
     32  strncpy(tbl->repl.name, info->name, sizeof(tbl->repl.name)); \
     34  tbl->repl.valid_hooks = hook_mask; \
     35  tbl->repl.num_entries = nhooks + 1; \
     36  tbl->repl.size = nhooks * sizeof(struct type##_standard) + \
     41  tbl->repl.hook_entry[hooknum] = bytes; \
     [all …]
|
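Note: xt_repldata.h builds the whole replacement table in a single allocation:
a header, nhooks standard entries as a flexible array member, and one
terminator record placed at an aligned offset past the array. A sketch of that
layout in userspace GNU C (stand-in types; the original computes term_offset
the same way, via offsetof(typeof(*tbl), entries[nhooks])):

    #include <stddef.h>
    #include <stdlib.h>

    struct entry { int verdict; };   /* stand-in for struct type##_standard */
    struct term  { int code; };      /* stand-in for struct type##_error */

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    static void *alloc_table(unsigned int nhooks)
    {
        struct {
            struct { unsigned int num_entries; } repl;  /* header */
            struct entry entries[];                     /* flexible array member */
        } *tbl;
        struct term *term;
        /* terminator goes just past the last entry, suitably aligned */
        size_t term_offset = ALIGN_UP(offsetof(typeof(*tbl), entries[nhooks]),
                                      __alignof__(struct term));

        tbl = calloc(1, term_offset + sizeof(*term));
        if (!tbl)
            return NULL;
        term = (struct term *)&((char *)tbl)[term_offset];
        term->code = -1;                     /* mark the end of the table */
        tbl->repl.num_entries = nhooks + 1;  /* entries plus terminator */
        return tbl;
    }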
/kernel/linux/linux-5.10/arch/powerpc/kvm/
D | book3s_64_vio_hv.c |
    122  long shift = stit->tbl->it_page_shift;  in kvmppc_rm_tce_validate()
    171  u64 *tbl;  in kvmppc_rm_tce_put() local
    183  tbl = kvmppc_page_address(page);  in kvmppc_rm_tce_put()
    185  tbl[idx % TCES_PER_PAGE] = tce;  in kvmppc_rm_tce_put()
    221  struct iommu_table *tbl,  in iommu_tce_xchg_no_kill_rm() argument
    227  ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true);  in iommu_tce_xchg_no_kill_rm()
    231  __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);  in iommu_tce_xchg_no_kill_rm()
    243  static void iommu_tce_kill_rm(struct iommu_table *tbl,  in iommu_tce_kill_rm() argument
    246  if (tbl->it_ops->tce_kill)  in iommu_tce_kill_rm()
    247  tbl->it_ops->tce_kill(tbl, entry, pages, true);  in iommu_tce_kill_rm()
    [all …]
|
D | book3s_64_vio.c |
     53  iommu_tce_table_put(stit->tbl);  in kvm_spapr_tce_iommu_table_free()
     85  if (table_group->tables[i] != stit->tbl)  in kvm_spapr_tce_release_iommu_group()
    101  struct iommu_table *tbl = NULL;  in kvm_spapr_tce_attach_iommu_group() local
    144  tbl = iommu_tce_table_get(tbltmp);  in kvm_spapr_tce_attach_iommu_group()
    148  if (!tbl)  in kvm_spapr_tce_attach_iommu_group()
    153  if (tbl != stit->tbl)  in kvm_spapr_tce_attach_iommu_group()
    158  iommu_tce_table_put(tbl);  in kvm_spapr_tce_attach_iommu_group()
    173  iommu_tce_table_put(tbl);  in kvm_spapr_tce_attach_iommu_group()
    177  stit->tbl = tbl;  in kvm_spapr_tce_attach_iommu_group()
    381  long shift = stit->tbl->it_page_shift;  in kvmppc_tce_validate()
    [all …]
|
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/en/
D | mod_hdr.c |
     42  void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl)  in mlx5e_mod_hdr_tbl_init() argument
     44  mutex_init(&tbl->lock);  in mlx5e_mod_hdr_tbl_init()
     45  hash_init(tbl->hlist);  in mlx5e_mod_hdr_tbl_init()
     48  void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl)  in mlx5e_mod_hdr_tbl_destroy() argument
     50  mutex_destroy(&tbl->lock);  in mlx5e_mod_hdr_tbl_destroy()
     53  static struct mlx5e_mod_hdr_handle *mod_hdr_get(struct mod_hdr_tbl *tbl,  in mod_hdr_get() argument
     59  hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {  in mod_hdr_get()
     72  struct mod_hdr_tbl *tbl,  in mlx5e_mod_hdr_attach() argument
     89  mutex_lock(&tbl->lock);  in mlx5e_mod_hdr_attach()
     90  mh = mod_hdr_get(tbl, &key, hash_key);  in mlx5e_mod_hdr_attach()
     [all …]
|
/kernel/linux/linux-5.10/include/net/
D | neighbour.h |
     75  struct neigh_table *tbl;  member
    132  #define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)  argument
    136  struct neigh_table *tbl;  member
    244  return p->tbl->family;  in neigh_parms_family()
    252  return (char *)n + n->tbl->entry_size;  in neighbour_priv()
    286  struct neigh_table *tbl,  in ___neigh_lookup_noref() argument
    294  struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht);  in ___neigh_lookup_noref()
    309  static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,  in __neigh_lookup_noref() argument
    313  return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);  in __neigh_lookup_noref()
    316  void neigh_table_init(int index, struct neigh_table *tbl);
    [all …]
|
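Note: neighbour_priv() relies on tbl->entry_size covering the core neighbour
struct, so device-private data sits immediately behind each entry in the same
allocation. A toy sketch of that trailing-private-area idiom (simplified: the
kernel also rounds entry_size up for alignment):

    #include <stdlib.h>

    struct toy_table { size_t entry_size; };      /* like neigh_table */
    struct toy_neigh { struct toy_table *tbl; };  /* core fields elided */

    /* private data lives immediately after the core entry */
    static void *toy_priv(struct toy_neigh *n)
    {
        return (char *)n + n->tbl->entry_size;
    }

    static struct toy_neigh *toy_alloc(struct toy_table *tbl, size_t priv_size)
    {
        struct toy_neigh *n;

        tbl->entry_size = sizeof(*n);
        n = calloc(1, tbl->entry_size + priv_size);
        if (n)
            n->tbl = tbl;
        return n;
    }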
/kernel/linux/linux-5.10/arch/powerpc/platforms/pseries/
D | iommu.c |
     59  struct iommu_table *tbl;  in iommu_pseries_alloc_group() local
     66  tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node);  in iommu_pseries_alloc_group()
     67  if (!tbl)  in iommu_pseries_alloc_group()
     70  INIT_LIST_HEAD_RCU(&tbl->it_group_list);  in iommu_pseries_alloc_group()
     71  kref_init(&tbl->it_kref);  in iommu_pseries_alloc_group()
     73  table_group->tables[0] = tbl;  in iommu_pseries_alloc_group()
     85  struct iommu_table *tbl;  in iommu_pseries_free_group() local
     90  tbl = table_group->tables[0];  in iommu_pseries_free_group()
     97  iommu_tce_table_put(tbl);  in iommu_pseries_free_group()
    102  static int tce_build_pSeries(struct iommu_table *tbl, long index,  in tce_build_pSeries() argument
    [all …]
|