/drivers/net/ethernet/chelsio/cxgb4/
D | l2t.c |
   65  static inline unsigned int vlan_prio(const struct l2t_entry *e)  in vlan_prio() argument
   67      return e->vlan >> VLAN_PRIO_SHIFT;  in vlan_prio()
   70  static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)  in l2t_hold() argument
   72      if (atomic_add_return(1, &e->refcnt) == 1)  /* 0 -> 1 transition */  in l2t_hold()
  118  static int addreq(const struct l2t_entry *e, const u32 *addr)  in addreq() argument
  120      if (e->v6)  in addreq()
  121          return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |  in addreq()
  122                 (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);  in addreq()
  123      return e->addr[0] ^ addr[0];  in addreq()
  126  static void neigh_replace(struct l2t_entry *e, struct neighbour *n)  in neigh_replace() argument
  [all …]
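Note: addreq() above uses a branch-free comparison idiom: XOR-ing each word pair and OR-ing the partial results yields 0 exactly when all words match, so one return value covers both the IPv4 and IPv6 cases. A minimal standalone sketch of the idiom; the struct and field names here are illustrative, not the cxgb4 definitions:

    /*
     * Branch-free multi-word equality: the result is 0 iff every
     * word of the stored address matches the candidate address.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct addr_entry {
            int v6;            /* nonzero when the entry holds an IPv6 address */
            uint32_t addr[4];  /* IPv4 uses addr[0] only */
    };

    /* Returns 0 on match, nonzero otherwise (same contract as addreq()). */
    static uint32_t addr_cmp(const struct addr_entry *e, const uint32_t *addr)
    {
            if (e->v6)
                    return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
                           (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
            return e->addr[0] ^ addr[0];
    }

    int main(void)
    {
            struct addr_entry e = { .v6 = 1, .addr = { 1, 2, 3, 4 } };
            uint32_t same[4] = { 1, 2, 3, 4 }, diff[4] = { 1, 2, 3, 5 };

            printf("%u %u\n", addr_cmp(&e, same), addr_cmp(&e, diff)); /* 0, nonzero */
            return 0;
    }

Callers only test the result against zero, so the nonzero case needs no normalization.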
D | smt.c |
   68      struct smt_entry *e, *end;  in find_or_alloc_smte() local
   70      for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {  in find_or_alloc_smte()
   71          if (e->refcnt == 0) {  in find_or_alloc_smte()
   73              first_free = e;  in find_or_alloc_smte()
   75          if (e->state == SMT_STATE_SWITCHING) {  in find_or_alloc_smte()
   79              if (memcmp(e->src_mac, smac, ETH_ALEN) == 0)  in find_or_alloc_smte()
   86      e = first_free;  in find_or_alloc_smte()
   92      e->state = SMT_STATE_UNUSED;  in find_or_alloc_smte()
   95      return e;  in find_or_alloc_smte()
   98  static void t4_smte_free(struct smt_entry *e)  in t4_smte_free() argument
  [all …]
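Note: find_or_alloc_smte() above is a single-pass find-or-allocate scan: it remembers the first free slot while still searching for an entry that already holds the requested MAC. A simplified sketch, assuming a plain refcnt/MAC table and omitting the SMT_STATE_SWITCHING gating and locking of the real driver:

    #include <stddef.h>
    #include <string.h>

    #define MAC_LEN 6

    struct mac_entry {
            int refcnt;
            unsigned char src_mac[MAC_LEN];
    };

    static struct mac_entry *find_or_alloc(struct mac_entry *tab, size_t n,
                                           const unsigned char *smac)
    {
            struct mac_entry *e, *end = tab + n, *first_free = NULL;

            for (e = tab; e != end; ++e) {
                    if (e->refcnt == 0) {
                            if (!first_free)
                                    first_free = e;   /* remember, keep scanning */
                    } else if (memcmp(e->src_mac, smac, MAC_LEN) == 0) {
                            return e;                 /* reuse the existing entry */
                    }
            }
            if (first_free) {
                    memcpy(first_free->src_mac, smac, MAC_LEN);
                    return first_free;
            }
            return NULL;                              /* table full */
    }

Scanning to the end before falling back to the free slot is what lets an existing match win over allocation.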
D | sched.c |
   47      struct sched_class *e;  in t4_sched_class_fw_cmd() local
   50      e = &s->tab[p->u.params.class];  in t4_sched_class_fw_cmd()
   58              p->u.params.channel, e->idx,  in t4_sched_class_fw_cmd()
  125      struct sched_class *e, *end;  in t4_sched_entry_lookup() local
  130      for (e = &s->tab[0]; e != end; ++e) {  in t4_sched_entry_lookup()
  131          if (e->state == SCHED_STATE_UNUSED ||  in t4_sched_entry_lookup()
  132              e->bind_type != type)  in t4_sched_entry_lookup()
  139          list_for_each_entry(qe, &e->entry_list, list) {  in t4_sched_entry_lookup()
  150          list_for_each_entry(fe, &e->entry_list, list) {  in t4_sched_entry_lookup()
  190      struct sched_class *e;  in t4_sched_queue_unbind() local
  [all …]
/drivers/net/ethernet/chelsio/cxgb3/
D | l2t.c |
   63  static inline unsigned int vlan_prio(const struct l2t_entry *e)  in vlan_prio() argument
   65      return e->vlan >> 13;  in vlan_prio()
   74  static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)  in neigh_replace() argument
   77      if (e->neigh)  in neigh_replace()
   78          neigh_release(e->neigh);  in neigh_replace()
   79      e->neigh = n;  in neigh_replace()
   88                                 struct l2t_entry *e)  in setup_l2e_send_pending() argument
  101      OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));  in setup_l2e_send_pending()
  102      req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |  in setup_l2e_send_pending()
  103                          V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |  in setup_l2e_send_pending()
  [all …]
/drivers/media/test-drivers/vidtv/
D | vidtv_s302m.c |
  165  static void vidtv_s302m_access_unit_destroy(struct vidtv_encoder *e)  in vidtv_s302m_access_unit_destroy() argument
  167      struct vidtv_access_unit *head = e->access_units;  in vidtv_s302m_access_unit_destroy()
  176      e->access_units = NULL;  in vidtv_s302m_access_unit_destroy()
  179  static void vidtv_s302m_alloc_au(struct vidtv_encoder *e)  in vidtv_s302m_alloc_au() argument
  184      if (e->sync && e->sync->is_video_encoder) {  in vidtv_s302m_alloc_au()
  185          sync_au = e->sync->access_units;  in vidtv_s302m_alloc_au()
  188              temp = vidtv_s302m_access_unit_init(e->access_units);  in vidtv_s302m_alloc_au()
  189              if (!e->access_units)  in vidtv_s302m_alloc_au()
  190                  e->access_units = temp;  in vidtv_s302m_alloc_au()
  198      e->access_units = vidtv_s302m_access_unit_init(NULL);  in vidtv_s302m_alloc_au()
  [all …]
/drivers/md/
D | dm-cache-policy-smq.c |
   87      struct entry *e;  in __get_entry() local
   89      e = es->begin + block;  in __get_entry()
   90      BUG_ON(e >= es->end);  in __get_entry()
   92      return e;  in __get_entry()
   95  static unsigned int to_index(struct entry_space *es, struct entry *e)  in to_index() argument
   97      BUG_ON(e < es->begin || e >= es->end);  in to_index()
   98      return e - es->begin;  in to_index()
  132  static struct entry *l_next(struct entry_space *es, struct entry *e)  in l_next() argument
  134      return to_entry(es, e->next);  in l_next()
  137  static struct entry *l_prev(struct entry_space *es, struct entry *e)  in l_prev() argument
  [all …]
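Note: __get_entry() and to_index() above convert between an index and a pointer into one contiguous entry array, asserting bounds in both directions. A hedged standalone analog, with plain assert() in place of BUG_ON() and illustrative names:

    #include <assert.h>
    #include <stddef.h>

    struct entry { int payload; };

    struct entry_space { struct entry *begin, *end; };

    /* Index -> pointer: plain pointer addition, bounds-checked. */
    static struct entry *get_entry(struct entry_space *es, size_t block)
    {
            struct entry *e = es->begin + block;

            assert(e < es->end);
            return e;
    }

    /* Pointer -> index: pointer subtraction, bounds-checked both ways. */
    static size_t to_index(struct entry_space *es, struct entry *e)
    {
            assert(e >= es->begin && e < es->end);
            return e - es->begin;
    }

Storing small indices instead of pointers (as the l_next()/l_prev() helpers do) is what makes the list nodes compact enough to live in the metadata.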
D | dm-writecache.c |
  238      struct wc_entry *e;  member
  394  static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)  in memory_entry() argument
  396      return &sb(wc)->entries[e->index];  in memory_entry()
  399  static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)  in memory_data() argument
  401      return (char *)wc->block_start + (e->index << wc->block_size_bits);  in memory_data()
  404  static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)  in cache_sector() argument
  407             ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));  in cache_sector()
  410  static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)  in read_original_sector() argument
  413          return e->original_sector;  in read_original_sector()
  415      return le64_to_cpu(memory_entry(wc, e)->original_sector);  in read_original_sector()
  [all …]
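Note: memory_data() and cache_sector() above rely on power-of-two block sizes, so an entry index becomes a byte or sector offset by shifting; SECTOR_SHIFT is the kernel's 512-byte-sector convention. A small sketch of the arithmetic with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9  /* 512-byte sectors */

    int main(void)
    {
            unsigned block_size_bits = 12;          /* 4 KiB cache blocks */
            uint64_t index = 3, start_sector = 100; /* illustrative values */

            /* index -> byte offset into the mapped cache memory */
            uint64_t byte_off = index << block_size_bits;

            /* index -> sector on the cache device, past the metadata start */
            uint64_t sector = start_sector +
                              (index << (block_size_bits - SECTOR_SHIFT));

            printf("byte offset %llu, cache sector %llu\n",
                   (unsigned long long)byte_off, (unsigned long long)sector);
            return 0;
    }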
/drivers/mtd/ubi/
D | wl.c |
  127                               struct ubi_wl_entry *e, struct rb_root *root);
  129                               struct ubi_wl_entry *e);
  139  static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)  in wl_tree_add() argument
  150          if (e->ec < e1->ec)  in wl_tree_add()
  152          else if (e->ec > e1->ec)  in wl_tree_add()
  155              ubi_assert(e->pnum != e1->pnum);  in wl_tree_add()
  156              if (e->pnum < e1->pnum)  in wl_tree_add()
  163      rb_link_node(&e->u.rb, parent, p);  in wl_tree_add()
  164      rb_insert_color(&e->u.rb, root);  in wl_tree_add()
  175  static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)  in wl_entry_destroy() argument
  [all …]
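Note: wl_tree_add() above keeps the wear-leveling tree totally ordered with a two-level key: erase counter first, physical eraseblock number as the tie-breaker. A sketch of the same ordering on a plain binary search tree (the kernel uses an rb-tree with rebalancing, omitted here; names are illustrative):

    #include <stddef.h>

    struct wl_node {
            int ec;                      /* erase counter (primary key) */
            int pnum;                    /* eraseblock number (tie-breaker) */
            struct wl_node *left, *right;
    };

    static void tree_add(struct wl_node **root, struct wl_node *e)
    {
            while (*root) {
                    struct wl_node *e1 = *root;

                    if (e->ec < e1->ec)
                            root = &e1->left;
                    else if (e->ec > e1->ec)
                            root = &e1->right;
                    else if (e->pnum < e1->pnum)   /* equal ec: order by pnum */
                            root = &e1->left;
                    else
                            root = &e1->right;
            }
            e->left = e->right = NULL;
            *root = e;
    }

The pnum tie-breaker matters because many blocks share an erase count, and the tree must still place every entry deterministically.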
D | fastmap-wl.c |
   29      struct ubi_wl_entry *e, *victim = NULL;  in find_anchor_wl_entry() local
   32      ubi_rb_for_each_entry(p, e, root, u.rb) {  in find_anchor_wl_entry()
   33          if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {  in find_anchor_wl_entry()
   34              victim = e;  in find_anchor_wl_entry()
   35              max_ec = e->ec;  in find_anchor_wl_entry()
   43                             struct ubi_wl_entry *e)  in return_unused_peb() argument
   45      wl_tree_add(e, &ubi->free);  in return_unused_peb()
   58      struct ubi_wl_entry *e;  in return_unused_pool_pebs() local
   61          e = ubi->lookuptbl[pool->pebs[i]];  in return_unused_pool_pebs()
   62          return_unused_peb(ubi, e);  in return_unused_pool_pebs()
  [all …]
/drivers/net/ethernet/mellanox/mlx5/core/esw/
D | indir_table.c |
   97                                  struct mlx5_esw_indir_table_entry *e)  in mlx5_esw_indir_table_rule_get() argument
  109      if (e->recirc_rule) {  in mlx5_esw_indir_table_rule_get()
  110          refcount_inc(&e->recirc_rule->refcnt);  in mlx5_esw_indir_table_rule_get()
  139      flow_act.fg = e->recirc_grp;  in mlx5_esw_indir_table_rule_get()
  146      handle = mlx5_add_flow_rules(e->ft, NULL, &flow_act, &dest, 1);  in mlx5_esw_indir_table_rule_get()
  156      e->recirc_rule = rule;  in mlx5_esw_indir_table_rule_get()
  172                                  struct mlx5_esw_indir_table_entry *e)  in mlx5_esw_indir_table_rule_put() argument
  174      struct mlx5_esw_indir_table_rule *rule = e->recirc_rule;  in mlx5_esw_indir_table_rule_put()
  187      e->recirc_rule = NULL;  in mlx5_esw_indir_table_rule_put()
  190  static int mlx5_create_indir_recirc_group(struct mlx5_esw_indir_table_entry *e)  in mlx5_create_indir_recirc_group() argument
  [all …]
D | vporttbl.c |
   71      struct mlx5_vport_table *e;  in esw_vport_tbl_lookup() local
   73      hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)  in esw_vport_tbl_lookup()
   74          if (!memcmp(&e->key, skey, sizeof(*skey)))  in esw_vport_tbl_lookup()
   75              return e;  in esw_vport_tbl_lookup()
   86      struct mlx5_vport_table *e;  in mlx5_esw_vporttbl_get() local
   93      e = esw_vport_tbl_lookup(esw, &skey, hkey);  in mlx5_esw_vporttbl_get()
   94      if (e) {  in mlx5_esw_vporttbl_get()
   95          e->num_rules++;  in mlx5_esw_vporttbl_get()
   99      e = kzalloc(sizeof(*e), GFP_KERNEL);  in mlx5_esw_vporttbl_get()
  100      if (!e) {  in mlx5_esw_vporttbl_get()
  [all …]
/drivers/edac/
D | edac_mc.c |
   56  static struct mem_ctl_info *error_desc_to_mci(struct edac_raw_error_desc *e)  in error_desc_to_mci() argument
   58      return container_of(e, struct mem_ctl_info, error_desc);  in error_desc_to_mci()
  803  static void edac_inc_ce_error(struct edac_raw_error_desc *e)  in edac_inc_ce_error() argument
  805      int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };  in edac_inc_ce_error()
  806      struct mem_ctl_info *mci = error_desc_to_mci(e);  in edac_inc_ce_error()
  809      mci->ce_mc += e->error_count;  in edac_inc_ce_error()
  812          dimm->ce_count += e->error_count;  in edac_inc_ce_error()
  814          mci->ce_noinfo_count += e->error_count;  in edac_inc_ce_error()
  817  static void edac_inc_ue_error(struct edac_raw_error_desc *e)  in edac_inc_ue_error() argument
  819      int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };  in edac_inc_ue_error()
  [all …]
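Note: error_desc_to_mci() above is the container_of() pattern: a pointer to an embedded member is converted back to its containing struct by subtracting the member's offset. A standalone sketch with illustrative types (the kernel macro also type-checks the member expression):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified container_of(): subtract the member offset from the
     * member pointer to recover the enclosing object. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct error_desc { int count; };

    struct mem_ctl { int idx; struct error_desc error_desc; };

    int main(void)
    {
            struct mem_ctl mci = { .idx = 7 };
            struct error_desc *e = &mci.error_desc;

            /* Recover the outer struct from the embedded member pointer. */
            printf("recovered idx = %d\n",
                   container_of(e, struct mem_ctl, error_desc)->idx);
            return 0;
    }

Embedding the error descriptor in mem_ctl_info is what lets the counting helpers take only the descriptor and still reach the controller.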
/drivers/gpu/drm/amd/amdgpu/
D | amdgpu_sync.c |
  135      struct amdgpu_sync_entry *e;  in amdgpu_sync_add_later() local
  137      hash_for_each_possible(sync->fences, e, node, f->context) {  in amdgpu_sync_add_later()
  138          if (unlikely(e->fence->context != f->context))  in amdgpu_sync_add_later()
  141          amdgpu_sync_keep_later(&e->fence, f);  in amdgpu_sync_add_later()
  157      struct amdgpu_sync_entry *e;  in amdgpu_sync_fence() local
  165      e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);  in amdgpu_sync_fence()
  166      if (!e)  in amdgpu_sync_fence()
  169      hash_add(sync->fences, &e->node, f->context);  in amdgpu_sync_fence()
  170      e->fence = dma_fence_get(f);  in amdgpu_sync_fence()
  275      struct amdgpu_sync_entry *e;  in amdgpu_sync_peek_fence() local
  [all …]
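Note: amdgpu_sync_add_later() above buckets fences by context and only replaces a stored fence when the new one is later, so each context keeps exactly one, newest, fence. A sketch of that merge rule over a flat array; the driver uses a hashtable and dma_fence sequence ordering, and all names here are assumed:

    #include <stdbool.h>
    #include <stddef.h>

    struct fence { unsigned context; unsigned seqno; };

    struct sync_entry { struct fence *fence; };

    /* Returns true if the context was already tracked (entry updated or
     * kept); false means the caller should allocate a new entry. */
    static bool sync_add_later(struct sync_entry *tab, size_t n, struct fence *f)
    {
            for (size_t i = 0; i < n; i++) {
                    if (!tab[i].fence || tab[i].fence->context != f->context)
                            continue;
                    if (f->seqno > tab[i].fence->seqno)
                            tab[i].fence = f;   /* keep the later fence */
                    return true;
            }
            return false;
    }

Collapsing fences per context bounds the set a job must wait on, because fences within one context complete in order.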
/drivers/net/fddi/skfp/
D | ecm.c |
   97      smc->e.path_test = PT_PASSED ;  in ecm_init()
   98      smc->e.trace_prop = 0 ;  in ecm_init()
   99      smc->e.sb_flag = 0 ;  in ecm_init()
  101      smc->e.ecm_line_state = FALSE ;  in ecm_init()
  154          smc->e.DisconnectFlag = FALSE ;  in ecm_fsm()
  157          smc->e.DisconnectFlag = TRUE ;  in ecm_fsm()
  165          smc->e.path_test = PT_PASSED ;  in ecm_fsm()
  166          smc->e.ecm_line_state = FALSE ;  in ecm_fsm()
  173              && smc->e.path_test==PT_PASSED) {  in ecm_fsm()
  178          else if (cmd == EC_CONNECT && (smc->e.path_test==PT_PASSED) &&  in ecm_fsm()
  [all …]
/drivers/net/ethernet/mellanox/mlx5/core/en/
D | tc_tun_encap.c |
   18                                  struct mlx5e_encap_entry *e,  in mlx5e_set_int_port_tunnel() argument
   24      route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex);  in mlx5e_set_int_port_tunnel()
   27          attr->parse_attr->filter_dev == e->out_dev)  in mlx5e_set_int_port_tunnel()
   30      err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex,  in mlx5e_set_int_port_tunnel()
  143                               struct mlx5e_encap_entry *e,  in mlx5e_tc_encap_flows_add() argument
  155      if (e->flags & MLX5_ENCAP_ENTRY_NO_ROUTE)  in mlx5e_tc_encap_flows_add()
  159      reformat_params.type = e->reformat_type;  in mlx5e_tc_encap_flows_add()
  160      reformat_params.size = e->encap_size;  in mlx5e_tc_encap_flows_add()
  161      reformat_params.data = e->encap_header;  in mlx5e_tc_encap_flows_add()
  162      e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,  in mlx5e_tc_encap_flows_add()
  [all …]
D | tc_tun.c |
  189                               struct mlx5e_encap_entry *e)  in mlx5e_gen_ip_tunnel_header() argument
  191      if (!e->tunnel) {  in mlx5e_gen_ip_tunnel_header()
  196      return e->tunnel->generate_ip_tun_hdr(buf, ip_proto, e);  in mlx5e_gen_ip_tunnel_header()
  200                            struct mlx5e_encap_entry *e,  in gen_eth_tnl_hdr() argument
  206      ether_addr_copy(eth->h_dest, e->h_dest);  in gen_eth_tnl_hdr()
  225                                     struct mlx5e_encap_entry *e)  in mlx5e_tc_tun_create_header_ipv4() argument
  228      const struct ip_tunnel_key *tun_key = &e->tun_info->key;  in mlx5e_tc_tun_create_header_ipv4()
  251                  e->tunnel->calc_hlen(e);  in mlx5e_tc_tun_create_header_ipv4()
  268      e->out_dev = attr.out_dev;  in mlx5e_tc_tun_create_header_ipv4()
  269      e->route_dev_ifindex = attr.route_dev->ifindex;  in mlx5e_tc_tun_create_header_ipv4()
  [all …]
/drivers/ssb/
D | driver_chipcommon_sflash.c |
   90      const struct ssb_sflash_tbl_e *e;  in ssb_sflash_init() local
  107      for (e = ssb_sflash_sst_tbl; e->name; e++) {  in ssb_sflash_init()
  108          if (e->id == id2)  in ssb_sflash_init()
  115      for (e = ssb_sflash_st_tbl; e->name; e++) {  in ssb_sflash_init()
  116          if (e->id == id)  in ssb_sflash_init()
  121      if (!e->name) {  in ssb_sflash_init()
  132      for (e = ssb_sflash_at_tbl; e->name; e++) {  in ssb_sflash_init()
  133          if (e->id == id)  in ssb_sflash_init()
  136      if (!e->name) {  in ssb_sflash_init()
  149      sflash->blocksize = e->blocksize;  in ssb_sflash_init()
  [all …]
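Note: the loops above walk sentinel-terminated chip tables: each table ends with an entry whose name is NULL, so e->name serves as both the loop condition and the found/not-found flag after the loop. A runnable sketch with a made-up table:

    #include <stdio.h>

    struct sflash_tbl_e {
            const char *name;
            unsigned id;
            unsigned blocksize;
    };

    static const struct sflash_tbl_e tbl[] = {
            { "chip-a", 0x13, 4096 },
            { "chip-b", 0x14, 65536 },
            { NULL },                  /* sentinel terminates the scan */
    };

    int main(void)
    {
            unsigned id = 0x14;        /* illustrative probe result */
            const struct sflash_tbl_e *e;

            for (e = tbl; e->name; e++)
                    if (e->id == id)
                            break;     /* e->name stays non-NULL on a match */
            if (!e->name)
                    printf("unsupported flash id 0x%x\n", id);
            else
                    printf("%s, block size %u\n", e->name, e->blocksize);
            return 0;
    }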
/drivers/iommu/intel/
D | iommu.h |
  182  #define ecap_rps(e)    (((e) >> 49) & 0x1)  argument
  183  #define ecap_smpwc(e)  (((e) >> 48) & 0x1)  argument
  184  #define ecap_flts(e)   (((e) >> 47) & 0x1)  argument
  185  #define ecap_slts(e)   (((e) >> 46) & 0x1)  argument
  186  #define ecap_slads(e)  (((e) >> 45) & 0x1)  argument
  187  #define ecap_vcs(e)    (((e) >> 44) & 0x1)  argument
  188  #define ecap_smts(e)   (((e) >> 43) & 0x1)  argument
  189  #define ecap_dit(e)    (((e) >> 41) & 0x1)  argument
  190  #define ecap_pds(e)    (((e) >> 42) & 0x1)  argument
  191  #define ecap_pasid(e)  (((e) >> 40) & 0x1)  argument
  [all …]
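Note: each ecap_*() macro above extracts a single capability bit from the 64-bit extended-capability register by shifting to the bit's position and masking with 0x1. A tiny demonstration with a fabricated register value (the generic macro name here is illustrative; the header defines one macro per capability):

    #include <stdint.h>
    #include <stdio.h>

    /* Extract the single bit at position pos from register e. */
    #define ecap_bit(e, pos) (((e) >> (pos)) & 0x1)

    int main(void)
    {
            uint64_t ecap = (1ULL << 43) | (1ULL << 40); /* smts + pasid set */

            printf("smts=%llu pasid=%llu flts=%llu\n",
                   (unsigned long long)ecap_bit(ecap, 43),
                   (unsigned long long)ecap_bit(ecap, 40),
                   (unsigned long long)ecap_bit(ecap, 47)); /* 1 1 0 */
            return 0;
    }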
/drivers/bcma/
D | driver_chipcommon_sflash.c |
   93      const struct bcma_sflash_tbl_e *e;  in bcma_sflash_init() local
  110      for (e = bcma_sflash_sst_tbl; e->name; e++) {  in bcma_sflash_init()
  111          if (e->id == id2)  in bcma_sflash_init()
  118      for (e = bcma_sflash_st_tbl; e->name; e++) {  in bcma_sflash_init()
  119          if (e->id == id)  in bcma_sflash_init()
  124      if (!e->name) {  in bcma_sflash_init()
  134      for (e = bcma_sflash_at_tbl; e->name; e++) {  in bcma_sflash_init()
  135          if (e->id == id)  in bcma_sflash_init()
  138      if (!e->name) {  in bcma_sflash_init()
  149      sflash->blocksize = e->blocksize;  in bcma_sflash_init()
  [all …]
/drivers/gpu/drm/
D | drm_file.c |
  208      struct drm_pending_event *e, *et;  in drm_events_release() local
  214      list_for_each_entry_safe(e, et, &file_priv->pending_event_list,  in drm_events_release()
  216          list_del(&e->pending_link);  in drm_events_release()
  217          e->file_priv = NULL;  in drm_events_release()
  221      list_for_each_entry_safe(e, et, &file_priv->event_list, link) {  in drm_events_release()
  222          list_del(&e->link);  in drm_events_release()
  223          kfree(e);  in drm_events_release()
  575      struct drm_pending_event *e = NULL;  in drm_read() local
  579          e = list_first_entry(&file_priv->event_list,  in drm_read()
  581          file_priv->event_space += e->event->length;  in drm_read()
  [all …]
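Note: drm_events_release() above uses the _safe list iterator because it frees entries mid-walk: the successor is cached before the current node is released. A minimal sketch of the same discipline on a hand-rolled singly linked list, assuming heap-allocated nodes:

    #include <stdlib.h>

    struct pending_event {
            struct pending_event *next;
    };

    static void release_all(struct pending_event **head)
    {
            struct pending_event *e = *head, *et;

            while (e) {
                    et = e->next;   /* cache the successor before freeing */
                    free(e);
                    e = et;         /* never touch the freed node again */
            }
            *head = NULL;
    }

Without the cached successor, the loop would read e->next out of memory it has just freed.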
/drivers/net/wireless/mediatek/mt7601u/
D | dma.c |
   12                    struct mt7601u_dma_buf_rx *e, gfp_t gfp);
  134  mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)  in mt7601u_rx_process_entry() argument
  136      u32 seg_len, data_len = e->urb->actual_length;  in mt7601u_rx_process_entry()
  137      u8 *data = page_address(e->p);  in mt7601u_rx_process_entry()
  151                               new_p ? e->p : NULL, &list);  in mt7601u_rx_process_entry()
  165          put_page(e->p);  in mt7601u_rx_process_entry()
  166          e->p = new_p;  in mt7601u_rx_process_entry()
  182      buf = &q->e[q->start];  in mt7601u_rx_get_pending_entry()
  215      if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))  in mt7601u_complete_rx()
  228      struct mt7601u_dma_buf_rx *e;  in mt7601u_rx_tasklet() local
  [all …]
/drivers/net/ieee802154/
D | mac802154_hwsim.c |
  114      struct hwsim_edge *e;  in hwsim_hw_xmit() local
  120      list_for_each_entry_rcu(e, &current_phy->edges, list) {  in hwsim_hw_xmit()
  126          if (e->endpoint->suspended)  in hwsim_hw_xmit()
  129          endpoint_pib = rcu_dereference(e->endpoint->pib);  in hwsim_hw_xmit()
  134          einfo = rcu_dereference(e->info);  in hwsim_hw_xmit()
  136              ieee802154_rx_irqsafe(e->endpoint->hw, newskb,  in hwsim_hw_xmit()
  209      struct hwsim_edge *e;  in append_radio_msg() local
  229      list_for_each_entry_rcu(e, &phy->edges, list) {  in append_radio_msg()
  239                      e->endpoint->idx);  in append_radio_msg()
  247          einfo = rcu_dereference(e->info);  in append_radio_msg()
  [all …]
/drivers/firewire/
D | core-cdev.c |
  378      struct bus_reset_event *e;  in queue_bus_reset_event() local
  380      e = kzalloc(sizeof(*e), GFP_KERNEL);  in queue_bus_reset_event()
  381      if (e == NULL)  in queue_bus_reset_event()
  384      fill_bus_reset_event(&e->reset, client);  in queue_bus_reset_event()
  386      queue_event(client, &e->event,  in queue_bus_reset_event()
  387                  &e->reset, sizeof(e->reset), NULL, 0);  in queue_bus_reset_event()
  539      struct outbound_transaction_event *e = data;  in complete_transaction() local
  540      struct fw_cdev_event_response *rsp = &e->response;  in complete_transaction()
  541      struct client *client = e->client;  in complete_transaction()
  550      idr_remove(&client->resource_idr, e->r.resource.handle);  in complete_transaction()
  [all …]
/drivers/virt/coco/efi_secret/
D | efi_secret.c |
   85  static size_t secret_entry_data_len(struct secret_entry *e)  in secret_entry_data_len() argument
   87      return e->len - sizeof(*e);  in secret_entry_data_len()
   99      struct secret_entry *e = file->private;  in efi_secret_bin_file_show() local
  101      if (e)  in efi_secret_bin_file_show()
  102          seq_write(file, e->data, secret_entry_data_len(e));  in efi_secret_bin_file_show()
  124      struct secret_entry *e = (struct secret_entry *)inode->i_private;  in efi_secret_unlink() local
  127      if (e) {  in efi_secret_unlink()
  129          wipe_memory(e->data, secret_entry_data_len(e));  in efi_secret_unlink()
  130          e->guid = NULL_GUID;  in efi_secret_unlink()
  219      struct secret_entry *e;  in efi_secret_securityfs_setup() local
  [all …]
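Note: secret_entry_data_len() above works because the on-media len field counts the whole entry, header included, so the payload length is len minus the header size. A sketch with an assumed field layout (the real struct uses efi_guid_t and this exact layout is not guaranteed):

    #include <stddef.h>
    #include <stdio.h>

    struct secret_entry {
            unsigned char guid[16];  /* entry identifier */
            unsigned int len;        /* total entry size, header included */
            unsigned char data[];    /* payload follows the header */
    };

    static size_t secret_entry_data_len(const struct secret_entry *e)
    {
            return e->len - sizeof(*e);
    }

    int main(void)
    {
            struct secret_entry e = { .len = sizeof(struct secret_entry) + 32 };

            printf("payload bytes: %zu\n", secret_entry_data_len(&e)); /* 32 */
            return 0;
    }

The same length is reused on unlink to wipe exactly the payload bytes before the entry is discarded.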
/drivers/net/ethernet/marvell/prestera/
D | prestera_acl.c |
  579                                       struct prestera_acl_rule_entry *e)  in __prestera_acl_rule_entry2hw_del() argument
  581      return prestera_hw_vtcam_rule_del(sw, e->vtcam_id, e->hw_id);  in __prestera_acl_rule_entry2hw_del()
  585                                       struct prestera_acl_rule_entry *e)  in __prestera_acl_rule_entry2hw_add() argument
  594      if (e->accept.valid) {  in __prestera_acl_rule_entry2hw_add()
  599      if (e->drop.valid) {  in __prestera_acl_rule_entry2hw_add()
  604      if (e->trap.valid) {  in __prestera_acl_rule_entry2hw_add()
  609      if (e->police.valid) {  in __prestera_acl_rule_entry2hw_add()
  611          act_hw[act_num].police = e->police.i;  in __prestera_acl_rule_entry2hw_add()
  615      if (e->jump.valid) {  in __prestera_acl_rule_entry2hw_add()
  617          act_hw[act_num].jump = e->jump.i;  in __prestera_acl_rule_entry2hw_add()
  [all …]