/drivers/scsi/csiostor/

csio_rnode.c
    89  csio_is_rnode_ready(struct csio_rnode *rn)  in csio_is_rnode_ready() argument
    91  return csio_match_state(rn, csio_rns_ready);  in csio_is_rnode_ready()
    95  csio_is_rnode_uninit(struct csio_rnode *rn)  in csio_is_rnode_uninit() argument
    97  return csio_match_state(rn, csio_rns_uninit);  in csio_is_rnode_uninit()
   125  struct csio_rnode *rn;  in csio_rn_lookup() local
   128  rn = (struct csio_rnode *) tmp;  in csio_rn_lookup()
   129  if (rn->flowid == flowid)  in csio_rn_lookup()
   130  return rn;  in csio_rn_lookup()
   149  struct csio_rnode *rn;  in csio_rn_lookup_wwpn() local
   152  rn = (struct csio_rnode *) tmp;  in csio_rn_lookup_wwpn()
  [all …]

csio_rnode.h
   120  #define csio_rn_flowid(rn) ((rn)->flowid)  argument
   121  #define csio_rn_wwpn(rn) ((rn)->rn_sparm.wwpn)  argument
   122  #define csio_rn_wwnn(rn) ((rn)->rn_sparm.wwnn)  argument
   123  #define csio_rnode_to_lnode(rn) ((rn)->lnp)  argument
   125  int csio_is_rnode_ready(struct csio_rnode *rn);
   126  void csio_rnode_state_to_str(struct csio_rnode *rn, int8_t *str);
   132  void csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt);
   134  void csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn);

csio_attr.c
    59  csio_reg_rnode(struct csio_rnode *rn)  in csio_reg_rnode() argument
    61  struct csio_lnode *ln = csio_rnode_to_lnode(rn);  in csio_reg_rnode()
    67  ids.node_name = wwn_to_u64(csio_rn_wwnn(rn));  in csio_reg_rnode()
    68  ids.port_name = wwn_to_u64(csio_rn_wwpn(rn));  in csio_reg_rnode()
    69  ids.port_id = rn->nport_id;  in csio_reg_rnode()
    72  if (rn->role & CSIO_RNFR_INITIATOR || rn->role & CSIO_RNFR_TARGET) {  in csio_reg_rnode()
    73  rport = rn->rport;  in csio_reg_rnode()
    78  rn->rport = fc_remote_port_add(shost, 0, &ids);  in csio_reg_rnode()
    79  if (!rn->rport) {  in csio_reg_rnode()
    81  rn->nport_id);  in csio_reg_rnode()
  [all …]

csio_scsi.c
   204  struct csio_rnode *rn = req->rnode;  in csio_scsi_init_cmd_wr() local
   211  wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |  in csio_scsi_init_cmd_wr()
   363  struct csio_rnode *rn = req->rnode;  in csio_scsi_init_read_wr() local
   372  wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |  in csio_scsi_init_read_wr()
   416  struct csio_rnode *rn = req->rnode;  in csio_scsi_init_write_wr() local
   425  wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |  in csio_scsi_init_write_wr()
   650  struct csio_rnode *rn = req->rnode;  in csio_scsi_init_abrt_cls_wr() local
   654  wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |  in csio_scsi_init_abrt_cls_wr()
   777  struct csio_rnode *rn;  in csio_scsis_io_active() local
   800  rn = req->rnode;  in csio_scsis_io_active()
  [all …]

csio_lnode.c
   939  struct csio_rnode *rn;  in csio_post_event_rns() local
   942  rn = (struct csio_rnode *) tmp;  in csio_post_event_rns()
   943  csio_post_event(&rn->sm, evt);  in csio_post_event_rns()
   961  struct csio_rnode *rn;  in csio_cleanup_rns() local
   964  rn = (struct csio_rnode *) tmp;  in csio_cleanup_rns()
   965  csio_put_rnode(ln, rn);  in csio_cleanup_rns()
  1486  struct csio_rnode *rn;  in csio_fcoe_fwevt_handler() local
  1562  rn = csio_confirm_rnode(ln, rdev_flowid,  in csio_fcoe_fwevt_handler()
  1564  if (!rn) {  in csio_fcoe_fwevt_handler()
  1588  csio_rnode_fwevt_handler(rn, rdev_wr->event_cause);  in csio_fcoe_fwevt_handler()
  [all …]

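The csio_rn_lookup() fragments above walk a local node's list of remote nodes and return the entry whose firmware flow id matches. The following stand-alone sketch only illustrates that lookup pattern; the trimmed-down struct rnode and the rn_lookup() helper are invented for the example and are not the driver's real types.

#include <stdio.h>
#include <stdint.h>

/*
 * Simplified model of the lookup pattern in csio_rn_lookup(): walk the
 * list of remote nodes and return the one whose flow id matches, or NULL.
 * The structure is trimmed to the fields the fragments above touch.
 */
struct rnode {
	uint32_t flowid;
	struct rnode *next;
};

static struct rnode *rn_lookup(struct rnode *head, uint32_t flowid)
{
	for (struct rnode *rn = head; rn; rn = rn->next)
		if (rn->flowid == flowid)
			return rn;
	return NULL;
}

int main(void)
{
	struct rnode c = { .flowid = 0x30, .next = NULL };
	struct rnode b = { .flowid = 0x20, .next = &c };
	struct rnode a = { .flowid = 0x10, .next = &b };

	printf("0x20 -> %s\n", rn_lookup(&a, 0x20) ? "found" : "not found");
	printf("0x99 -> %s\n", rn_lookup(&a, 0x99) ? "found" : "not found");
	return 0;
}
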
/drivers/s390/char/

sclp_cmd.c
   173  u16 rn;  member
   179  u16 rn;  member
   189  static unsigned long long rn2addr(u16 rn)  in rn2addr() argument
   191  return (unsigned long long) (rn - 1) * sclp.rzm;  in rn2addr()
   194  static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)  in do_assign_storage() argument
   203  sccb->rn = rn;  in do_assign_storage()
   213  cmd, sccb->header.response_code, rn);  in do_assign_storage()
   222  static int sclp_assign_storage(u16 rn)  in sclp_assign_storage() argument
   227  rc = do_assign_storage(0x000d0001, rn);  in sclp_assign_storage()
   230  start = rn2addr(rn);  in sclp_assign_storage()
  [all …]

sclp_early_core.c
   320  unsigned long rn, rzm;  in sclp_early_read_storage_info() local
   347  rn = sccb->entries[sn] >> 16;  in sclp_early_read_storage_info()
   348  add_mem_detect_block((rn - 1) * rzm, rn * rzm);  in sclp_early_read_storage_info()

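Both SCLP files above treat memory as numbered storage increments: rn2addr() maps a 1-based region number rn to a physical address as (rn - 1) * sclp.rzm, and sclp_early_read_storage_info() derives a block's range the same way. The user-space sketch below only models that arithmetic; the 256 MiB increment size and the addr2rn() helper are assumptions for the example, the real increment size comes from the SCLP read-info data.

#include <stdio.h>
#include <stdint.h>

/*
 * Simplified model of the SCLP storage-increment arithmetic seen above:
 * region numbers (rn) are 1-based and each increment spans rzm bytes,
 * so increment N covers [(N - 1) * rzm, N * rzm).
 */
static const unsigned long long rzm = 256ULL << 20;   /* assumed 256 MiB increment */

static unsigned long long rn2addr(uint16_t rn)
{
	return (unsigned long long)(rn - 1) * rzm;
}

static uint16_t addr2rn(unsigned long long addr)
{
	return (uint16_t)(addr / rzm + 1);
}

int main(void)
{
	uint16_t rn = 3;

	printf("increment %u covers [%#llx, %#llx)\n",
	       (unsigned)rn, rn2addr(rn), rn2addr(rn) + rzm);
	printf("address 0x30000000 belongs to increment %u\n",
	       (unsigned)addr2rn(0x30000000ULL));
	return 0;
}
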
/drivers/w1/

w1.c
   401  struct w1_reg_num *rn)  in w1_atoreg_num() argument
   425  rn->family = family;  in w1_atoreg_num()
   426  rn->id = id;  in w1_atoreg_num()
   428  rn64_le = cpu_to_le64(*(u64 *)rn);  in w1_atoreg_num()
   429  rn->crc = w1_calc_crc8((u8 *)&rn64_le, 7);  in w1_atoreg_num()
   433  rn->family, (unsigned long long)rn->id, rn->crc);  in w1_atoreg_num()
   443  struct w1_reg_num *rn)  in w1_slave_search_device() argument
   448  if (sl->reg_num.family == rn->family &&  in w1_slave_search_device()
   449  sl->reg_num.id == rn->id &&  in w1_slave_search_device()
   450  sl->reg_num.crc == rn->crc) {  in w1_slave_search_device()
  [all …]

w1_internal.h
    44  void w1_slave_found(struct w1_master *dev, u64 rn);
    48  struct w1_reg_num *rn);
    57  int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn);

w1_netlink.c
   234  static void w1_send_slave(struct w1_master *dev, u64 rn)  in w1_send_slave() argument
   250  *data = rn;  in w1_send_slave()
   256  static void w1_found_send_slave(struct w1_master *dev, u64 rn)  in w1_found_send_slave() argument
   259  w1_slave_found(dev, rn);  in w1_found_send_slave()
   261  w1_send_slave(dev, rn);  in w1_found_send_slave()
   273  u64 rn;  in w1_get_slaves() local
   276  memcpy(&rn, &sl->reg_num, sizeof(rn));  in w1_get_slaves()
   277  w1_send_slave(dev, rn);  in w1_get_slaves()

w1_io.c
   396  u64 rn = le64_to_cpu(*((u64*)&sl->reg_num));  in w1_reset_select_slave() local
   398  memcpy(&match[1], &rn, 8);  in w1_reset_select_slave()

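The w1 fragments show that a slave's 64-bit registration number packs an 8-bit family code, a 48-bit serial id and an 8-bit CRC, and that w1_atoreg_num() recomputes the CRC over the first seven little-endian bytes with w1_calc_crc8(). Below is a rough user-space sketch of that layout using the standard Dallas/Maxim 1-Wire CRC-8 (polynomial x^8 + x^5 + x^4 + 1, LSB first); the w1_crc8() helper, the family code and the serial number are example values, not kernel code.

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative model of a 1-Wire registration number: byte 0 = family
 * code, bytes 1..6 = 48-bit serial, byte 7 = CRC-8 over bytes 0..6.
 * The kernel's w1_calc_crc8() computes the same CRC via a lookup table.
 */
static uint8_t w1_crc8(const uint8_t *buf, int len)
{
	uint8_t crc = 0;

	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x8C : crc >> 1;
	}
	return crc;
}

int main(void)
{
	/* assumed example: family 0x29, made-up 48-bit serial id */
	uint8_t rom[8];
	uint64_t id = 0x0000112233445566ULL;

	rom[0] = 0x29;
	for (int i = 0; i < 6; i++)
		rom[i + 1] = (id >> (8 * i)) & 0xff;   /* little-endian id bytes */
	rom[7] = w1_crc8(rom, 7);                      /* CRC over first 7 bytes */

	printf("family=%02x id=%012llx crc=%02x\n",
	       rom[0], (unsigned long long)id, rom[7]);
	return 0;
}

The same packed 64-bit value is what w1_reset_select_slave() and w1_f29_disable_test_mode() copy into their command buffers starting at offset 1, as the w1_io.c and w1_ds2408.c matches show.
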
/drivers/infiniband/hw/hfi1/

ipoib_main.c
   198  struct rdma_netdev *rn = netdev_priv(netdev);  in hfi1_ipoib_setup_rn() local
   202  rn->send = hfi1_ipoib_send;  in hfi1_ipoib_setup_rn()
   203  rn->tx_timeout = hfi1_ipoib_tx_timeout;  in hfi1_ipoib_setup_rn()
   204  rn->attach_mcast = hfi1_ipoib_mcast_attach;  in hfi1_ipoib_setup_rn()
   205  rn->detach_mcast = hfi1_ipoib_mcast_detach;  in hfi1_ipoib_setup_rn()
   206  rn->set_id = hfi1_ipoib_set_id;  in hfi1_ipoib_setup_rn()
   207  rn->hca = device;  in hfi1_ipoib_setup_rn()
   208  rn->port_num = port_num;  in hfi1_ipoib_setup_rn()
   209  rn->mtu = netdev->mtu;  in hfi1_ipoib_setup_rn()

vnic_main.c
   563  struct rdma_netdev *rn;  in hfi1_vnic_alloc_rn() local
   582  rn = netdev_priv(netdev);  in hfi1_vnic_alloc_rn()
   588  rn->free_rdma_netdev = hfi1_vnic_free_rn;  in hfi1_vnic_alloc_rn()
   589  rn->set_id = hfi1_vnic_set_vesw_id;  in hfi1_vnic_alloc_rn()

ipoib.h
   118  struct rdma_netdev rn; /* keep this first */  member

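The hfi1 entries, like the mlx5 and opa_vnic ones further down, fill in a struct rdma_netdev obtained from netdev_priv() and keep that struct as the first member of the driver's private data ("keep this first"). Because the embedded struct sits at offset zero, the same private area can be viewed either as the generic callback table or as the driver-specific container. The generic sketch below only illustrates that first-member pattern; the structure and callback names are invented for the example, not the kernel's.

#include <stdio.h>
#include <stddef.h>

/*
 * Generic model of the "keep this first" pattern: a pointer to the
 * container and a pointer to its first member have the same address,
 * so the private area handed out by netdev_priv() can serve both roles.
 */
struct rdma_netdev_like {
	void (*send)(const char *msg);
	unsigned int port_num;
};

struct driver_priv {
	struct rdma_netdev_like rn;   /* keep this first */
	int driver_only_state;
};

static void example_send(const char *msg)
{
	printf("send: %s\n", msg);
}

int main(void)
{
	struct driver_priv priv = { .driver_only_state = 42 };
	struct rdma_netdev_like *rn = &priv.rn;   /* what netdev_priv() would hand back */

	rn->send = example_send;
	rn->port_num = 1;

	/* Recover the container from the embedded member. */
	struct driver_priv *back =
		(struct driver_priv *)((char *)rn - offsetof(struct driver_priv, rn));

	rn->send("hello");
	printf("port %u, driver state %d\n", rn->port_num, back->driver_only_state);
	return 0;
}

In the kernel the back-cast is normally written with container_of(); the offsetof() arithmetic above is its open-coded equivalent.
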
/drivers/infiniband/ulp/opa_vnic/

opa_vnic_netdev.c
   138  struct rdma_netdev *rn = netdev_priv(adapter->netdev);  in opa_vnic_process_vema_config() local
   158  rn->set_id(netdev, info->vesw.vesw_id);  in opa_vnic_process_vema_config()
   328  struct rdma_netdev *rn;  in opa_vnic_add_netdev() local
   340  rn = netdev_priv(netdev);  in opa_vnic_add_netdev()
   347  rn->clnt_priv = adapter;  in opa_vnic_add_netdev()
   348  rn->hca = ibdev;  in opa_vnic_add_netdev()
   349  rn->port_num = port_num;  in opa_vnic_add_netdev()
   383  rn->free_rdma_netdev(netdev);  in opa_vnic_add_netdev()
   392  struct rdma_netdev *rn = netdev_priv(netdev);  in opa_vnic_rem_netdev() local
   400  rn->free_rdma_netdev(netdev);  in opa_vnic_rem_netdev()

/drivers/md/persistent-data/

dm-btree.c
   650  struct btree_node *ln, *rn, *pn;  in split_one_into_two() local
   660  rn = dm_block_data(right);  in split_one_into_two()
   662  rn->header.flags = ln->header.flags;  in split_one_into_two()
   663  rn->header.nr_entries = cpu_to_le32(0);  in split_one_into_two()
   664  rn->header.max_entries = ln->header.max_entries;  in split_one_into_two()
   665  rn->header.value_size = ln->header.value_size;  in split_one_into_two()
   666  redistribute2(ln, rn);  in split_one_into_two()
   675  le64_to_cpu(rn->keys[0]), &location);  in split_one_into_two()
   682  if (key < le64_to_cpu(rn->keys[0])) {  in split_one_into_two()
   734  struct btree_node *ln, *rn, *mn, *pn;  in split_two_into_three() local
  [all …]

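In split_one_into_two() above, the fresh right node inherits the left node's header geometry, starts with zero entries, receives part of the entries via redistribute2(), and its first key becomes the separator handed to the parent (keys below it stay left, keys at or above it go right). The sketch below reproduces only that redistribution step on plain in-memory arrays; the node layout and function are simplified stand-ins, and the block manager and on-disk little-endian encoding are ignored.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MAX_ENTRIES 8

struct node {
	unsigned nr_entries;
	unsigned max_entries;
	uint64_t keys[MAX_ENTRIES];
	uint64_t values[MAX_ENTRIES];
};

/* Move the upper half of ln into rn and return the parent's separator key. */
static uint64_t split_one_into_two(struct node *ln, struct node *rn)
{
	unsigned keep = ln->nr_entries / 2;
	unsigned move = ln->nr_entries - keep;

	rn->max_entries = ln->max_entries;   /* right node inherits the geometry */
	rn->nr_entries = 0;

	memcpy(rn->keys, ln->keys + keep, move * sizeof(uint64_t));
	memcpy(rn->values, ln->values + keep, move * sizeof(uint64_t));
	rn->nr_entries = move;
	ln->nr_entries = keep;

	return rn->keys[0];                  /* separator key for the parent */
}

int main(void)
{
	struct node ln = { .nr_entries = 6, .max_entries = MAX_ENTRIES,
			   .keys = { 1, 3, 5, 7, 9, 11 },
			   .values = { 10, 30, 50, 70, 90, 110 } };
	struct node rn = { 0 };

	uint64_t sep = split_one_into_two(&ln, &rn);

	printf("left has %u entries, right has %u, parent key %llu\n",
	       ln.nr_entries, rn.nr_entries, (unsigned long long)sep);
	return 0;
}
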
/drivers/gpu/drm/i915/gt/

intel_gt_requests.c
    19  struct i915_request *rq, *rn;  in retire_requests() local
    21  list_for_each_entry_safe(rq, rn, &tl->requests, link)  in retire_requests()
   243  struct i915_request *rq, *rn;  in intel_gt_watchdog_work() local
   250  llist_for_each_entry_safe(rq, rn, first, watchdog.link) {  in intel_gt_watchdog_work()

intel_timeline.c
   421  struct i915_request *rq, *rn;  in intel_gt_show_timelines() local
   438  list_for_each_entry_safe(rq, rn, &tl->requests, link) {  in intel_gt_show_timelines()
   463  list_for_each_entry_safe(rq, rn, &tl->requests, link)  in intel_gt_show_timelines()

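In the i915 fragments, rq is the iteration cursor and rn is the look-ahead required by list_for_each_entry_safe() and llist_for_each_entry_safe(): the next element is latched before the loop body runs, so the current request can be retired and freed without breaking the walk. The stand-alone sketch below shows that safe-iteration idiom on a plain singly-linked list; the for_each_request_safe() macro and struct request here are invented stand-ins for the kernel's list_head machinery.

#include <stdio.h>
#include <stdlib.h>

struct request {
	int seqno;
	struct request *next;
};

/* Latch the next pointer before the body runs, like list_for_each_entry_safe(). */
#define for_each_request_safe(pos, n, head) \
	for ((pos) = (head); (pos) && (((n) = (pos)->next), 1); (pos) = (n))

int main(void)
{
	struct request *head = NULL;

	/* Build 5 fake requests. */
	for (int i = 0; i < 5; i++) {
		struct request *rq = malloc(sizeof(*rq));
		rq->seqno = i;
		rq->next = head;
		head = rq;
	}

	struct request *rq, *rn;

	/* "Retire" (free) every request while iterating; safe because rn
	 * was saved before rq is freed. */
	for_each_request_safe(rq, rn, head) {
		printf("retiring request %d\n", rq->seqno);
		free(rq);
	}
	head = NULL;
	return 0;
}

Without the look-ahead, the loop would have to read rq->next after rq had already been freed, which is exactly what the _safe variants exist to avoid.
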
/drivers/net/ethernet/mellanox/mlx5/core/ipoib/

ipoib.c
   697  struct rdma_netdev *rn;  in mlx5_rdma_setup_rn() local
   732  rn = &ipriv->rn;  in mlx5_rdma_setup_rn()
   733  rn->hca = ibdev;  in mlx5_rdma_setup_rn()
   734  rn->send = mlx5i_xmit;  in mlx5_rdma_setup_rn()
   735  rn->attach_mcast = mlx5i_attach_mcast;  in mlx5_rdma_setup_rn()
   736  rn->detach_mcast = mlx5i_detach_mcast;  in mlx5_rdma_setup_rn()
   737  rn->set_id = mlx5i_set_pkey_index;  in mlx5_rdma_setup_rn()

ipoib.h
    54  struct rdma_netdev rn; /* keep this first */  member

/drivers/gpu/drm/msm/

msm_gpu.h
   335  unsigned rn, sp;  in msm_gpu_convert_priority() local
   337  rn = div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp);  in msm_gpu_convert_priority()
   344  if (rn >= gpu->nr_rings)  in msm_gpu_convert_priority()
   347  *ring_nr = rn;  in msm_gpu_convert_priority()

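msm_gpu_convert_priority() above unflattens a single userspace priority into a ring number and an in-ring scheduler priority with div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp), then rejects ring indices at or beyond gpu->nr_rings. A small sketch of that mapping follows; the NR_SCHED_PRIORITIES value, the ring count and the convert_priority() helper are assumptions for the example, not the driver's real values.

#include <stdio.h>

#define NR_SCHED_PRIORITIES 3   /* assumed priorities per ring */

/* Split a flattened priority into (ring, in-ring priority); -1 if out of range. */
static int convert_priority(unsigned nr_rings, unsigned prio,
			    unsigned *ring_nr, unsigned *sched_prio)
{
	unsigned rn = prio / NR_SCHED_PRIORITIES;   /* which ring */
	unsigned sp = prio % NR_SCHED_PRIORITIES;   /* priority inside that ring */

	if (rn >= nr_rings)
		return -1;

	*ring_nr = rn;
	*sched_prio = sp;
	return 0;
}

int main(void)
{
	unsigned ring, sp;

	for (unsigned prio = 0; prio < 8; prio++) {
		if (convert_priority(2, prio, &ring, &sp) == 0)
			printf("prio %u -> ring %u, sched prio %u\n", prio, ring, sp);
		else
			printf("prio %u -> rejected\n", prio);
	}
	return 0;
}
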
/drivers/infiniband/ulp/ipoib/

ipoib_main.c
   952  struct rdma_netdev *rn = netdev_priv(dev);  in neigh_add_path() local
  1007  path->ah->last_send = rn->send(dev, skb, path->ah->ah,  in neigh_add_path()
  1045  struct rdma_netdev *rn = netdev_priv(dev);  in unicast_arp_send() local
  1085  path->ah->last_send = rn->send(dev, skb, path->ah->ah,  in unicast_arp_send()
  1099  struct rdma_netdev *rn = netdev_priv(dev);  in ipoib_start_xmit() local
  1164  neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,  in ipoib_start_xmit()
  1190  struct rdma_netdev *rn = netdev_priv(dev);  in ipoib_timeout() local
  1192  if (rn->tx_timeout) {  in ipoib_timeout()
  1193  rn->tx_timeout(dev, txqueue);  in ipoib_timeout()
  1922  struct rdma_netdev *rn = netdev_priv(ndev);  in ipoib_ndo_init() local
  [all …]

ipoib_multicast.c
   215  struct rdma_netdev *rn = netdev_priv(dev);  in ipoib_mcast_join_finish() local
   248  rn->mtu = priv->mcast_mtu;  in ipoib_mcast_join_finish()
   264  ret = rn->attach_mcast(dev, priv->ca, &mcast->mcmember.mgid,  in ipoib_mcast_join_finish()
   681  struct rdma_netdev *rn = netdev_priv(dev);  in ipoib_mcast_leave() local
   695  ret = rn->detach_mcast(dev, priv->ca, &mcast->mcmember.mgid,  in ipoib_mcast_leave()
   746  struct rdma_netdev *rn = netdev_priv(dev);  in ipoib_mcast_send() local
   813  mcast->ah->last_send = rn->send(dev, skb, mcast->ah->ah,  in ipoib_mcast_send()

ipoib_vlan.c
   100  struct rdma_netdev *rn = netdev_priv(ndev);  in __ipoib_vlan_add() local
   121  rn->mtu = priv->mcast_mtu;  in __ipoib_vlan_add()

/drivers/w1/slaves/

w1_ds2408.c
   295  u64 rn = le64_to_cpu(*((u64*)&sl->reg_num));  in w1_f29_disable_test_mode() local
   297  memcpy(&magic[1], &rn, 8);  in w1_f29_disable_test_mode()