/drivers/infiniband/hw/mthca/ |
D | mthca_mad.c |
    114  if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||  in smp_snoop()
    115      mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&  in smp_snoop()
    116      mad->mad_hdr.method == IB_MGMT_METHOD_SET) {  in smp_snoop()
    117  if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {  in smp_snoop()
    141  if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {  in smp_snoop()
    153  if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||  in node_desc_override()
    154      mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&  in node_desc_override()
    155      mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&  in node_desc_override()
    156      mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {  in node_desc_override()
    168  int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;  in forward_trap()
    [all …]
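The mthca hits above (and the near-identical mlx4 ones further down) implement one filter: only LID-routed or directed-route subnet-management Sets are snooped, and the attribute ID then selects which cached state to refresh. A minimal sketch of that check, using the constants from <rdma/ib_mad.h> and <rdma/ib_smi.h>; the helper name is hypothetical:

    #include <rdma/ib_mad.h>
    #include <rdma/ib_smi.h>

    /* Hypothetical helper: true when a snooped MAD is a subnet-management
     * Set that may have changed cached port state (cf. smp_snoop()). */
    static bool smp_set_needs_snoop(const struct ib_mad *mad)
    {
            if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED &&
                mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                    return false;
            if (mad->mad_hdr.method != IB_MGMT_METHOD_SET)
                    return false;

            /* attr_id is stored big-endian; the IB_SMP_ATTR_* macros are
             * defined as big-endian constants, so they compare directly. */
            return mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO ||
                   mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE;
    }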
|
/drivers/infiniband/hw/mlx4/ |
D | cm.c |
    86   if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {  in set_local_comm_id()
    90   } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {  in set_local_comm_id()
    101  if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {  in get_local_comm_id()
    105  } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {  in get_local_comm_id()
    116  if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {  in set_remote_comm_id()
    120  } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {  in set_remote_comm_id()
    131  if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {  in get_remote_comm_id()
    135  } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {  in get_remote_comm_id()
    308  if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||  in mlx4_ib_multiplex_cm_handler()
    309      mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||  in mlx4_ib_multiplex_cm_handler()
    [all …]
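Each of these helpers reads or writes the CM communication ID at a fixed offset right after the MAD header, picking the SIDR REQ or SIDR REP layout by attribute ID. A sketch of the read side; the struct is a stand-in for the driver's SIDR message layouts, and the attribute IDs are reproduced locally for self-containment (in-tree they come from the CM message definitions):

    #include <rdma/ib_mad.h>

    #define CM_SIDR_REQ_ATTR_ID     cpu_to_be16(0x0017)
    #define CM_SIDR_REP_ATTR_ID     cpu_to_be16(0x0018)

    /* In both SIDR REQ and SIDR REP, a 32-bit request ID immediately
     * follows the MAD header, which lets one accessor serve both. */
    struct sidr_generic_msg {
            struct ib_mad_hdr hdr;
            __be32 request_id;
    };

    static u32 sidr_comm_id(const struct ib_mad *mad)
    {
            const struct sidr_generic_msg *msg = (const void *)mad;

            if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID ||
                mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID)
                    return be32_to_cpu(msg->request_id);
            return 0;
    }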
|
D | mcg.c |
    265  mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);  in send_join_to_wire()
    266  group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */  in send_join_to_wire()
    286  mad.mad_hdr.base_version = 1;  in send_leave_to_wire()
    287  mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;  in send_leave_to_wire()
    288  mad.mad_hdr.class_version = 2;  in send_leave_to_wire()
    289  mad.mad_hdr.method = IB_SA_METHOD_DELETE;  in send_leave_to_wire()
    290  mad.mad_hdr.status = cpu_to_be16(0);  in send_leave_to_wire()
    291  mad.mad_hdr.class_specific = cpu_to_be16(0);  in send_leave_to_wire()
    292  mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);  in send_leave_to_wire()
    293  group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */  in send_leave_to_wire()
    [all …]
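send_leave_to_wire() fills the header for an SA (Subnet Administration) Delete of the group's member record, then remembers the transaction ID so the response can be validated. A sketch of just the header setup; next_tid stands in for mlx4_ib_get_new_demux_tid(), and the constants are from <rdma/ib_mad.h> and <rdma/ib_sa.h>:

    #include <rdma/ib_mad.h>
    #include <rdma/ib_sa.h>

    static void init_sa_delete_hdr(struct ib_mad_hdr *hdr, __be64 next_tid)
    {
            hdr->base_version   = IB_MGMT_BASE_VERSION;    /* 1 */
            hdr->mgmt_class     = IB_MGMT_CLASS_SUBN_ADM;  /* SA class */
            hdr->class_version  = 2;                       /* SA class version */
            hdr->method         = IB_SA_METHOD_DELETE;
            hdr->status         = cpu_to_be16(0);
            hdr->class_specific = cpu_to_be16(0);
            hdr->tid            = next_tid;      /* kept for response matching */
            hdr->attr_id        = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
    }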
|
D | mad.c |
    231  if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||  in smp_snoop()
    232      mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&  in smp_snoop()
    233      mad->mad_hdr.method == IB_MGMT_METHOD_SET)  in smp_snoop()
    234  switch (mad->mad_hdr.attr_id) {  in smp_snoop()
    372  if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||  in node_desc_override()
    373      mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&  in node_desc_override()
    374      mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&  in node_desc_override()
    375      mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {  in node_desc_override()
    385  int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;  in forward_trap()
    422  switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {  in mlx4_ib_demux_sa_handler()
    [all …]
|
/drivers/infiniband/ulp/opa_vnic/ |
D | opa_vnic_vema.c |
    134  return be32_to_cpu(recvd_mad->mad_hdr.attr_mod) & 0xff;  in vema_get_vport_num()
    338  rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;  in vema_set_veswport_info()
    361  rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;  in vema_get_mac_entries()
    373  rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;  in vema_get_mac_entries()
    396  rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;  in vema_set_mac_entries()
    403  rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;  in vema_set_mac_entries()
    405  rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;  in vema_set_mac_entries()
    430  rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;  in vema_set_delete_vesw()
    464  rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;  in vema_get_mac_list()
    501  rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;  in vema_get_summary_counters()
    [all …]
|
D | opa_vnic_encap.h |
    475  struct ib_mad_hdr mad_hdr;  member
    517  struct ib_mad_hdr mad_hdr;  member
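Both structures in opa_vnic_encap.h begin with a struct ib_mad_hdr member, the usual layout for class-specific MADs: generic MAD code can read mgmt_class, method and attr_id without knowing the payload. An illustrative (made-up) class MAD following the same convention:

    #include <rdma/ib_mad.h>

    /* Illustrative layout only, not a structure from opa_vnic_encap.h. */
    struct example_class_mad {
            struct ib_mad_hdr mad_hdr;           /* common MAD header first */
            u8 payload[IB_MGMT_MAD_DATA];        /* class-specific data */
    };

    static u8 example_mgmt_class(const void *buf)
    {
            const struct example_class_mad *mad = buf;

            return mad->mad_hdr.mgmt_class;
    }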
|
/drivers/infiniband/core/ |
D | mad_rmpp.c |
    117  memcpy(ack, &data->mad_hdr, msg->hdr_len);  in format_ack()
    119  ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;  in format_ack()
    136  hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);  in ack_recv()
    163  hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);  in alloc_response_msg()
    192  rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;  in ack_ds_ack()
    226  rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;  in nack_recv()
    280  struct ib_mad_hdr *mad_hdr;  in create_rmpp_recv() local
    308  mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;  in create_rmpp_recv()
    309  rmpp_recv->tid = mad_hdr->tid;  in create_rmpp_recv()
    312  rmpp_recv->mgmt_class = mad_hdr->mgmt_class;  in create_rmpp_recv()
    [all …]
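format_ack(), ack_ds_ack() and nack_recv() all build their reply by reusing the incoming header and toggling the response bit in the method field (IB_MGMT_METHOD_RESP, so Get becomes GetResp and vice versa). A minimal sketch of that pattern covering only the plain MAD header; the real code copies hdr_len bytes, which also includes the RMPP header:

    #include <rdma/ib_mad.h>

    static void make_response_hdr(struct ib_mad_hdr *resp,
                                  const struct ib_mad_hdr *req)
    {
            *resp = *req;
            resp->method ^= IB_MGMT_METHOD_RESP;  /* flip request <-> response */
            resp->status = 0;                     /* caller sets the real status */
    }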
|
D | agent.c |
    81   void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *grh,  in agent_send_response() argument
    109  if (opa && mad_hdr->base_version != OPA_MGMT_BASE_VERSION)  in agent_send_response()
    116  mad_hdr->base_version);  in agent_send_response()
    122  memcpy(send_buf->mad, mad_hdr, resp_mad_len);  in agent_send_response()
|
D | mad.c |
    1542  const struct ib_mad_hdr *mad_hdr)  in find_mad_agent() argument
    1547  if (ib_response_mad(mad_hdr)) {  in find_mad_agent()
    1554  hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;  in find_mad_agent()
    1573  if (mad_hdr->class_version >= MAX_MGMT_VERSION)  in find_mad_agent()
    1575  if (!is_vendor_class(mad_hdr->mgmt_class)) {  in find_mad_agent()
    1577  mad_hdr->class_version].class;  in find_mad_agent()
    1580  if (convert_mgmt_class(mad_hdr->mgmt_class) >=  in find_mad_agent()
    1584  mad_hdr->mgmt_class)];  in find_mad_agent()
    1586  mad_agent = method->agent[mad_hdr->method &  in find_mad_agent()
    1590  mad_hdr->class_version].vendor;  in find_mad_agent()
    [all …]
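find_mad_agent() routes responses by transaction ID: the core stamps the upper 32 bits of the TID with the sending agent's ID, so when ib_response_mad() says the MAD is a response the agent can be found without any class knowledge; requests instead go through the class/version/method tables. A sketch of the TID split:

    #include <rdma/ib_mad.h>

    /* Agent ID recovered from a response's transaction ID (cf. hi_tid). */
    static u32 tid_to_agent_id(const struct ib_mad_hdr *mad_hdr)
    {
            return (u32)(be64_to_cpu(mad_hdr->tid) >> 32);
    }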
|
D | sa_query.c |
    955   mad->mad_hdr.method |=  in ib_nl_process_good_resolve_rsp()
    1346  mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION;  in init_mad()
    1347  mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;  in init_mad()
    1349  mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;  in init_mad()
    1350  mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;  in init_mad()
    1352  mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;  in init_mad()
    1354  mad->mad_hdr.tid =  in init_mad()
    1601  mad->mad_hdr.method = IB_MGMT_METHOD_GET;  in ib_sa_path_rec_get()
    1602  mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);  in ib_sa_path_rec_get()
    1738  mad->mad_hdr.method = method;  in ib_sa_service_rec_query()
    [all …]
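init_mad() picks the header versions by device type: OPA devices get the OPA base and SA class versions, plain IB devices the IB ones, and the management class is Subnet Administration either way; the query-specific method and attribute ID are filled in later (as in ib_sa_path_rec_get()). A sketch using the same constant names as the hits above (their defining headers are not reproduced here):

    #include <rdma/ib_mad.h>
    #include <rdma/ib_sa.h>

    static void init_sa_hdr(struct ib_mad_hdr *hdr, bool opa)
    {
            if (opa) {
                    hdr->base_version  = OPA_MGMT_BASE_VERSION;
                    hdr->class_version = OPA_SA_CLASS_VERSION;
            } else {
                    hdr->base_version  = IB_MGMT_BASE_VERSION;
                    hdr->class_version = IB_SA_CLASS_VERSION;
            }
            hdr->mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
            /* Per query: e.g. method = IB_MGMT_METHOD_GET,
             * attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC). */
    }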
|
D | agent.h | 47 extern void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *grh,
|
D | user_mad.c |
    135  struct ib_mad_hdr mad_hdr;  member
    334  offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);  in copy_recv_mad()
    348  trace_ib_umad_read_recv(file, &packet->mad.hdr, &recv_buf->mad->mad_hdr);  in copy_recv_mad()
    569  hdr_len = ib_get_mad_data_offset(rmpp_mad_hdr->mad_hdr.mgmt_class);  in ib_umad_write()
    571  if (ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)  in ib_umad_write()
    623  rmpp_mad_hdr->mad_hdr.tid = *tid;  in ib_umad_write()
    627  && ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)  in ib_umad_write()
|
D | sysfs.c |
    492  in_mad->mad_hdr.base_version = 1;  in get_perf_mad()
    493  in_mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;  in get_perf_mad()
    494  in_mad->mad_hdr.class_version = 1;  in get_perf_mad()
    495  in_mad->mad_hdr.method = IB_MGMT_METHOD_GET;  in get_perf_mad()
    496  in_mad->mad_hdr.attr_id = attr;  in get_perf_mad()
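get_perf_mad() builds a Performance Management Get for a single attribute (the port counters behind the sysfs files) and hands it to the driver. A sketch of the header it fills; attr is already a big-endian attribute ID such as IB_PMA_PORT_COUNTERS from <rdma/ib_pma.h>:

    #include <rdma/ib_mad.h>
    #include <rdma/ib_pma.h>

    static void init_pma_get_hdr(struct ib_mad_hdr *hdr, __be16 attr)
    {
            hdr->base_version  = 1;               /* IB_MGMT_BASE_VERSION */
            hdr->mgmt_class    = IB_MGMT_CLASS_PERF_MGMT;
            hdr->class_version = 1;               /* PMA class version */
            hdr->method        = IB_MGMT_METHOD_GET;
            hdr->attr_id       = attr;            /* already big-endian */
    }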
|
D | cm.c |
    4029  switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {  in cm_recv_handler()
    4072  attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);  in cm_recv_handler()
|
/drivers/infiniband/hw/mlx5/ |
D | mad.c |
    48   if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED &&  in can_do_mad_ifc()
    49       in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)  in can_do_mad_ifc()
    176  if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {  in process_pma_cmd()
    185  if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {  in process_pma_cmd()
    231  u8 mgmt_class = in->mad_hdr.mgmt_class;  in mlx5_ib_process_mad()
    232  u8 method = in->mad_hdr.method;  in mlx5_ib_process_mad()
    252  if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)  in mlx5_ib_process_mad()
    279  out->mad_hdr.status |= cpu_to_be16(1 << 15);  in mlx5_ib_process_mad()
|
/drivers/infiniband/hw/hfi1/ |
D | mad.c |
    2318  struct ib_mad_hdr mad_hdr;  member
    2610  if (pmp->mad_hdr.attr_mod != 0)  in pma_get_opa_classportinfo()
    2611  pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;  in pma_get_opa_classportinfo()
    2734  u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;  in pma_get_opa_portstatus()
    2747  pmp->mad_hdr.status |= OPA_PM_STATUS_REQUEST_TOO_LARGE;  in pma_get_opa_portstatus()
    2753  pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;  in pma_get_opa_portstatus()
    2997  num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;  in pma_get_opa_datacounters()
    3006  pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;  in pma_get_opa_datacounters()
    3014  pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;  in pma_get_opa_datacounters()
    3027  pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;  in pma_get_opa_datacounters()
    [all …]
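The OPA PMA handlers encode the number of requested ports in the top byte of attr_mod and report problems by OR-ing error bits into the header's status field rather than failing the call. A sketch of that attr_mod check, assuming the single-port case the hits above guard for (IB_SMP_INVALID_FIELD is a big-endian status constant from <rdma/ib_smi.h>):

    #include <rdma/ib_mad.h>
    #include <rdma/ib_smi.h>

    static bool opa_pma_one_port_requested(struct ib_mad_hdr *hdr)
    {
            u32 nports = be32_to_cpu(hdr->attr_mod) >> 24;

            if (nports != 1) {
                    hdr->status |= IB_SMP_INVALID_FIELD;
                    return false;
            }
            return true;
    }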
|
/drivers/infiniband/hw/qib/ |
D | qib_mad.c |
    1160  if (pmp->mad_hdr.attr_mod != 0)  in pma_get_classportinfo()
    1161  pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;  in pma_get_classportinfo()
    1197  if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {  in pma_get_portsamplescontrol()
    1198  pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;  in pma_get_portsamplescontrol()
    1233  if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {  in pma_set_portsamplescontrol()
    1234  pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;  in pma_set_portsamplescontrol()
    1464  if (pmp->mad_hdr.attr_mod != 0 || port_select != port)  in pma_get_portcounters()
    1465  pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;  in pma_get_portcounters()
    1538  u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;  in pma_get_portcounters_cong()
    1547  pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;  in pma_get_portcounters_cong()
    [all …]
|
/drivers/infiniband/ulp/srpt/ |
D | ib_dm_mad.h | 79 struct ib_mad_hdr mad_hdr; member
|
D | ib_srpt.c |
    286  mad->mad_hdr.status = 0;  in srpt_get_class_port_info()
    311  mad->mad_hdr.status = 0;  in srpt_get_iou()
    334  mad->mad_hdr.status  in srpt_get_ioc()
    340  mad->mad_hdr.status  in srpt_get_ioc()
    372  mad->mad_hdr.status = 0;  in srpt_get_ioc()
    394  mad->mad_hdr.status  in srpt_get_svc_entries()
    400  mad->mad_hdr.status  in srpt_get_svc_entries()
    414  mad->mad_hdr.status = 0;  in srpt_get_svc_entries()
    430  attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);  in srpt_mgmt_method_get()
    439  slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);  in srpt_mgmt_method_get()
    [all …]
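The srpt handlers clear mad_hdr.status on success (or set an error code on failure), and srpt_mgmt_method_get() dispatches on the Device Management attribute ID with the slot number carried in attr_mod. A sketch of that parsing step; the caller would switch on the returned attribute ID against srpt's DM_ATTR_* values from ib_dm_mad.h:

    #include <rdma/ib_mad.h>

    /* Mirrors srpt_mgmt_method_get()'s parsing: attribute ID in host
     * order, per-controller slot number taken from attr_mod. */
    static u16 dm_get_attr_and_slot(const struct ib_mad_hdr *hdr, u32 *slot)
    {
            *slot = be32_to_cpu(hdr->attr_mod);
            return be16_to_cpu(hdr->attr_id);
    }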
|
/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_ah.c | 261 if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) { in ocrdma_process_mad()
|