Lines matching refs: slave

323 static void update_pkey_index(struct mlx4_dev *dev, int slave,  in update_pkey_index()  argument
334 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index]; in update_pkey_index()
339 u8 slave) in update_gid() argument
346 qp_ctx->pri_path.mgid_index = 0x80 | slave; in update_gid()
350 qp_ctx->pri_path.mgid_index = slave & 0x7F; in update_gid()
352 qp_ctx->alt_path.mgid_index = slave & 0x7F; in update_gid()
358 u8 slave) in update_vport_qp_param() argument
368 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; in update_vport_qp_param()
416 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id, in get_res() argument
435 if (r->owner != slave) { in get_res()
453 u64 res_id, int *slave) in mlx4_get_slave_from_resource_id() argument
466 *slave = r->owner; in mlx4_get_slave_from_resource_id()
474 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id, in put_res() argument
621 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave, in alloc_tr() argument
661 ret->owner = slave; in alloc_tr()
666 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count, in add_res_range() argument
681 res_arr[i] = alloc_tr(base + i, type, slave, extra); in add_res_range()
701 &tracker->slave_list[slave].res_list[type]); in add_res_range()
851 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count, in rem_res_range() argument
867 if (r->owner != slave) { in rem_res_range()
890 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, in qp_res_start_move_to() argument
903 else if (r->com.owner != slave) in qp_res_start_move_to()
955 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index, in mr_res_start_move_to() argument
967 else if (r->com.owner != slave) in mr_res_start_move_to()
1008 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, in eq_res_start_move_to() argument
1020 else if (r->com.owner != slave) in eq_res_start_move_to()
1056 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn, in cq_res_start_move_to() argument
1068 else if (r->com.owner != slave) in cq_res_start_move_to()
1110 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, in srq_res_start_move_to() argument
1122 else if (r->com.owner != slave) in srq_res_start_move_to()
1160 static void res_abort_move(struct mlx4_dev *dev, int slave, in res_abort_move() argument
1169 if (r && (r->owner == slave)) in res_abort_move()
1174 static void res_end_move(struct mlx4_dev *dev, int slave, in res_end_move() argument
1183 if (r && (r->owner == slave)) in res_end_move()
1188 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn) in valid_reserved() argument
1191 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn)); in valid_reserved()
1199 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, in qp_alloc_res() argument
1216 err = add_res_range(dev, slave, base, count, RES_QP, 0); in qp_alloc_res()
1225 if (valid_reserved(dev, slave, qpn)) { in qp_alloc_res()
1226 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0); in qp_alloc_res()
1231 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, in qp_alloc_res()
1239 res_abort_move(dev, slave, RES_QP, qpn); in qp_alloc_res()
1244 res_end_move(dev, slave, RES_QP, qpn); in qp_alloc_res()
1254 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, in mtt_alloc_res() argument
1269 err = add_res_range(dev, slave, base, 1, RES_MTT, order); in mtt_alloc_res()
1278 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, in mpt_alloc_res() argument
1293 err = add_res_range(dev, slave, id, 1, RES_MPT, index); in mpt_alloc_res()
1303 err = mr_res_start_move_to(dev, slave, id, in mpt_alloc_res()
1310 res_abort_move(dev, slave, RES_MPT, id); in mpt_alloc_res()
1314 res_end_move(dev, slave, RES_MPT, id); in mpt_alloc_res()
1320 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, in cq_alloc_res() argument
1332 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0); in cq_alloc_res()
1348 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, in srq_alloc_res() argument
1360 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0); in srq_alloc_res()
1376 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port) in mac_add_to_slave() argument
1388 &tracker->slave_list[slave].res_list[RES_MAC]); in mac_add_to_slave()
1392 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac, in mac_del_from_slave() argument
1398 &tracker->slave_list[slave].res_list[RES_MAC]; in mac_del_from_slave()
1410 static void rem_slave_macs(struct mlx4_dev *dev, int slave) in rem_slave_macs() argument
1415 &tracker->slave_list[slave].res_list[RES_MAC]; in rem_slave_macs()
1425 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, in mac_alloc_res() argument
1445 err = mac_add_to_slave(dev, slave, mac, port); in mac_alloc_res()
1452 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, in vlan_alloc_res() argument
1458 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, in counter_alloc_res() argument
1471 err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0); in counter_alloc_res()
1480 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, in xrcdn_alloc_res() argument
1493 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0); in xrcdn_alloc_res()
1502 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, in mlx4_ALLOC_RES_wrapper() argument
1513 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop, in mlx4_ALLOC_RES_wrapper()
1518 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop, in mlx4_ALLOC_RES_wrapper()
1523 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop, in mlx4_ALLOC_RES_wrapper()
1528 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop, in mlx4_ALLOC_RES_wrapper()
1533 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop, in mlx4_ALLOC_RES_wrapper()
1538 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop, in mlx4_ALLOC_RES_wrapper()
1543 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop, in mlx4_ALLOC_RES_wrapper()
1548 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop, in mlx4_ALLOC_RES_wrapper()
1553 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop, in mlx4_ALLOC_RES_wrapper()
1565 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, in qp_free_res() argument
1577 err = rem_res_range(dev, slave, base, count, RES_QP, 0); in qp_free_res()
1584 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED, in qp_free_res()
1592 res_end_move(dev, slave, RES_QP, qpn); in qp_free_res()
1594 if (valid_reserved(dev, slave, qpn)) in qp_free_res()
1595 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0); in qp_free_res()
1604 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, in mtt_free_res() argument
1616 err = rem_res_range(dev, slave, base, 1, RES_MTT, order); in mtt_free_res()
1622 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, in mpt_free_res() argument
1634 err = get_res(dev, slave, id, RES_MPT, &mpt); in mpt_free_res()
1638 put_res(dev, slave, id, RES_MPT); in mpt_free_res()
1640 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0); in mpt_free_res()
1648 err = mr_res_start_move_to(dev, slave, id, in mpt_free_res()
1654 res_end_move(dev, slave, RES_MPT, id); in mpt_free_res()
1664 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, in cq_free_res() argument
1673 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0); in cq_free_res()
1688 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, in srq_free_res() argument
1697 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0); in srq_free_res()
1712 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, in mac_free_res() argument
1721 mac_del_from_slave(dev, slave, in_param, port); in mac_free_res()
1733 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, in vlan_free_res() argument
1739 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, in counter_free_res() argument
1749 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0); in counter_free_res()
1758 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, in xrcdn_free_res() argument
1768 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0); in xrcdn_free_res()
1777 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, in mlx4_FREE_RES_wrapper() argument
1788 err = qp_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
1793 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
1798 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
1803 err = cq_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
1808 err = srq_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
1813 err = mac_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
1818 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
1823 err = counter_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
1828 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
1909 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start, in check_mtt_range() argument
1920 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, in mlx4_SW2HW_MPT_wrapper() argument
1937 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt); in mlx4_SW2HW_MPT_wrapper()
1950 if (pd_slave != 0 && pd_slave != slave) { in mlx4_SW2HW_MPT_wrapper()
1970 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); in mlx4_SW2HW_MPT_wrapper()
1974 err = check_mtt_range(dev, slave, mtt_base, in mlx4_SW2HW_MPT_wrapper()
1982 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SW2HW_MPT_wrapper()
1988 put_res(dev, slave, mtt->com.res_id, RES_MTT); in mlx4_SW2HW_MPT_wrapper()
1991 res_end_move(dev, slave, RES_MPT, id); in mlx4_SW2HW_MPT_wrapper()
1996 put_res(dev, slave, mtt->com.res_id, RES_MTT); in mlx4_SW2HW_MPT_wrapper()
1998 res_abort_move(dev, slave, RES_MPT, id); in mlx4_SW2HW_MPT_wrapper()
2003 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave, in mlx4_HW2SW_MPT_wrapper() argument
2015 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt); in mlx4_HW2SW_MPT_wrapper()
2019 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_HW2SW_MPT_wrapper()
2026 res_end_move(dev, slave, RES_MPT, id); in mlx4_HW2SW_MPT_wrapper()
2030 res_abort_move(dev, slave, RES_MPT, id); in mlx4_HW2SW_MPT_wrapper()
2035 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave, in mlx4_QUERY_MPT_wrapper() argument
2047 err = get_res(dev, slave, id, RES_MPT, &mpt); in mlx4_QUERY_MPT_wrapper()
2056 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_MPT_wrapper()
2059 put_res(dev, slave, id, RES_MPT); in mlx4_QUERY_MPT_wrapper()
2091 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_RST2INIT_QP_wrapper() argument
2113 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0); in mlx4_RST2INIT_QP_wrapper()
2118 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); in mlx4_RST2INIT_QP_wrapper()
2122 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); in mlx4_RST2INIT_QP_wrapper()
2126 err = get_res(dev, slave, rcqn, RES_CQ, &rcq); in mlx4_RST2INIT_QP_wrapper()
2131 err = get_res(dev, slave, scqn, RES_CQ, &scq); in mlx4_RST2INIT_QP_wrapper()
2138 err = get_res(dev, slave, srqn, RES_SRQ, &srq); in mlx4_RST2INIT_QP_wrapper()
2144 update_pkey_index(dev, slave, inbox); in mlx4_RST2INIT_QP_wrapper()
2145 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_RST2INIT_QP_wrapper()
2156 put_res(dev, slave, scqn, RES_CQ); in mlx4_RST2INIT_QP_wrapper()
2160 put_res(dev, slave, srqn, RES_SRQ); in mlx4_RST2INIT_QP_wrapper()
2163 put_res(dev, slave, rcqn, RES_CQ); in mlx4_RST2INIT_QP_wrapper()
2164 put_res(dev, slave, mtt_base, RES_MTT); in mlx4_RST2INIT_QP_wrapper()
2165 res_end_move(dev, slave, RES_QP, qpn); in mlx4_RST2INIT_QP_wrapper()
2171 put_res(dev, slave, srqn, RES_SRQ); in mlx4_RST2INIT_QP_wrapper()
2174 put_res(dev, slave, scqn, RES_CQ); in mlx4_RST2INIT_QP_wrapper()
2176 put_res(dev, slave, rcqn, RES_CQ); in mlx4_RST2INIT_QP_wrapper()
2178 put_res(dev, slave, mtt_base, RES_MTT); in mlx4_RST2INIT_QP_wrapper()
2180 res_abort_move(dev, slave, RES_QP, qpn); in mlx4_RST2INIT_QP_wrapper()
2217 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_SW2HW_EQ_wrapper() argument
2225 int res_id = (slave << 8) | eqn; in mlx4_SW2HW_EQ_wrapper()
2232 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0); in mlx4_SW2HW_EQ_wrapper()
2235 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq); in mlx4_SW2HW_EQ_wrapper()
2239 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); in mlx4_SW2HW_EQ_wrapper()
2243 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); in mlx4_SW2HW_EQ_wrapper()
2247 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SW2HW_EQ_wrapper()
2253 put_res(dev, slave, mtt->com.res_id, RES_MTT); in mlx4_SW2HW_EQ_wrapper()
2254 res_end_move(dev, slave, RES_EQ, res_id); in mlx4_SW2HW_EQ_wrapper()
2258 put_res(dev, slave, mtt->com.res_id, RES_MTT); in mlx4_SW2HW_EQ_wrapper()
2260 res_abort_move(dev, slave, RES_EQ, res_id); in mlx4_SW2HW_EQ_wrapper()
2262 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); in mlx4_SW2HW_EQ_wrapper()
2266 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start, in get_containing_mtt() argument
2275 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT], in get_containing_mtt()
2277 if (!check_mtt_range(dev, slave, start, len, mtt)) { in get_containing_mtt()
2292 enum qp_transition transition, u8 slave) in verify_qp_parameters() argument
2311 if (slave != mlx4_master_func_num(dev)) in verify_qp_parameters()
2332 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, in mlx4_WRITE_MTT_wrapper() argument
2347 err = get_containing_mtt(dev, slave, start, npages, &rmtt); in mlx4_WRITE_MTT_wrapper()
2365 put_res(dev, slave, rmtt->com.res_id, RES_MTT); in mlx4_WRITE_MTT_wrapper()
2370 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_HW2SW_EQ_wrapper() argument
2377 int res_id = eqn | (slave << 8); in mlx4_HW2SW_EQ_wrapper()
2381 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq); in mlx4_HW2SW_EQ_wrapper()
2385 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL); in mlx4_HW2SW_EQ_wrapper()
2389 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_HW2SW_EQ_wrapper()
2394 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); in mlx4_HW2SW_EQ_wrapper()
2395 res_end_move(dev, slave, RES_EQ, res_id); in mlx4_HW2SW_EQ_wrapper()
2396 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); in mlx4_HW2SW_EQ_wrapper()
2401 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); in mlx4_HW2SW_EQ_wrapper()
2403 res_abort_move(dev, slave, RES_EQ, res_id); in mlx4_HW2SW_EQ_wrapper()
2408 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) in mlx4_GEN_EQE() argument
2421 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; in mlx4_GEN_EQE()
2427 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); in mlx4_GEN_EQE()
2428 res_id = (slave << 8) | event_eq->eqn; in mlx4_GEN_EQE()
2429 err = get_res(dev, slave, res_id, RES_EQ, &req); in mlx4_GEN_EQE()
2451 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16); in mlx4_GEN_EQE()
2457 put_res(dev, slave, res_id, RES_EQ); in mlx4_GEN_EQE()
2458 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); in mlx4_GEN_EQE()
2463 put_res(dev, slave, res_id, RES_EQ); in mlx4_GEN_EQE()
2466 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); in mlx4_GEN_EQE()
2470 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_QUERY_EQ_wrapper() argument
2477 int res_id = eqn | (slave << 8); in mlx4_QUERY_EQ_wrapper()
2481 err = get_res(dev, slave, res_id, RES_EQ, &eq); in mlx4_QUERY_EQ_wrapper()
2490 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_EQ_wrapper()
2493 put_res(dev, slave, res_id, RES_EQ); in mlx4_QUERY_EQ_wrapper()
2497 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_SW2HW_CQ_wrapper() argument
2510 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); in mlx4_SW2HW_CQ_wrapper()
2513 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); in mlx4_SW2HW_CQ_wrapper()
2516 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); in mlx4_SW2HW_CQ_wrapper()
2519 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SW2HW_CQ_wrapper()
2524 put_res(dev, slave, mtt->com.res_id, RES_MTT); in mlx4_SW2HW_CQ_wrapper()
2525 res_end_move(dev, slave, RES_CQ, cqn); in mlx4_SW2HW_CQ_wrapper()
2529 put_res(dev, slave, mtt->com.res_id, RES_MTT); in mlx4_SW2HW_CQ_wrapper()
2531 res_abort_move(dev, slave, RES_CQ, cqn); in mlx4_SW2HW_CQ_wrapper()
2535 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_HW2SW_CQ_wrapper() argument
2545 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq); in mlx4_HW2SW_CQ_wrapper()
2548 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_HW2SW_CQ_wrapper()
2552 res_end_move(dev, slave, RES_CQ, cqn); in mlx4_HW2SW_CQ_wrapper()
2556 res_abort_move(dev, slave, RES_CQ, cqn); in mlx4_HW2SW_CQ_wrapper()
2560 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_QUERY_CQ_wrapper() argument
2570 err = get_res(dev, slave, cqn, RES_CQ, &cq); in mlx4_QUERY_CQ_wrapper()
2577 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_CQ_wrapper()
2579 put_res(dev, slave, cqn, RES_CQ); in mlx4_QUERY_CQ_wrapper()
2584 static int handle_resize(struct mlx4_dev *dev, int slave, in handle_resize() argument
2597 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt); in handle_resize()
2606 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); in handle_resize()
2610 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); in handle_resize()
2613 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in handle_resize()
2617 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); in handle_resize()
2620 put_res(dev, slave, mtt->com.res_id, RES_MTT); in handle_resize()
2624 put_res(dev, slave, mtt->com.res_id, RES_MTT); in handle_resize()
2626 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); in handle_resize()
2632 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_MODIFY_CQ_wrapper() argument
2642 err = get_res(dev, slave, cqn, RES_CQ, &cq); in mlx4_MODIFY_CQ_wrapper()
2650 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq); in mlx4_MODIFY_CQ_wrapper()
2654 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_MODIFY_CQ_wrapper()
2656 put_res(dev, slave, cqn, RES_CQ); in mlx4_MODIFY_CQ_wrapper()
2673 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_SW2HW_SRQ_wrapper() argument
2689 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq); in mlx4_SW2HW_SRQ_wrapper()
2692 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); in mlx4_SW2HW_SRQ_wrapper()
2695 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc), in mlx4_SW2HW_SRQ_wrapper()
2700 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SW2HW_SRQ_wrapper()
2706 put_res(dev, slave, mtt->com.res_id, RES_MTT); in mlx4_SW2HW_SRQ_wrapper()
2707 res_end_move(dev, slave, RES_SRQ, srqn); in mlx4_SW2HW_SRQ_wrapper()
2711 put_res(dev, slave, mtt->com.res_id, RES_MTT); in mlx4_SW2HW_SRQ_wrapper()
2713 res_abort_move(dev, slave, RES_SRQ, srqn); in mlx4_SW2HW_SRQ_wrapper()
2718 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_HW2SW_SRQ_wrapper() argument
2728 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq); in mlx4_HW2SW_SRQ_wrapper()
2731 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_HW2SW_SRQ_wrapper()
2737 res_end_move(dev, slave, RES_SRQ, srqn); in mlx4_HW2SW_SRQ_wrapper()
2742 res_abort_move(dev, slave, RES_SRQ, srqn); in mlx4_HW2SW_SRQ_wrapper()
2747 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_QUERY_SRQ_wrapper() argument
2757 err = get_res(dev, slave, srqn, RES_SRQ, &srq); in mlx4_QUERY_SRQ_wrapper()
2764 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_SRQ_wrapper()
2766 put_res(dev, slave, srqn, RES_SRQ); in mlx4_QUERY_SRQ_wrapper()
2770 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_ARM_SRQ_wrapper() argument
2780 err = get_res(dev, slave, srqn, RES_SRQ, &srq); in mlx4_ARM_SRQ_wrapper()
2789 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_ARM_SRQ_wrapper()
2791 put_res(dev, slave, srqn, RES_SRQ); in mlx4_ARM_SRQ_wrapper()
2795 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_GEN_QP_wrapper() argument
2805 err = get_res(dev, slave, qpn, RES_QP, &qp); in mlx4_GEN_QP_wrapper()
2813 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_GEN_QP_wrapper()
2815 put_res(dev, slave, qpn, RES_QP); in mlx4_GEN_QP_wrapper()
2819 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_INIT2INIT_QP_wrapper() argument
2827 update_pkey_index(dev, slave, inbox); in mlx4_INIT2INIT_QP_wrapper()
2828 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_INIT2INIT_QP_wrapper()
2831 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_INIT2RTR_QP_wrapper() argument
2840 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave); in mlx4_INIT2RTR_QP_wrapper()
2844 update_pkey_index(dev, slave, inbox); in mlx4_INIT2RTR_QP_wrapper()
2845 update_gid(dev, inbox, (u8)slave); in mlx4_INIT2RTR_QP_wrapper()
2847 err = update_vport_qp_param(dev, inbox, slave); in mlx4_INIT2RTR_QP_wrapper()
2851 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_INIT2RTR_QP_wrapper()
2854 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_RTR2RTS_QP_wrapper() argument
2863 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave); in mlx4_RTR2RTS_QP_wrapper()
2867 update_pkey_index(dev, slave, inbox); in mlx4_RTR2RTS_QP_wrapper()
2868 update_gid(dev, inbox, (u8)slave); in mlx4_RTR2RTS_QP_wrapper()
2870 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_RTR2RTS_QP_wrapper()
2873 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_RTS2RTS_QP_wrapper() argument
2882 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave); in mlx4_RTS2RTS_QP_wrapper()
2886 update_pkey_index(dev, slave, inbox); in mlx4_RTS2RTS_QP_wrapper()
2887 update_gid(dev, inbox, (u8)slave); in mlx4_RTS2RTS_QP_wrapper()
2889 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_RTS2RTS_QP_wrapper()
2893 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_SQERR2RTS_QP_wrapper() argument
2901 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SQERR2RTS_QP_wrapper()
2904 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_SQD2SQD_QP_wrapper() argument
2913 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave); in mlx4_SQD2SQD_QP_wrapper()
2918 update_gid(dev, inbox, (u8)slave); in mlx4_SQD2SQD_QP_wrapper()
2919 update_pkey_index(dev, slave, inbox); in mlx4_SQD2SQD_QP_wrapper()
2920 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SQD2SQD_QP_wrapper()
2923 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_SQD2RTS_QP_wrapper() argument
2932 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave); in mlx4_SQD2RTS_QP_wrapper()
2937 update_gid(dev, inbox, (u8)slave); in mlx4_SQD2RTS_QP_wrapper()
2938 update_pkey_index(dev, slave, inbox); in mlx4_SQD2RTS_QP_wrapper()
2939 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SQD2RTS_QP_wrapper()
2942 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_2RST_QP_wrapper() argument
2952 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0); in mlx4_2RST_QP_wrapper()
2955 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_2RST_QP_wrapper()
2964 res_end_move(dev, slave, RES_QP, qpn); in mlx4_2RST_QP_wrapper()
2968 res_abort_move(dev, slave, RES_QP, qpn); in mlx4_2RST_QP_wrapper()
2973 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave, in find_gid() argument
2985 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, in add_mcg_res() argument
2997 if (find_gid(dev, slave, rqp, gid)) { in add_mcg_res()
3013 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, in rem_mcg_res() argument
3021 res = find_gid(dev, slave, rqp, gid); in rem_mcg_res()
3066 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, in mlx4_QP_ATTACH_wrapper() argument
3085 err = get_res(dev, slave, qpn, RES_QP, &rqp); in mlx4_QP_ATTACH_wrapper()
3097 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id); in mlx4_QP_ATTACH_wrapper()
3101 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id); in mlx4_QP_ATTACH_wrapper()
3110 put_res(dev, slave, qpn, RES_QP); in mlx4_QP_ATTACH_wrapper()
3116 put_res(dev, slave, qpn, RES_QP); in mlx4_QP_ATTACH_wrapper()
3124 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header, in validate_eth_header_mac() argument
3139 eth_header->eth.dst_mac, slave); in validate_eth_header_mac()
3149 static int add_eth_header(struct mlx4_dev *dev, int slave, in add_eth_header() argument
3205 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, in mlx4_QP_FLOW_STEERING_ATTACH_wrapper() argument
3214 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC]; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
3228 err = get_res(dev, slave, qpn, RES_QP, &rqp); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
3238 if (validate_eth_header_mac(slave, rule_header, rlist)) { in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
3249 if (add_eth_header(dev, slave, inbox, rlist, header_id)) { in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
3269 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
3280 put_res(dev, slave, qpn, RES_QP); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
3284 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, in mlx4_QP_FLOW_STEERING_DETACH_wrapper() argument
3298 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
3302 put_res(dev, slave, vhcr->in_param, RES_FS_RULE); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
3303 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
3307 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
3319 put_res(dev, slave, rrule->qpn, RES_QP); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
3327 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, in mlx4_QUERY_IF_STAT_wrapper() argument
3336 err = get_res(dev, slave, index, RES_COUNTER, NULL); in mlx4_QUERY_IF_STAT_wrapper()
3340 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_IF_STAT_wrapper()
3341 put_res(dev, slave, index, RES_COUNTER); in mlx4_QUERY_IF_STAT_wrapper()
3345 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp) in detach_qp() argument
3367 static int _move_all_busy(struct mlx4_dev *dev, int slave, in _move_all_busy() argument
3373 struct list_head *rlist = &tracker->slave_list[slave].res_list[type]; in _move_all_busy()
3381 if (r->owner == slave) { in _move_all_busy()
3403 static int move_all_busy(struct mlx4_dev *dev, int slave, in move_all_busy() argument
3411 busy = _move_all_busy(dev, slave, type, 0); in move_all_busy()
3419 busy = _move_all_busy(dev, slave, type, 1); in move_all_busy()
3423 static void rem_slave_qps(struct mlx4_dev *dev, int slave) in rem_slave_qps() argument
3428 &tracker->slave_list[slave].res_list[RES_QP]; in rem_slave_qps()
3436 err = move_all_busy(dev, slave, RES_QP); in rem_slave_qps()
3439 "for slave %d\n", slave); in rem_slave_qps()
3444 if (qp->com.owner == slave) { in rem_slave_qps()
3446 detach_qp(dev, slave, qp); in rem_slave_qps()
3460 if (!valid_reserved(dev, slave, qpn)) in rem_slave_qps()
3465 in_param = slave; in rem_slave_qps()
3474 " reset\n", slave, in rem_slave_qps()
3493 static void rem_slave_srqs(struct mlx4_dev *dev, int slave) in rem_slave_srqs() argument
3498 &tracker->slave_list[slave].res_list[RES_SRQ]; in rem_slave_srqs()
3507 err = move_all_busy(dev, slave, RES_SRQ); in rem_slave_srqs()
3510 "busy for slave %d\n", slave); in rem_slave_srqs()
3515 if (srq->com.owner == slave) { in rem_slave_srqs()
3532 in_param = slave; in rem_slave_srqs()
3541 slave, srqn); in rem_slave_srqs()
3559 static void rem_slave_cqs(struct mlx4_dev *dev, int slave) in rem_slave_cqs() argument
3564 &tracker->slave_list[slave].res_list[RES_CQ]; in rem_slave_cqs()
3573 err = move_all_busy(dev, slave, RES_CQ); in rem_slave_cqs()
3576 "busy for slave %d\n", slave); in rem_slave_cqs()
3581 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) { in rem_slave_cqs()
3598 in_param = slave; in rem_slave_cqs()
3607 slave, cqn); in rem_slave_cqs()
3622 static void rem_slave_mrs(struct mlx4_dev *dev, int slave) in rem_slave_mrs() argument
3627 &tracker->slave_list[slave].res_list[RES_MPT]; in rem_slave_mrs()
3636 err = move_all_busy(dev, slave, RES_MPT); in rem_slave_mrs()
3639 "busy for slave %d\n", slave); in rem_slave_mrs()
3644 if (mpt->com.owner == slave) { in rem_slave_mrs()
3666 in_param = slave; in rem_slave_mrs()
3675 slave, mptn); in rem_slave_mrs()
3690 static void rem_slave_mtts(struct mlx4_dev *dev, int slave) in rem_slave_mtts() argument
3696 &tracker->slave_list[slave].res_list[RES_MTT]; in rem_slave_mtts()
3704 err = move_all_busy(dev, slave, RES_MTT); in rem_slave_mtts()
3707 "busy for slave %d\n", slave); in rem_slave_mtts()
3712 if (mtt->com.owner == slave) { in rem_slave_mtts()
3739 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave) in rem_slave_fs_rule() argument
3745 &tracker->slave_list[slave].res_list[RES_FS_RULE]; in rem_slave_fs_rule()
3752 err = move_all_busy(dev, slave, RES_FS_RULE); in rem_slave_fs_rule()
3755 slave); in rem_slave_fs_rule()
3760 if (fs_rule->com.owner == slave) { in rem_slave_fs_rule()
3791 static void rem_slave_eqs(struct mlx4_dev *dev, int slave) in rem_slave_eqs() argument
3796 &tracker->slave_list[slave].res_list[RES_EQ]; in rem_slave_eqs()
3805 err = move_all_busy(dev, slave, RES_EQ); in rem_slave_eqs()
3808 "busy for slave %d\n", slave); in rem_slave_eqs()
3813 if (eq->com.owner == slave) { in rem_slave_eqs()
3834 err = mlx4_cmd_box(dev, slave, 0, in rem_slave_eqs()
3842 " SW ownership\n", slave, eqn); in rem_slave_eqs()
3858 static void rem_slave_counters(struct mlx4_dev *dev, int slave) in rem_slave_counters() argument
3863 &tracker->slave_list[slave].res_list[RES_COUNTER]; in rem_slave_counters()
3869 err = move_all_busy(dev, slave, RES_COUNTER); in rem_slave_counters()
3872 "busy for slave %d\n", slave); in rem_slave_counters()
3876 if (counter->com.owner == slave) { in rem_slave_counters()
3888 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave) in rem_slave_xrcdns() argument
3893 &tracker->slave_list[slave].res_list[RES_XRCD]; in rem_slave_xrcdns()
3899 err = move_all_busy(dev, slave, RES_XRCD); in rem_slave_xrcdns()
3902 "busy for slave %d\n", slave); in rem_slave_xrcdns()
3906 if (xrcd->com.owner == slave) { in rem_slave_xrcdns()
3917 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) in mlx4_delete_all_resources_for_slave() argument
3921 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); in mlx4_delete_all_resources_for_slave()
3923 rem_slave_macs(dev, slave); in mlx4_delete_all_resources_for_slave()
3924 rem_slave_fs_rule(dev, slave); in mlx4_delete_all_resources_for_slave()
3925 rem_slave_qps(dev, slave); in mlx4_delete_all_resources_for_slave()
3926 rem_slave_srqs(dev, slave); in mlx4_delete_all_resources_for_slave()
3927 rem_slave_cqs(dev, slave); in mlx4_delete_all_resources_for_slave()
3928 rem_slave_mrs(dev, slave); in mlx4_delete_all_resources_for_slave()
3929 rem_slave_eqs(dev, slave); in mlx4_delete_all_resources_for_slave()
3930 rem_slave_mtts(dev, slave); in mlx4_delete_all_resources_for_slave()
3931 rem_slave_counters(dev, slave); in mlx4_delete_all_resources_for_slave()
3932 rem_slave_xrcdns(dev, slave); in mlx4_delete_all_resources_for_slave()
3933 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); in mlx4_delete_all_resources_for_slave()
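
The wrappers listed above share a recurring shape: look up the resource in the tracker, verify the calling slave owns it, perform the command while the resource is held, then release it on every exit path. Below is a minimal, self-contained sketch of that ownership pattern. It is not the mlx4 driver's code: the struct layout, error codes, query_res_wrapper(), and the main() demo are simplified stand-ins chosen for illustration, and only the get_res()/put_res() names are borrowed from the listing.

/*
 * Hypothetical sketch of the get_res()/put_res() ownership pattern seen in
 * the wrappers above.  Types and helpers are simplified stand-ins, not the
 * kernel's real definitions.
 */
#include <stdio.h>

#define MAX_RES   8
#define RES_BUSY  1

struct res_common {
	unsigned long res_id;   /* resource identifier (e.g. a QP number)  */
	int owner;              /* slave (VF) index that owns the resource */
	int state;              /* 0 = idle, RES_BUSY = held by a command  */
};

static struct res_common tracker[MAX_RES];

/* Claim a resource on behalf of 'slave'; fail if it is foreign or busy. */
static int get_res(int slave, unsigned long res_id, struct res_common **res)
{
	for (int i = 0; i < MAX_RES; i++) {
		struct res_common *r = &tracker[i];

		if (r->res_id != res_id)
			continue;
		if (r->owner != slave)
			return -1;      /* owned by another slave */
		if (r->state == RES_BUSY)
			return -2;      /* already held */
		r->state = RES_BUSY;
		*res = r;
		return 0;
	}
	return -3;                      /* no such resource */
}

/* Release a resource previously claimed with get_res(). */
static void put_res(int slave, struct res_common *res)
{
	if (res && res->owner == slave)
		res->state = 0;
}

/* Typical wrapper shape: acquire, act while held, release on all paths. */
static int query_res_wrapper(int slave, unsigned long res_id)
{
	struct res_common *res;
	int err = get_res(slave, res_id, &res);

	if (err)
		return err;
	printf("slave %d queries resource %lu\n", slave, res->res_id);
	put_res(slave, res);
	return 0;
}

int main(void)
{
	tracker[0] = (struct res_common){ .res_id = 42, .owner = 1 };

	query_res_wrapper(1, 42);       /* succeeds: slave 1 owns id 42 */
	if (query_res_wrapper(2, 42))   /* fails: wrong owner */
		printf("slave 2 denied access to resource 42\n");
	return 0;
}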