/kernel/linux/linux-5.10/net/smc/ |
D | smc_core.c |
    47   static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
    49   static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
    54   static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,    in smc_lgr_list_head() argument
    57   if (lgr->is_smcd) {    in smc_lgr_list_head()
    58   *lgr_lock = &lgr->smcd->lgr_lock;    in smc_lgr_list_head()
    59   return &lgr->smcd->lgr_list;    in smc_lgr_list_head()
    66   static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)    in smc_lgr_schedule_free_work() argument
    72   if (!lgr->freeing) {    in smc_lgr_schedule_free_work()
    73   mod_delayed_work(system_wq, &lgr->free_work,    in smc_lgr_schedule_free_work()
    74   (!lgr->is_smcd && lgr->role == SMC_CLNT) ?    in smc_lgr_schedule_free_work()
    [all …]
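
The smc_lgr_list_head() fragment above is cut off after the SMC-D branch. Its job is to pick which list a link group is kept on: SMC-D groups hang off a per-device list, SMC-R groups off a global one, and the matching lock is handed back through an output parameter. A minimal user-space sketch of that shape, with made-up stand-in types (lgr_model, smcd_dev_model, g_smcr_list are illustrative, not the kernel's definitions):

    #include <stdbool.h>
    #include <stdio.h>

    /* toy stand-ins for the kernel structures, for illustration only */
    struct list_head { struct list_head *next, *prev; };
    struct lock_model { int dummy; };

    struct smcd_dev_model {
    	struct lock_model lgr_lock;	/* protects this device's lgr_list */
    	struct list_head lgr_list;	/* SMC-D link groups of this device */
    };

    struct lgr_model {
    	bool is_smcd;
    	struct smcd_dev_model *smcd;	/* only valid when is_smcd is set */
    };

    /* global list of SMC-R link groups, analogous to smc_lgr_list in smc_core.c */
    static struct lock_model g_smcr_lock;
    static struct list_head g_smcr_list;

    /* return the list this link group lives on and hand back the guarding lock */
    static struct list_head *lgr_list_head(struct lgr_model *lgr,
    				       struct lock_model **lgr_lock)
    {
    	if (lgr->is_smcd) {
    		*lgr_lock = &lgr->smcd->lgr_lock;
    		return &lgr->smcd->lgr_list;
    	}
    	*lgr_lock = &g_smcr_lock;
    	return &g_smcr_list;
    }

    int main(void)
    {
    	struct smcd_dev_model dev = { 0 };
    	struct lgr_model d = { .is_smcd = true, .smcd = &dev };
    	struct lgr_model r = { .is_smcd = false };
    	struct lock_model *lock;

    	printf("SMC-D -> device list: %d\n", lgr_list_head(&d, &lock) == &dev.lgr_list);
    	printf("SMC-R -> global list: %d\n", lgr_list_head(&r, &lock) == &g_smcr_list);
    	return 0;
    }

The smc_lgr_schedule_free_work() lines (66-74) then rearm lgr->free_work with mod_delayed_work(), choosing a different delay when the group is an SMC-R client (role == SMC_CLNT).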
|
D | smc_llc.c |
    189  static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,    in smc_llc_flow_parallel() argument
    195  flow_type != msg_type && !lgr->delayed_event) {    in smc_llc_flow_parallel()
    196  lgr->delayed_event = qentry;    in smc_llc_flow_parallel()
    203  SMC_LGR_ID_SIZE, &lgr->id,    in smc_llc_flow_parallel()
    205  flow_type, lgr->role);    in smc_llc_flow_parallel()
    213  struct smc_link_group *lgr = qentry->link->lgr;    in smc_llc_flow_start() local
    215  spin_lock_bh(&lgr->llc_flow_lock);    in smc_llc_flow_start()
    218  smc_llc_flow_parallel(lgr, flow->type, qentry);    in smc_llc_flow_start()
    219  spin_unlock_bh(&lgr->llc_flow_lock);    in smc_llc_flow_start()
    237  spin_unlock_bh(&lgr->llc_flow_lock);    in smc_llc_flow_start()
    [all …]
|
D | smc_llc.h |
    53   static inline struct smc_link *smc_llc_usable_link(struct smc_link_group *lgr)    in smc_llc_usable_link() argument
    58   if (smc_link_usable(&lgr->lnk[i]))    in smc_llc_usable_link()
    59   return &lgr->lnk[i];    in smc_llc_usable_link()
    64   static inline void smc_llc_set_termination_rsn(struct smc_link_group *lgr,    in smc_llc_set_termination_rsn() argument
    67   if (!lgr->llc_termination_rsn)    in smc_llc_set_termination_rsn()
    68   lgr->llc_termination_rsn = rsn;    in smc_llc_set_termination_rsn()
    81   void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc);
    82   void smc_llc_lgr_clear(struct smc_link_group *lgr);
    88   int smc_llc_do_delete_rkey(struct smc_link_group *lgr,
    90   int smc_llc_flow_initiate(struct smc_link_group *lgr,
    [all …]
|
D | smc_ism.c |
    59   spin_lock_irqsave(&conn->lgr->smcd->lock, flags);    in smc_ism_set_conn()
    60   conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = conn;    in smc_ism_set_conn()
    61   spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);    in smc_ism_set_conn()
    72   spin_lock_irqsave(&conn->lgr->smcd->lock, flags);    in smc_ism_unset_conn()
    73   conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = NULL;    in smc_ism_unset_conn()
    74   spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);    in smc_ism_unset_conn()
    182  int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,    in smc_ism_register_dmb() argument
    191  dmb.vlan_id = lgr->vlan_id;    in smc_ism_register_dmb()
    192  dmb.rgid = lgr->peer_gid;    in smc_ism_register_dmb()
    193  rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb);    in smc_ism_register_dmb()
    [all …]
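
smc_ism_register_dmb() (lines 182-193) fills a DMB descriptor from the link group, copying the VLAN id and the peer GID, and then registers it through the ISM device's operations table rather than calling a driver directly. A self-contained sketch of that ops-table pattern, with invented names (dmb_model, ism_ops_model, fake_register_dmb are illustrative only):

    #include <stdio.h>

    /* simplified DMB descriptor: just the fields the excerpt copies from the lgr */
    struct dmb_model {
    	int dmb_len;
    	unsigned short vlan_id;
    	unsigned long long rgid;	/* remote (peer) GID */
    };

    struct ism_dev_model;

    /* per-device operations table, in the spirit of the kernel's smcd_ops */
    struct ism_ops_model {
    	int (*register_dmb)(struct ism_dev_model *dev, struct dmb_model *dmb);
    };

    struct ism_dev_model {
    	const struct ism_ops_model *ops;
    };

    struct lgr_model {
    	unsigned short vlan_id;
    	unsigned long long peer_gid;
    	struct ism_dev_model *smcd;
    };

    static int register_dmb_for_lgr(struct lgr_model *lgr, int dmb_len)
    {
    	struct dmb_model dmb = {
    		.dmb_len = dmb_len,
    		.vlan_id = lgr->vlan_id,	/* same fields the excerpt copies */
    		.rgid = lgr->peer_gid,
    	};
    	/* indirect call through the device's ops table */
    	return lgr->smcd->ops->register_dmb(lgr->smcd, &dmb);
    }

    static int fake_register_dmb(struct ism_dev_model *dev, struct dmb_model *dmb)
    {
    	(void)dev;
    	printf("register DMB: len=%d vlan=%u rgid=%llu\n",
    	       dmb->dmb_len, (unsigned int)dmb->vlan_id, dmb->rgid);
    	return 0;
    }

    int main(void)
    {
    	const struct ism_ops_model ops = { .register_dmb = fake_register_dmb };
    	struct ism_dev_model dev = { .ops = &ops };
    	struct lgr_model lgr = { .vlan_id = 7, .peer_gid = 42, .smcd = &dev };

    	return register_dmb_for_lgr(&lgr, 16384);
    }

The indirection lets different ISM hardware back-ends plug in their own register_dmb implementation behind the same call site.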
|
D | smc_diag.c |
    105  else if (smc->conn.lgr && smc->conn.lgr->is_smcd)    in __smc_diag_dump()
    158  if (smc->conn.lgr && !smc->conn.lgr->is_smcd &&    in __smc_diag_dump()
    160  !list_empty(&smc->conn.lgr->list)) {    in __smc_diag_dump()
    162  .role = smc->conn.lgr->role,    in __smc_diag_dump()
    163  .lnk[0].ibport = smc->conn.lgr->lnk[0].ibport,    in __smc_diag_dump()
    164  .lnk[0].link_id = smc->conn.lgr->lnk[0].link_id,    in __smc_diag_dump()
    168  smc->conn.lgr->lnk[0].smcibdev->ibdev->name,    in __smc_diag_dump()
    169  sizeof(smc->conn.lgr->lnk[0].smcibdev->ibdev->name));    in __smc_diag_dump()
    171  smc->conn.lgr->lnk[0].gid);    in __smc_diag_dump()
    173  smc->conn.lgr->lnk[0].peer_gid);    in __smc_diag_dump()
    [all …]
|
D | smc_core.h |
    127  struct smc_link_group *lgr; /* parent link group */    member
    329  u32 token, struct smc_link_group *lgr)    in smc_lgr_find_conn() argument
    334  node = lgr->conns_all.rb_node;    in smc_lgr_find_conn()
    378  void smc_lgr_terminate_sched(struct smc_link_group *lgr);
    391  void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
    393  void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
    403  void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr);
    407  int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
    412  void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type);
    413  void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
    [all …]
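
The smc_lgr_find_conn() fragment (lines 329-334) breaks off right after taking lgr->conns_all.rb_node; the helper resolves a CDC alert token to the connection that owns it by walking that rbtree. A simplified, self-contained model of the lookup, using a plain binary search tree instead of the kernel rbtree (conn_model and find_conn are names invented here):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* toy connection node keyed by its alert token; the kernel keeps these
     * in an rbtree rooted at lgr->conns_all */
    struct conn_model {
    	uint32_t alert_token;
    	struct conn_model *left, *right;
    };

    /* walk the tree comparing tokens until the matching connection is found */
    static struct conn_model *find_conn(struct conn_model *root, uint32_t token)
    {
    	struct conn_model *node = root;

    	while (node) {
    		if (token == node->alert_token)
    			return node;
    		node = (token < node->alert_token) ? node->left : node->right;
    	}
    	return NULL;	/* unknown token: no such connection in this group */
    }

    int main(void)
    {
    	struct conn_model c1 = { .alert_token = 10 };
    	struct conn_model c3 = { .alert_token = 30 };
    	struct conn_model c2 = { .alert_token = 20, .left = &c1, .right = &c3 };

    	printf("found 30: %d\n", find_conn(&c2, 30) == &c3);
    	printf("found 99: %d\n", find_conn(&c2, 99) == NULL);
    	return 0;
    }

This is the lookup the smc_cdc.c entry further down performs from smc_cdc_rx_handler(), under read_lock_bh(&lgr->conns_lock), to map an incoming CDC message back to its connection.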
|
D | smc_cdc.c |
    200  if (!conn->lgr || (conn->lgr->is_smcd && conn->lgr->peer_shutdown))    in smc_cdc_get_slot_and_msg_send()
    203  if (conn->lgr->is_smcd) {    in smc_cdc_get_slot_and_msg_send()
    425  struct smc_link_group *lgr;    in smc_cdc_rx_handler() local
    434  lgr = smc_get_lgr(link);    in smc_cdc_rx_handler()
    435  read_lock_bh(&lgr->conns_lock);    in smc_cdc_rx_handler()
    436  conn = smc_lgr_find_conn(ntohl(cdc->token), lgr);    in smc_cdc_rx_handler()
    437  read_unlock_bh(&lgr->conns_lock);    in smc_cdc_rx_handler()
|
D | smc_tx.c |
    231  queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work,    in smc_tx_sendmsg()
    261  rc = smc_ism_write(conn->lgr->smcd, &pos, data, len);    in smcd_tx_ism_write()
    271  struct smc_link_group *lgr = conn->lgr;    in smc_tx_rdma_write() local
    278  lgr->rtokens[conn->rtoken_idx][link->link_idx].dma_addr +    in smc_tx_rdma_write()
    283  rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][link->link_idx].rkey;    in smc_tx_rdma_write()
    459  if (conn->lgr->is_smcd)    in smc_tx_rdma_writes()
    505  mod_delayed_work(conn->lgr->tx_wq, &conn->tx_work,    in smcr_tx_sndbuf_nonempty()
    566  if (conn->lgr->is_smcd)    in smc_tx_sndbuf_nonempty()
    630  queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work,    in smc_tx_consumer_update()
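
Lines 271-283 show where SMC-R aims its RDMA write: the peer RMB base address and rkey are looked up per connection rtoken and per link in lgr->rtokens[][], and the write lands at that base plus an offset inside the peer buffer. A minimal model of the addressing (rtoken_model, remote_target and the array sizes are illustrative; the real code builds an ib_rdma_wr work request instead of returning a struct):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_RTOKENS	8	/* illustrative sizes only */
    #define MAX_LINKS	3

    /* one remote-key entry: where the peer buffer sits and the key to reach it */
    struct rtoken_model {
    	uint64_t dma_addr;	/* peer RMB base address */
    	uint32_t rkey;		/* RDMA remote key for that buffer */
    };

    struct lgr_model {
    	struct rtoken_model rtokens[MAX_RTOKENS][MAX_LINKS];	/* [rtoken_idx][link_idx] */
    };

    struct remote_target {
    	uint64_t remote_addr;
    	uint32_t rkey;
    };

    /* pick address/rkey for this connection's rtoken on the link in use,
     * offset by where the data should land inside the peer buffer */
    static struct remote_target tx_rdma_target(struct lgr_model *lgr, int rtoken_idx,
    					   int link_idx, uint64_t peer_offset)
    {
    	struct rtoken_model *rt = &lgr->rtokens[rtoken_idx][link_idx];
    	struct remote_target t = {
    		.remote_addr = rt->dma_addr + peer_offset,
    		.rkey = rt->rkey,
    	};
    	return t;
    }

    int main(void)
    {
    	struct lgr_model lgr = { 0 };

    	lgr.rtokens[0][1] = (struct rtoken_model){ .dma_addr = 0x10000, .rkey = 0xabcd };

    	struct remote_target t = tx_rdma_target(&lgr, 0, 1, 0x200);
    	printf("remote_addr=0x%llx rkey=0x%x\n",
    	       (unsigned long long)t.remote_addr, t.rkey);
    	return 0;
    }

The SMC-D branch at line 261 skips this entirely and copies through smc_ism_write() instead, since ISM moves data by firmware call rather than RDMA.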
|
D | af_smc.c |
    362  struct smc_link_group *lgr = link->lgr;    in smcr_lgr_reg_rmbs() local
    365  rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);    in smcr_lgr_reg_rmbs()
    371  mutex_lock(&lgr->llc_conf_mutex);    in smcr_lgr_reg_rmbs()
    373  if (!smc_link_active(&lgr->lnk[i]))    in smcr_lgr_reg_rmbs()
    375  rc = smcr_link_reg_rmb(&lgr->lnk[i], rmb_desc);    in smcr_lgr_reg_rmbs()
    388  mutex_unlock(&lgr->llc_conf_mutex);    in smcr_lgr_reg_rmbs()
    389  smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);    in smcr_lgr_reg_rmbs()
    400  qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,    in smcr_clnt_conf_first_link()
    411  smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);    in smcr_clnt_conf_first_link()
    433  smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);    in smcr_clnt_conf_first_link()
    [all …]
|
D | smc_ism.h |
    47   int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size,
    52   int smc_ism_signal_shutdown(struct smc_link_group *lgr);
|
D | smc_clc.c |
    436  smc->conn.lgr->sync_err = 1;    in smc_clc_wait_msg()
    437  smc_lgr_terminate_sched(smc->conn.lgr);    in smc_clc_wait_msg()
    462  if ((!smc->conn.lgr || !smc->conn.lgr->is_smcd) &&    in smc_clc_send_decline()
    654  if (conn->lgr->is_smcd) {    in smc_clc_send_confirm_accept()
    659  clc->d0.gid = conn->lgr->smcd->local_gid;    in smc_clc_send_confirm_accept()
    663  memcpy(&clc->d0.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);    in smc_clc_send_confirm_accept()
    669  clc_v2->chid = htons(smc_ism_get_chid(conn->lgr->smcd));    in smc_clc_send_confirm_accept()
    670  smc_ism_get_system_eid(conn->lgr->smcd, &eid);    in smc_clc_send_confirm_accept()
|
D | smc_pnet.h | 65 void smc_pnet_find_alt_roce(struct smc_link_group *lgr,
|
D | smc.h | 134 struct smc_link_group *lgr; /* link group of connection */ member
|
D | smc_wr.c |
    186  struct smc_link_group *lgr = smc_get_lgr(link);    in smc_wr_tx_get_free_slot() local
    195  if (in_softirq() || lgr->terminating) {    in smc_wr_tx_get_free_slot()
    203  lgr->terminating ||    in smc_wr_tx_get_free_slot()
|
D | smc_ib.c |
    114  struct smc_link_group *lgr = smc_get_lgr(lnk);    in smc_ib_ready_link() local
    134  if (lgr->role == SMC_SERV) {    in smc_ib_ready_link()
|
/kernel/linux/linux-5.10/arch/s390/purgatory/ |
D | head.S |
    37   lgr %r0,\dst
    38   lgr %r1,\len
    39   lgr %r2,\src
    40   lgr %r3,\len
    49   lgr %r4,\len
    138  lgr %r8,%r13
    159  lgr %r7,%r9
    189  lgr %r12,%r7
    190  lgr %r11,%r9
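
In head.S and the other arch/s390 files below, lgr is not a variable name but the z/Architecture mnemonic LOAD (64-bit, register-register): lgr %r1,%r2 copies the full 64-bit contents of %r2 into %r1. The first hits above sit inside an assembler macro, which is why the source operands are macro arguments like \dst and \len. The same move can be written from C with gcc inline assembly (illustrative only, and it only builds when targeting s390x; a plain assignment compiles to the same instruction anyway):

    #include <stdio.h>

    /* 64-bit register-to-register copy via the lgr instruction.
     * "d" is the s390 gcc constraint for any general-purpose register. */
    static unsigned long copy_with_lgr(unsigned long src)
    {
    	unsigned long dst;

    	asm("lgr %[d],%[s]" : [d] "=d" (dst) : [s] "d" (src));
    	return dst;
    }

    int main(void)
    {
    	printf("%lx\n", copy_with_lgr(0x1234567890abcdefUL));
    	return 0;
    }

The entry.S hits that read "lgr %r2,%r11 # pass pointer to pt_regs" lean on the s390 calling convention, where %r2 carries the first function argument.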
|
/kernel/linux/linux-5.10/arch/s390/lib/ |
D | mem.S |
    20   lgr %r1,%r2
    78   lgr %r1,%r2
    91   lgr %r1,%r2
    129  lgr %r1,%r2
    163  lgr %r1,%r2
|
/kernel/linux/linux-5.10/arch/s390/boot/ |
D | text_dma.S |
    30   lgr %r1,%r2
    31   lgr %r2,%r3
    32   lgr %r3,%r4
    50   lgr %r1,%r2
|
D | head_kdump.S |
    62   lgr %r11,%r2 # Save kdump base address
    63   lgr %r12,%r2
|
/kernel/linux/linux-5.10/arch/s390/kernel/ |
D | relocate_kernel.S |
    38   lgr %r6,%r5 # r6 = r5
    45   lgr %r2,%r5 # move it into the right register,
    54   lgr %r8,%r5 # r8 = r5
|
D | entry.S |
    96   lgr %r14,%r15
    118  lgr %r14,%r9
    437  lgr %r2,%r11
    524  lgr %r2,%r11 # pass pointer to pt_regs
    538  lgr %r2,%r11 # pass pointer to pt_regs
    547  lgr %r2,%r11 # pass pointer to pt_regs
    556  lgr %r2,%r11 # pass pointer to pt_regs
    574  lgr %r2,%r11 # pass pointer to pt_regs
    592  lgr %r2,%r11 # pass pointer to pt_regs
    610  lgr %r2,%r11 # pass pointer to pt_regs
    [all …]
|
D | reipl.S |
    73   lgr %r9,%r2
    74   lgr %r2,%r3
|
D | Makefile | 39 obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o
|
/kernel/linux/linux-5.10/fs/nfs/filelayout/ |
D | filelayout.c |
    601  struct nfs4_layoutget_res *lgr,    in filelayout_check_layout() argument
    609  if (lgr->range.offset != 0 ||    in filelayout_check_layout()
    610  lgr->range.length != NFS4_MAX_UINT64) {    in filelayout_check_layout()
    616  if (fl->pattern_offset > lgr->range.offset) {    in filelayout_check_layout()
    652  struct nfs4_layoutget_res *lgr,    in filelayout_decode_layout() argument
    668  xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len);    in filelayout_decode_layout()
    767  struct nfs4_layoutget_res *lgr,    in filelayout_alloc_lseg() argument
    778  rc = filelayout_decode_layout(layoutid, fl, lgr, gfp_flags);    in filelayout_alloc_lseg()
    779  if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, gfp_flags)) {    in filelayout_alloc_lseg()
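
In the NFS hits, lgr is not a link group at all but the LAYOUTGET result (struct nfs4_layoutget_res). The filelayout_check_layout() lines show two of its sanity checks: a files-layout segment is only accepted when it covers the whole file, and the stripe pattern_offset must not start past the segment. A standalone sketch of just those two checks (check_layout_model and MODEL_MAX_UINT64 are illustrative; the real function validates more fields):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MODEL_MAX_UINT64 UINT64_MAX	/* stands in for NFS4_MAX_UINT64 */

    struct layout_range_model {
    	uint64_t offset;
    	uint64_t length;
    };

    struct layoutget_res_model {
    	struct layout_range_model range;
    };

    /* mirror of the two checks visible in the excerpt (lines 609-616) */
    static bool check_layout_model(const struct layoutget_res_model *lgr,
    			       uint64_t pattern_offset)
    {
    	/* files layouts must describe the whole file */
    	if (lgr->range.offset != 0 || lgr->range.length != MODEL_MAX_UINT64)
    		return false;

    	/* the stripe pattern must start at or before the layout range */
    	if (pattern_offset > lgr->range.offset)
    		return false;

    	return true;
    }

    int main(void)
    {
    	struct layoutget_res_model whole = { .range = { 0, MODEL_MAX_UINT64 } };
    	struct layoutget_res_model part  = { .range = { 4096, 65536 } };

    	printf("whole-file layout ok: %d\n", check_layout_model(&whole, 0));
    	printf("partial layout ok:    %d\n", check_layout_model(&part, 0));
    	return 0;
    }

filelayout_alloc_lseg() (lines 767-779) runs the decode step first and then this check before accepting the segment.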
|
/kernel/linux/linux-5.10/fs/nfs/blocklayout/ |
D | blocklayout.c |
    668  bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,    in bl_alloc_lseg() argument
    672  .mode = lgr->range.iomode,    in bl_alloc_lseg()
    673  .start = lgr->range.offset >> SECTOR_SHIFT,    in bl_alloc_lseg()
    674  .inval = lgr->range.offset >> SECTOR_SHIFT,    in bl_alloc_lseg()
    675  .cowread = lgr->range.offset >> SECTOR_SHIFT,    in bl_alloc_lseg()
    699  lgr->layoutp->pages, lgr->layoutp->len);    in bl_alloc_lseg()
    720  if (lgr->range.offset + lgr->range.length !=    in bl_alloc_lseg()
|