• Home
  • Raw
  • Download

Lines Matching defs:qib_qp

420 struct qib_qp {  struct
425 struct qib_qp __rcu *next; /* link list for QPN hash table */ argument
426 struct qib_swqe *s_wq; /* send work queue */
427 struct qib_mmap_info *ip;
428 struct qib_ib_header *s_hdr; /* next packet header to send */
429 unsigned long timeout_jiffies; /* computed from timeout */
431 enum ib_mtu path_mtu;
432 u32 remote_qpn;
433 u32 pmtu; /* decoded from path_mtu */
434 u32 qkey; /* QKEY for this QP (for UD or RD) */
435 u32 s_size; /* send work queue size */
436 u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
438 u8 state; /* QP state */
439 u8 qp_access_flags;
440 u8 alt_timeout; /* Alternate path timeout for this QP */
441 u8 timeout; /* Timeout for this QP */
442 u8 s_srate;
443 u8 s_mig_state;
444 u8 port_num;
445 u8 s_pkey_index; /* PKEY index to use */
446 u8 s_alt_pkey_index; /* Alternate path PKEY index to use */
447 u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
448 u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
449 u8 s_retry_cnt; /* number of times to retry */
450 u8 s_rnr_retry_cnt;
451 u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
452 u8 s_max_sge; /* size of s_wq->sg_list */
453 u8 s_draining;
457 atomic_t refcount ____cacheline_aligned_in_smp;
458 wait_queue_head_t wait;
461 struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
463 struct qib_sge_state s_rdma_read_sge;
465 spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */
466 unsigned long r_aflags;
467 u64 r_wr_id; /* ID for current receive WQE */
468 u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
469 u32 r_len; /* total length of r_sge */
470 u32 r_rcv_len; /* receive data len processed */
471 u32 r_psn; /* expected rcv packet sequence number */
472 u32 r_msn; /* message sequence number */
474 u8 r_state; /* opcode of last packet received */
475 u8 r_flags;
476 u8 r_head_ack_queue; /* index into s_ack_queue[] */
478 struct list_head rspwait; /* link for waiting to respond */
480 struct qib_sge_state r_sge; /* current receive data */
481 struct qib_rq r_rq; /* receive work queue */
483 spinlock_t s_lock ____cacheline_aligned_in_smp;
484 struct qib_sge_state *s_cur_sge;
485 u32 s_flags;
486 struct qib_verbs_txreq *s_tx;
487 struct qib_swqe *s_wqe;
488 struct qib_sge_state s_sge; /* current send request data */
489 struct qib_mregion *s_rdma_mr;
490 atomic_t s_dma_busy;
491 u32 s_cur_size; /* size of send packet in bytes */
492 u32 s_len; /* total length of s_sge */
493 u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
494 u32 s_next_psn; /* PSN for next request */
495 u32 s_last_psn; /* last response PSN processed */
496 u32 s_sending_psn; /* lowest PSN that is being sent */
497 u32 s_sending_hpsn; /* highest PSN that is being sent */
498 u32 s_psn; /* current packet sequence number */
499 u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
500 u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
501 u32 s_head; /* new entries added here */
502 u32 s_tail; /* next entry to process */
503 u32 s_cur; /* current work queue entry */
504 u32 s_acked; /* last un-ACK'ed entry */
505 u32 s_last; /* last completed entry */
506 u32 s_ssn; /* SSN of tail entry */
507 u32 s_lsn; /* limit sequence number (credit) */
508 u16 s_hdrwords; /* size of s_hdr in 32 bit words */
509 u16 s_rdma_ack_cnt;
510 u8 s_state; /* opcode of last packet sent */
511 u8 s_ack_state; /* opcode of packet to ACK */
512 u8 s_nak_state; /* non-zero if NAK is pending */
513 u8 r_nak_state; /* non-zero if NAK is pending */
514 u8 s_retry; /* requester retry counter */
515 u8 s_rnr_retry; /* requester RNR retry counter */
516 u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
517 u8 s_tail_ack_queue; /* index into s_ack_queue[] */
519 struct qib_sge_state s_ack_rdma_sge;
520 struct timer_list s_timer;
521 struct list_head iowait; /* link for wait PIO buf */
523 struct work_struct s_work;
525 wait_queue_head_t wait_dma;
527 struct qib_sge r_sg_list[0] /* verified SGEs */