Lines matching refs:fq — cross-reference hits for the identifier fq in the DPAA QMan driver (drivers/soc/fsl/qbman/qman.c); each entry gives the source line number, the matching code, and the enclosing symbol.
268 struct qm_mcc_fq fq; member
957 static inline void fq_set(struct qman_fq *fq, u32 mask) in fq_set() argument
959 fq->flags |= mask; in fq_set()
962 static inline void fq_clear(struct qman_fq *fq, u32 mask) in fq_clear() argument
964 fq->flags &= ~mask; in fq_clear()
967 static inline int fq_isset(struct qman_fq *fq, u32 mask) in fq_isset() argument
969 return fq->flags & mask; in fq_isset()
972 static inline int fq_isclear(struct qman_fq *fq, u32 mask) in fq_isclear() argument
974 return !(fq->flags & mask); in fq_isclear()
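
Reassembled from the fragments above, the four helpers are plain bitmask accessors over the qman_fq flags word, which carries both the public QMAN_FQ_FLAG_* bits passed to qman_create_fq() and the internal QMAN_FQ_STATE_* bits (a reconstruction; only the function bodies appear in the listing):

static inline void fq_set(struct qman_fq *fq, u32 mask)
{
        fq->flags |= mask;
}

static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
        fq->flags &= ~mask;
}

static inline int fq_isset(struct qman_fq *fq, u32 mask)
{
        return fq->flags & mask;
}

static inline int fq_isclear(struct qman_fq *fq, u32 mask)
{
        return !(fq->flags & mask);
}
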
1121 struct qman_fq *fq; in idx_to_fq() local
1127 fq = fq_table[idx]; in idx_to_fq()
1128 DPAA_ASSERT(!fq || idx == fq->idx); in idx_to_fq()
1130 return fq; in idx_to_fq()
1151 static u32 fq_to_tag(struct qman_fq *fq) in fq_to_tag() argument
1154 return fq->idx; in fq_to_tag()
1156 return (u32)fq; in fq_to_tag()
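
The tag helpers exist because QMan can only carry a 32-bit cookie (context_b on dequeue, tag on enqueue rejection) back to software. A sketch of the pair, assuming the 64-bit lookup-table variant; the fq_table definition and the preprocessor condition are paraphrased from the surrounding driver rather than shown in the listing:

static struct qman_fq **fq_table;       /* assumed: allocated and sized elsewhere */

static u32 fq_to_tag(struct qman_fq *fq)
{
#if BITS_PER_LONG == 64
        return fq->idx;         /* index assigned in qman_create_fq() below */
#else
        return (u32)fq;         /* a 32-bit pointer fits in the tag directly */
#endif
}

static struct qman_fq *idx_to_fq(u32 idx)
{
        struct qman_fq *fq = fq_table[idx];

        DPAA_ASSERT(!fq || idx == fq->idx);
        return fq;
}
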
1425 static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq, in fq_state_change() argument
1430 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL)); in fq_state_change()
1431 fq_clear(fq, QMAN_FQ_STATE_ORL); in fq_state_change()
1434 DPAA_ASSERT(fq->state == qman_fq_state_parked || in fq_state_change()
1435 fq->state == qman_fq_state_sched); in fq_state_change()
1436 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING)); in fq_state_change()
1437 fq_clear(fq, QMAN_FQ_STATE_CHANGING); in fq_state_change()
1438 if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY) in fq_state_change()
1439 fq_set(fq, QMAN_FQ_STATE_NE); in fq_state_change()
1440 if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT) in fq_state_change()
1441 fq_set(fq, QMAN_FQ_STATE_ORL); in fq_state_change()
1442 fq->state = qman_fq_state_retired; in fq_state_change()
1445 DPAA_ASSERT(fq->state == qman_fq_state_sched); in fq_state_change()
1446 DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING)); in fq_state_change()
1447 fq->state = qman_fq_state_parked; in fq_state_change()
1488 struct qman_fq *fq; in qm_mr_process_task() local
1509 fq = fqid_to_fq(qm_fqid_get(&msg->fq)); in qm_mr_process_task()
1510 if (WARN_ON(!fq)) in qm_mr_process_task()
1512 fq_state_change(p, fq, msg, verb); in qm_mr_process_task()
1513 if (fq->cb.fqs) in qm_mr_process_task()
1514 fq->cb.fqs(p, fq, msg); in qm_mr_process_task()
1518 fq = tag_to_fq(be32_to_cpu(msg->fq.context_b)); in qm_mr_process_task()
1519 fq_state_change(p, fq, msg, verb); in qm_mr_process_task()
1520 if (fq->cb.fqs) in qm_mr_process_task()
1521 fq->cb.fqs(p, fq, msg); in qm_mr_process_task()
1532 fq = tag_to_fq(be32_to_cpu(msg->ern.tag)); in qm_mr_process_task()
1533 fq->cb.ern(p, fq, msg); in qm_mr_process_task()
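
qm_mr_process_task() demuxes message-ring entries to the owning qman_fq, either by FQID (for DC-portal queues) or via the context_b tag, and then invokes the cb.fqs or cb.ern callback the user registered. A minimal sketch of such callbacks, assuming the qman_cb_mr signature from the driver's public header <soc/fsl/qman.h>; the handler bodies are illustrative only:

#include <linux/printk.h>
#include <soc/fsl/qman.h>

/* Illustrative only: log frame-queue state changes (FQRN/FQRNI/FQPN). */
static void example_fqs_cb(struct qman_portal *qm, struct qman_fq *fq,
                           const union qm_mr_entry *msg)
{
        pr_info("FQ %u: state-change message, verb 0x%02x, fqs 0x%02x\n",
                fq->fqid, msg->verb, msg->fq.fqs);
}

/* Illustrative only: an enqueue rejection hands the frame descriptor back. */
static void example_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
                           const union qm_mr_entry *msg)
{
        pr_warn("FQ %u: frame at 0x%llx rejected\n", fq->fqid,
                (unsigned long long)qm_fd_addr(&msg->ern.fd));
        /* a real driver would unmap and free the buffer behind msg->ern.fd */
}
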
1571 static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq) in clear_vdqcr() argument
1574 fq_clear(fq, QMAN_FQ_STATE_VDQCR); in clear_vdqcr()
1608 struct qman_fq *fq; in __poll_portal_fast() local
1624 fq = p->vdqcr_owned; in __poll_portal_fast()
1632 fq_clear(fq, QMAN_FQ_STATE_NE); in __poll_portal_fast()
1639 res = fq->cb.dqrr(p, fq, dq, sched_napi); in __poll_portal_fast()
1644 clear_vdqcr(p, fq); in __poll_portal_fast()
1647 fq = tag_to_fq(be32_to_cpu(dq->context_b)); in __poll_portal_fast()
1649 res = fq->cb.dqrr(p, fq, dq, sched_napi); in __poll_portal_fast()
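
On the dequeue fast path, each DQRR entry is demuxed to its qman_fq (the portal's vdqcr_owned queue, or tag_to_fq() on context_b) and handed to cb.dqrr, whose return value tells the portal what to do with the entry. A minimal consumer callback, assuming the same header as above; the body is illustrative:

/* Illustrative only: consume every dequeued frame. */
static enum qman_cb_dqrr_result example_dqrr_cb(struct qman_portal *qm,
                                                struct qman_fq *fq,
                                                const struct qm_dqrr_entry *dq,
                                                bool sched_napi)
{
        const struct qm_fd *fd = &dq->fd;

        /* process the frame described by fd here; returning "consume"
         * releases the DQRR entry back to the portal */
        pr_debug("FQ %u: dequeued frame at 0x%llx\n", fq->fqid,
                 (unsigned long long)qm_fd_addr(fd));
        return qman_cb_dqrr_consume;
}
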
1795 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq) in qman_create_fq() argument
1803 fq->fqid = fqid; in qman_create_fq()
1804 fq->flags = flags; in qman_create_fq()
1805 fq->state = qman_fq_state_oos; in qman_create_fq()
1806 fq->cgr_groupid = 0; in qman_create_fq()
1814 fq->idx = fqid * 2; in qman_create_fq()
1816 fq->idx++; in qman_create_fq()
1818 WARN_ON(fq_table[fq->idx]); in qman_create_fq()
1819 fq_table[fq->idx] = fq; in qman_create_fq()
1825 void qman_destroy_fq(struct qman_fq *fq) in qman_destroy_fq() argument
1831 switch (fq->state) { in qman_destroy_fq()
1834 if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) in qman_destroy_fq()
1835 qman_release_fqid(fq->fqid); in qman_destroy_fq()
1837 DPAA_ASSERT(fq_table[fq->idx]); in qman_destroy_fq()
1838 fq_table[fq->idx] = NULL; in qman_destroy_fq()
1847 u32 qman_fq_fqid(struct qman_fq *fq) in qman_fq_fqid() argument
1849 return fq->fqid; in qman_fq_fqid()
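
qman_create_fq() only initialises the software object and its fq_table slot; no management command is issued, and with QMAN_FQ_FLAG_DYNAMIC_FQID the FQID is allocated here and released again by qman_destroy_fq(). A hedged usage sketch reusing the callback names from the sketches above (error handling trimmed, <linux/slab.h> assumed):

#include <linux/slab.h>

static int example_setup_fq(struct qman_fq **out)
{
        struct qman_fq *fq;
        int err;

        fq = kzalloc(sizeof(*fq), GFP_KERNEL);
        if (!fq)
                return -ENOMEM;

        /* demux callbacks must be in place before the FQ sees any traffic */
        fq->cb.dqrr = example_dqrr_cb;
        fq->cb.ern = example_ern_cb;
        fq->cb.fqs = example_fqs_cb;

        /* no management command yet; the FQ starts out-of-service and, with
         * the dynamic flag, fq->fqid is allocated for us */
        err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
        if (err) {
                kfree(fq);
                return err;
        }

        pr_info("created FQ %u\n", qman_fq_fqid(fq));
        *out = fq;
        return 0;
}
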
1853 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) in qman_init_fq() argument
1864 if (fq->state != qman_fq_state_oos && in qman_init_fq()
1865 fq->state != qman_fq_state_parked) in qman_init_fq()
1868 if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) in qman_init_fq()
1878 if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || in qman_init_fq()
1879 (fq->state != qman_fq_state_oos && in qman_init_fq()
1880 fq->state != qman_fq_state_parked)) { in qman_init_fq()
1887 qm_fqid_set(&mcc->fq, fq->fqid); in qman_init_fq()
1894 if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { in qman_init_fq()
1898 mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq)); in qman_init_fq()
1912 phys_fq = dma_map_single(p->config->dev, fq, in qman_init_fq()
1913 sizeof(*fq), DMA_TO_DEVICE); in qman_init_fq()
1950 fq_set(fq, QMAN_FQ_STATE_CGR_EN); in qman_init_fq()
1952 fq_clear(fq, QMAN_FQ_STATE_CGR_EN); in qman_init_fq()
1955 fq->cgr_groupid = opts->fqd.cgid; in qman_init_fq()
1957 fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? in qman_init_fq()
1966 int qman_schedule_fq(struct qman_fq *fq) in qman_schedule_fq() argument
1973 if (fq->state != qman_fq_state_parked) in qman_schedule_fq()
1976 if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) in qman_schedule_fq()
1981 if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || in qman_schedule_fq()
1982 fq->state != qman_fq_state_parked) { in qman_schedule_fq()
1987 qm_fqid_set(&mcc->fq, fq->fqid); in qman_schedule_fq()
2000 fq->state = qman_fq_state_sched; in qman_schedule_fq()
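
qman_init_fq() sends the INITFQ command; for host-consumed queues the driver writes its own demux tag into context_b (line 1898), so a caller typically just picks the destination work queue and FQ-control bits, and QMAN_INITFQ_FLAG_SCHED decides between the scheduled and the parked state (line 1957), with qman_schedule_fq() covering the latter. A sketch under the assumption that the qm_mcc_initfq field names, the QM_INITFQ_WE_*/QM_FQCTRL_* masks and qman_affine_channel() behave as in the driver's public header (includes as in the earlier sketches):

static int example_start_fq(struct qman_fq *fq)
{
        struct qm_mcc_initfq opts;

        memset(&opts, 0, sizeof(opts));
        opts.we_mask = cpu_to_be16(QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL);
        opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
        /* deliver to this CPU's affine portal, work queue 3 (arbitrary) */
        qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(-1), 3);

        /* initialise and schedule in one step; passing 0 instead of
         * QMAN_INITFQ_FLAG_SCHED would leave the FQ parked, to be started
         * later with qman_schedule_fq(fq) */
        return qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
}
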
2007 int qman_retire_fq(struct qman_fq *fq, u32 *flags) in qman_retire_fq() argument
2015 if (fq->state != qman_fq_state_parked && in qman_retire_fq()
2016 fq->state != qman_fq_state_sched) in qman_retire_fq()
2019 if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) in qman_retire_fq()
2023 if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || in qman_retire_fq()
2024 fq->state == qman_fq_state_retired || in qman_retire_fq()
2025 fq->state == qman_fq_state_oos) { in qman_retire_fq()
2030 qm_fqid_set(&mcc->fq, fq->fqid); in qman_retire_fq()
2053 fq_set(fq, QMAN_FQ_STATE_NE); in qman_retire_fq()
2055 fq_set(fq, QMAN_FQ_STATE_ORL); in qman_retire_fq()
2057 *flags = fq->flags; in qman_retire_fq()
2058 fq->state = qman_fq_state_retired; in qman_retire_fq()
2059 if (fq->cb.fqs) { in qman_retire_fq()
2072 msg.fq.fqs = mcr->alterfq.fqs; in qman_retire_fq()
2073 qm_fqid_set(&msg.fq, fq->fqid); in qman_retire_fq()
2074 msg.fq.context_b = cpu_to_be32(fq_to_tag(fq)); in qman_retire_fq()
2075 fq->cb.fqs(p, fq, &msg); in qman_retire_fq()
2079 fq_set(fq, QMAN_FQ_STATE_CHANGING); in qman_retire_fq()
2089 int qman_oos_fq(struct qman_fq *fq) in qman_oos_fq() argument
2096 if (fq->state != qman_fq_state_retired) in qman_oos_fq()
2099 if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) in qman_oos_fq()
2103 if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) || in qman_oos_fq()
2104 fq->state != qman_fq_state_retired) { in qman_oos_fq()
2109 qm_fqid_set(&mcc->fq, fq->fqid); in qman_oos_fq()
2120 fq->state = qman_fq_state_oos; in qman_oos_fq()
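
Teardown walks the state machine backwards: retire the queue (which may only complete asynchronously through cb.fqs when frames or an ORL are outstanding, per lines 2053-2075), drain it if QMAN_FQ_STATE_NE was reported, take it out of service, then destroy it. A sketch of the synchronous path; the return-value convention of qman_retire_fq() is paraphrased from the driver and should be treated as an assumption:

static void example_teardown_fq(struct qman_fq *fq)
{
        u32 flags = 0;
        int err;

        err = qman_retire_fq(fq, &flags);
        if (err < 0) {
                pr_err("retire of FQ %u failed: %d\n", fq->fqid, err);
                return;
        }
        if (err > 0) {
                /* retirement pending: the FQRN message arrives through
                 * cb.fqs, and a real driver must wait for it before going on */
                return;
        }
        if (flags & QMAN_FQ_STATE_NE) {
                /* the queue still holds frames; drain them first (e.g. with a
                 * volatile dequeue, see the drain sketch further down) */
        }

        err = qman_oos_fq(fq);
        if (err)
                pr_err("oos of FQ %u failed: %d\n", fq->fqid, err);

        qman_destroy_fq(fq);    /* also releases a dynamically allocated FQID */
        kfree(fq);
}
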
2127 int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) in qman_query_fq() argument
2135 qm_fqid_set(&mcc->fq, fq->fqid); in qman_query_fq()
2152 int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np) in qman_query_fq_np() argument
2160 qm_fqid_set(&mcc->fq, fq->fqid); in qman_query_fq_np()
2223 static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr) in set_p_vdqcr() argument
2231 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) in set_p_vdqcr()
2234 fq_set(fq, QMAN_FQ_STATE_VDQCR); in set_p_vdqcr()
2235 p->vdqcr_owned = fq; in set_p_vdqcr()
2243 static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr) in set_vdqcr() argument
2248 ret = set_p_vdqcr(*p, fq, vdqcr); in set_vdqcr()
2253 static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq, in wait_vdqcr_start() argument
2260 !set_vdqcr(p, fq, vdqcr)); in wait_vdqcr_start()
2262 wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr)); in wait_vdqcr_start()
2266 int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr) in qman_volatile_dequeue() argument
2271 if (fq->state != qman_fq_state_parked && in qman_volatile_dequeue()
2272 fq->state != qman_fq_state_retired) in qman_volatile_dequeue()
2276 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) in qman_volatile_dequeue()
2278 vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; in qman_volatile_dequeue()
2280 ret = wait_vdqcr_start(&p, fq, vdqcr, flags); in qman_volatile_dequeue()
2282 ret = set_vdqcr(&p, fq, vdqcr); in qman_volatile_dequeue()
2295 !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); in qman_volatile_dequeue()
2298 !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); in qman_volatile_dequeue()
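
A volatile dequeue pulls frames from a parked or retired queue through the issuing portal instead of the scheduler, which is how a retired-but-not-empty queue gets drained; the FQID bits of the caller's VDQCR word are overwritten with fq->fqid (line 2278) and QMAN_FQ_STATE_VDQCR blocks overlapping commands. A sketch whose flag and VDQCR macro names are taken from the driver's public header and should be treated as assumptions:

/* Drain whatever is left on a retired (or parked) FQ; the frames arrive
 * through the FQ's normal dqrr callback on this CPU's portal. */
static int example_drain_fq(struct qman_fq *fq)
{
        return qman_volatile_dequeue(fq,
                                     QMAN_VOLATILE_FLAG_WAIT |
                                     QMAN_VOLATILE_FLAG_FINISH,
                                     QM_VDQCR_NUMFRAMES_TILLEMPTY);
}
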
2312 int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd) in qman_enqueue() argument
2342 qm_fqid_set(eq, fq->fqid); in qman_enqueue()
2343 eq->tag = cpu_to_be32(fq_to_tag(fq)); in qman_enqueue()
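
qman_enqueue() fills an EQCR entry with the FQID and the same demux tag used on the dequeue side, so the caller only provides a frame descriptor. A sketch of enqueueing one contiguous, already-DMA-mapped buffer; the qm_fd helper names are assumed from the driver's public header:

static int example_send(struct qman_fq *fq, dma_addr_t addr, u32 len)
{
        struct qm_fd fd;

        memset(&fd, 0, sizeof(fd));
        qm_fd_addr_set64(&fd, addr);    /* DMA address of the buffer */
        qm_fd_set_contig(&fd, 0, len);  /* one contiguous buffer, no offset */

        /* fails when the enqueue ring has no free entry */
        return qman_enqueue(fq, &fd);
}
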
2689 qm_fqid_set(&mcc->fq, fqid); in qman_shutdown_fq()
2704 qm_fqid_set(&mcc->fq, fqid); in qman_shutdown_fq()
2735 qm_fqid_set(&mcc->fq, fqid); in qman_shutdown_fq()
2832 qm_fqid_set(&mcc->fq, fqid); in qman_shutdown_fq()
2852 qm_fqid_set(&mcc->fq, fqid); in qman_shutdown_fq()
2949 struct qman_fq fq = { in qpool_cleanup() local
2957 err = qman_query_fq_np(&fq, &np); in qpool_cleanup()
2967 err = qman_query_fq(&fq, &fqd); in qpool_cleanup()
2972 err = qman_shutdown_fq(fq.fqid); in qpool_cleanup()
2982 fq.fqid++; in qpool_cleanup()
3007 struct qman_fq fq = { in cgr_cleanup() local
3015 err = qman_query_fq_np(&fq, &np); in cgr_cleanup()
3025 err = qman_query_fq(&fq, &fqd); in cgr_cleanup()
3031 cgrid, fq.fqid); in cgr_cleanup()
3036 fq.fqid++; in cgr_cleanup()
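
Both cleanup helpers scan the whole FQID space with a throw-away on-stack qman_fq, using the non-programmable query to find queues of interest and treating -ERANGE from qman_query_fq_np() as the end of the implemented range. A sketch of the same scan pattern; the QM_MCR_NP_STATE_* names are assumed from the driver's header and the "in use" test is illustrative:

static int example_scan_fqs(void)
{
        struct qman_fq fq = { .fqid = 1 };
        struct qm_mcr_queryfq_np np;
        int err;

        do {
                err = qman_query_fq_np(&fq, &np);
                if (err == -ERANGE)     /* walked past the last implemented FQID */
                        return 0;
                else if (err)
                        return err;

                /* report queues that are not out of service */
                if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS)
                        pr_info("FQ %u is in use\n", fq.fqid);

                fq.fqid++;
        } while (1);
}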