Searched refs:backlog (Results 1 – 25 of 65) sorted by relevance

/drivers/crypto/ccp/
ccp-crypto-main.c 55 struct list_head *backlog; member
97 struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog) in ccp_crypto_cmd_complete() argument
102 *backlog = NULL; in ccp_crypto_cmd_complete()
121 if (req_queue.backlog != &req_queue.cmds) { in ccp_crypto_cmd_complete()
123 if (req_queue.backlog == &crypto_cmd->entry) in ccp_crypto_cmd_complete()
124 req_queue.backlog = crypto_cmd->entry.next; in ccp_crypto_cmd_complete()
126 *backlog = container_of(req_queue.backlog, in ccp_crypto_cmd_complete()
128 req_queue.backlog = req_queue.backlog->next; in ccp_crypto_cmd_complete()
131 if (req_queue.backlog == &crypto_cmd->entry) in ccp_crypto_cmd_complete()
132 req_queue.backlog = crypto_cmd->entry.next; in ccp_crypto_cmd_complete()
[all …]
ccp-dev.c 315 list_add_tail(&cmd->entry, &ccp->backlog); in ccp_enqueue_cmd()
378 struct ccp_cmd *backlog = NULL; in ccp_dequeue_cmd() local
403 if (!list_empty(&ccp->backlog)) { in ccp_dequeue_cmd()
404 backlog = list_first_entry(&ccp->backlog, struct ccp_cmd, in ccp_dequeue_cmd()
406 list_del(&backlog->entry); in ccp_dequeue_cmd()
411 if (backlog) { in ccp_dequeue_cmd()
412 INIT_WORK(&backlog->work, ccp_do_cmd_backlog); in ccp_dequeue_cmd()
413 schedule_work(&backlog->work); in ccp_dequeue_cmd()
488 INIT_LIST_HEAD(&ccp->backlog); in ccp_alloc_struct()
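
The ccp hits show a driver-private backlog: ccp-crypto-main.c keeps req_queue.backlog as a cursor marking where the backlogged region of the command list begins, and ccp-dev.c parks overflow commands on ccp->backlog, re-issuing the oldest one from a workqueue. A minimal kernel-style sketch of that re-issue path, with simplified stand-ins for the ccp structures (only the fields the backlog code touches are shown):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/workqueue.h>

struct ccp_cmd_sketch {
        struct list_head entry;
        struct work_struct work;
        void (*callback)(void *data, int err);
        void *data;
};

static void do_cmd_backlog(struct work_struct *work)
{
        struct ccp_cmd_sketch *cmd =
                container_of(work, struct ccp_cmd_sketch, work);

        /* Crypto-layer convention: -EINPROGRESS tells the submitter
         * its command has left the backlog and is now queued. */
        cmd->callback(cmd->data, -EINPROGRESS);
        /* ...re-enqueue cmd onto the active command queue... */
}

/* Called with the queue lock held: pop the oldest backlogged command
 * and push its re-issue out to process context. */
static void kick_backlog(struct list_head *backlog)
{
        struct ccp_cmd_sketch *cmd;

        if (list_empty(backlog))
                return;

        cmd = list_first_entry(backlog, struct ccp_cmd_sketch, entry);
        list_del(&cmd->entry);
        INIT_WORK(&cmd->work, do_cmd_backlog);
        schedule_work(&cmd->work);
}
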
/drivers/net/ethernet/mellanox/mlxsw/
spectrum_qdisc.c 65 u64 backlog; member
264 return xstats->backlog[tclass_num] + in mlxsw_sp_xstats_backlog()
265 xstats->backlog[tclass_num + 8]; in mlxsw_sp_xstats_backlog()
318 u64 drops, u64 backlog, in mlxsw_sp_qdisc_update_stats() argument
326 backlog -= stats_base->backlog; in mlxsw_sp_qdisc_update_stats()
330 stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog); in mlxsw_sp_qdisc_update_stats()
332 stats_base->backlog += backlog; in mlxsw_sp_qdisc_update_stats()
345 u64 backlog = 0; in mlxsw_sp_qdisc_get_tc_stats() local
350 &drops, &backlog); in mlxsw_sp_qdisc_get_tc_stats()
352 tx_bytes, tx_packets, drops, backlog, in mlxsw_sp_qdisc_get_tc_stats()
[all …]
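
The mlxsw hits use backlog in its qdisc sense: the number of bytes currently queued, reported through the offloaded qdisc's statistics. The hardware counter is free-running and counts in device cells, so mlxsw_sp_qdisc_update_stats() subtracts the snapshot taken at the previous readout and converts cells to bytes before crediting the qdisc. A simplified sketch of that delta accounting, with stand-in types and the cell size passed in rather than derived from the device:

#include <linux/types.h>

struct backlog_base {
        u64 backlog;            /* hardware reading at last readout */
};

static void update_backlog_stat(u64 *qstats_backlog_bytes,
                                struct backlog_base *base,
                                u64 hw_backlog_cells, u32 bytes_per_cell)
{
        u64 delta = hw_backlog_cells - base->backlog;

        *qstats_backlog_bytes += delta * bytes_per_cell; /* cells -> bytes */
        base->backlog += delta;                          /* advance snapshot */
}
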
/drivers/crypto/qce/
core.c 80 struct crypto_async_request *async_req, *backlog; in qce_handle_queue() local
95 backlog = crypto_get_backlog(&qce->queue); in qce_handle_queue()
105 if (backlog) { in qce_handle_queue()
107 backlog->complete(backlog, -EINPROGRESS); in qce_handle_queue()
/drivers/crypto/
mxs-dcp.c 394 struct crypto_async_request *backlog; in dcp_chan_thread_aes() local
403 backlog = crypto_get_backlog(&sdcp->queue[chan]); in dcp_chan_thread_aes()
407 if (!backlog && !arq) { in dcp_chan_thread_aes()
414 if (backlog) in dcp_chan_thread_aes()
415 backlog->complete(backlog, -EINPROGRESS); in dcp_chan_thread_aes()
691 struct crypto_async_request *backlog; in dcp_chan_thread_sha() local
699 backlog = crypto_get_backlog(&sdcp->queue[chan]); in dcp_chan_thread_sha()
703 if (!backlog && !arq) { in dcp_chan_thread_sha()
710 if (backlog) in dcp_chan_thread_sha()
711 backlog->complete(backlog, -EINPROGRESS); in dcp_chan_thread_sha()
s5p-sss.c 1376 struct crypto_async_request *async_req, *backlog; in s5p_hash_handle_queue() local
1391 backlog = crypto_get_backlog(&dd->hash_queue); in s5p_hash_handle_queue()
1401 if (backlog) in s5p_hash_handle_queue()
1402 backlog->complete(backlog, -EINPROGRESS); in s5p_hash_handle_queue()
1980 struct crypto_async_request *async_req, *backlog; in s5p_tasklet_cb() local
1985 backlog = crypto_get_backlog(&dev->queue); in s5p_tasklet_cb()
1995 if (backlog) in s5p_tasklet_cb()
1996 backlog->complete(backlog, -EINPROGRESS); in s5p_tasklet_cb()
img-hash.c 502 struct crypto_async_request *async_req, *backlog; in img_hash_handle_queue() local
517 backlog = crypto_get_backlog(&hdev->queue); in img_hash_handle_queue()
527 if (backlog) in img_hash_handle_queue()
528 backlog->complete(backlog, -EINPROGRESS); in img_hash_handle_queue()
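
The qce, mxs-dcp, s5p-sss and img-hash hits all follow the same crypto API convention: crypto_get_backlog() peeks at the request about to be promoted out of the queue's backlog region, and once the driver commits to processing, that request's owner is notified with -EINPROGRESS so it stops treating its earlier -EBUSY as "still backlogged". A minimal sketch of the shared shape, against the pre-6.2 completion API these drivers use:

#include <crypto/algapi.h>
#include <linux/spinlock.h>

static struct crypto_async_request *
handle_queue(struct crypto_queue *queue, spinlock_t *lock)
{
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        backlog = crypto_get_backlog(queue);      /* NULL if none pending */
        async_req = crypto_dequeue_request(queue);
        spin_unlock_irqrestore(lock, flags);

        /* Notify outside the lock: the backlogged request has been
         * promoted into the active queue. */
        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        return async_req;       /* next request to run, or NULL */
}

The marvell/cesa and inside-secure/safexcel hits below are variations on this shape; safexcel additionally stashes the pending backlog pointer in per-ring state (priv->ring[ring].backlog) so the notification can be carried across calls.
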
/drivers/crypto/marvell/cesa/
tdma.c 136 struct crypto_async_request *backlog = NULL; in mv_cesa_tdma_process() local
149 &backlog); in mv_cesa_tdma_process()
170 if (backlog) in mv_cesa_tdma_process()
171 backlog->complete(backlog, -EINPROGRESS); in mv_cesa_tdma_process()
cesa.c 39 struct crypto_async_request **backlog) in mv_cesa_dequeue_req_locked() argument
43 *backlog = crypto_get_backlog(&engine->queue); in mv_cesa_dequeue_req_locked()
54 struct crypto_async_request *req = NULL, *backlog = NULL; in mv_cesa_rearm_engine() local
60 req = mv_cesa_dequeue_req_locked(engine, &backlog); in mv_cesa_rearm_engine()
68 if (backlog) in mv_cesa_rearm_engine()
69 backlog->complete(backlog, -EINPROGRESS); in mv_cesa_rearm_engine()
/drivers/net/ipvlan/
ipvlan_core.c 240 spin_lock_bh(&port->backlog.lock); in ipvlan_process_multicast()
241 skb_queue_splice_tail_init(&port->backlog, &list); in ipvlan_process_multicast()
242 spin_unlock_bh(&port->backlog.lock); in ipvlan_process_multicast()
566 spin_lock(&port->backlog.lock); in ipvlan_multicast_enqueue()
567 if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) { in ipvlan_multicast_enqueue()
570 __skb_queue_tail(&port->backlog, skb); in ipvlan_multicast_enqueue()
571 spin_unlock(&port->backlog.lock); in ipvlan_multicast_enqueue()
574 spin_unlock(&port->backlog.lock); in ipvlan_multicast_enqueue()
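
ipvlan's backlog is a bounded sk_buff_head: producers enqueue multicast skbs under the queue lock as long as the queue is below IPVLAN_QBACKLOG_LIMIT, and the consumer splices the entire queue out in one operation so the lock is never held while packets are processed. A condensed sketch of both sides (the limit value here is a stand-in):

#include <linux/skbuff.h>

#define QBACKLOG_LIMIT 1000     /* stand-in for IPVLAN_QBACKLOG_LIMIT */

static bool mcast_enqueue(struct sk_buff_head *backlog, struct sk_buff *skb)
{
        bool queued = false;

        spin_lock(&backlog->lock);
        if (skb_queue_len(backlog) < QBACKLOG_LIMIT) {
                /* __skb_queue_tail: lockless variant, lock already held */
                __skb_queue_tail(backlog, skb);
                queued = true;
        }
        spin_unlock(&backlog->lock);
        return queued;          /* false: over limit, caller drops skb */
}

static void mcast_drain(struct sk_buff_head *backlog)
{
        struct sk_buff_head list;

        __skb_queue_head_init(&list);

        spin_lock_bh(&backlog->lock);
        skb_queue_splice_tail_init(backlog, &list); /* take everything */
        spin_unlock_bh(&backlog->lock);

        /* ...deliver each skb on 'list' with the lock dropped... */
}
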
/drivers/crypto/cavium/nitrox/
nitrox_reqmgr.c 230 INIT_LIST_HEAD(&sr->backlog); in backlog_list_add()
233 list_add_tail(&sr->backlog, &cmdq->backlog_head); in backlog_list_add()
326 list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { in post_backlog_cmds()
333 list_del(&sr->backlog); in post_backlog_cmds()
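
nitrox chains parked requests through a list_head embedded in the request itself (sr->backlog) and drains them in FIFO order with the _safe iterator, which permits unlinking entries mid-walk. Sketch with a stand-in request type:

#include <linux/list.h>

struct softreq_sketch {
        struct list_head backlog;
        /* ...command payload... */
};

static void drain_backlog(struct list_head *backlog_head)
{
        struct softreq_sketch *sr, *tmp;

        list_for_each_entry_safe(sr, tmp, backlog_head, backlog) {
                /* ...try to post sr to the hardware queue;
                 * break out if the queue is full again... */
                list_del(&sr->backlog);
        }
}
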
/drivers/crypto/ccree/
cc_request_mgr.c 34 struct list_head backlog; member
130 INIT_LIST_HEAD(&req_mgr_h->backlog); in cc_req_mgr_init()
338 list_add_tail(&bli->list, &mgr->backlog); in cc_enqueue_backlog()
357 bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list); in cc_proc_backlog()
/drivers/infiniband/hw/qedr/
qedr_iw_cm.h 37 int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog);
qedr_iw_cm.c 657 int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog) in qedr_iw_create_listen() argument
681 listener->backlog = backlog; in qedr_iw_create_listen()
685 iparams.max_backlog = backlog; in qedr_iw_create_listen()
/drivers/xen/
pvcalls-front.h 12 int pvcalls_front_listen(struct socket *sock, int backlog);
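
pvcalls_front_listen(), like the qedr and iwcm listen entry points in this listing, carries the classic listen(2) backlog: an upper bound on connections the stack has completed but the application has not yet accepted. For reference, the same parameter at the userspace boundary (the kernel additionally clamps the value to the net.core.somaxconn sysctl):

#include <netinet/in.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>

int make_listener(uint16_t port, int backlog)
{
        struct sockaddr_in addr;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
                return -1;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(port);

        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                return -1;
        /* 'backlog' bounds the accept queue of completed connections. */
        if (listen(fd, backlog) < 0)
                return -1;
        return fd;
}
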
/drivers/net/ethernet/mellanox/mlx5/core/fpga/
conn.h 75 struct list_head backlog; member
/drivers/crypto/inside-secure/
safexcel.c 814 struct crypto_async_request *req, *backlog; in safexcel_dequeue() local
822 backlog = priv->ring[ring].backlog; in safexcel_dequeue()
828 backlog = crypto_get_backlog(&priv->ring[ring].queue); in safexcel_dequeue()
834 priv->ring[ring].backlog = NULL; in safexcel_dequeue()
844 if (backlog) in safexcel_dequeue()
845 backlog->complete(backlog, -EINPROGRESS); in safexcel_dequeue()
864 priv->ring[ring].backlog = backlog; in safexcel_dequeue()
/drivers/infiniband/core/
ucma.c 90 atomic_t backlog; member
303 if (!atomic_add_unless(&listen_ctx->backlog, -1, 0)) in ucma_connect_event_handler()
328 atomic_inc(&listen_ctx->backlog); in ucma_connect_event_handler()
414 atomic_inc(&uevent->ctx->backlog); in ucma_get_event()
1100 if (cmd.backlog <= 0 || cmd.backlog > max_backlog) in ucma_listen()
1101 cmd.backlog = max_backlog; in ucma_listen()
1102 atomic_set(&ctx->backlog, cmd.backlog); in ucma_listen()
1105 ret = rdma_listen(ctx->cm_id, cmd.backlog); in ucma_listen()
iwcm.c 562 int iw_cm_listen(struct iw_cm_id *cm_id, int backlog) in iw_cm_listen() argument
570 if (!backlog) in iw_cm_listen()
571 backlog = default_backlog; in iw_cm_listen()
573 ret = alloc_work_entries(cm_id_priv, backlog); in iw_cm_listen()
585 backlog); in iw_cm_listen()
cma_priv.h 72 int backlog; member
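
In ucma.c the listen backlog becomes an atomic credit counter: each incoming connect event must win a credit via atomic_add_unless(&backlog, -1, 0), which refuses to go below zero, and the credit is returned once userspace consumes the event. iwcm.c shows the companion convention that a backlog of 0 selects a module-parameter default. Credit-handling sketch:

#include <linux/atomic.h>

/* Take one admission credit; false means the backlog is exhausted
 * and the connect request should be refused. */
static bool backlog_get(atomic_t *backlog)
{
        return atomic_add_unless(backlog, -1, 0);
}

/* Return the credit once the pending event has been consumed. */
static void backlog_put(atomic_t *backlog)
{
        atomic_inc(backlog);
}
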
/drivers/atm/
zatm.h 55 struct sk_buff_head backlog; /* list of buffers waiting for ring */ member
eni.h 50 struct sk_buff_head backlog; /* queue of waiting TX buffers */ member
/drivers/net/ipa/
ipa_endpoint.c 904 u32 backlog; in ipa_endpoint_replenish() local
934 backlog = atomic_add_return(count + 1, &endpoint->replenish_backlog); in ipa_endpoint_replenish()
943 if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id)) in ipa_endpoint_replenish()
966 u32 backlog; in ipa_endpoint_replenish_disable() local
969 while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0))) in ipa_endpoint_replenish_disable()
970 atomic_add(backlog, &endpoint->replenish_saved); in ipa_endpoint_replenish_disable()
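
Here backlog counts receive buffers still owed to the hardware. ipa_endpoint_replenish_disable() drains the counter with atomic_xchg() in a loop, so credits added concurrently by a completing transaction are swept into the saved counter rather than lost. Sketch:

#include <linux/atomic.h>
#include <linux/types.h>

static void replenish_disable(atomic_t *replenish_backlog,
                              atomic_t *replenish_saved)
{
        u32 backlog;

        /* xchg in a loop: anything added to the backlog between
         * iterations is picked up by the next pass. */
        while ((backlog = atomic_xchg(replenish_backlog, 0)))
                atomic_add(backlog, replenish_saved);
}
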
/drivers/infiniband/hw/i40iw/
i40iw_cm.h 297 int backlog; member
373 int backlog; member
/drivers/crypto/hisilicon/sec/
sec_algs.c 496 } else if (!list_empty(&ctx->backlog)) { in sec_skcipher_alg_callback()
498 backlog_req = list_first_entry(&ctx->backlog, in sec_skcipher_alg_callback()
813 !list_empty(&ctx->backlog)) { in sec_alg_skcipher_crypto()
816 list_add_tail(&sec_req->backlog_head, &ctx->backlog); in sec_alg_skcipher_crypto()
877 INIT_LIST_HEAD(&ctx->backlog); in sec_alg_skcipher_init()
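
sec_algs.c shows the submit side of the crypto backlog contract: when the hardware queue is full, a request flagged CRYPTO_TFM_REQ_MAY_BACKLOG is parked on ctx->backlog and the submitter gets -EBUSY, which under that flag means "accepted but queued"; the completion callback later pulls the oldest parked request with list_first_entry(). A condensed sketch with a stand-in request type and simplified control flow:

#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/list.h>

struct sec_req_sketch {
        struct list_head backlog_head;
        u32 flags;              /* CRYPTO_TFM_REQ_* flags of the request */
};

static int submit_or_backlog(struct sec_req_sketch *req,
                             struct list_head *backlog, bool queue_full)
{
        if (!queue_full)
                return 0;       /* issued immediately */

        if (!(req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                return -EBUSY;  /* refused outright: caller must retry */

        list_add_tail(&req->backlog_head, backlog);
        return -EBUSY;          /* parked; -EINPROGRESS will follow */
}
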
