/kernel/linux/linux-5.10/crypto/
crypto_engine.c
    3: * Handle async block request by crypto hardware engine.
   13: #include <crypto/engine.h>
   21: * @engine: the hardware engine
   25: static void crypto_finalize_request(struct crypto_engine *engine,
   38:         if (!engine->retry_support) {
   39:                 spin_lock_irqsave(&engine->queue_lock, flags);
   40:                 if (engine->cur_req == req) {
   42:                         engine->cur_req = NULL;
   44:                 spin_unlock_irqrestore(&engine->queue_lock, flags);
   47:         if (finalize_req || engine->retry_support) {
[all …]
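The lines above are the engine core's half of the contract; a driver only calls the transfer/finalize pair. A minimal sketch of that usage, assuming a hypothetical my_dev wrapper (crypto_transfer_skcipher_request_to_engine() and crypto_finalize_skcipher_request() are the real <crypto/engine.h> API):

#include <crypto/engine.h>
#include <crypto/skcipher.h>

/* Hypothetical driver state; only the engine pointer matters here. */
struct my_dev {
        struct crypto_engine *engine;
};

/* Submission: queue the request; the engine's kworker later invokes
 * the driver's do_one_request() callback to program the hardware. */
static int my_enqueue(struct my_dev *dev, struct skcipher_request *req)
{
        return crypto_transfer_skcipher_request_to_engine(dev->engine, req);
}

/* Completion (e.g. from the IRQ handler): this path reaches
 * crypto_finalize_request() above, which clears engine->cur_req and
 * pumps the queue for the next request. */
static void my_done(struct my_dev *dev, struct skcipher_request *req, int err)
{
        crypto_finalize_skcipher_request(dev->engine, req, err);
}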
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/engine/disp/
Kbuild
    2: nvkm-y += nvkm/engine/disp/base.o
    3: nvkm-y += nvkm/engine/disp/nv04.o
    4: nvkm-y += nvkm/engine/disp/nv50.o
    5: nvkm-y += nvkm/engine/disp/g84.o
    6: nvkm-y += nvkm/engine/disp/g94.o
    7: nvkm-y += nvkm/engine/disp/gt200.o
    8: nvkm-y += nvkm/engine/disp/mcp77.o
    9: nvkm-y += nvkm/engine/disp/gt215.o
   10: nvkm-y += nvkm/engine/disp/mcp89.o
   11: nvkm-y += nvkm/engine/disp/gf119.o
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/engine/gr/
Kbuild
    2: nvkm-y += nvkm/engine/gr/base.o
    3: nvkm-y += nvkm/engine/gr/nv04.o
    4: nvkm-y += nvkm/engine/gr/nv10.o
    5: nvkm-y += nvkm/engine/gr/nv15.o
    6: nvkm-y += nvkm/engine/gr/nv17.o
    7: nvkm-y += nvkm/engine/gr/nv20.o
    8: nvkm-y += nvkm/engine/gr/nv25.o
    9: nvkm-y += nvkm/engine/gr/nv2a.o
   10: nvkm-y += nvkm/engine/gr/nv30.o
   11: nvkm-y += nvkm/engine/gr/nv34.o
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/
intel_ring_submission.c
   48: static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
   54:         if (engine->class == RENDER_CLASS) {
   55:                 if (INTEL_GEN(engine->i915) >= 6)
   61:         intel_engine_set_hwsp_writemask(engine, mask);
   64: static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
   69:         if (INTEL_GEN(engine->i915) >= 4)
   72:         intel_uncore_write(engine->uncore, HWS_PGA, addr);
   75: static struct page *status_page(struct intel_engine_cs *engine)
   77:         struct drm_i915_gem_object *obj = engine->status_page.vma->obj;
   83: static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
[all …]
intel_engine_heartbeat.c
   18: * While the engine is active, we send a periodic pulse along the engine
   20: * is stuck, and we fail to preempt it, we declare the engine hung and
   24: static bool next_heartbeat(struct intel_engine_cs *engine)
   28:         delay = READ_ONCE(engine->props.heartbeat_interval_ms);
   35:         mod_delayed_work(system_highpri_wq, &engine->heartbeat.work, delay);
   40: static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
   42:         engine->wakeref_serial = READ_ONCE(engine->serial) + 1;
   47:                            struct intel_engine_cs *engine)
   51:         intel_engine_dump(engine, &p,
   53:                           engine->name,
[all …]
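The comment explains the idea: a periodic pulse along the engine that doubles as a hang check. The re-arming step in next_heartbeat() is an ordinary delayed-work pattern; here is a distilled, self-contained sketch under generic names (the real code reads engine->props.heartbeat_interval_ms and applies jiffies rounding):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

/* Generic stand-in for engine->heartbeat / engine->props. */
struct heartbeat {
        struct delayed_work work;
        unsigned long interval_ms;      /* 0 disables the heartbeat */
};

/* Re-arm the pulse, as next_heartbeat() does; returns false when
 * heartbeats are disabled. */
static bool heartbeat_next(struct heartbeat *hb)
{
        unsigned long delay = READ_ONCE(hb->interval_ms);

        if (!delay)
                return false;

        mod_delayed_work(system_highpri_wq, &hb->work,
                         msecs_to_jiffies(delay));
        return true;
}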
intel_engine_cs.c
  144: * intel_engine_context_size() - return the size of the context for an engine
  146: * @class: engine class
  148: * Each engine class may require a different amount of space for a context
  151: * Return: size (in bytes) of an engine class specific context image
  242: static void __sprint_engine_name(struct intel_engine_cs *engine)
  245:          * Before we know what the uABI name for this engine will be,
  246:          * we still would like to keep track of this engine in the debug logs.
  249:         GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
  250:                              intel_engine_class_repr(engine->class),
  251:                              engine->instance) >= sizeof(engine->name));
[all …]
mock_engine.c
   46: static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
   84: static struct i915_request *first_request(struct mock_engine *engine)
   86:         return list_first_entry_or_null(&engine->hw_queue,
   97:         intel_engine_signal_breadcrumbs(request->engine);
  102:         struct mock_engine *engine = from_timer(engine, t, hw_delay);
  106:         spin_lock_irqsave(&engine->hw_lock, flags);
  109:         request = first_request(engine);
  117:         while ((request = first_request(engine))) {
  119:                 mod_timer(&engine->hw_delay,
  127:         spin_unlock_irqrestore(&engine->hw_lock, flags);
[all …]
selftest_engine_heartbeat.c
   32: static int engine_sync_barrier(struct intel_engine_cs *engine)
   34:         return timeline_sync(engine->kernel_context->timeline);
   85: static int __live_idle_pulse(struct intel_engine_cs *engine,
   91:         GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
  101:         err = i915_active_acquire_preallocate_barrier(&p->active, engine);
  111:         GEM_BUG_ON(llist_empty(&engine->barrier_tasks));
  113:         err = fn(engine);
  117:         GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
  119:         if (engine_sync_barrier(engine)) {
  122:                 pr_err("%s: no heartbeat pulse?\n", engine->name);
[all …]
intel_engine_user.c
   39: void intel_engine_add_user(struct intel_engine_cs *engine)
   41:         llist_add((struct llist_node *)&engine->uabi_node,
   42:                   (struct llist_head *)&engine->i915->uabi_engines);
   84:                 struct intel_engine_cs *engine =
   85:                         container_of((struct rb_node *)pos, typeof(*engine),
   87:                 list_add((struct list_head *)&engine->uabi_node, engines);
   95:         u8 engine;
  104:         struct intel_engine_cs *engine;
  109:         for_each_uabi_engine(engine, i915) { /* all engines must agree! */
  112:                 if (engine->schedule)
[all …]
intel_engine_pm.c
   22:         struct intel_engine_cs *engine =
   23:                 container_of(wf, typeof(*engine), wakeref);
   26:         ENGINE_TRACE(engine, "\n");
   28:         intel_gt_pm_get(engine->gt);
   31:         ce = engine->kernel_context;
   38:                 int type = i915_coherent_map_type(engine->i915);
   52:         if (engine->unpark)
   53:                 engine->unpark(engine);
   55:         intel_engine_unpark_heartbeat(engine);
   96:         ewma__engine_latency_add(&rq->engine->latency,
[all …]
intel_engine_pm.h
   15: intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
   17:         return intel_wakeref_is_active(&engine->wakeref);
   20: static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
   22:         intel_wakeref_get(&engine->wakeref);
   25: static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
   27:         return intel_wakeref_get_if_active(&engine->wakeref);
   30: static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
   32:         intel_wakeref_put(&engine->wakeref);
   35: static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
   37:         intel_wakeref_put_async(&engine->wakeref);
[all …]
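These inlines are the whole per-engine power-management surface for callers. A hedged usage sketch — poke_engine() is a hypothetical caller; the intel_engine_pm_* calls are the header's own:

#include "intel_engine_pm.h"

/* Touch engine state only if the engine is already awake, without
 * forcing a wake-up; drop the wakeref when done. */
static void poke_engine(struct intel_engine_cs *engine)
{
        if (!intel_engine_pm_get_if_awake(engine))
                return;         /* parked: nothing to do */

        /* ... access engine registers while the wakeref is held ... */

        intel_engine_pm_put(engine);
}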
intel_lrc.c
   49: * shouldn't we just need a set of those per engine command streamer? This is
   51: * rings, the engine cs shifts to a new "ring buffer" with every context
   66: * Now that ringbuffers belong per-context (and not per-engine, like before)
   67: * and that contexts are uniquely tied to a given engine (and not reusable,
   70: * - One ringbuffer per-engine inside each context.
   71: * - One backing object per-engine inside each context.
   75: * more complex, because we don't know at creation time which engine is going
   80: * gets populated for a given engine once we receive an execbuffer. If later
   82: * engine, we allocate/populate a new ringbuffer and context backing object and
   99: * for the appropriate engine: this structure contains a copy of the context's
[all …]
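The scheme this comment describes — allocate the per-engine ring and backing object only on the first execbuffer that targets that engine — reduces to a fill-on-first-use lookup. A userspace toy sketch with hypothetical types, not the driver's real intel_context machinery:

#include <stdlib.h>

/* Toy per-engine slots inside a context. */
struct engine_state {
        void *ring;     /* per-engine ringbuffer */
        void *state;    /* per-engine backing object */
};

struct toy_ctx {
        struct engine_state engines[8];
};

/* First execbuffer targeting an engine populates its slot;
 * later submissions on the same engine reuse it. */
static struct engine_state *get_engine_state(struct toy_ctx *ctx, int id)
{
        struct engine_state *es = &ctx->engines[id];

        if (!es->ring) {
                es->ring = malloc(4096);        /* stands in for the ring */
                es->state = malloc(4096);       /* ... and the backing object */
        }
        return es;
}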
selftest_context.c
   76: static int __live_context_size(struct intel_engine_cs *engine)
   83:         ce = intel_context_create(engine);
   92:                           i915_coherent_map_type(engine->i915));
  111:         vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
  126:         rq = intel_engine_create_kernel_request(engine);
  136:                 pr_err("%s context overwrote trailing red-zone!", engine->name);
  150:         struct intel_engine_cs *engine;
  159:         for_each_engine(engine, gt, id) {
  162:                 if (!engine->context_size)
  165:                 intel_engine_pm_get(engine);
[all …]
selftest_workarounds.c
   33:         } engine[I915_NUM_ENGINES];
   63:         struct intel_engine_cs *engine;
   72:         for_each_engine(engine, gt, id) {
   73:                 struct i915_wa_list *wal = &lists->engine[id].wa_list;
   75:                 wa_init_start(wal, "REF", engine->name);
   76:                 engine_init_workarounds(engine, wal);
   79:                 __intel_engine_init_ctx_wa(engine,
   80:                                            &lists->engine[id].ctx_wa_list,
   88:         struct intel_engine_cs *engine;
   91:         for_each_engine(engine, gt, id)
[all …]
intel_reset.c
   44:         struct intel_engine_cs *engine = rq->engine;
   50:         lockdep_assert_held(&engine->active.lock);
   51:         list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
  307:          * engine register state is not cleared until shortly after
  314:          * leaving the second reset, the internal engine state
  328:          * As we have observed that the engine state is still volatile
  347:         struct intel_engine_cs *engine;
  356:         for_each_engine_masked(engine, gt, engine_mask, tmp) {
  357:                 GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
  358:                 hw_mask |= hw_engine_mask[engine->id];
[all …]
sysfs_engines.c
   16:         struct intel_engine_cs *engine;
   21:         return container_of(kobj, struct kobj_engine, base)->engine;
   81: __caps_show(struct intel_engine_cs *engine,
   88:         BUILD_BUG_ON(!typecheck(typeof(caps), engine->uabi_capabilities));
   90:         switch (engine->class) {
  129:         struct intel_engine_cs *engine = kobj_to_engine(kobj);
  131:         return __caps_show(engine, engine->uabi_capabilities, buf, true);
  150:         struct intel_engine_cs *engine = kobj_to_engine(kobj);
  178:         WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);
  186:         struct intel_engine_cs *engine = kobj_to_engine(kobj);
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/core/
engine.c
   24: #include <core/engine.h>
   31: nvkm_engine_chsw_load(struct nvkm_engine *engine)
   33:         if (engine->func->chsw_load)
   34:                 return engine->func->chsw_load(engine);
   41:         struct nvkm_engine *engine = *pengine;
   42:         if (engine) {
   43:                 mutex_lock(&engine->subdev.mutex);
   44:                 if (--engine->usecount == 0)
   45:                         nvkm_subdev_fini(&engine->subdev, false);
   46:                 mutex_unlock(&engine->subdev.mutex);
[all …]
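nvkm_engine_unref() above is a plain "last user powers it down" refcount taken under the subdev mutex. The same pattern under hypothetical names, with eng_fini() standing in for nvkm_subdev_fini():

#include <linux/mutex.h>

/* Hypothetical engine with the same lifetime rule. */
struct eng {
        struct mutex lock;
        int usecount;
};

static void eng_fini(struct eng *eng)
{
        /* stands in for nvkm_subdev_fini(): power the engine down */
}

/* Drop one reference; the final put shuts the engine down and the
 * caller's pointer is cleared, as in nvkm_engine_unref(). */
static void eng_unref(struct eng **peng)
{
        struct eng *eng = *peng;

        if (!eng)
                return;

        mutex_lock(&eng->lock);
        if (--eng->usecount == 0)
                eng_fini(eng);
        mutex_unlock(&eng->lock);
        *peng = NULL;
}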
/kernel/linux/linux-5.10/drivers/gpu/drm/sun4i/
sunxi_engine.h
   25:          * This callback allows to prepare our engine for an atomic
   32:         void (*atomic_begin)(struct sunxi_engine *engine,
   49:         int (*atomic_check)(struct sunxi_engine *engine,
   61:         void (*commit)(struct sunxi_engine *engine);
   67:          * the layers supported by that engine.
   77:                              struct sunxi_engine *engine);
   83:          * engine. This is useful only for the composite output.
   87:         void (*apply_color_correction)(struct sunxi_engine *engine);
   93:          * engine. This is useful only for the composite output.
   97:         void (*disable_color_correction)(struct sunxi_engine *engine);
[all …]
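Every op in this table is optional, so callers test the pointer before the indirect call. A minimal sketch of that pattern (my_engine_commit() is a hypothetical name; the header's own sunxi_engine_* helpers follow the same shape, assuming the ops member declared in this header):

#include "sunxi_engine.h"

/* Commit the engine's pending state, if the backend implements it. */
static inline void my_engine_commit(struct sunxi_engine *engine)
{
        if (engine->ops->commit)
                engine->ops->commit(engine);
}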
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/engine/fifo/
Kbuild
    2: nvkm-y += nvkm/engine/fifo/base.o
    3: nvkm-y += nvkm/engine/fifo/nv04.o
    4: nvkm-y += nvkm/engine/fifo/nv10.o
    5: nvkm-y += nvkm/engine/fifo/nv17.o
    6: nvkm-y += nvkm/engine/fifo/nv40.o
    7: nvkm-y += nvkm/engine/fifo/nv50.o
    8: nvkm-y += nvkm/engine/fifo/g84.o
    9: nvkm-y += nvkm/engine/fifo/gf100.o
   10: nvkm-y += nvkm/engine/fifo/gk104.o
   11: nvkm-y += nvkm/engine/fifo/gk110.o
[all …]
/kernel/linux/linux-5.10/drivers/crypto/
picoxcell_crypto.c
   81:         struct spacc_engine *engine;
   96:         struct spacc_engine *engine;
  139:         struct spacc_engine *engine;
  147:         struct spacc_engine *engine;
  187: static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
  189:         u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
  205:         return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
  206:                         (indx * ctx->engine->cipher_pg_sz) :
  207:                 ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
  241:         unsigned indx = ctx->engine->next_ctx++;
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/engine/
Kbuild
    2: nvkm-y += nvkm/engine/falcon.o
    3: nvkm-y += nvkm/engine/xtensa.o
    5: include $(src)/nvkm/engine/bsp/Kbuild
    6: include $(src)/nvkm/engine/ce/Kbuild
    7: include $(src)/nvkm/engine/cipher/Kbuild
    8: include $(src)/nvkm/engine/device/Kbuild
    9: include $(src)/nvkm/engine/disp/Kbuild
   10: include $(src)/nvkm/engine/dma/Kbuild
   11: include $(src)/nvkm/engine/fifo/Kbuild
   12: include $(src)/nvkm/engine/gr/Kbuild
[all …]
/kernel/linux/linux-5.10/drivers/crypto/marvell/cesa/
cesa.c
    3: * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
    5: * driver supports the TDMA engine on platforms on which it is available.
   38: mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
   43:         *backlog = crypto_get_backlog(&engine->queue);
   44:         req = crypto_dequeue_request(&engine->queue);
   52: static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
   58:         spin_lock_bh(&engine->lock);
   59:         if (!engine->req) {
   60:                 req = mv_cesa_dequeue_req_locked(engine, &backlog);
   61:                 engine->req = req;
[all …]
/kernel/linux/linux-5.10/drivers/video/fbdev/via/
accel.c
   13: static int viafb_set_bpp(void __iomem *engine, u8 bpp)
   19:         gemode = readl(engine + VIA_REG_GEMODE) & 0xfffffcfc;
   34:         writel(gemode, engine + VIA_REG_GEMODE);
   39: static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
   79:         ret = viafb_set_bpp(engine, dst_bpp);
   91:         writel(tmp, engine + 0x08);
  100:         writel(tmp, engine + 0x0C);
  108:         writel(tmp, engine + 0x10);
  111:         writel(fg_color, engine + 0x18);
  114:         writel(bg_color, engine + 0x1C);
[all …]
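viafb_set_bpp() is a read-modify-write of one MMIO register (it clears the 0x303 mode bits), and the rest of the file programs the 2D engine with plain writel() at fixed offsets. A generic sketch of the read-modify-write step (the register offset and field values here are hypothetical, not VIA's):

#include <linux/io.h>

#define MY_REG_MODE     0x00            /* hypothetical register offset */
#define MY_MODE_MASK    0x00000303      /* field to replace */
#define MY_MODE_32BPP   0x00000300      /* hypothetical field value */

/* Update one field of an engine register, preserving the other bits. */
static void set_mode_32bpp(void __iomem *engine)
{
        u32 val = readl(engine + MY_REG_MODE);

        val &= ~MY_MODE_MASK;
        val |= MY_MODE_32BPP;
        writel(val, engine + MY_REG_MODE);
}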
/kernel/linux/linux-5.10/include/crypto/
engine.h
    3: * Crypto engine API
   22: * struct crypto_engine - crypto hardware engine
   23: * @name: the engine name
   24: * @idling: the engine is entering idle state
   26: * @running: the engine is on working
   29: *           crypto-engine, in head position to keep order
   30: * @list: link with the global crypto engine list
   32: * @queue: the crypto queue of the engine
   44: * @priv_data: the engine private data
   62:         int (*prepare_crypt_hardware)(struct crypto_engine *engine);
[all …]
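Bringing this struct up from a driver is two calls in probe and one in remove. A hedged sketch — my_engine_probe()/my_engine_remove() are illustrative wrappers; crypto_engine_alloc_init(), crypto_engine_start() and crypto_engine_exit() are the functions this header exports:

#include <linux/errno.h>
#include <crypto/engine.h>

/* Illustrative probe-time bring-up; "dev" is the driver's device. */
static int my_engine_probe(struct device *dev, struct crypto_engine **out)
{
        struct crypto_engine *engine;

        /* true: run the engine's kworker at realtime priority */
        engine = crypto_engine_alloc_init(dev, true);
        if (!engine)
                return -ENOMEM;

        *out = engine;
        return crypto_engine_start(engine);
}

/* Matching remove-time teardown. */
static int my_engine_remove(struct crypto_engine *engine)
{
        return crypto_engine_exit(engine);
}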
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/display/
allwinner,sun4i-a10-display-engine.yaml
    4: $id: http://devicetree.org/schemas/display/allwinner,sun4i-a10-display-engine.yaml#
    7: title: Allwinner A10 Display Engine Pipeline Device Tree Bindings
   14:   The display engine pipeline (and its entry point, since it can be
   52:       - allwinner,sun4i-a10-display-engine
   53:       - allwinner,sun5i-a10s-display-engine
   54:       - allwinner,sun5i-a13-display-engine
   55:       - allwinner,sun6i-a31-display-engine
   56:       - allwinner,sun6i-a31s-display-engine
   57:       - allwinner,sun7i-a20-display-engine
   58:       - allwinner,sun8i-a23-display-engine
[all …]