/drivers/gpu/drm/i915/gt/
D | intel_context.h |
    #define CE_TRACE(ce, fmt, ...) do { \
        const struct intel_context *ce__ = (ce); \
    void intel_context_init(struct intel_context *ce,
    void intel_context_fini(struct intel_context *ce);
    int intel_context_alloc_state(struct intel_context *ce);
    void intel_context_free(struct intel_context *ce);
    int intel_context_reconfigure_sseu(struct intel_context *ce,
    in intel_context_lock_pinned():
        static inline int intel_context_lock_pinned(struct intel_context *ce)
            __acquires(ce->pin_mutex)
        return mutex_lock_interruptible(&ce->pin_mutex);
    [all …]

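Two idioms show up in this header: the lock helper pairs mutex_lock_interruptible() with a sparse __acquires() annotation, and CE_TRACE copies its argument into the local ce__ so the expression is evaluated exactly once even if it has side effects. A minimal sketch of both, using hypothetical obj/OBJ_TRACE names:

    #include <linux/mutex.h>
    #include <linux/printk.h>

    struct obj {
            const char *name;
            struct mutex pin_mutex;
    };

    /* Copy the argument into a local so side effects happen only once. */
    #define OBJ_TRACE(o, fmt, ...) do {                             \
            const struct obj *o__ = (o);                            \
            pr_debug("%s: " fmt, o__->name, ##__VA_ARGS__);         \
    } while (0)

    /*
     * __acquires() lets sparse check lock balance: the caller owns
     * pin_mutex on a 0 return and gets -EINTR if a signal arrives.
     */
    static inline int obj_lock_pinned(struct obj *o)
            __acquires(o->pin_mutex)
    {
            return mutex_lock_interruptible(&o->pin_mutex);
    }
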
D | intel_context.c |
    in rcu_context_free():
        struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);
        trace_intel_context_free(ce);
        kmem_cache_free(slab_ce, ce);
    in intel_context_free():
        void intel_context_free(struct intel_context *ce)
        call_rcu(&ce->rcu, rcu_context_free);
    in intel_context_create():
        struct intel_context *ce;
        ce = intel_context_alloc();
        if (!ce)
        intel_context_init(ce, engine);
        trace_intel_context_create(ce);
    [all …]

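intel_context_free() does not free the object directly: call_rcu() defers the kmem_cache_free() until every in-flight RCU reader has finished, so a lookup that raced with the free never touches released memory. A minimal kernel-style sketch of the same pattern, with hypothetical struct foo and foo_cache names:

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
            struct rcu_head rcu;    /* storage for the deferred-free callback */
            int payload;
    };

    static struct kmem_cache *foo_cache;    /* assume kmem_cache_create() elsewhere */

    static void foo_rcu_free(struct rcu_head *rcu)
    {
            /* Recover the containing object from its embedded rcu_head. */
            struct foo *f = container_of(rcu, struct foo, rcu);

            kmem_cache_free(foo_cache, f);
    }

    static void foo_free(struct foo *f)
    {
            /* foo_rcu_free() runs only after current RCU readers finish. */
            call_rcu(&f->rcu, foo_rcu_free);
    }
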
D | intel_engine_pm.c |
    in dbg_poison_ce():
        static void dbg_poison_ce(struct intel_context *ce)
        if (ce->state) {
        struct drm_i915_gem_object *obj = ce->state->obj;
        int type = i915_coherent_map_type(ce->engine->i915, obj, true);
    in __engine_unpark():
        struct intel_context *ce;
        ce = engine->kernel_context;
        if (ce) {
        GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));
        while (unlikely(intel_context_inflight(ce)))
        dbg_poison_ce(ce);
    [all …]

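dbg_poison_ce() scribbles a known pattern over the saved context image while the engine is parked, so any later read of stale state is immediately visible. A generic sketch of debug-only poisoning; CONFIG_MY_DEBUG is a placeholder for whatever debug Kconfig symbol would guard it:

    #include <linux/poison.h>
    #include <linux/string.h>

    static void dbg_poison_state(void *vaddr, size_t len)
    {
            /* Compiles away entirely when the debug option is off;
             * CONFIG_MY_DEBUG is a hypothetical Kconfig symbol. */
            if (IS_ENABLED(CONFIG_MY_DEBUG))
                    memset(vaddr, POISON_INUSE, len);
    }
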
D | intel_lrc.c |
    in init_common_regs():
        const struct intel_context *ce,
        regs[CTX_TIMESTAMP] = ce->runtime.last;
    in __lrc_init_regs():
        const struct intel_context *ce,
        init_common_regs(regs, ce, engine, inhibit);
        init_ppgtt_regs(regs, vm_alias(ce->vm));
    in lrc_init_regs():
        void lrc_init_regs(const struct intel_context *ce,
        __lrc_init_regs(ce->lrc_reg_state, ce, engine, inhibit);
    in lrc_reset_regs():
        void lrc_reset_regs(const struct intel_context *ce,
        __reset_stop_ring(ce->lrc_reg_state, engine);
    in lrc_init_state():
        void lrc_init_state(struct intel_context *ce,
    [all …]

D | intel_breadcrumbs.c |
    in add_signaling_context():
        struct intel_context *ce)
        lockdep_assert_held(&ce->signal_lock);
        list_add_rcu(&ce->signal_link, &b->signalers);
    in remove_signaling_context():
        struct intel_context *ce)
        lockdep_assert_held(&ce->signal_lock);
        if (!list_empty(&ce->signals))
        list_del_rcu(&ce->signal_link);
    in check_signal_order():
        check_signal_order(struct intel_context *ce, struct i915_request *rq)
        if (rq->context != ce)
        if (!list_is_last(&rq->signal_link, &ce->signals) &&
    [all …]

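The breadcrumbs code mixes two protection schemes: writers hold ce->signal_lock (checked with lockdep_assert_held()), while readers walk b->signalers under rcu_read_lock(), which is why the list_add_rcu()/list_del_rcu() variants are needed. A simplified sketch of that split, with hypothetical signaler/breadcrumbs types:

    #include <linux/list.h>
    #include <linux/lockdep.h>
    #include <linux/rculist.h>
    #include <linux/spinlock.h>

    struct signaler {
            spinlock_t signal_lock;
            struct list_head signal_link;
            struct list_head signals;
    };

    struct breadcrumbs {
            struct list_head signalers;     /* walked under rcu_read_lock() */
    };

    /* Writers serialize on signal_lock; readers traverse with RCU. */
    static void add_signaler(struct breadcrumbs *b, struct signaler *s)
    {
            lockdep_assert_held(&s->signal_lock);
            list_add_rcu(&s->signal_link, &b->signalers);
    }

    static void remove_signaler(struct breadcrumbs *b, struct signaler *s)
    {
            lockdep_assert_held(&s->signal_lock);
            if (!list_empty(&s->signals))
                    return;         /* pending signals: keep it linked */
            list_del_rcu(&s->signal_link);
    }
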
D | selftest_mocs.c |
    in mocs_context_create():
        struct intel_context *ce;
        ce = intel_context_create(engine);
        if (IS_ERR(ce))
        return ce;
        ce->ring_size = SZ_16K;
        return ce;
    in check_mocs_engine():
        struct intel_context *ce)
        rq = intel_context_create_request(ce);
        if (!err && ce->engine->class == RENDER_CLASS)
        err = check_mocs_table(ce->engine, arg->mocs, &vaddr);
    [all …]

D | intel_lrc.h |
    int lrc_alloc(struct intel_context *ce,
    void lrc_reset(struct intel_context *ce);
    void lrc_fini(struct intel_context *ce);
    lrc_pre_pin(struct intel_context *ce,
    lrc_pin(struct intel_context *ce,
    void lrc_unpin(struct intel_context *ce);
    void lrc_post_unpin(struct intel_context *ce);
    void lrc_init_state(struct intel_context *ce,
    void lrc_init_regs(const struct intel_context *ce,
    void lrc_reset_regs(const struct intel_context *ce,
    [all …]

D | intel_context_sseu.c |
    in gen8_emit_rpcs_config():
        const struct intel_context *ce,
        offset = i915_ggtt_offset(ce->state) +
    in gen8_modify_rpcs():
        gen8_modify_rpcs(struct intel_context *ce, const struct intel_sseu sseu)
        lockdep_assert_held(&ce->pin_mutex);
        if (!intel_context_pin_if_active(ce))
        rq = intel_engine_create_kernel_request(ce->engine);
        ret = intel_context_prepare_remote_request(ce, rq);
        ret = gen8_emit_rpcs_config(rq, ce, sseu);
        intel_context_unpin(ce);
    in intel_context_reconfigure_sseu():
        intel_context_reconfigure_sseu(struct intel_context *ce,
    [all …]

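gen8_modify_rpcs() only needs to submit a request when the context is currently pinned on hardware; intel_context_pin_if_active() takes a pin only in that case, much like kref_get_unless_zero() for reference counts. The conditional-pin idiom sketched with a plain atomic and a hypothetical struct ctx:

    #include <linux/atomic.h>
    #include <linux/types.h>

    struct ctx {
            atomic_t pin_count;
    };

    /*
     * Succeeds only if the context is already pinned; an unpinned
     * context can simply have its saved image rewritten instead.
     */
    static bool ctx_pin_if_active(struct ctx *c)
    {
            return atomic_inc_not_zero(&c->pin_count);
    }

    static void ctx_unpin(struct ctx *c)
    {
            atomic_dec(&c->pin_count);
    }
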
D | selftest_lrc.c |
    in emit_semaphore_signal():
        static int emit_semaphore_signal(struct intel_context *ce, void *slot)
        i915_ggtt_offset(ce->engine->status_page.vma) +
        rq = intel_context_create_request(ce);
    in context_flush():
        static int context_flush(struct intel_context *ce, long timeout)
        rq = intel_engine_create_kernel_request(ce->engine);
        fence = i915_active_fence_get(&ce->timeline->last_request);
    in __live_lrc_state():
        struct intel_context *ce;
        ce = intel_context_create(engine);
        if (IS_ERR(ce))
        return PTR_ERR(ce);
    [all …]

D | intel_migrate.c |
    in pinned_context():
        struct intel_context *ce;
        ce = intel_engine_create_pinned_context(engine, vm, SZ_512K,
        return ce;
    in intel_migrate_init():
        struct intel_context *ce;
        ce = pinned_context(gt);
        if (IS_ERR(ce))
        return PTR_ERR(ce);
        m->context = ce;
    in intel_migrate_create_context():
        struct intel_context *ce;
        ce = __migrate_engines(m->context->engine->gt);
    [all …]

/drivers/crypto/gemini/
D | sl3516-ce-core.c |
    in sl3516_ce_desc_init():
        static int sl3516_ce_desc_init(struct sl3516_ce_dev *ce)
        ce->tx = dma_alloc_coherent(ce->dev, sz, &ce->dtx, GFP_KERNEL);
        if (!ce->tx)
        ce->rx = dma_alloc_coherent(ce->dev, sz, &ce->drx, GFP_KERNEL);
        if (!ce->rx)
        ce->tx[i].frame_ctrl.bits.own = CE_CPU;
        ce->tx[i].next_desc.next_descriptor = ce->dtx + (i + 1) * sizeof(struct descriptor);
        ce->tx[MAXDESC - 1].next_desc.next_descriptor = ce->dtx;
        ce->rx[i].frame_ctrl.bits.own = CE_CPU;
        ce->rx[i].next_desc.next_descriptor = ce->drx + (i + 1) * sizeof(struct descriptor);
    [all …]

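sl3516_ce_desc_init() builds classic circular DMA rings: one coherent allocation per direction, each descriptor's next pointer holding the bus address of its successor, and the last descriptor pointing back at the first. A sketch under the assumption of a 32-bit bus address, with hypothetical desc/ring types and NDESC size:

    #include <linux/dma-mapping.h>
    #include <linux/types.h>

    #define NDESC 32        /* hypothetical ring size */

    struct desc {
            u32 own;        /* who may touch this slot: CPU or engine */
            u32 next;       /* bus address of the next descriptor (32-bit bus assumed) */
    };

    struct ring {
            struct device *dev;
            struct desc *va;        /* CPU view of the ring */
            dma_addr_t pa;          /* device view of va[0] */
    };

    /* Carve one coherent allocation into a circular descriptor ring. */
    static int ring_init(struct ring *r)
    {
            size_t sz = sizeof(struct desc) * NDESC;
            int i;

            r->va = dma_alloc_coherent(r->dev, sz, &r->pa, GFP_KERNEL);
            if (!r->va)
                    return -ENOMEM;

            for (i = 0; i < NDESC; i++) {
                    r->va[i].own = 1;       /* CPU-owned until queued */
                    r->va[i].next = r->pa + (i + 1) * sizeof(struct desc);
            }
            /* The last descriptor wraps back to the first. */
            r->va[NDESC - 1].next = r->pa;

            return 0;
    }
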
D | sl3516-ce-rng.c |
    in sl3516_ce_rng_read():
        struct sl3516_ce_dev *ce;
        ce = container_of(rng, struct sl3516_ce_dev, trng);
        ce->hwrng_stat_req++;
        ce->hwrng_stat_bytes += max;
        err = pm_runtime_get_sync(ce->dev);
        pm_runtime_put_noidle(ce->dev);
        *data = readl(ce->base + IPSEC_RAND_NUM_REG);
        pm_runtime_put(ce->dev);
    in sl3516_ce_rng_register():
        int sl3516_ce_rng_register(struct sl3516_ce_dev *ce)
        ce->trng.name = "SL3516 Crypto Engine RNG";
    [all …]

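sl3516_ce_rng_read() brackets its register reads with a runtime-PM reference; note how the failure path calls pm_runtime_put_noidle() to drop the reference that pm_runtime_get_sync() takes even when it fails. A sketch of an hwrng read callback with the same shape, using hypothetical mydev/RAND_REG names:

    #include <linux/hw_random.h>
    #include <linux/io.h>
    #include <linux/pm_runtime.h>

    struct mydev {
            struct device *dev;
            void __iomem *base;
            struct hwrng trng;
    };

    #define RAND_REG 0x0    /* hypothetical register offset */

    static int my_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
    {
            struct mydev *md = container_of(rng, struct mydev, trng);
            u32 *data = buf;
            size_t i;
            int err;

            err = pm_runtime_get_sync(md->dev);
            if (err < 0) {
                    /* get_sync takes a reference even on failure; drop it. */
                    pm_runtime_put_noidle(md->dev);
                    return err;
            }

            for (i = 0; i < max / sizeof(u32); i++)
                    data[i] = readl(md->base + RAND_REG);

            pm_runtime_put(md->dev);
            return i * sizeof(u32);
    }
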
D | sl3516-ce-cipher.c |
    in sl3516_ce_need_fallback():
        struct sl3516_ce_dev *ce = op->ce;
        ce->fallback_mod16++;
        ce->fallback_sg_count_tx++;
        ce->fallback_sg_count_rx++;
        ce->fallback_mod16++;
        ce->fallback_mod16++;
        ce->fallback_align16++;
        ce->fallback_mod16++;
        ce->fallback_mod16++;
        ce->fallback_align16++;
    [all …]

/drivers/gpu/drm/i915/gt/uc/
D | intel_guc_submission.c |
    in context_enabled():
        static inline bool context_enabled(struct intel_context *ce)
        return (atomic_read(&ce->guc_sched_state_no_lock) &
    in set_context_enabled():
        atomic_or(SCHED_STATE_NO_LOCK_ENABLED, &ce->guc_sched_state_no_lock);
    in clr_context_enabled():
        &ce->guc_sched_state_no_lock);
    in context_pending_enable():
        return (atomic_read(&ce->guc_sched_state_no_lock) &
    in set_context_pending_enable():
        &ce->guc_sched_state_no_lock);
    [all …]

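The guc_sched_state_no_lock word is named for how it is used: the enabled/pending bits are read and updated with plain atomics rather than under a spinlock. A standalone sketch of the same flag helpers, with hypothetical STATE_* bits and gctx type:

    #include <linux/atomic.h>
    #include <linux/bits.h>

    /* Hypothetical scheduling-state bits, updated without any lock. */
    #define STATE_ENABLED           BIT(0)
    #define STATE_PENDING_ENABLE    BIT(1)

    struct gctx {
            atomic_t sched_state;
    };

    static bool ctx_enabled(struct gctx *c)
    {
            return atomic_read(&c->sched_state) & STATE_ENABLED;
    }

    static void set_ctx_enabled(struct gctx *c)
    {
            atomic_or(STATE_ENABLED, &c->sched_state);
    }

    static void clr_ctx_enabled(struct gctx *c)
    {
            atomic_and(~STATE_ENABLED, &c->sched_state);
    }
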
/drivers/crypto/allwinner/sun8i-ce/
D | sun8i-ce-core.c |
    in sun8i_ce_get_engine_number():
        int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce)
        return atomic_inc_return(&ce->flow) % (MAXFLOW - 1);
    in sun8i_ce_run_task():
        int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
        struct ce_task *cet = ce->chanlist[flow].tl;
        ce->chanlist[flow].stat_req++;
        mutex_lock(&ce->mlock);
        v = readl(ce->base + CE_ICR);
        writel(v, ce->base + CE_ICR);
        reinit_completion(&ce->chanlist[flow].complete);
        writel(ce->chanlist[flow].t_phy, ce->base + CE_TDQ);
    [all …]

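sun8i_ce_get_engine_number() load-balances requests across the hardware flows with an atomic counter. A standalone sketch of round-robin selection; NR_FLOWS is a hypothetical queue count, and the unsigned cast guards against a negative index when the counter wraps:

    #include <linux/atomic.h>

    #define NR_FLOWS 4      /* hypothetical number of hardware queues */

    static atomic_t next_flow = ATOMIC_INIT(0);

    /*
     * Round-robin queue selection: atomic_inc_return() makes the
     * counter race-free, and the modulo folds it onto a queue index.
     */
    static int pick_flow(void)
    {
            return (unsigned int)atomic_inc_return(&next_flow) % NR_FLOWS;
    }
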
D | sun8i-ce-trng.c |
    in sun8i_ce_trng_read():
        struct sun8i_ce_dev *ce;
        ce = container_of(rng, struct sun8i_ce_dev, trng);
        ce->hwrng_stat_req++;
        ce->hwrng_stat_bytes += todo;
        dma_dst = dma_map_single(ce->dev, d, todo, DMA_FROM_DEVICE);
        if (dma_mapping_error(ce->dev, dma_dst)) {
        dev_err(ce->dev, "Cannot DMA MAP DST\n");
        err = pm_runtime_get_sync(ce->dev);
        pm_runtime_put_noidle(ce->dev);
        mutex_lock(&ce->rnglock);
    [all …]

D | sun8i-ce-prng.c |
    in sun8i_ce_prng_generate():
        struct sun8i_ce_dev *ce;
        ce = algt->ce;
        dev_err(ce->dev, "not seeded\n");
        dev_dbg(ce->dev, "%s PRNG slen=%u dlen=%u todo=%u multi=%u\n", __func__,
        dma_iv = dma_map_single(ce->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
        if (dma_mapping_error(ce->dev, dma_iv)) {
        dev_err(ce->dev, "Cannot DMA MAP IV\n");
        dma_dst = dma_map_single(ce->dev, d, todo, DMA_FROM_DEVICE);
        if (dma_mapping_error(ce->dev, dma_dst)) {
        dev_err(ce->dev, "Cannot DMA MAP DST\n");
    [all …]

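The PRNG path maps the seed for device reads (DMA_TO_DEVICE) and the output buffer for device writes (DMA_FROM_DEVICE), checking each mapping with dma_mapping_error() and unwinding in reverse order on failure. A sketch of that shape, with hypothetical names:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    struct dma_pair {
            dma_addr_t src;         /* seed/IV: the device reads from it */
            dma_addr_t dst;         /* output: the device writes into it */
    };

    static int map_pair(struct device *dev, void *seed, size_t slen,
                        void *out, size_t dlen, struct dma_pair *p)
    {
            p->src = dma_map_single(dev, seed, slen, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, p->src))
                    return -EFAULT;

            p->dst = dma_map_single(dev, out, dlen, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, p->dst)) {
                    /* Unwind the first mapping before failing. */
                    dma_unmap_single(dev, p->src, slen, DMA_TO_DEVICE);
                    return -EFAULT;
            }
            return 0;
    }
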
D | sun8i-ce-cipher.c |
    in sun8i_ce_cipher_prepare():
        struct sun8i_ce_dev *ce = op->ce;
        dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
        chan = &ce->chanlist[flow];
        common = ce->variant->alg_cipher[algt->ce_algo_id];
        if (ce->variant->cipher_t_dlen_in_bytes)
        sym = ce->variant->op_mode[algt->ce_blockmode];
        rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
        if (dma_mapping_error(ce->dev, rctx->addr_key)) {
        dev_err(ce->dev, "Cannot DMA MAP KEY\n");
        rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen,
    [all …]

/drivers/of/
D | dynamic.c |
    in __of_changeset_entry_destroy():
        static void __of_changeset_entry_destroy(struct of_changeset_entry *ce)
        if (ce->action == OF_RECONFIG_ATTACH_NODE &&
        of_node_check_flag(ce->np, OF_OVERLAY)) {
        if (kref_read(&ce->np->kobj.kref) > 1) {
        kref_read(&ce->np->kobj.kref), ce->np);
        of_node_set_flag(ce->np, OF_OVERLAY_FREE_CSET);
        of_node_put(ce->np);
        list_del(&ce->node);
        kfree(ce);
    in __of_changeset_entry_dump():
        static void __of_changeset_entry_dump(struct of_changeset_entry *ce)
    [all …]

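__of_changeset_entry_destroy() peeks at the node's reference count with kref_read(); since the count can change right after the read, that is only safe as a warning heuristic, and the actual lifetime is still governed by of_node_put(). The underlying kref pattern, sketched generically with a hypothetical struct node:

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct node {
            struct kref kref;
    };

    static void node_release(struct kref *kref)
    {
            kfree(container_of(kref, struct node, kref));
    }

    /* Drop one reference; node_release() runs when the count hits zero. */
    static void node_put(struct node *n)
    {
            kref_put(&n->kref, node_release);
    }

    /*
     * kref_read() is a snapshot only: fine for diagnostics, never for
     * freeing decisions, since the count may change immediately after.
     */
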
/drivers/base/power/
D | clock_ops.c |
    in __pm_clk_enable():
        static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
        switch (ce->status) {
        ret = clk_prepare_enable(ce->clk);
        ret = clk_enable(ce->clk);
        ce->status = PCE_STATUS_ENABLED;
        __func__, ce->clk, ret);
    in pm_clk_acquire():
        static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
        if (!ce->clk)
        ce->clk = clk_get(dev, ce->con_id);
        if (IS_ERR(ce->clk)) {
    [all …]

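__pm_clk_enable() keys off a per-entry status, so a clock that was acquired but never prepared gets clk_prepare_enable(), while an already-prepared one only needs clk_enable(). A reduced sketch of that state machine; the ST_* states are hypothetical stand-ins for PCE_STATUS_*:

    #include <linux/clk.h>

    enum entry_state { ST_ACQUIRED, ST_PREPARED, ST_ENABLED };

    struct clk_entry {
            struct clk *clk;
            enum entry_state status;
    };

    /* Enable a clock from whichever state it is currently in. */
    static int entry_enable(struct clk_entry *e)
    {
            int ret;

            switch (e->status) {
            case ST_ACQUIRED:
                    ret = clk_prepare_enable(e->clk);  /* prepare, then enable */
                    break;
            case ST_PREPARED:
                    ret = clk_enable(e->clk);          /* already prepared */
                    break;
            default:
                    return 0;       /* already enabled: nothing to do */
            }
            if (!ret)
                    e->status = ST_ENABLED;
            return ret;
    }
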
/drivers/clocksource/
D | timer-sun5i.c |
    in sun5i_clkevt_sync():
        static void sun5i_clkevt_sync(struct sun5i_timer_clkevt *ce)
        u32 old = readl(ce->timer.base + TIMER_CNTVAL_LO_REG(1));
        while ((old - readl(ce->timer.base + TIMER_CNTVAL_LO_REG(1))) < TIMER_SYNC_TICKS)
    in sun5i_clkevt_time_stop():
        static void sun5i_clkevt_time_stop(struct sun5i_timer_clkevt *ce, u8 timer)
        u32 val = readl(ce->timer.base + TIMER_CTL_REG(timer));
        writel(val & ~TIMER_CTL_ENABLE, ce->timer.base + TIMER_CTL_REG(timer));
        sun5i_clkevt_sync(ce);
    in sun5i_clkevt_time_setup():
        static void sun5i_clkevt_time_setup(struct sun5i_timer_clkevt *ce, u8 timer, u32 delay)
        writel(delay, ce->timer.base + TIMER_INTVAL_LO_REG(timer));
    in sun5i_clkevt_time_start():
        static void sun5i_clkevt_time_start(struct sun5i_timer_clkevt *ce, u8 timer, bool periodic)
    [all …]

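sun5i_clkevt_sync() copes with the timer logic sitting in a slower clock domain: after touching a control register, it spins until the free-running counter has moved TIMER_SYNC_TICKS, guaranteeing the write has latched. A sketch of the same wait, assuming a hypothetical CNT_REG that counts down so old minus new grows:

    #include <linux/io.h>
    #include <linux/processor.h>

    #define CNT_REG         0x0     /* hypothetical free-running down-counter */
    #define SYNC_TICKS      3

    /*
     * Wait for the counter to advance a few ticks so a preceding
     * register write has taken effect in the timer's clock domain.
     * Unsigned subtraction handles counter wrap.
     */
    static void clkevt_sync(void __iomem *base)
    {
            u32 old = readl(base + CNT_REG);

            while ((old - readl(base + CNT_REG)) < SYNC_TICKS)
                    cpu_relax();
    }
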
D | mps2-timer.c |
    in mps2_timer_shutdown():
        static int mps2_timer_shutdown(struct clock_event_device *ce)
        clockevent_mps2_writel(0, ce, TIMER_RELOAD);
        clockevent_mps2_writel(0, ce, TIMER_CTRL);
    in mps2_timer_set_next_event():
        static int mps2_timer_set_next_event(unsigned long next, struct clock_event_device *ce)
        clockevent_mps2_writel(next, ce, TIMER_VALUE);
        clockevent_mps2_writel(TIMER_CTRL_IE | TIMER_CTRL_ENABLE, ce, TIMER_CTRL);
    in mps2_timer_set_periodic():
        static int mps2_timer_set_periodic(struct clock_event_device *ce)
        u32 clock_count_per_tick = to_mps2_clkevt(ce)->clock_count_per_tick;
        clockevent_mps2_writel(clock_count_per_tick, ce, TIMER_RELOAD);
        clockevent_mps2_writel(clock_count_per_tick, ce, TIMER_VALUE);
    [all …]

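These three callbacks are the standard clock_event_device hooks: shutdown stops the counter, set_next_event programs a one-shot delta, and set_state_periodic reloads a fixed tick. A minimal one-shot wiring with hypothetical register offsets and MMIO base:

    #include <linux/clockchips.h>
    #include <linux/io.h>

    static void __iomem *timer_base;        /* hypothetical MMIO base */

    #define TIMER_CTRL      0x0
    #define TIMER_VALUE     0x4
    #define TIMER_CTRL_EN   0x1

    static int my_shutdown(struct clock_event_device *ce)
    {
            writel(0, timer_base + TIMER_CTRL);     /* stop the timer */
            return 0;
    }

    static int my_set_next_event(unsigned long evt, struct clock_event_device *ce)
    {
            writel(evt, timer_base + TIMER_VALUE);  /* load the delta */
            writel(TIMER_CTRL_EN, timer_base + TIMER_CTRL);
            return 0;
    }

    static struct clock_event_device my_clkevt = {
            .name               = "my-timer",
            .features           = CLOCK_EVT_FEAT_ONESHOT,
            .rating             = 200,
            .set_state_shutdown = my_shutdown,
            .set_next_event     = my_set_next_event,
    };

    /* Registered once the input clock rate is known:
     * clockevents_config_and_register(&my_clkevt, rate, 1, 0xffffffff);
     */
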
D | timer-digicolor.c |
    struct clock_event_device ce;            (member)
    in dc_timer():
        static struct digicolor_timer *dc_timer(struct clock_event_device *ce)
        return container_of(ce, struct digicolor_timer, ce);
    in dc_timer_disable():
        static inline void dc_timer_disable(struct clock_event_device *ce)
        struct digicolor_timer *dt = dc_timer(ce);
    in dc_timer_enable():
        static inline void dc_timer_enable(struct clock_event_device *ce, u32 mode)
        struct digicolor_timer *dt = dc_timer(ce);
    in dc_timer_set_count():
        static inline void dc_timer_set_count(struct clock_event_device *ce,
        struct digicolor_timer *dt = dc_timer(ce);
    in digicolor_clkevt_shutdown():
        static int digicolor_clkevt_shutdown(struct clock_event_device *ce)
    [all …]

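Both this driver and timer-rockchip.c below embed the clock_event_device inside a larger driver struct and recover the wrapper with container_of(); that is how per-device state reaches callbacks that only receive the clock_event_device pointer. The idiom in isolation, with a hypothetical my_timer wrapper:

    #include <linux/clockchips.h>
    #include <linux/kernel.h>

    struct my_timer {
            void __iomem *base;
            struct clock_event_device ce;   /* embedded, not a pointer */
    };

    /*
     * Callbacks get only &my_timer.ce; because ce is embedded,
     * container_of() recovers the wrapping driver state.
     */
    static inline struct my_timer *to_my_timer(struct clock_event_device *ce)
    {
            return container_of(ce, struct my_timer, ce);
    }
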
D | timer-rockchip.c |
    struct clock_event_device ce;            (member)
    in rk_timer():
        static inline struct rk_timer *rk_timer(struct clock_event_device *ce)
        return &container_of(ce, struct rk_clkevt, ce)->timer;
    in rk_timer_set_next_event():
        struct clock_event_device *ce)
        struct rk_timer *timer = rk_timer(ce);
    in rk_timer_shutdown():
        static int rk_timer_shutdown(struct clock_event_device *ce)
        struct rk_timer *timer = rk_timer(ce);
    in rk_timer_set_periodic():
        static int rk_timer_set_periodic(struct clock_event_device *ce)
        struct rk_timer *timer = rk_timer(ce);
    in rk_timer_interrupt():
        struct clock_event_device *ce = dev_id;
    [all …]

/drivers/gpu/drm/i915/selftests/
D | i915_request.c |
    in igt_request_rewind():
        struct intel_context *ce;
        ce = i915_gem_context_get_engine(ctx[0], RCS0);
        GEM_BUG_ON(IS_ERR(ce));
        request = mock_request(ce, 2 * HZ);
        intel_context_put(ce);
        ce = i915_gem_context_get_engine(ctx[1], RCS0);
        GEM_BUG_ON(IS_ERR(ce));
        vip = mock_request(ce, 0);
        intel_context_put(ce);
    struct i915_request *(*request_alloc)(struct intel_context *ce);
    [all …]