/drivers/gpu/drm/i915/gt/
D | gen8_engine_cs.c
      16  u32 *cs, flags = 0;    in gen8_emit_flush_rcs() (local)
      58  cs = intel_ring_begin(rq, len);    in gen8_emit_flush_rcs()
      59  if (IS_ERR(cs))    in gen8_emit_flush_rcs()
      60  return PTR_ERR(cs);    in gen8_emit_flush_rcs()
      63  cs = gen8_emit_pipe_control(cs, 0, 0);    in gen8_emit_flush_rcs()
      66  cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,    in gen8_emit_flush_rcs()
      69  cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);    in gen8_emit_flush_rcs()
      72  cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);    in gen8_emit_flush_rcs()
      74  intel_ring_advance(rq, cs);    in gen8_emit_flush_rcs()
      81  u32 cmd, *cs;    in gen8_emit_flush_xcs() (local)
      [all …]

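Taken together, these hits show the i915 ring-emission idiom: intel_ring_begin() reserves a fixed number of dwords and returns a write cursor (or an ERR_PTR when space cannot be made), the caller stores commands through the cursor, and intel_ring_advance() commits the new tail. A minimal sketch of that idiom, using only the calls visible above; emit_two_noops() is a hypothetical helper, not part of the driver:

    /* Assumes i915-internal context (intel_ring.h, intel_gpu_commands.h). */
    static int emit_two_noops(struct i915_request *rq)
    {
            u32 *cs;

            cs = intel_ring_begin(rq, 2);   /* reserve exactly 2 dwords */
            if (IS_ERR(cs))
                    return PTR_ERR(cs);     /* ring full or device wedged */

            *cs++ = MI_NOOP;
            *cs++ = MI_NOOP;

            /* the cursor must land exactly at the end of the reservation */
            intel_ring_advance(rq, cs);
            return 0;
    }
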
D | gen6_engine_cs.c
      60  u32 *cs;    in gen6_emit_post_sync_nonzero_flush() (local)
      62  cs = intel_ring_begin(rq, 6);    in gen6_emit_post_sync_nonzero_flush()
      63  if (IS_ERR(cs))    in gen6_emit_post_sync_nonzero_flush()
      64  return PTR_ERR(cs);    in gen6_emit_post_sync_nonzero_flush()
      66  *cs++ = GFX_OP_PIPE_CONTROL(5);    in gen6_emit_post_sync_nonzero_flush()
      67  *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;    in gen6_emit_post_sync_nonzero_flush()
      68  *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;    in gen6_emit_post_sync_nonzero_flush()
      69  *cs++ = 0; /* low dword */    in gen6_emit_post_sync_nonzero_flush()
      70  *cs++ = 0; /* high dword */    in gen6_emit_post_sync_nonzero_flush()
      71  *cs++ = MI_NOOP;    in gen6_emit_post_sync_nonzero_flush()
      [all …]

D | gen2_engine_cs.c
      19  u32 cmd, *cs;    in gen2_emit_flush() (local)
      25  cs = intel_ring_begin(rq, 2 + 4 * num_store_dw);    in gen2_emit_flush()
      26  if (IS_ERR(cs))    in gen2_emit_flush()
      27  return PTR_ERR(cs);    in gen2_emit_flush()
      29  *cs++ = cmd;    in gen2_emit_flush()
      31  *cs++ = MI_STORE_DWORD_INDEX;    in gen2_emit_flush()
      32  *cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);    in gen2_emit_flush()
      33  *cs++ = 0;    in gen2_emit_flush()
      34  *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;    in gen2_emit_flush()
      36  *cs++ = cmd;    in gen2_emit_flush()
      [all …]

D | gen7_renderclear.c
     102  static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)    in batch_offset() (argument)
     104  return (cs - bc->start) * sizeof(*bc->start) + bc->offset;    in batch_offset()
     148  u32 *cs = batch_alloc_items(state, 32, 8);    in gen7_fill_surface_state() (local)
     149  u32 offset = batch_offset(state, cs);    in gen7_fill_surface_state()
     155  *cs++ = SURFACE_2D << 29 |    in gen7_fill_surface_state()
     159  *cs++ = batch_addr(state) + dst_offset;    in gen7_fill_surface_state()
     161  *cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1);    in gen7_fill_surface_state()
     162  *cs++ = surface_w;    in gen7_fill_surface_state()
     163  *cs++ = 0;    in gen7_fill_surface_state()
     164  *cs++ = 0;    in gen7_fill_surface_state()
     [all …]

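batch_offset() turns the write cursor back into a byte offset within the batch: the pointer difference counts dwords, and sizeof(*bc->start) scales it to bytes before the chunk's own base offset is added. A standalone restatement of that arithmetic; struct chunk is a stand-in reduced to the two fields the calculation touches:

    #include <linux/types.h>

    /* Stand-in for i915's struct batch_chunk, reduced to what the math uses. */
    struct chunk {
            u32 *start;     /* first dword of the chunk */
            u32 offset;     /* byte offset of the chunk within the batch */
    };

    static u32 chunk_offset(const struct chunk *bc, const u32 *cs)
    {
            /* (cs - start) counts dwords; sizeof(*bc->start) == 4 scales
             * the count to bytes before the base offset is added. */
            return (cs - bc->start) * sizeof(*bc->start) + bc->offset;
    }
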
D | intel_migrate.c
     336  u32 *cs;    in emit_no_arbitration() (local)
     338  cs = intel_ring_begin(rq, 2);    in emit_no_arbitration()
     339  if (IS_ERR(cs))    in emit_no_arbitration()
     340  return PTR_ERR(cs);    in emit_no_arbitration()
     343  *cs++ = MI_ARB_ON_OFF;    in emit_no_arbitration()
     344  *cs++ = MI_NOOP;    in emit_no_arbitration()
     345  intel_ring_advance(rq, cs);    in emit_no_arbitration()
     376  u32 *hdr, *cs;    in emit_pte() (local)
     403  cs = intel_ring_begin(rq, I915_EMIT_PTE_NUM_DWORDS);    in emit_pte()
     404  if (IS_ERR(cs))    in emit_pte()
     [all …]

D | gen8_engine_cs.h
      43  u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
      44  u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
      46  u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
      47  u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
      48  u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
      50  u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs);
      79  __gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)    in __gen8_emit_write_rcs() (argument)
      81  *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;    in __gen8_emit_write_rcs()
      82  *cs++ = flags1 | PIPE_CONTROL_QW_WRITE;    in __gen8_emit_write_rcs()
      83  *cs++ = offset;    in __gen8_emit_write_rcs()
      [all …]

D | selftest_lrc.c
      84  u32 *cs;    in emit_semaphore_signal() (local)
      90  cs = intel_ring_begin(rq, 4);    in emit_semaphore_signal()
      91  if (IS_ERR(cs)) {    in emit_semaphore_signal()
      93  return PTR_ERR(cs);    in emit_semaphore_signal()
      96  *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;    in emit_semaphore_signal()
      97  *cs++ = offset;    in emit_semaphore_signal()
      98  *cs++ = 0;    in emit_semaphore_signal()
      99  *cs++ = 1;    in emit_semaphore_signal()
     101  intel_ring_advance(rq, cs);    in emit_semaphore_signal()
     414  u32 *cs;    in __live_lrc_state() (local)
     [all …]

D | selftest_engine_pm.c
      34  static u32 *emit_wait(u32 *cs, u32 offset, int op, u32 value)    in emit_wait() (argument)
      36  *cs++ = MI_SEMAPHORE_WAIT |    in emit_wait()
      40  *cs++ = value;    in emit_wait()
      41  *cs++ = offset;    in emit_wait()
      42  *cs++ = 0;    in emit_wait()
      44  return cs;    in emit_wait()
      47  static u32 *emit_store(u32 *cs, u32 offset, u32 value)    in emit_store() (argument)
      49  *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;    in emit_store()
      50  *cs++ = offset;    in emit_store()
      51  *cs++ = 0;    in emit_store()
      [all …]

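emit_wait() and emit_store() follow the cursor-passing convention: take u32 *cs, write one packet, return the advanced cursor, so a caller can chain several packets inside a single reservation. A sketch of such a caller; the four-dwords-per-packet sizing is inferred from the fields emitted above, MI_SEMAPHORE_SAD_EQ_SDD is one of the standard compare encodings, and the whole function is hypothetical rather than taken from the selftest:

    /* Wait for *wait_offset == 1, then store 1 at store_offset. */
    static int emit_wait_then_store(struct i915_request *rq,
                                    u32 wait_offset, u32 store_offset)
    {
            u32 *cs;

            cs = intel_ring_begin(rq, 8);   /* assumed: 4 dwords per helper */
            if (IS_ERR(cs))
                    return PTR_ERR(cs);

            cs = emit_wait(cs, wait_offset, MI_SEMAPHORE_SAD_EQ_SDD, 1);
            cs = emit_store(cs, store_offset, 1);

            intel_ring_advance(rq, cs);
            return 0;
    }
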
D | intel_lrc.c
    1047  static u32 *setup_predicate_disable_wa(const struct intel_context *ce, u32 *cs)    in setup_predicate_disable_wa() (argument)
    1050  *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT | (4 - 2);    in setup_predicate_disable_wa()
    1051  *cs++ = lrc_indirect_bb(ce) + DG2_PREDICATE_RESULT_WA;    in setup_predicate_disable_wa()
    1052  *cs++ = 0;    in setup_predicate_disable_wa()
    1053  *cs++ = 0; /* No predication */    in setup_predicate_disable_wa()
    1056  *cs++ = MI_BATCH_BUFFER_END | BIT(15);    in setup_predicate_disable_wa()
    1057  *cs++ = MI_SET_PREDICATE | MI_SET_PREDICATE_DISABLE;    in setup_predicate_disable_wa()
    1060  *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT | (4 - 2);    in setup_predicate_disable_wa()
    1061  *cs++ = lrc_indirect_bb(ce) + DG2_PREDICATE_RESULT_WA;    in setup_predicate_disable_wa()
    1062  *cs++ = 0;    in setup_predicate_disable_wa()
    [all …]

/drivers/scsi/
D | myrs.c
     104  static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)    in myrs_qcmd() (argument)
     106  void __iomem *base = cs->io_base;    in myrs_qcmd()
     108  union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;    in myrs_qcmd()
     110  cs->write_cmd_mbox(next_mbox, mbox);    in myrs_qcmd()
     112  if (cs->prev_cmd_mbox1->words[0] == 0 ||    in myrs_qcmd()
     113  cs->prev_cmd_mbox2->words[0] == 0)    in myrs_qcmd()
     114  cs->get_cmd_mbox(base);    in myrs_qcmd()
     116  cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;    in myrs_qcmd()
     117  cs->prev_cmd_mbox1 = next_mbox;    in myrs_qcmd()
     119  if (++next_mbox > cs->last_cmd_mbox)    in myrs_qcmd()
     [all …]

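Note that cs here is the controller state (struct myrs_hba), not a chip select. The tail of myrs_qcmd() advances a cursor through an array of command mailboxes and wraps it when it runs off the end. The sketch below restates that wraparound with stand-in types (toy_mbox, toy_ring); the wrap back to first_cmd_mbox is an assumption based on the truncated listing:

    #include <linux/types.h>

    /* Stand-in mailbox; the real union myrs_cmd_mbox is register-defined. */
    struct toy_mbox {
            u32 words[16];
    };

    /* The cursor fields the wraparound touches, as in struct myrs_hba. */
    struct toy_ring {
            struct toy_mbox *first_cmd_mbox;
            struct toy_mbox *last_cmd_mbox;
            struct toy_mbox *next_cmd_mbox;
    };

    static void toy_ring_advance(struct toy_ring *cs)
    {
            struct toy_mbox *next_mbox = cs->next_cmd_mbox;

            if (++next_mbox > cs->last_cmd_mbox)    /* walked past the end */
                    next_mbox = cs->first_cmd_mbox; /* wrap to the start */
            cs->next_cmd_mbox = next_mbox;
    }
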
/drivers/gpu/drm/i915/pxp/
D | intel_pxp_cmd.c
      23  static u32 *pxp_emit_session_selection(u32 *cs, u32 idx)    in pxp_emit_session_selection() (argument)
      25  *cs++ = MFX_WAIT_PXP;    in pxp_emit_session_selection()
      28  *cs++ = MI_FLUSH_DW;    in pxp_emit_session_selection()
      29  *cs++ = 0;    in pxp_emit_session_selection()
      30  *cs++ = 0;    in pxp_emit_session_selection()
      33  *cs++ = MI_SET_APPID | MI_SET_APPID_SESSION_ID(idx);    in pxp_emit_session_selection()
      35  *cs++ = MFX_WAIT_PXP;    in pxp_emit_session_selection()
      38  *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_PROTECTED_MEM_EN |    in pxp_emit_session_selection()
      40  *cs++ = I915_GEM_HWS_PXP_ADDR | MI_FLUSH_DW_USE_GTT;    in pxp_emit_session_selection()
      41  *cs++ = 0;    in pxp_emit_session_selection()
      [all …]

/drivers/memory/
D | stm32-fmc2-ebi.c
     172  const struct stm32_fmc2_prop *prop, int cs);
     173  u32 (*calculate)(struct stm32_fmc2_ebi *ebi, int cs, u32 setup);
     176  int cs, u32 setup);
     181  int cs)    in stm32_fmc2_ebi_check_mux() (argument)
     186  ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);    in stm32_fmc2_ebi_check_mux()
     198  int cs)    in stm32_fmc2_ebi_check_waitcfg() (argument)
     203  ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);    in stm32_fmc2_ebi_check_waitcfg()
     215  int cs)    in stm32_fmc2_ebi_check_sync_trans() (argument)
     220  ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);    in stm32_fmc2_ebi_check_sync_trans()
     232  int cs)    in stm32_fmc2_ebi_check_async_trans() (argument)
     [all …]

D | omap-gpmc.c
     277  void gpmc_cs_write_reg(int cs, int idx, u32 val)    in gpmc_cs_write_reg() (argument)
     281  reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;    in gpmc_cs_write_reg()
     285  static u32 gpmc_cs_read_reg(int cs, int idx)    in gpmc_cs_read_reg() (argument)
     289  reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;    in gpmc_cs_read_reg()
     312  static unsigned long gpmc_get_clk_period(int cs, enum gpmc_clk_domain cd)    in gpmc_get_clk_period() (argument)
     321  l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);    in gpmc_get_clk_period()
     334  static unsigned int gpmc_ns_to_clk_ticks(unsigned int time_ns, int cs,    in gpmc_ns_to_clk_ticks() (argument)
     340  tick_ps = gpmc_get_clk_period(cs, cd);    in gpmc_ns_to_clk_ticks()
     360  static unsigned int gpmc_clk_ticks_to_ns(unsigned int ticks, int cs,    in gpmc_clk_ticks_to_ns() (argument)
     363  return ticks * gpmc_get_clk_period(cs, cd) / 1000;    in gpmc_clk_ticks_to_ns()
     [all …]

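The division by 1000 on line 363 gives the units away: gpmc_get_clk_period() returns the period in picoseconds, so ticks-to-ns divides by 1000, while the inverse conversion should round up so a requested timing is never shortened. A standalone restatement; the round-up form is an assumption, not text copied from the driver:

    /* tick_ps: GPMC clock period in picoseconds (per chip select). */
    static unsigned int ticks_to_ns(unsigned int ticks, unsigned long tick_ps)
    {
            return ticks * tick_ps / 1000;          /* ps -> ns */
    }

    static unsigned int ns_to_ticks(unsigned int time_ns, unsigned long tick_ps)
    {
            unsigned long time_ps = (unsigned long)time_ns * 1000;

            /* round up: one tick too many is safe, one too few is not */
            return (time_ps + tick_ps - 1) / tick_ps;
    }
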
/drivers/accel/habanalabs/common/
D | command_submission.c
     232  void cs_get(struct hl_cs *cs)    in cs_get() (argument)
     234  kref_get(&cs->refcount);    in cs_get()
     237  static int cs_get_unless_zero(struct hl_cs *cs)    in cs_get_unless_zero() (argument)
     239  return kref_get_unless_zero(&cs->refcount);    in cs_get_unless_zero()
     242  static void cs_put(struct hl_cs *cs)    in cs_put() (argument)
     244  kref_put(&cs->refcount, cs_do_release);    in cs_put()
     259  bool cs_needs_completion(struct hl_cs *cs)    in cs_needs_completion() (argument)
     264  if (cs->staged_cs && !cs->staged_last)    in cs_needs_completion()
     270  bool cs_needs_timeout(struct hl_cs *cs)    in cs_needs_timeout() (argument)
     275  if (cs->staged_cs && !cs->staged_first)    in cs_needs_timeout()
     [all …]

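Here cs is a habanalabs command submission, and cs_get()/cs_put()/cs_get_unless_zero() are the standard kref lifecycle: the final put invokes the release callback, and kref_get_unless_zero() protects lookups that race with teardown. A reduced sketch of the same pattern; toy_cs and toy_cs_release() are stand-ins for hl_cs and cs_do_release():

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct toy_cs {
            struct kref refcount;
            /* ... payload ... */
    };

    static void toy_cs_release(struct kref *ref)
    {
            struct toy_cs *cs = container_of(ref, struct toy_cs, refcount);

            kfree(cs);
    }

    static void toy_cs_put(struct toy_cs *cs)
    {
            kref_put(&cs->refcount, toy_cs_release);    /* frees at zero */
    }

    static bool toy_cs_get_unless_zero(struct toy_cs *cs)
    {
            /* fails if a concurrent put already dropped the last reference */
            return kref_get_unless_zero(&cs->refcount);
    }
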
D | hw_queue.c
      41  void hl_hw_queue_update_ci(struct hl_cs *cs)    in hl_hw_queue_update_ci() (argument)
      43  struct hl_device *hdev = cs->ctx->hdev;    in hl_hw_queue_update_ci()
      62  if (!cs_needs_completion(cs) || q->queue_type == QUEUE_TYPE_INT)    in hl_hw_queue_update_ci()
      63  atomic_add(cs->jobs_in_queue_cnt[i], &q->ci);    in hl_hw_queue_update_ci()
     270  struct hl_device *hdev = job->cs->ctx->hdev;    in ext_queue_schedule_job()
     291  if (!cs_needs_completion(job->cs))    in ext_queue_schedule_job()
     335  struct hl_device *hdev = job->cs->ctx->hdev;    in int_queue_schedule_job()
     371  struct hl_device *hdev = job->cs->ctx->hdev;    in hw_queue_schedule_job()
     382  offset = job->cs->sequence & (hdev->asic_prop.max_pending_cs - 1);    in hw_queue_schedule_job()
     433  job->cs->sob_addr_offset = hw_sob->sob_addr;    in init_signal_cs()
     [all …]

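Line 382 is the classic power-of-two ring trick: when the capacity max_pending_cs is a power of two, masking a monotonically increasing sequence number with (capacity - 1) is a cheap modulo that selects the pending slot. Restated standalone:

    #include <linux/types.h>

    /* Valid only when max_pending is a power of two. */
    static u32 pending_slot(u64 sequence, u32 max_pending)
    {
            return (u32)(sequence & (max_pending - 1));
    }

    /* e.g. max_pending == 64: sequences 0..63 map to slots 0..63,
     * sequence 64 wraps back to slot 0, and so on. */
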
/drivers/net/slip/
D | slhc.c
     232  struct cstate *cs = lcs->next;    in slhc_compress() (local)
     297  if( ip->saddr == cs->cs_ip.saddr    in slhc_compress()
     298  && ip->daddr == cs->cs_ip.daddr    in slhc_compress()
     299  && th->source == cs->cs_tcp.source    in slhc_compress()
     300  && th->dest == cs->cs_tcp.dest)    in slhc_compress()
     304  if ( cs == ocs )    in slhc_compress()
     306  lcs = cs;    in slhc_compress()
     307  cs = cs->next;    in slhc_compress()
     329  } else if (cs == ocs) {    in slhc_compress()
     334  lcs->next = cs->next;    in slhc_compress()
     [all …]

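In this Van Jacobson header-compression code, cs walks the list of per-connection states looking for a match on the IP/TCP 4-tuple, with lcs trailing one node behind so a hit can be spliced out (line 334) and kept near the front. A reduced sketch of such a walk with move-to-front caching; struct conn is a stand-in keeping only the compared fields, where the real cstate stores entire IP and TCP headers:

    #include <linux/types.h>

    struct conn {
            struct conn *next;
            u32 saddr, daddr;
            u16 sport, dport;
    };

    static struct conn *find_conn(struct conn **head,
                                  u32 saddr, u32 daddr, u16 sport, u16 dport)
    {
            struct conn *lcs = NULL, *cs;

            for (cs = *head; cs; lcs = cs, cs = cs->next) {
                    if (cs->saddr == saddr && cs->daddr == daddr &&
                        cs->sport == sport && cs->dport == dport) {
                            if (lcs) {                      /* not already first */
                                    lcs->next = cs->next;   /* unlink */
                                    cs->next = *head;       /* relink at head */
                                    *head = cs;
                            }
                            return cs;
                    }
            }
            return NULL;    /* caller picks a state to recycle */
    }
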
/drivers/clocksource/
D | timer-pistachio.c
      48  struct clocksource cs;    (member)
      53  #define to_pistachio_clocksource(cs) \    (argument)
      54  container_of(cs, struct pistachio_clocksource, cs)
      68  pistachio_clocksource_read_cycles(struct clocksource *cs)    in pistachio_clocksource_read_cycles() (argument)
      70  struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);    in pistachio_clocksource_read_cycles()
      90  return pistachio_clocksource_read_cycles(&pcs_gpt.cs);    in pistachio_read_sched_clock()
      93  static void pistachio_clksrc_set_mode(struct clocksource *cs, int timeridx,    in pistachio_clksrc_set_mode() (argument)
      96  struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);    in pistachio_clksrc_set_mode()
     108  static void pistachio_clksrc_enable(struct clocksource *cs, int timeridx)    in pistachio_clksrc_enable() (argument)
     110  struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);    in pistachio_clksrc_enable()
     [all …]

D | em_sti.c
      33  struct clocksource cs;    (member)
     181  static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs)    in cs_to_em_sti() (argument)
     183  return container_of(cs, struct em_sti_priv, cs);    in cs_to_em_sti()
     186  static u64 em_sti_clocksource_read(struct clocksource *cs)    in em_sti_clocksource_read() (argument)
     188  return em_sti_count(cs_to_em_sti(cs));    in em_sti_clocksource_read()
     191  static int em_sti_clocksource_enable(struct clocksource *cs)    in em_sti_clocksource_enable() (argument)
     193  struct em_sti_priv *p = cs_to_em_sti(cs);    in em_sti_clocksource_enable()
     198  static void em_sti_clocksource_disable(struct clocksource *cs)    in em_sti_clocksource_disable() (argument)
     200  em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE);    in em_sti_clocksource_disable()
     203  static void em_sti_clocksource_resume(struct clocksource *cs)    in em_sti_clocksource_resume() (argument)
     [all …]

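Both clocksource drivers above rely on the same embedding trick: struct clocksource sits inside the driver's private structure, the timekeeping core calls back with the clocksource pointer, and container_of() recovers the enclosing object. A reduced sketch; toy_priv is a stand-in, and the free-running counter register it reads is an assumption:

    #include <linux/clocksource.h>
    #include <linux/container_of.h>
    #include <linux/io.h>

    struct toy_priv {
            void __iomem *base;
            struct clocksource cs;  /* embedded, not a pointer */
    };

    static struct toy_priv *cs_to_toy(struct clocksource *cs)
    {
            /* subtracts offsetof(struct toy_priv, cs) from cs */
            return container_of(cs, struct toy_priv, cs);
    }

    static u64 toy_read(struct clocksource *cs)
    {
            struct toy_priv *p = cs_to_toy(cs);

            return readl(p->base);  /* assumed free-running counter at base */
    }
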
/drivers/gpu/drm/i915/gvt/
D | mmio_context.c
     204  u32 *cs;    in restore_context_mmio_for_inhibit() (local)
     218  cs = intel_ring_begin(req, count * 2 + 2);    in restore_context_mmio_for_inhibit()
     219  if (IS_ERR(cs))    in restore_context_mmio_for_inhibit()
     220  return PTR_ERR(cs);    in restore_context_mmio_for_inhibit()
     222  *cs++ = MI_LOAD_REGISTER_IMM(count);    in restore_context_mmio_for_inhibit()
     228  *cs++ = i915_mmio_reg_offset(mmio->reg);    in restore_context_mmio_for_inhibit()
     229  *cs++ = vgpu_vreg_t(vgpu, mmio->reg) | (mmio->mask << 16);    in restore_context_mmio_for_inhibit()
     231  *(cs-2), *(cs-1), vgpu->id, ring_id);    in restore_context_mmio_for_inhibit()
     234  *cs++ = MI_NOOP;    in restore_context_mmio_for_inhibit()
     235  intel_ring_advance(req, cs);    in restore_context_mmio_for_inhibit()
     [all …]

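The count * 2 + 2 reservation decodes as one MI_LOAD_REGISTER_IMM header dword, count (offset, value) dword pairs, and a trailing MI_NOOP, which also keeps the emitted length even. A sketch of a generic LRI emitter built the same way; emit_lri() itself is hypothetical:

    /* Assumes i915-internal context (intel_ring.h, intel_gpu_commands.h). */
    static int emit_lri(struct i915_request *rq,
                        const u32 *offsets, const u32 *values, u32 count)
    {
            u32 *cs;
            u32 i;

            cs = intel_ring_begin(rq, count * 2 + 2);
            if (IS_ERR(cs))
                    return PTR_ERR(cs);

            *cs++ = MI_LOAD_REGISTER_IMM(count);    /* header encodes pair count */
            for (i = 0; i < count; i++) {
                    *cs++ = offsets[i];             /* register offset */
                    *cs++ = values[i];              /* value to load */
            }
            *cs++ = MI_NOOP;                        /* pad to an even dword count */

            intel_ring_advance(rq, cs);
            return 0;
    }
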
/drivers/mfd/
D | atmel-smc.c
     245  void atmel_smc_cs_conf_apply(struct regmap *regmap, int cs,    in atmel_smc_cs_conf_apply() (argument)
     248  regmap_write(regmap, ATMEL_SMC_SETUP(cs), conf->setup);    in atmel_smc_cs_conf_apply()
     249  regmap_write(regmap, ATMEL_SMC_PULSE(cs), conf->pulse);    in atmel_smc_cs_conf_apply()
     250  regmap_write(regmap, ATMEL_SMC_CYCLE(cs), conf->cycle);    in atmel_smc_cs_conf_apply()
     251  regmap_write(regmap, ATMEL_SMC_MODE(cs), conf->mode);    in atmel_smc_cs_conf_apply()
     267  int cs, const struct atmel_smc_cs_conf *conf)    in atmel_hsmc_cs_conf_apply() (argument)
     269  regmap_write(regmap, ATMEL_HSMC_SETUP(layout, cs), conf->setup);    in atmel_hsmc_cs_conf_apply()
     270  regmap_write(regmap, ATMEL_HSMC_PULSE(layout, cs), conf->pulse);    in atmel_hsmc_cs_conf_apply()
     271  regmap_write(regmap, ATMEL_HSMC_CYCLE(layout, cs), conf->cycle);    in atmel_hsmc_cs_conf_apply()
     272  regmap_write(regmap, ATMEL_HSMC_TIMINGS(layout, cs), conf->timings);    in atmel_hsmc_cs_conf_apply()
     [all …]

/drivers/spi/
D | spi-fsl-spi.c
      92  struct spi_mpc8xxx_cs *cs = spi->controller_state;    in fsl_spi_change_mode() (local)
      97  if (cs->hw_mode == mpc8xxx_spi_read_reg(mode))    in fsl_spi_change_mode()
     104  mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE);    in fsl_spi_change_mode()
     110  mpc8xxx_spi_write_reg(mode, cs->hw_mode);    in fsl_spi_change_mode()
     148  static void mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,    in mspi_apply_cpu_mode_quirks() (argument)
     153  cs->rx_shift = 0;    in mspi_apply_cpu_mode_quirks()
     154  cs->tx_shift = 0;    in mspi_apply_cpu_mode_quirks()
     156  cs->get_rx = mpc8xxx_spi_rx_buf_u8;    in mspi_apply_cpu_mode_quirks()
     157  cs->get_tx = mpc8xxx_spi_tx_buf_u8;    in mspi_apply_cpu_mode_quirks()
     159  cs->get_rx = mpc8xxx_spi_rx_buf_u16;    in mspi_apply_cpu_mode_quirks()
     [all …]

D | spi-omap2-mcspi.c
     115  struct list_head cs;    (member)
     164  struct omap2_mcspi_cs *cs = spi->controller_state;    in mcspi_write_cs_reg() (local)
     166  writel_relaxed(val, cs->base + idx);    in mcspi_write_cs_reg()
     171  struct omap2_mcspi_cs *cs = spi->controller_state;    in mcspi_read_cs_reg() (local)
     173  return readl_relaxed(cs->base + idx);    in mcspi_read_cs_reg()
     178  struct omap2_mcspi_cs *cs = spi->controller_state;    in mcspi_cached_chconf0() (local)
     180  return cs->chconf0;    in mcspi_cached_chconf0()
     185  struct omap2_mcspi_cs *cs = spi->controller_state;    in mcspi_write_chconf0() (local)
     187  cs->chconf0 = val;    in mcspi_write_chconf0()
     224  struct omap2_mcspi_cs *cs = spi->controller_state;    in omap2_mcspi_set_enable() (local)
     [all …]

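mcspi_cached_chconf0() and mcspi_write_chconf0() implement a shadow register: CHCONF0 is touched constantly, so the driver keeps the last written value in memory and skips read-modify-write round trips to the bus. A reduced sketch of the idea; the struct and the register offset are illustrative stand-ins, not the driver's actual layout:

    #include <linux/io.h>
    #include <linux/types.h>

    struct toy_cs_state {
            void __iomem *base;
            u32 chconf0;            /* shadow of the hardware register */
    };

    #define TOY_CHCONF0     0x2c    /* assumed offset, for illustration only */

    static u32 toy_cached_chconf0(const struct toy_cs_state *cs)
    {
            return cs->chconf0;     /* no bus access needed */
    }

    static void toy_write_chconf0(struct toy_cs_state *cs, u32 val)
    {
            cs->chconf0 = val;      /* update the shadow first */
            writel_relaxed(val, cs->base + TOY_CHCONF0);
    }
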
D | spi-xlp.c
      98  int cs; /* slave device chip select */    (member)
     105  int cs, int regoff)    in xlp_spi_reg_read() (argument)
     107  return readl(priv->base + regoff + cs * SPI_CS_OFFSET);    in xlp_spi_reg_read()
     110  static inline void xlp_spi_reg_write(struct xlp_spi_priv *priv, int cs,    in xlp_spi_reg_write() (argument)
     113  writel(val, priv->base + regoff + cs * SPI_CS_OFFSET);    in xlp_spi_reg_write()
     127  int cs;    in xlp_spi_sysctl_setup() (local)
     129  for (cs = 0; cs < XLP_SPI_MAX_CS; cs++)    in xlp_spi_sysctl_setup()
     131  XLP_SPI_SYS_RESET << cs;    in xlp_spi_sysctl_setup()
     139  int cs;    in xlp_spi_setup() (local)
     142  cs = spi_get_chipselect(spi, 0);    in xlp_spi_setup()
     [all …]

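xlp_spi_reg_read()/xlp_spi_reg_write() show per-chip-select register banking: each chip select owns a block of registers SPI_CS_OFFSET bytes wide, so the accessors add cs * SPI_CS_OFFSET to reach the right bank. A sketch with an assumed stride (TOY_CS_OFFSET; the real value lives in the driver):

    #include <linux/io.h>
    #include <linux/types.h>

    #define TOY_CS_OFFSET   0x40    /* assumed per-CS register stride */

    static u32 toy_reg_read(void __iomem *base, int cs, int regoff)
    {
            return readl(base + regoff + cs * TOY_CS_OFFSET);
    }

    static void toy_reg_write(void __iomem *base, int cs, int regoff, u32 val)
    {
            writel(val, base + regoff + cs * TOY_CS_OFFSET);
    }
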
/drivers/gpu/drm/i915/selftests/
D | i915_perf.c
     163  u32 *cs;    in write_timestamp() (local)
     166  cs = intel_ring_begin(rq, 6);    in write_timestamp()
     167  if (IS_ERR(cs))    in write_timestamp()
     168  return PTR_ERR(cs);    in write_timestamp()
     174  *cs++ = GFX_OP_PIPE_CONTROL(len);    in write_timestamp()
     175  *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB |    in write_timestamp()
     178  *cs++ = slot * sizeof(u32);    in write_timestamp()
     179  *cs++ = 0;    in write_timestamp()
     180  *cs++ = 0;    in write_timestamp()
     181  *cs++ = 0;    in write_timestamp()
     [all …]

/drivers/media/v4l2-core/
D | v4l2-ctrls-api.c
     214  struct v4l2_ext_controls *cs,    in prepare_ext_ctrls() (argument)
     223  for (i = 0, h = helpers; i < cs->count; i++, h++) {    in prepare_ext_ctrls()
     224  struct v4l2_ext_control *c = &cs->controls[i];    in prepare_ext_ctrls()
     229  cs->error_idx = i;    in prepare_ext_ctrls()
     231  if (cs->which &&    in prepare_ext_ctrls()
     232  cs->which != V4L2_CTRL_WHICH_DEF_VAL &&    in prepare_ext_ctrls()
     233  cs->which != V4L2_CTRL_WHICH_REQUEST_VAL &&    in prepare_ext_ctrls()
     234  V4L2_CTRL_ID2WHICH(id) != cs->which) {    in prepare_ext_ctrls()
     270  if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL)    in prepare_ext_ctrls()
     [all …]

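prepare_ext_ctrls() records the index of the control that failed validation in cs->error_idx before bailing out, which is how the ext-controls ioctls tell userspace which array element was rejected. A reduced sketch of that convention; the types and the validity test are stand-ins, not the V4L2 core's actual checks:

    #include <linux/errno.h>
    #include <linux/types.h>

    struct toy_control {
            u32 id;
    };

    struct toy_controls {
            u32 count;
            u32 error_idx;
            struct toy_control *controls;
    };

    static int toy_validate(struct toy_controls *cs)
    {
            u32 i;

            for (i = 0; i < cs->count; i++) {
                    if (cs->controls[i].id == 0) {  /* assumed: 0 is invalid */
                            cs->error_idx = i;      /* report the failing slot */
                            return -EINVAL;
                    }
            }
            return 0;
    }
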