| /kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/ |
| D | gen8_engine_cs.c |
      16  u32 *cs, flags = 0;  in gen8_emit_flush_rcs() local
      58  cs = intel_ring_begin(rq, len);  in gen8_emit_flush_rcs()
      59  if (IS_ERR(cs))  in gen8_emit_flush_rcs()
      60          return PTR_ERR(cs);  in gen8_emit_flush_rcs()
      63  cs = gen8_emit_pipe_control(cs, 0, 0);  in gen8_emit_flush_rcs()
      66  cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,  in gen8_emit_flush_rcs()
      69  cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);  in gen8_emit_flush_rcs()
      72  cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);  in gen8_emit_flush_rcs()
      74  intel_ring_advance(rq, cs);  in gen8_emit_flush_rcs()
      81  u32 cmd, *cs;  in gen8_emit_flush_xcs() local
      [all …]
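The hits above all follow the same command-streamer emission contract: reserve a number of dwords with intel_ring_begin(), write them through the returned cs cursor (directly or via helpers such as gen8_emit_pipe_control(), which hand back the advanced cursor), and commit with intel_ring_advance(). A minimal sketch of that contract, assuming the usual i915 GT declarations are in scope; the reserved length and flags below are illustrative, not the values gen8_emit_flush_rcs() actually uses.

/*
 * Sketch of the ring-emission contract visible in the hits above.
 * Assumes intel_ring_begin/advance, gen8_emit_pipe_control and
 * LRC_PPHWSP_SCRATCH_ADDR from the i915 GT headers; length and flags
 * are illustrative only.
 */
static int example_emit_flush(struct i915_request *rq, u32 flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 6);		/* reserve 6 dwords */
	if (IS_ERR(cs))				/* ERR_PTR on failure, never NULL */
		return PTR_ERR(cs);

	/* The helper consumes the cursor and returns it advanced. */
	cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);

	intel_ring_advance(rq, cs);		/* commit the reserved dwords */
	return 0;
}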
|
| D | gen2_engine_cs.c |
      19  u32 cmd, *cs;  in gen2_emit_flush() local
      25  cs = intel_ring_begin(rq, 2 + 4 * num_store_dw);  in gen2_emit_flush()
      26  if (IS_ERR(cs))  in gen2_emit_flush()
      27          return PTR_ERR(cs);  in gen2_emit_flush()
      29  *cs++ = cmd;  in gen2_emit_flush()
      31  *cs++ = MI_STORE_DWORD_INDEX;  in gen2_emit_flush()
      32  *cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);  in gen2_emit_flush()
      33  *cs++ = 0;  in gen2_emit_flush()
      34  *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;  in gen2_emit_flush()
      36  *cs++ = cmd;  in gen2_emit_flush()
      [all …]
|
| D | gen6_engine_cs.c |
      32  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
      60  u32 *cs;  in gen6_emit_post_sync_nonzero_flush() local
      62  cs = intel_ring_begin(rq, 6);  in gen6_emit_post_sync_nonzero_flush()
      63  if (IS_ERR(cs))  in gen6_emit_post_sync_nonzero_flush()
      64          return PTR_ERR(cs);  in gen6_emit_post_sync_nonzero_flush()
      66  *cs++ = GFX_OP_PIPE_CONTROL(5);  in gen6_emit_post_sync_nonzero_flush()
      67  *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;  in gen6_emit_post_sync_nonzero_flush()
      68  *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;  in gen6_emit_post_sync_nonzero_flush()
      69  *cs++ = 0; /* low dword */  in gen6_emit_post_sync_nonzero_flush()
      70  *cs++ = 0; /* high dword */  in gen6_emit_post_sync_nonzero_flush()
      [all …]
|
| D | gen7_renderclear.c |
      12  #define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))  argument
     102  static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)  in batch_offset() argument
     104  return (cs - bc->start) * sizeof(*bc->start) + bc->offset;  in batch_offset()
     148  u32 *cs = batch_alloc_items(state, 32, 8);  in gen7_fill_surface_state() local
     149  u32 offset = batch_offset(state, cs);  in gen7_fill_surface_state()
     155  *cs++ = SURFACE_2D << 29 |  in gen7_fill_surface_state()
     159  *cs++ = batch_addr(state) + dst_offset;  in gen7_fill_surface_state()
     161  *cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1);  in gen7_fill_surface_state()
     162  *cs++ = surface_w;  in gen7_fill_surface_state()
     163  *cs++ = 0;  in gen7_fill_surface_state()
      [all …]
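batch_offset() above turns the current write cursor back into a byte offset within the batch: the pointer difference counts u32 dwords, so it is scaled by sizeof(u32) before the chunk's base byte offset is added. A restatement of just that arithmetic, with the batch_chunk struct replaced by plain parameters purely for illustration.

/*
 * The offset arithmetic from batch_offset(), restated outside the
 * batch_chunk struct to make the units explicit: pointer difference
 * in dwords, scaled to bytes, plus the chunk's byte offset.
 */
static u32 example_batch_offset(const u32 *start, const u32 *cs, u32 chunk_offset)
{
	return (u32)(cs - start) * sizeof(u32) + chunk_offset;
}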
|
| D | gen8_engine_cs.h |
      43  u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
      44  u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
      46  u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
      47  u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
      48  u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
      50  u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs);
      79  __gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)  in __gen8_emit_write_rcs() argument
      81  *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;  in __gen8_emit_write_rcs()
      82  *cs++ = flags1 | PIPE_CONTROL_QW_WRITE;  in __gen8_emit_write_rcs()
      83  *cs++ = offset;  in __gen8_emit_write_rcs()
      [all …]
|
| D | intel_migrate.c |
     336  u32 *cs;  in emit_no_arbitration() local
     338  cs = intel_ring_begin(rq, 2);  in emit_no_arbitration()
     339  if (IS_ERR(cs))  in emit_no_arbitration()
     340          return PTR_ERR(cs);  in emit_no_arbitration()
     343  *cs++ = MI_ARB_ON_OFF;  in emit_no_arbitration()
     344  *cs++ = MI_NOOP;  in emit_no_arbitration()
     345  intel_ring_advance(rq, cs);  in emit_no_arbitration()
     376  u32 *hdr, *cs;  in emit_pte() local
     403  cs = intel_ring_begin(rq, I915_EMIT_PTE_NUM_DWORDS);  in emit_pte()
     404  if (IS_ERR(cs))  in emit_pte()
      [all …]
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/ |
| D | gen2_engine_cs.c |
      17  u32 cmd, *cs;  in gen2_emit_flush() local
      23  cs = intel_ring_begin(rq, 2 + 4 * num_store_dw);  in gen2_emit_flush()
      24  if (IS_ERR(cs))  in gen2_emit_flush()
      25          return PTR_ERR(cs);  in gen2_emit_flush()
      27  *cs++ = cmd;  in gen2_emit_flush()
      29  *cs++ = MI_STORE_DWORD_INDEX;  in gen2_emit_flush()
      30  *cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);  in gen2_emit_flush()
      31  *cs++ = 0;  in gen2_emit_flush()
      32  *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;  in gen2_emit_flush()
      34  *cs++ = cmd;  in gen2_emit_flush()
      [all …]
|
| D | gen6_engine_cs.c |
      31  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
      59  u32 *cs;  in gen6_emit_post_sync_nonzero_flush() local
      61  cs = intel_ring_begin(rq, 6);  in gen6_emit_post_sync_nonzero_flush()
      62  if (IS_ERR(cs))  in gen6_emit_post_sync_nonzero_flush()
      63          return PTR_ERR(cs);  in gen6_emit_post_sync_nonzero_flush()
      65  *cs++ = GFX_OP_PIPE_CONTROL(5);  in gen6_emit_post_sync_nonzero_flush()
      66  *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;  in gen6_emit_post_sync_nonzero_flush()
      67  *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;  in gen6_emit_post_sync_nonzero_flush()
      68  *cs++ = 0; /* low dword */  in gen6_emit_post_sync_nonzero_flush()
      69  *cs++ = 0; /* high dword */  in gen6_emit_post_sync_nonzero_flush()
      [all …]
|
| D | gen7_renderclear.c |
      11  #define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))  argument
     101  static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)  in batch_offset() argument
     103  return (cs - bc->start) * sizeof(*bc->start) + bc->offset;  in batch_offset()
     147  u32 *cs = batch_alloc_items(state, 32, 8);  in gen7_fill_surface_state() local
     148  u32 offset = batch_offset(state, cs);  in gen7_fill_surface_state()
     154  *cs++ = SURFACE_2D << 29 |  in gen7_fill_surface_state()
     158  *cs++ = batch_addr(state) + dst_offset;  in gen7_fill_surface_state()
     160  *cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1);  in gen7_fill_surface_state()
     161  *cs++ = surface_w;  in gen7_fill_surface_state()
     162  *cs++ = 0;  in gen7_fill_surface_state()
      [all …]
|
| /kernel/linux/linux-6.6/kernel/time/ |
| D | clocksource.c |
      23  static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end)  in cycles_to_nsec_safe() argument
      25  u64 delta = clocksource_delta(end, start, cs->mask);  in cycles_to_nsec_safe()
      27  if (likely(delta < cs->max_cycles))  in cycles_to_nsec_safe()
      28          return clocksource_cyc2ns(delta, cs->mult, cs->shift);  in cycles_to_nsec_safe()
      30  return mul_u64_u32_shr(delta, cs->mult, cs->shift);  in cycles_to_nsec_safe()
     116  * Also a default for cs->uncertainty_margin when registering clocks.
     124  * a lower bound for cs->uncertainty_margin values when registering clocks.
     161  static void __clocksource_change_rating(struct clocksource *cs, int rating);
     181  static void __clocksource_unstable(struct clocksource *cs)  in __clocksource_unstable() argument
     183  cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);  in __clocksource_unstable()
      [all …]
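cycles_to_nsec_safe() above converts a cycle delta to nanoseconds and takes the fast path only while the delta stays under cs->max_cycles; both branches compute (delta * mult) >> shift, the slow one via mul_u64_u32_shr() so the intermediate product cannot overflow 64 bits. A sketch of the same decision, assuming clocksource_delta(), clocksource_cyc2ns() and mul_u64_u32_shr() as declared in the timekeeping and math64 headers.

/* Sketch of the conversion logic shown above (not the upstream body). */
static u64 example_cycles_to_nsec(const struct clocksource *cs, u64 start, u64 end)
{
	u64 delta = clocksource_delta(end, start, cs->mask);

	if (delta < cs->max_cycles)
		/* (delta * mult) >> shift fits in 64 bits on this path */
		return clocksource_cyc2ns(delta, cs->mult, cs->shift);

	/* same formula, but with a 128-bit intermediate product */
	return mul_u64_u32_shr(delta, cs->mult, cs->shift);
}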
|
| /kernel/linux/linux-5.10/kernel/time/ |
| D | clocksource.c |
      98  * Also a default for cs->uncertainty_margin when registering clocks.
     106  * a lower bound for cs->uncertainty_margin values when registering clocks.
     134  static void __clocksource_change_rating(struct clocksource *cs, int rating);
     160  static void __clocksource_unstable(struct clocksource *cs)  in __clocksource_unstable() argument
     162  cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);  in __clocksource_unstable()
     163  cs->flags |= CLOCK_SOURCE_UNSTABLE;  in __clocksource_unstable()
     169  if (list_empty(&cs->list)) {  in __clocksource_unstable()
     170          cs->rating = 0;  in __clocksource_unstable()
     174  if (cs->mark_unstable)  in __clocksource_unstable()
     175          cs->mark_unstable(cs);  in __clocksource_unstable()
      [all …]
|
| /kernel/linux/linux-6.6/drivers/scsi/ |
| D | myrs.c |
     104  static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)  in myrs_qcmd() argument
     106  void __iomem *base = cs->io_base;  in myrs_qcmd()
     108  union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;  in myrs_qcmd()
     110  cs->write_cmd_mbox(next_mbox, mbox);  in myrs_qcmd()
     112  if (cs->prev_cmd_mbox1->words[0] == 0 ||  in myrs_qcmd()
     113      cs->prev_cmd_mbox2->words[0] == 0)  in myrs_qcmd()
     114          cs->get_cmd_mbox(base);  in myrs_qcmd()
     116  cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;  in myrs_qcmd()
     117  cs->prev_cmd_mbox1 = next_mbox;  in myrs_qcmd()
     119  if (++next_mbox > cs->last_cmd_mbox)  in myrs_qcmd()
      [all …]
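myrs_qcmd() above writes the command into the next slot of an in-memory mailbox ring and only pokes the controller when one of the two previously submitted mailboxes reads back as zero. A compressed sketch of that producer side under the field names shown in the hits; the wrap-around target is elided in the listing, so first_cmd_mbox below is an assumption, and exactly what get_cmd_mbox() signals to the hardware is not visible here.

/*
 * Sketch of the mailbox producer path (not the driver function).
 * first_cmd_mbox as the wrap-around target is assumed; it is cut off
 * in the hit list above.
 */
static void example_queue_mbox(struct myrs_hba *cs, union myrs_cmd_mbox *mbox)
{
	union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;

	cs->write_cmd_mbox(next_mbox, mbox);

	/* Nudge the controller only if a previously used slot reads as zero. */
	if (cs->prev_cmd_mbox1->words[0] == 0 ||
	    cs->prev_cmd_mbox2->words[0] == 0)
		cs->get_cmd_mbox(cs->io_base);

	/* Remember the two most recently submitted slots... */
	cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;
	cs->prev_cmd_mbox1 = next_mbox;

	/* ...and advance the ring cursor, wrapping past the last slot. */
	if (++next_mbox > cs->last_cmd_mbox)
		next_mbox = cs->first_cmd_mbox;	/* assumed wrap target */
	cs->next_cmd_mbox = next_mbox;
}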
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/i915/pxp/ |
| D | intel_pxp_cmd.c |
      23  static u32 *pxp_emit_session_selection(u32 *cs, u32 idx)  in pxp_emit_session_selection() argument
      25  *cs++ = MFX_WAIT_PXP;  in pxp_emit_session_selection()
      28  *cs++ = MI_FLUSH_DW;  in pxp_emit_session_selection()
      29  *cs++ = 0;  in pxp_emit_session_selection()
      30  *cs++ = 0;  in pxp_emit_session_selection()
      33  *cs++ = MI_SET_APPID | MI_SET_APPID_SESSION_ID(idx);  in pxp_emit_session_selection()
      35  *cs++ = MFX_WAIT_PXP;  in pxp_emit_session_selection()
      38  *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_PROTECTED_MEM_EN |  in pxp_emit_session_selection()
      40  *cs++ = I915_GEM_HWS_PXP_ADDR | MI_FLUSH_DW_USE_GTT;  in pxp_emit_session_selection()
      41  *cs++ = 0;  in pxp_emit_session_selection()
      [all …]
|
| /kernel/linux/linux-5.10/drivers/misc/habanalabs/common/ |
| D | command_submission.c |
      49  /* EBUSY means the CS was never submitted and hence we don't have  in hl_fence_release()
      59  "CS 0x%llx type %d finished, sob_id: %d, sob_val: 0x%x\n",  in hl_fence_release()
      66  * A signal CS can get completion while the corresponding wait  in hl_fence_release()
      67  * for signal CS is on its way to the PQ. The wait for signal CS  in hl_fence_release()
      68  * will get stuck if the signal CS incremented the SOB to its  in hl_fence_release()
      72  * 1. The wait for signal CS must get a ref for the signal CS as  in hl_fence_release()
      76  * 2. Signal/Wait for signal CS will decrement the SOB refcnt  in hl_fence_release()
      78  * These two measures guarantee that the wait for signal CS will  in hl_fence_release()
      79  * reset the SOB upon completion rather than the signal CS and  in hl_fence_release()
     108  static void cs_get(struct hl_cs *cs)  in cs_get() argument
      [all …]
|
| /kernel/linux/linux-5.10/drivers/scsi/ |
| D | myrs.c |
     104  static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)  in myrs_qcmd() argument
     106  void __iomem *base = cs->io_base;  in myrs_qcmd()
     108  union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;  in myrs_qcmd()
     110  cs->write_cmd_mbox(next_mbox, mbox);  in myrs_qcmd()
     112  if (cs->prev_cmd_mbox1->words[0] == 0 ||  in myrs_qcmd()
     113      cs->prev_cmd_mbox2->words[0] == 0)  in myrs_qcmd()
     114          cs->get_cmd_mbox(base);  in myrs_qcmd()
     116  cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;  in myrs_qcmd()
     117  cs->prev_cmd_mbox1 = next_mbox;  in myrs_qcmd()
     119  if (++next_mbox > cs->last_cmd_mbox)  in myrs_qcmd()
      [all …]
|
| /kernel/linux/linux-6.6/drivers/accel/habanalabs/common/ |
| D | command_submission.c |
      23  * enum hl_cs_wait_status - cs wait status
      24  * @CS_WAIT_STATUS_BUSY: cs was not completed yet
      25  * @CS_WAIT_STATUS_COMPLETED: cs completed
      26  * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
      47  * CS outcome store supports the following operations:  in hl_push_cs_outcome()
      48  * push outcome - store a recent CS outcome in the store  in hl_push_cs_outcome()
      49  * pop outcome - retrieve a SPECIFIC (by seq) CS outcome from the store  in hl_push_cs_outcome()
      52  * a single CS outcome.  in hl_push_cs_outcome()
      72  dev_dbg(hdev->dev, "CS %llu outcome was lost\n", node->seq);  in hl_push_cs_outcome()
     232  void cs_get(struct hl_cs *cs)  in cs_get() argument
      [all …]
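Only the prototype of cs_get() is visible above; the surrounding kerneldoc and debug strings describe the outcome store and the fence lifetime around it. As a sketch, a command-submission object of this kind is typically pinned with a kref while a waiter may still need it; the refcount field name below is an assumption, not taken from the listing.

#include <linux/kref.h>

/*
 * Sketch only: the listing shows cs_get(struct hl_cs *cs) but not its
 * body.  A kref member named refcount is assumed here to illustrate
 * the take-a-reference pattern the surrounding comments describe.
 */
static void example_cs_get(struct hl_cs *cs)
{
	kref_get(&cs->refcount);	/* assumed field name */
}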
|
| /kernel/linux/linux-6.6/kernel/cgroup/ |
| D | cpuset.c |
     229  static inline struct cpuset *parent_cs(struct cpuset *cs)  in parent_cs() argument
     231  return css_cs(cs->css.parent);  in parent_cs()
     236  struct cpuset *cs = task_cs(p);  in inc_dl_tasks_cs() local
     238  cs->nr_deadline_tasks++;  in inc_dl_tasks_cs()
     243  struct cpuset *cs = task_cs(p);  in dec_dl_tasks_cs() local
     245  cs->nr_deadline_tasks--;  in dec_dl_tasks_cs()
     261  static inline bool is_cpuset_online(struct cpuset *cs)  in is_cpuset_online() argument
     263  return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);  in is_cpuset_online()
     266  static inline int is_cpu_exclusive(const struct cpuset *cs)  in is_cpu_exclusive() argument
     268  return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);  in is_cpu_exclusive()
      [all …]
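The cpuset accessors above are all thin: per-task state reaches the cpuset through task_cs(), hierarchy traversal goes through parent_cs(), and every is_*() predicate is a test_bit() against a CS_* flag in cs->flags. A hypothetical helper stitched together from only those pieces, assuming (as in the upstream css_cs() helper) that walking past the root eventually yields NULL.

/*
 * Hypothetical helper (not in cpuset.c) built from the accessors shown
 * above: walk toward the root until a CPU-exclusive cpuset is found.
 * Assumes parent_cs() returns NULL once the root is passed.
 */
static struct cpuset *example_nearest_cpu_exclusive(struct cpuset *cs)
{
	while (cs && !is_cpu_exclusive(cs))
		cs = parent_cs(cs);

	return cs;	/* NULL if no ancestor is CPU-exclusive */
}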
|
| /kernel/linux/linux-6.6/drivers/memory/ |
| D | stm32-fmc2-ebi.c |
     172  const struct stm32_fmc2_prop *prop, int cs);
     173  u32 (*calculate)(struct stm32_fmc2_ebi *ebi, int cs, u32 setup);
     176  int cs, u32 setup);
     181  int cs)  in stm32_fmc2_ebi_check_mux() argument
     186  ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);  in stm32_fmc2_ebi_check_mux()
     198  int cs)  in stm32_fmc2_ebi_check_waitcfg() argument
     203  ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);  in stm32_fmc2_ebi_check_waitcfg()
     215  int cs)  in stm32_fmc2_ebi_check_sync_trans() argument
     220  ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);  in stm32_fmc2_ebi_check_sync_trans()
     232  int cs)  in stm32_fmc2_ebi_check_async_trans() argument
      [all …]
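Each stm32_fmc2_ebi_check_*() helper reads the chip-select's BCR register through regmap and validates a single mode bit; the 6.6 hits also propagate the regmap_read() return value, which the 5.10 variant further down does not. The bit actually tested is elided above, so FMC2_BCR_MUXEN and the -EINVAL result in the sketch below are assumptions made only to show the shape.

/*
 * Sketch of the 6.6-style check helper.  FMC2_BCR_MUXEN and the
 * -EINVAL return are assumptions; the real mask and result are not
 * visible in the hit list.
 */
static int example_check_mux(struct stm32_fmc2_ebi *ebi, int cs)
{
	u32 bcr;
	int ret;

	ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
	if (ret)
		return ret;

	return (bcr & FMC2_BCR_MUXEN) ? 0 : -EINVAL;
}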
|
| /kernel/linux/linux-5.10/drivers/memory/ |
| D | stm32-fmc2-ebi.c |
     170  const struct stm32_fmc2_prop *prop, int cs);
     171  u32 (*calculate)(struct stm32_fmc2_ebi *ebi, int cs, u32 setup);
     174  int cs, u32 setup);
     179  int cs)  in stm32_fmc2_ebi_check_mux() argument
     183  regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);  in stm32_fmc2_ebi_check_mux()
     193  int cs)  in stm32_fmc2_ebi_check_waitcfg() argument
     197  regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);  in stm32_fmc2_ebi_check_waitcfg()
     207  int cs)  in stm32_fmc2_ebi_check_sync_trans() argument
     211  regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);  in stm32_fmc2_ebi_check_sync_trans()
     221  int cs)  in stm32_fmc2_ebi_check_async_trans() argument
      [all …]
|
| /kernel/linux/linux-5.10/kernel/cgroup/ |
| D | cpuset.c |
     214  static inline struct cpuset *parent_cs(struct cpuset *cs)  in parent_cs() argument
     216  return css_cs(cs->css.parent);  in parent_cs()
     221  struct cpuset *cs = task_cs(p);  in inc_dl_tasks_cs() local
     223  cs->nr_deadline_tasks++;  in inc_dl_tasks_cs()
     228  struct cpuset *cs = task_cs(p);  in dec_dl_tasks_cs() local
     230  cs->nr_deadline_tasks--;  in dec_dl_tasks_cs()
     246  static inline bool is_cpuset_online(struct cpuset *cs)  in is_cpuset_online() argument
     248  return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);  in is_cpuset_online()
     251  static inline int is_cpu_exclusive(const struct cpuset *cs)  in is_cpu_exclusive() argument
     253  return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);  in is_cpu_exclusive()
      [all …]
|
| /kernel/linux/linux-6.6/sound/core/ |
| D | pcm_iec958.c |
      14  * @cs: channel status buffer, at least four bytes
      17  * Create the consumer format channel status data in @cs of maximum size
      29  int snd_pcm_create_iec958_consumer_default(u8 *cs, size_t len)  in snd_pcm_create_iec958_consumer_default() argument
      34  memset(cs, 0, len);  in snd_pcm_create_iec958_consumer_default()
      36  cs[0] = IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_NONE;  in snd_pcm_create_iec958_consumer_default()
      37  cs[1] = IEC958_AES1_CON_GENERAL;  in snd_pcm_create_iec958_consumer_default()
      38  cs[2] = IEC958_AES2_CON_SOURCE_UNSPEC | IEC958_AES2_CON_CHANNEL_UNSPEC;  in snd_pcm_create_iec958_consumer_default()
      39  cs[3] = IEC958_AES3_CON_CLOCK_1000PPM | IEC958_AES3_CON_FS_NOTID;  in snd_pcm_create_iec958_consumer_default()
      42  cs[4] = IEC958_AES4_CON_WORDLEN_NOTID;  in snd_pcm_create_iec958_consumer_default()
      49  u8 *cs, size_t len)  in fill_iec958_consumer() argument
      [all …]
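snd_pcm_create_iec958_consumer_default() zeroes the caller's buffer and fills the first consumer-format (S/PDIF) channel-status bytes with conservative defaults: not-copyright / no emphasis, general category, unspecified source and channel, 1000 ppm clock with the sample rate not indicated, and, when a fifth byte is available, word length not indicated. A caller sketch, assuming the declaration lives in the usual <sound/pcm_iec958.h> header.

#include <sound/pcm_iec958.h>	/* assumed header for the declaration */

/* Caller sketch: build the default consumer channel-status block. */
static int example_default_channel_status(void)
{
	u8 cs[5];	/* at least four bytes required; byte 4 carries word length */

	return snd_pcm_create_iec958_consumer_default(cs, sizeof(cs));
}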
|
| /kernel/linux/linux-5.10/drivers/mfd/ |
| D | atmel-smc.c |
      15  * atmel_smc_cs_conf_init - initialize a SMC CS conf
      16  * @conf: the SMC CS conf to initialize
      79  * atmel_smc_cs_conf_set_timing - set the SMC CS conf Txx parameter to a
      81  * @conf: SMC CS conf descriptor
     121  * atmel_smc_cs_conf_set_setup - set the SMC CS conf xx_SETUP parameter to a
     123  * @conf: SMC CS conf descriptor
     160  * atmel_smc_cs_conf_set_pulse - set the SMC CS conf xx_PULSE parameter to a
     162  * @conf: SMC CS conf descriptor
     199  * atmel_smc_cs_conf_set_cycle - set the SMC CS conf xx_CYCLE parameter to a
     201  * @conf: SMC CS conf descriptor
      [all …]
|
| /kernel/linux/linux-6.6/drivers/mfd/ |
| D | atmel-smc.c |
      15  * atmel_smc_cs_conf_init - initialize a SMC CS conf
      16  * @conf: the SMC CS conf to initialize
      79  * atmel_smc_cs_conf_set_timing - set the SMC CS conf Txx parameter to a
      81  * @conf: SMC CS conf descriptor
     121  * atmel_smc_cs_conf_set_setup - set the SMC CS conf xx_SETUP parameter to a
     123  * @conf: SMC CS conf descriptor
     160  * atmel_smc_cs_conf_set_pulse - set the SMC CS conf xx_PULSE parameter to a
     162  * @conf: SMC CS conf descriptor
     199  * atmel_smc_cs_conf_set_cycle - set the SMC CS conf xx_CYCLE parameter to a
     201  * @conf: SMC CS conf descriptor
      [all …]
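The kerneldoc above sketches the atmel-smc API: a per-chip-select configuration object is initialized once, then its SETUP/PULSE/CYCLE/Txx fields are set individually before the configuration is applied to the controller. A hypothetical call sequence; only the @conf parameter appears in the hits, so the remaining arguments (a field selector plus a cycle count) and the ATMEL_SMC_* shift names below are assumptions, not the documented signatures.

/*
 * Hypothetical usage of the helpers documented above.  The second and
 * third arguments (which field, number of clock cycles) and the
 * ATMEL_SMC_* names are assumed; return values are ignored in this
 * sketch.
 */
static void example_configure_cs(struct atmel_smc_cs_conf *conf)
{
	atmel_smc_cs_conf_init(conf);

	/* assumed signatures: (conf, field-selector shift, ncycles) */
	atmel_smc_cs_conf_set_setup(conf, ATMEL_SMC_NRD_SETUP_SHIFT, 1);
	atmel_smc_cs_conf_set_pulse(conf, ATMEL_SMC_NRD_PULSE_SHIFT, 3);
	atmel_smc_cs_conf_set_cycle(conf, ATMEL_SMC_NRD_CYCLE_SHIFT, 6);
}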
|
| /kernel/linux/linux-5.10/Documentation/devicetree/bindings/memory-controllers/ |
| D | ti-aemif.txt |
      34  - CS-specific partition/range. If continuous, must be
      38  - control partition which is common for all CS
      56  Child chip-select (cs) nodes contain the memory devices nodes connected to
      60  Required child cs node properties:
      73  - ti,cs-chipselect: number of chipselect. Indicates on the aemif driver
      79  Optional child cs node properties:
      81  - ti,cs-bus-width: width of the asynchronous device's data bus
      84  - ti,cs-select-strobe-mode: enable/disable select strobe mode
      89  - ti,cs-extended-wait-mode: enable/disable extended wait mode
      95  - ti,cs-min-turnaround-ns: minimum turn around time, ns
      [all …]
|
| /kernel/linux/linux-6.6/Documentation/devicetree/bindings/memory-controllers/ |
| D | ti-aemif.txt |
      34  - CS-specific partition/range. If continuous, must be
      38  - control partition which is common for all CS
      56  Child chip-select (cs) nodes contain the memory devices nodes connected to
      60  Required child cs node properties:
      73  - ti,cs-chipselect: number of chipselect. Indicates on the aemif driver
      79  Optional child cs node properties:
      81  - ti,cs-bus-width: width of the asynchronous device's data bus
      84  - ti,cs-select-strobe-mode: enable/disable select strobe mode
      89  - ti,cs-extended-wait-mode: enable/disable extended wait mode
      95  - ti,cs-min-turnaround-ns: minimum turn around time, ns
      [all …]
|