Searched full:layers (Results 1 – 25 of 979) sorted by relevance

/kernel/linux/linux-4.19/include/linux/
edac.h
357 * Maximum number of layers used by the memory controller to uniquely
361 * some code there that are optimized for 3 layers.
370 * @layers: a struct edac_mc_layer array, describing how many elements
372 * @nlayers: Number of layers at the @layers array
379 * For 2 layers, this macro is similar to allocate a bi-dimensional array
382 * For 3 layers, this macro is similar to allocate a tri-dimensional array
386 * 3 layers, this is a little faster.
388 * By design, layers can never be 0 or more than 3. If that ever happens,
392 #define EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2) ({ \ argument
397 __i = (layer1) + ((layers[1]).size * (layer0)); \
[all …]
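
For the two-layer case, the EDAC_DIMM_OFF() fragment above (the hit at line 397) reduces a (layer0, layer1) coordinate to a single index into the flattened DIMM array. A minimal stand-alone sketch of that arithmetic, using illustrative names rather than the kernel macro:

#include <stdio.h>

struct fake_layer { unsigned int size; };

static unsigned int dimm_off_2layers(const struct fake_layer *layers,
                                     unsigned int layer0, unsigned int layer1)
{
        /* Row-major flattening of a layers[0].size x layers[1].size grid,
         * matching __i = (layer1) + ((layers[1]).size * (layer0)) above. */
        return layer1 + layers[1].size * layer0;
}

int main(void)
{
        struct fake_layer layers[2] = { { .size = 4 }, { .size = 2 } };

        /* csrow 3, channel 1 -> linear DIMM index 3 * 2 + 1 = 7 */
        printf("%u\n", dimm_off_2layers(layers, 3, 1));
        return 0;
}
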
/kernel/linux/linux-5.10/drivers/edac/
pasemi_edac.c
183 struct edac_mc_layer layers[2]; in pasemi_edac_probe() local
200 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; in pasemi_edac_probe()
201 layers[0].size = PASEMI_EDAC_NR_CSROWS; in pasemi_edac_probe()
202 layers[0].is_virt_csrow = true; in pasemi_edac_probe()
203 layers[1].type = EDAC_MC_LAYER_CHANNEL; in pasemi_edac_probe()
204 layers[1].size = PASEMI_EDAC_NR_CHANS; in pasemi_edac_probe()
205 layers[1].is_virt_csrow = false; in pasemi_edac_probe()
206 mci = edac_mc_alloc(system_mmc_id++, ARRAY_SIZE(layers), layers, in pasemi_edac_probe()
highbank_mc_edac.c
148 struct edac_mc_layer layers[2]; in highbank_mc_probe() local
162 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; in highbank_mc_probe()
163 layers[0].size = 1; in highbank_mc_probe()
164 layers[0].is_virt_csrow = true; in highbank_mc_probe()
165 layers[1].type = EDAC_MC_LAYER_CHANNEL; in highbank_mc_probe()
166 layers[1].size = 1; in highbank_mc_probe()
167 layers[1].is_virt_csrow = false; in highbank_mc_probe()
168 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, in highbank_mc_probe()
cell_edac.c
172 struct edac_mc_layer layers[2]; in cell_edac_probe() local
202 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; in cell_edac_probe()
203 layers[0].size = 1; in cell_edac_probe()
204 layers[0].is_virt_csrow = true; in cell_edac_probe()
205 layers[1].type = EDAC_MC_LAYER_CHANNEL; in cell_edac_probe()
206 layers[1].size = num_chans; in cell_edac_probe()
207 layers[1].is_virt_csrow = false; in cell_edac_probe()
208 mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers, in cell_edac_probe()
amd76x_edac.c
238 struct edac_mc_layer layers[2]; in amd76x_probe1() local
247 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; in amd76x_probe1()
248 layers[0].size = AMD76X_NR_CSROWS; in amd76x_probe1()
249 layers[0].is_virt_csrow = true; in amd76x_probe1()
250 layers[1].type = EDAC_MC_LAYER_CHANNEL; in amd76x_probe1()
251 layers[1].size = 1; in amd76x_probe1()
252 layers[1].is_virt_csrow = false; in amd76x_probe1()
253 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); in amd76x_probe1()
i82860_edac.c
188 struct edac_mc_layer layers[2]; in i82860_probe1() local
201 layers[0].type = EDAC_MC_LAYER_CHANNEL; in i82860_probe1()
202 layers[0].size = 2; in i82860_probe1()
203 layers[0].is_virt_csrow = true; in i82860_probe1()
204 layers[1].type = EDAC_MC_LAYER_SLOT; in i82860_probe1()
205 layers[1].size = 8; in i82860_probe1()
206 layers[1].is_virt_csrow = true; in i82860_probe1()
207 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); in i82860_probe1()
aspeed_edac.c
282 struct edac_mc_layer layers[2]; in aspeed_probe() local
307 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; in aspeed_probe()
308 layers[0].size = 1; in aspeed_probe()
309 layers[0].is_virt_csrow = true; in aspeed_probe()
310 layers[1].type = EDAC_MC_LAYER_CHANNEL; in aspeed_probe()
311 layers[1].size = 1; in aspeed_probe()
312 layers[1].is_virt_csrow = false; in aspeed_probe()
314 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); in aspeed_probe()
octeon_edac-lmc.c
228 struct edac_mc_layer layers[1]; in octeon_lmc_edac_probe() local
233 layers[0].type = EDAC_MC_LAYER_CHANNEL; in octeon_lmc_edac_probe()
234 layers[0].size = 1; in octeon_lmc_edac_probe()
235 layers[0].is_virt_csrow = false; in octeon_lmc_edac_probe()
246 mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt)); in octeon_lmc_edac_probe()
278 mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt)); in octeon_lmc_edac_probe()
r82600_edac.c
272 struct edac_mc_layer layers[2]; in r82600_probe1() local
286 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; in r82600_probe1()
287 layers[0].size = R82600_NR_CSROWS; in r82600_probe1()
288 layers[0].is_virt_csrow = true; in r82600_probe1()
289 layers[1].type = EDAC_MC_LAYER_CHANNEL; in r82600_probe1()
290 layers[1].size = R82600_NR_CHANS; in r82600_probe1()
291 layers[1].is_virt_csrow = false; in r82600_probe1()
292 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); in r82600_probe1()
/kernel/linux/linux-4.19/drivers/edac/
highbank_mc_edac.c
159 struct edac_mc_layer layers[2]; in highbank_mc_probe() local
173 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; in highbank_mc_probe()
174 layers[0].size = 1; in highbank_mc_probe()
175 layers[0].is_virt_csrow = true; in highbank_mc_probe()
176 layers[1].type = EDAC_MC_LAYER_CHANNEL; in highbank_mc_probe()
177 layers[1].size = 1; in highbank_mc_probe()
178 layers[1].is_virt_csrow = false; in highbank_mc_probe()
179 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, in highbank_mc_probe()
pasemi_edac.c
195 struct edac_mc_layer layers[2]; in pasemi_edac_probe() local
212 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; in pasemi_edac_probe()
213 layers[0].size = PASEMI_EDAC_NR_CSROWS; in pasemi_edac_probe()
214 layers[0].is_virt_csrow = true; in pasemi_edac_probe()
215 layers[1].type = EDAC_MC_LAYER_CHANNEL; in pasemi_edac_probe()
216 layers[1].size = PASEMI_EDAC_NR_CHANS; in pasemi_edac_probe()
217 layers[1].is_virt_csrow = false; in pasemi_edac_probe()
218 mci = edac_mc_alloc(system_mmc_id++, ARRAY_SIZE(layers), layers, in pasemi_edac_probe()
cell_edac.c
172 struct edac_mc_layer layers[2]; in cell_edac_probe() local
202 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; in cell_edac_probe()
203 layers[0].size = 1; in cell_edac_probe()
204 layers[0].is_virt_csrow = true; in cell_edac_probe()
205 layers[1].type = EDAC_MC_LAYER_CHANNEL; in cell_edac_probe()
206 layers[1].size = num_chans; in cell_edac_probe()
207 layers[1].is_virt_csrow = false; in cell_edac_probe()
208 mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers, in cell_edac_probe()
amd76x_edac.c
238 struct edac_mc_layer layers[2]; in amd76x_probe1() local
247 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; in amd76x_probe1()
248 layers[0].size = AMD76X_NR_CSROWS; in amd76x_probe1()
249 layers[0].is_virt_csrow = true; in amd76x_probe1()
250 layers[1].type = EDAC_MC_LAYER_CHANNEL; in amd76x_probe1()
251 layers[1].size = 1; in amd76x_probe1()
252 layers[1].is_virt_csrow = false; in amd76x_probe1()
253 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); in amd76x_probe1()
i82860_edac.c
188 struct edac_mc_layer layers[2]; in i82860_probe1() local
201 layers[0].type = EDAC_MC_LAYER_CHANNEL; in i82860_probe1()
202 layers[0].size = 2; in i82860_probe1()
203 layers[0].is_virt_csrow = true; in i82860_probe1()
204 layers[1].type = EDAC_MC_LAYER_SLOT; in i82860_probe1()
205 layers[1].size = 8; in i82860_probe1()
206 layers[1].is_virt_csrow = true; in i82860_probe1()
207 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); in i82860_probe1()
octeon_edac-lmc.c
228 struct edac_mc_layer layers[1]; in octeon_lmc_edac_probe() local
233 layers[0].type = EDAC_MC_LAYER_CHANNEL; in octeon_lmc_edac_probe()
234 layers[0].size = 1; in octeon_lmc_edac_probe()
235 layers[0].is_virt_csrow = false; in octeon_lmc_edac_probe()
246 mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt)); in octeon_lmc_edac_probe()
278 mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt)); in octeon_lmc_edac_probe()
i3200_edac.c
341 struct edac_mc_layer layers[2]; in i3200_probe1() local
356 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; in i3200_probe1()
357 layers[0].size = I3200_DIMMS; in i3200_probe1()
358 layers[0].is_virt_csrow = true; in i3200_probe1()
359 layers[1].type = EDAC_MC_LAYER_CHANNEL; in i3200_probe1()
360 layers[1].size = nr_channels; in i3200_probe1()
361 layers[1].is_virt_csrow = false; in i3200_probe1()
362 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, in i3200_probe1()
395 struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, in i3200_probe1()
r82600_edac.c
272 struct edac_mc_layer layers[2]; in r82600_probe1() local
286 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; in r82600_probe1()
287 layers[0].size = R82600_NR_CSROWS; in r82600_probe1()
288 layers[0].is_virt_csrow = true; in r82600_probe1()
289 layers[1].type = EDAC_MC_LAYER_CHANNEL; in r82600_probe1()
290 layers[1].size = R82600_NR_CHANS; in r82600_probe1()
291 layers[1].is_virt_csrow = false; in r82600_probe1()
292 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); in r82600_probe1()
x38_edac.c
323 struct edac_mc_layer layers[2]; in x38_probe1() local
339 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; in x38_probe1()
340 layers[0].size = X38_RANKS; in x38_probe1()
341 layers[0].is_virt_csrow = true; in x38_probe1()
342 layers[1].type = EDAC_MC_LAYER_CHANNEL; in x38_probe1()
343 layers[1].size = x38_channel_num; in x38_probe1()
344 layers[1].is_virt_csrow = false; in x38_probe1()
345 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); in x38_probe1()
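
The EDAC driver hits above all follow the same allocation pattern: describe the memory-controller topology as a small array of struct edac_mc_layer entries, then pass it to edac_mc_alloc(). A condensed sketch of that probe pattern is below; example_edac_probe and the layer sizes are placeholders rather than any particular driver, and error handling is omitted.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/edac.h>
#include "edac_module.h"        /* declares edac_mc_alloc(); local to drivers/edac */

static int example_edac_probe(struct platform_device *pdev)
{
        struct edac_mc_layer layers[2];
        struct mem_ctl_info *mci;

        /* Layer 0: chip-select rows, exposed through the legacy csrow view. */
        layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
        layers[0].size = 1;                     /* driver-specific count */
        layers[0].is_virt_csrow = true;

        /* Layer 1: channels within each chip select. */
        layers[1].type = EDAC_MC_LAYER_CHANNEL;
        layers[1].size = 1;                     /* driver-specific count */
        layers[1].is_virt_csrow = false;

        /* Arguments: MC index, number of layers, layer array, private-data size. */
        mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
        if (!mci)
                return -ENOMEM;

        /* ... fill in mci fields, then register with edac_mc_add_mc(mci) ... */
        return 0;
}
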
/kernel/linux/linux-5.10/drivers/media/dvb-frontends/
tc90522.c
201 int layers; in tc90522s_get_frontend() local
209 layers = 0; in tc90522s_get_frontend()
236 layers = (v > 0) ? 2 : 1; in tc90522s_get_frontend()
284 stats->len = layers; in tc90522s_get_frontend()
287 for (i = 0; i < layers; i++) in tc90522s_get_frontend()
290 for (i = 0; i < layers; i++) { in tc90522s_get_frontend()
298 stats->len = layers; in tc90522s_get_frontend()
300 for (i = 0; i < layers; i++) in tc90522s_get_frontend()
303 for (i = 0; i < layers; i++) { in tc90522s_get_frontend()
336 int layers; in tc90522t_get_frontend() local
[all …]
/kernel/linux/linux-4.19/drivers/media/dvb-frontends/
tc90522.c
201 int layers; in tc90522s_get_frontend() local
209 layers = 0; in tc90522s_get_frontend()
236 layers = (v > 0) ? 2 : 1; in tc90522s_get_frontend()
284 stats->len = layers; in tc90522s_get_frontend()
287 for (i = 0; i < layers; i++) in tc90522s_get_frontend()
290 for (i = 0; i < layers; i++) { in tc90522s_get_frontend()
298 stats->len = layers; in tc90522s_get_frontend()
300 for (i = 0; i < layers; i++) in tc90522s_get_frontend()
303 for (i = 0; i < layers; i++) { in tc90522s_get_frontend()
336 int layers; in tc90522t_get_frontend() local
[all …]
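
The tc90522 hits show a common ISDB pattern: the number of active layers sizes a dtv_fe_stats block, and one statistics entry is filled per layer. A small sketch of that pattern, assuming counter-scaled values; fill_per_layer_counters is an illustrative name, not the driver's.

#include <linux/dvb/frontend.h>         /* struct dtv_fe_stats, FE_SCALE_* */

/* 'layers' is expected to be at most MAX_DTV_STATS entries
 * (ISDB-T/ISDB-S use up to three layers). */
static void fill_per_layer_counters(struct dtv_fe_stats *stats,
                                    int layers, const __u64 *counts)
{
        int i;

        stats->len = layers;            /* one entry per active layer */
        for (i = 0; i < layers; i++) {
                stats->stat[i].scale = FE_SCALE_COUNTER;
                stats->stat[i].uvalue = counts[i];
        }
}
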
/kernel/linux/linux-4.19/Documentation/scsi/
scsi_eh.txt
137 Note that this does not mean lower layers are quiescent. If a LLDD
138 completed a scmd with error status, the LLDD and lower layers are
140 has timed out, unless hostt->eh_timed_out() made lower layers forget
142 active as long as lower layers are concerned and completion could
188 lower layers and lower layers are ready to process or fail the scmd
355 that lower layers have forgotten about the scmd and we can
364 and STU doesn't make lower layers forget about those
366 if STU succeeds leaving lower layers in an inconsistent
418 On completion, the handler should have made lower layers forget about
458 - Know that timed out scmds are still active on lower layers. Make
[all …]
ufs.txt
48 UFS communication architecture consists of following layers,
63 layers. Device level configurations involve handling of query
70 the higher layers through Service Access Points. UTP defines 3
71 service access points for higher layers.
88 * UIO_SAP: To issue commands to Unipro layers.
/kernel/linux/linux-5.10/Documentation/block/
inline-encryption.rst
44 - We need a way for upper layers like filesystems to specify an encryption
49 capabilities in a unified way to the upper layers.
57 encryption context from the upper layers (like the fs layer) to the
67 upper layers. The generic mode of operation is: each device driver that wants
69 Upper layers that want to use IE on this device can then use this KSM in
93 We introduce ``block/blk-crypto-fallback.c``, which allows upper layers to remain
149 ``blk_crypto_init_key`` allows upper layers to initialize such a
158 ``blk_crypto_config_supported`` allows upper layers to query whether or not the
166 ``blk_crypto_start_using_key`` - Upper layers must call this function on
174 ``blk_crypto_evict_key`` *must* be called by upper layers before a
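
The inline-encryption hits outline the key lifecycle expected of upper layers: initialize a blk_crypto_key, start using it on a queue, attach it to bios, and evict it before freeing the key material. A rough sketch of that sequence follows the documented call order; example_use_inline_key, the chosen mode, DUN width and data-unit size are assumptions, and the exact prototypes should be double-checked against include/linux/blk-crypto.h in the 5.10 tree.

#include <linux/blkdev.h>
#include <linux/blk-crypto.h>

static int example_use_inline_key(struct blk_crypto_key *key,
                                  const u8 *raw_key,
                                  struct request_queue *q)
{
        int err;

        /* Turn raw key material into a blk_crypto_key; mode, DUN bytes and
         * data-unit size here are illustrative choices. */
        err = blk_crypto_init_key(key, raw_key, BLK_ENCRYPTION_MODE_AES_256_XTS,
                                  8, 4096);
        if (err)
                return err;

        /* Declare intent to use the key on this queue; the block layer either
         * prepares hardware keyslots or arms blk-crypto-fallback. */
        err = blk_crypto_start_using_key(key, q);
        if (err)
                return err;

        /* ... attach the key to bios via a bio crypt context and submit I/O ... */

        /* Evict the key from any keyslots before freeing/zeroizing it. */
        return blk_crypto_evict_key(q, key);
}
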
/kernel/linux/linux-5.10/Documentation/scsi/
scsi_eh.rst
151 Note that this does not mean lower layers are quiescent. If a LLDD
152 completed a scmd with error status, the LLDD and lower layers are
154 has timed out, unless hostt->eh_timed_out() made lower layers forget
156 active as long as lower layers are concerned and completion could
205 lower layers and lower layers are ready to process or fail the scmd
388 that lower layers have forgotten about the scmd and we can
397 and STU doesn't make lower layers forget about those
399 if STU succeeds leaving lower layers in an inconsistent
452 On completion, the handler should have made lower layers forget about
495 - Know that timed out scmds are still active on lower layers. Make
[all …]
/kernel/linux/linux-4.19/drivers/staging/most/Documentation/
driver_usage.txt
8 MOST defines the protocol, hardware and software layers necessary to allow
19 consumer devices via optical or electrical physical layers directly to one
27 three layers. From bottom up these layers are: the adapter layer, the core
31 routing through all three layers, the configuration of the driver, the
35 For each of the other two layers a set of modules is provided. Those can be
