/kernel/linux/linux-5.10/arch/nios2/mm/ |
D | tlb.c |
    47   unsigned int way;  in replace_tlb_one_pid() local
    50   /* remember pid/way until we return. */  in replace_tlb_one_pid()
    55   for (way = 0; way < cpuinfo.tlb_num_ways; way++) {  in replace_tlb_one_pid()
    60   tlbmisc = TLBMISC_RD | (way << TLBMISC_WAY_SHIFT);  in replace_tlb_one_pid()
    73   (way << TLBMISC_WAY_SHIFT);  in replace_tlb_one_pid()
    126  unsigned int way;  in flush_tlb_one() local
    131  /* remember pid/way until we return. */  in flush_tlb_one()
    136  for (way = 0; way < cpuinfo.tlb_num_ways; way++) {  in flush_tlb_one()
    140  tlbmisc = TLBMISC_RD | (way << TLBMISC_WAY_SHIFT);  in flush_tlb_one()
    147  pr_debug("Flush entry by writing way=%dl pid=%ld\n",  in flush_tlb_one()
    [all …]
|
/kernel/linux/linux-5.10/arch/x86/kernel/cpu/ |
D | cacheinfo.c |
    45   { 0x06, LVL_1_INST, 8 },    /* 4-way set assoc, 32 byte line size */
    46   { 0x08, LVL_1_INST, 16 },   /* 4-way set assoc, 32 byte line size */
    47   { 0x09, LVL_1_INST, 32 },   /* 4-way set assoc, 64 byte line size */
    48   { 0x0a, LVL_1_DATA, 8 },    /* 2 way set assoc, 32 byte line size */
    49   { 0x0c, LVL_1_DATA, 16 },   /* 4-way set assoc, 32 byte line size */
    50   { 0x0d, LVL_1_DATA, 16 },   /* 4-way set assoc, 64 byte line size */
    51   { 0x0e, LVL_1_DATA, 24 },   /* 6-way set assoc, 64 byte line size */
    52   { 0x21, LVL_2, 256 },       /* 8-way set assoc, 64 byte line size */
    53   { 0x22, LVL_3, 512 },       /* 4-way set assoc, sectored cache, 64 byte line size */
    54   { 0x23, LVL_3, MB(1) },     /* 8-way set assoc, sectored cache, 64 byte line size */
    [all …]
|
D | intel.c |
    729  * One has 256kb of cache, the other 512. We have no way  in intel_size_cache()
    737  * Intel Quark SoC X1000 contains a 4-way set associative  in intel_size_cache()
    768  { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
    770  { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" },
    771  { 0x04, TLB_DATA_4M, 8, " TLB_DATA 4 MByte pages, 4-way set associative" },
    772  { 0x05, TLB_DATA_4M, 32, " TLB_DATA 4 MByte pages, 4-way set associative" },
    773  { 0x0b, TLB_INST_4M, 4, " TLB_INST 4 MByte pages, 4-way set associative" },
    779  { 0x56, TLB_DATA0_4M, 16, " TLB_DATA0 4 MByte pages, 4-way set associative" },
    780  { 0x57, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, 4-way associative" },
    782  { 0x5a, TLB_DATA0_2M_4M, 32, " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
    [all …]
|
/kernel/linux/linux-5.10/arch/sh/mm/ |
D | cache-sh2a.c |
    26   static void sh2a_flush_oc_line(unsigned long v, int way)  in sh2a_flush_oc_line() argument
    28   unsigned long addr = (v & 0x000007f0) | (way << 11);  in sh2a_flush_oc_line()
    76   int way;  in sh2a__flush_wback_region() local
    77   for (way = 0; way < nr_ways; way++) {  in sh2a__flush_wback_region()
    79   sh2a_flush_oc_line(v, way);  in sh2a__flush_wback_region()
    106  int way;  in sh2a__flush_purge_region() local
    108  for (way = 0; way < nr_ways; way++)  in sh2a__flush_purge_region()
    109  sh2a_flush_oc_line(v, way);  in sh2a__flush_purge_region()
|
D | cache-sh2.c |
    28   int way;  in sh2__flush_wback_region() local
    29   for (way = 0; way < 4; way++) {  in sh2__flush_wback_region()
    30   unsigned long data = __raw_readl(addr | (way << 12));  in sh2__flush_wback_region()
    33   __raw_writel(data, addr | (way << 12));  in sh2__flush_wback_region()
|
D | cache-debugfs.c |
    29   unsigned int waysize, way;  in cache_seq_show() local
    66   for (way = 0; way < cache->ways; way++) {  in cache_seq_show()
    71   seq_printf(file, "Way %d\n", way);  in cache_seq_show()
|
/kernel/linux/linux-5.10/arch/xtensa/include/asm/ |
D | tlbflush.h |
    130  static inline void write_dtlb_entry (pte_t entry, int way)  in write_dtlb_entry() argument
    133  : : "r" (way), "r" (entry) );  in write_dtlb_entry()
    136  static inline void write_itlb_entry (pte_t entry, int way)  in write_itlb_entry() argument
    139  : : "r" (way), "r" (entry) );  in write_itlb_entry()
    176  static inline unsigned long read_dtlb_virtual (int way)  in read_dtlb_virtual() argument
    179  __asm__ __volatile__("rdtlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));  in read_dtlb_virtual()
    183  static inline unsigned long read_dtlb_translation (int way)  in read_dtlb_translation() argument
    186  __asm__ __volatile__("rdtlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));  in read_dtlb_translation()
    190  static inline unsigned long read_itlb_virtual (int way)  in read_itlb_virtual() argument
    193  __asm__ __volatile__("ritlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));  in read_itlb_virtual()
    [all …]
|
/kernel/linux/linux-5.10/arch/arm/mm/ |
D | cache-xsc3l2.c |
    44   int set, way;  in xsc3_l2_inv_all() local
    49   for (way = 0; way < CACHE_WAY_PER_SET; way++) {  in xsc3_l2_inv_all()
    50   set_way = (way << 29) | (set << 5);  in xsc3_l2_inv_all()
    149  * optimize L2 flush all operation by set/way format
    154  int set, way;  in xsc3_l2_flush_all() local
    159  for (way = 0; way < CACHE_WAY_PER_SET; way++) {  in xsc3_l2_flush_all()
    160  set_way = (way << 29) | (set << 5);  in xsc3_l2_flush_all()
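For context on the excerpt above: xsc3_l2_inv_all()/xsc3_l2_flush_all() build their cache-maintenance operand by packing the way index into the top bits (bit 29 and up) and the set index at the 32-byte line boundary (bit 5). The stand-alone sketch below only reproduces that bit layout for illustration; NUM_WAYS and NUM_SETS are assumed example values, not figures from the kernel source, and the value is printed rather than written to a coprocessor register.

/* Sketch of the (way << 29) | (set << 5) operand layout shown above.
 * NUM_WAYS/NUM_SETS are assumptions chosen for the example. */
#include <stdio.h>

#define NUM_WAYS 8   /* assumed associativity */
#define NUM_SETS 4   /* kept tiny so the output stays short */

int main(void)
{
	unsigned int set, way;

	for (set = 0; set < NUM_SETS; set++) {
		for (way = 0; way < NUM_WAYS; way++) {
			/* way lands in bits [31:29], set starts at bit 5 (32-byte lines) */
			unsigned long set_way = ((unsigned long)way << 29) | (set << 5);

			/* The kernel would hand this to a cache-op register;
			 * here it is only printed. */
			printf("set=%u way=%u -> 0x%08lx\n", set, way, set_way);
		}
	}
	return 0;
}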
|
D | cache-v7m.S |
    48   * dcisw: Invalidate data cache by set/way
    55   * dccisw: Clean and invalidate data cache by set/way
    198  ands r4, r4, r1, lsr #3   @ find maximum number on the way size
    199  clz r5, r4                @ find bit position of way size increment
    206  orr r11, r10, r6          @ factor way and cache number into r11
    209  dccisw r11, r6            @ clean/invalidate by set/way
    212  subs r4, r4, #1           @ decrement the way
    231  * working outwards from L1 cache. This is done using Set/Way based cache
|
/kernel/linux/linux-5.10/arch/openrisc/include/asm/ |
D | spr_defs.h |
    72   #define SPR_DTLBMR_BASE(WAY) (SPRGROUP_DMMU + 0x200 + (WAY) * 0x100)  argument
    73   #define SPR_DTLBMR_LAST(WAY) (SPRGROUP_DMMU + 0x27f + (WAY) * 0x100)  argument
    74   #define SPR_DTLBTR_BASE(WAY) (SPRGROUP_DMMU + 0x280 + (WAY) * 0x100)  argument
    75   #define SPR_DTLBTR_LAST(WAY) (SPRGROUP_DMMU + 0x2ff + (WAY) * 0x100)  argument
    80   #define SPR_ITLBMR_BASE(WAY) (SPRGROUP_IMMU + 0x200 + (WAY) * 0x100)  argument
    81   #define SPR_ITLBMR_LAST(WAY) (SPRGROUP_IMMU + 0x27f + (WAY) * 0x100)  argument
    82   #define SPR_ITLBTR_BASE(WAY) (SPRGROUP_IMMU + 0x280 + (WAY) * 0x100)  argument
    83   #define SPR_ITLBTR_LAST(WAY) (SPRGROUP_IMMU + 0x2ff + (WAY) * 0x100)  argument
    92   #define SPR_DCR_BASE(WAY) (SPRGROUP_DC + 0x200 + (WAY) * 0x200)  argument
    93   #define SPR_DCR_LAST(WAY) (SPRGROUP_DC + 0x3ff + (WAY) * 0x200)  argument
    [all …]
|
/kernel/linux/linux-5.10/arch/x86/crypto/ |
D | twofish_glue_3way.c |
    3    * Glue Code for 3-way parallel assembler optimized version of Twofish
    175  .base.cra_driver_name = "ecb-twofish-3way",
    187  .base.cra_driver_name = "cbc-twofish-3way",
    200  .base.cra_driver_name = "ctr-twofish-3way",
    225  * On Atom, twofish-3way is slower than original assembler  in is_blacklisted_cpu()
    226  * implementation. Twofish-3way trades off some performance in  in is_blacklisted_cpu()
    238  * On Pentium 4, twofish-3way is slower than original assembler  in is_blacklisted_cpu()
    257  "twofish-x86_64-3way: performance on this CPU "  in init()
    259  "twofish-x86_64-3way.\n");  in init()
    276  MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");
|
/kernel/linux/linux-5.10/Documentation/x86/ |
D | entry_64.rst |
    28   either way.
    36   magically-generated functions that make their way to do_IRQ with
    64   Now, there's a secondary complication: there's a cheap way to test
    65   which mode the CPU is in and an expensive way.
    67   The cheap way is to pick this info off the entry frame on the kernel
    75   The expensive (paranoid) way is to read back the MSR_GS_BASE value
    96   stack but before we executed SWAPGS, then the only safe way to check
|
/kernel/linux/linux-5.10/arch/mips/include/asm/octeon/ |
D | cvmx-l2c.h |
    183  * Return the L2 Cache way partitioning for a given core.
    199  * a way, while a 1 bit blocks the core from evicting any
    200  * lines from that way. There must be at least one allowed
    201  * way (0 bit) in the mask.
    212  * Return the L2 Cache way partitioning for the hw blocks.
    214  * Returns The mask specifying the reserved way. 0 bits in mask indicates
    225  * a way, while a 1 bit blocks the core from evicting any
    226  * lines from that way. There must be at least one allowed
    227  * way (0 bit) in the mask.
    295  * @index: Which way to read from.
    [all …]
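The cvmx-l2c.h comments above describe a way-partition mask in which a 0 bit lets a core evict lines from that way, a 1 bit blocks it, and at least one way must stay allowed. The helper below is only a sketch of that rule; l2c_mask_is_valid() and the 8-way example are mine, not part of the cvmx API.

/* Hypothetical helper (not from cvmx-l2c.h): check the mask rule quoted
 * above: a 1 bit blocks eviction from that way, and at least one 0 bit
 * (allowed way) must remain. */
#include <stdbool.h>
#include <stdio.h>

static bool l2c_mask_is_valid(unsigned int mask, unsigned int num_ways)
{
	unsigned int all_ways = (num_ways >= 32) ? 0xffffffffu
						 : ((1u << num_ways) - 1u);

	mask &= all_ways;		/* ignore bits above the implemented ways */
	return mask != all_ways;	/* valid while some way is still allowed */
}

int main(void)
{
	/* 8-way example: blocking ways 0-3 is allowed, blocking all 8 is not. */
	printf("mask 0x0f valid: %d\n", l2c_mask_is_valid(0x0f, 8));	/* prints 1 */
	printf("mask 0xff valid: %d\n", l2c_mask_is_valid(0xff, 8));	/* prints 0 */
	return 0;
}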
|
/kernel/linux/linux-5.10/arch/mips/kernel/ |
D | bmips_5xxx_init.S |
    126  * Determine sets per way: IS
    128  * This field contains the number of sets (i.e., indices) per way of
    137  /* sets per way = (64<<IS) */
    164  /* v0 now have sets per way, multiply it by line size now
    174  * i) 0x0: Direct mapped, ii) 0x1: 2-way, iii) 0x2: 3-way, iv) 0x3:
    175  * 4-way, v) 0x4 - 0x7: Reserved.
    219  * Determine sets per way: IS
    221  * This field contains the number of sets (i.e., indices) per way of
    230  /* sets per way = (64<<IS) */
    256  /* v0 now have sets per way, multiply it by line size now
    [all …]
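The excerpt above derives cache geometry from the config fields: sets per way is 64 << IS, the associativity field 0x0-0x3 means direct mapped through 4-way, and the total size is sets per way times line size times ways. Below is a small worked version of that arithmetic; the function name and the sample 32-byte line size are illustrative (the line-size decode itself is not part of the excerpt).

/* Worked example of the arithmetic in the comments above; the function
 * name and sample inputs are illustrative only. */
#include <stdio.h>

static unsigned long cache_size_bytes(unsigned int is_field,
				      unsigned int assoc_field,
				      unsigned int line_size)
{
	unsigned long sets_per_way = 64UL << is_field;	/* sets per way = (64<<IS) */
	unsigned int ways = assoc_field + 1;		/* 0x0 direct mapped ... 0x3 4-way */

	return sets_per_way * line_size * ways;
}

int main(void)
{
	/* IS = 2 -> 256 sets/way; field 0x3 -> 4-way; 32-byte lines -> 32 KiB. */
	printf("%lu bytes\n", cache_size_bytes(2, 0x3, 32));
	return 0;
}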
|
/kernel/linux/linux-5.10/arch/arc/mm/ |
D | tlb.c |
    62   * ARC700 MMU-v1 had a Joint-TLB for Code and Data and is 2 way set-assoc.
    67   * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBS which has
    275  * Flush the entire MM for userland. The fastest way is to move to Next ASID
    304  * -Here the fastest way (if range is too large) is to move to next ASID
    874  #define SET_WAY_TO_IDX(mmu, set, way) ((set) * mmu->ways + (way))  argument
    901  int is_valid, way;  in do_tlb_overlap_fault() local
    905  for (way = 0, is_valid = 0; way < n_ways; way++) {  in do_tlb_overlap_fault()
    907  SET_WAY_TO_IDX(mmu, set, way));  in do_tlb_overlap_fault()
    909  pd0[way] = read_aux_reg(ARC_REG_TLBPD0);  in do_tlb_overlap_fault()
    910  is_valid |= pd0[way] & _PAGE_PRESENT;  in do_tlb_overlap_fault()
    [all …]
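SET_WAY_TO_IDX() in the excerpt above linearizes a (set, way) pair into a single TLB index as set * ways + way. The tiny stand-alone illustration below shows that mapping and its inverse; the struct and the sample geometry are stand-ins, not the kernel's definitions.

/* Illustration of the set*ways + way linearization quoted above; the
 * struct and values are stand-ins, not the kernel's definitions. */
#include <stdio.h>

struct mmu_info {
	unsigned int sets;
	unsigned int ways;
};

static unsigned int set_way_to_idx(const struct mmu_info *mmu,
				   unsigned int set, unsigned int way)
{
	return set * mmu->ways + way;
}

int main(void)
{
	struct mmu_info mmu = { .sets = 128, .ways = 2 };	/* 2-way J-TLB example */
	unsigned int idx = set_way_to_idx(&mmu, 5, 1);

	printf("set 5, way 1 -> idx %u (back: set %u, way %u)\n",
	       idx, idx / mmu.ways, idx % mmu.ways);
	return 0;
}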
|
/kernel/linux/linux-5.10/arch/mips/mm/ |
D | cerr-sb1.c |
    321  unsigned short way;  in extract_ic() local
    330  for (way = 0; way < 4; way++) {  in extract_ic()
    344  : "r" ((way << 13) | addr));  in extract_ic()
    347  if (way == 0) {  in extract_ic()
    377  way, va, valid, taghi, taglo);  in extract_ic()
    399  : "r" ((way << 13) | addr | (offset << 3)));  in extract_ic()
    477  int valid, way;  in extract_dc() local
    485  for (way = 0; way < 4; way++) {  in extract_dc()
    498  : "r" ((way << 13) | addr));  in extract_dc()
    502  if (way == 0) {  in extract_dc()
    [all …]
|
/kernel/linux/linux-5.10/arch/powerpc/mm/nohash/ |
D | tlb_low.S |
    108  oris r7,r6,0x8000        /* specify way explicitly */
    141  /* 476 variant. There's not simple way to do this, hopefully we'll
    149  lis r7,0x8000            /* Specify way explicitly */
    154  li r4,0                  /* Current way */
    156  andi. r0,r8,1            /* Check if way 0 is bolted */
    157  mtctr r9                 /* Load way counter */
    160  2: /* For each way */
    161  or r5,r3,r4              /* Make way|index for tlbre */
    164  3: addis r4,r4,0x2000    /* Next way */
    167  rlwimi r7,r5,0,1,2       /* Insert way number */
    [all …]
|
/kernel/linux/linux-5.10/arch/arm/include/asm/ |
D | v7m.h |
    83   #define V7M_SCB_DCISW  0x260  /* D-cache invalidate by set-way */
    86   #define V7M_SCB_DCCSW  0x26c  /* D-cache clean by set-way */
    88   #define V7M_SCB_DCCISW 0x274  /* D-cache clean and invalidate by set-way */
    89   #define V7M_SCB_BPIALL 0x278  /* D-cache clean and invalidate by set-way */
|
/kernel/linux/linux-5.10/drivers/acpi/apei/ |
D | Kconfig |
    27   Generic Hardware Error Source provides a way to report
    31   Linux by firmware. This way, some non-standard hardware
    67   ERST is a way provided by APEI to save and retrieve hardware
|
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/mux/ |
D | mux-controller.txt |
    11   space is a simple zero-based enumeration. I.e. 0-1 for a 2-way multiplexer,
    12   0-7 for an 8-way multiplexer, etc.
    43   /* One consumer of a 2-way mux controller (one GPIO-line) */
    69   * parallel 4-way multiplexers controlled by the same two GPIO-lines.
    149  4-way multiplexer):
|
/kernel/linux/linux-5.10/Documentation/admin-guide/ |
D | devices.txt |
    207   Partitions are handled in the same way as for IDE
    519   Partitions are handled the same way as for IDE disks
    530   Partitions are handled the same way as for the first
    638   Partitions are handled in the same way as for IDE
    726   Partitions are handled the same way as for the first
    744   Partitions are handled the same way as for the first
    871   Partitions are handled in the same way as for IDE
    895   Partitions are handled in the same way as for IDE
    1044  Partitions are handled the same way as for the first
    1056  Partitions are handled the same way as for the first
    [all …]
|
/kernel/linux/linux-5.10/arch/arc/include/asm/ |
D | tlb-mmu1.h |
    20   ; Calculate set index for 2-way MMU
    38   or.nz r0,r0,1              ; set way bit
    45   ; Faster than hack #1 in non-thrash case, but hard-coded for 2-way MMU
    52   and.f r0,r0,0x000fe000     /* 2-way MMU mask */
|
/kernel/linux/linux-5.10/Documentation/process/ |
D | 2.Process.rst |
    67   As fixes make their way into the mainline, the patch rate will slow over
    161  describes the process in a somewhat idealized way. A much more detailed
    166  - Design. This is where the real requirements for the patch - and the way
    180  all the way to the mainline. The patch will show up in the maintainer's
    224  unassisted. The way the kernel developers have addressed this growth is
    231  subsystem maintainers are the gatekeepers (in a loose way) for the portion
    260  normally the right way to go.
    313  their way into linux-next some time before the merge window opens.
    320  many sub-directories for drivers or filesystems that are on their way to
    323  kernel proper. This is a way to keep track of drivers that aren't
    [all …]
|
/kernel/linux/linux-5.10/arch/powerpc/platforms/powernv/ |
D | subcore.c |
    30   * A core can be in one of three states, unsplit, 2-way split, and 4-way split.
    37   * 2-way split | 2
    38   * 4-way split | 4
    50   * 2-way split:
    57   * 4-way split:
    72   * | | <----> | 2-way split |
    76   * | | <----> | 4-way split |
|
/kernel/linux/linux-5.10/lib/ |
D | pci_iomap.c |
    23   * you expect from them in the correct way.
    62   * you expect from them in the correct way. When possible write combining
    106  * you expect from them in the correct way.
    126  * you expect from them in the correct way. When possible write combining
|