
Lines Matching +full:has-builtin-dma

1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
6 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
55 p_slc->sz_k = 128 << slc_cfg.sz; in read_decode_cache_bcr_arcv2()
56 l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64; in read_decode_cache_bcr_arcv2()
57 n += scnprintf(buf + n, len - n, in read_decode_cache_bcr_arcv2()
59 p_slc->sz_k, p_slc->line_len, IS_USED_RUN(slc_enable)); in read_decode_cache_bcr_arcv2()
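A hedged, standalone sketch of the SLC geometry decode above; the (sz, lsz) field layout here is assumed purely for illustration and is not the real BCR layout:

    #include <stdio.h>

    struct slc_cfg_sketch { unsigned int sz:2, lsz:1; };

    int main(void)
    {
        struct slc_cfg_sketch slc_cfg = { .sz = 1, .lsz = 0 };
        unsigned int sz_k = 128 << slc_cfg.sz;                  /* 128K << 1 = 256K */
        unsigned int line_len = (slc_cfg.lsz == 0) ? 128 : 64;  /* same selector as line 56 */

        printf("SLC: %uK, %uB line\n", sz_k, line_len);
        return 0;
    }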
69 * only ZONE_NORMAL (low mem) and any dma transactions outside this in read_decode_cache_bcr_arcv2()
72 * bounce_buffer to handle dma transactions to HIGHMEM. in read_decode_cache_bcr_arcv2()
88 /* HS 3.0 has limit and strict-ordering fields */ in read_decode_cache_bcr_arcv2()
90 perip_end = (vol.limit << 28) - 1; in read_decode_cache_bcr_arcv2()
93 n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n", in read_decode_cache_bcr_arcv2()
95 IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) ")); in read_decode_cache_bcr_arcv2()
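The limit field above counts 256 MB (1 << 28) units, so the uncached peripheral aperture ends one byte below limit * 256 MB. A worked example, with the limit value chosen only for illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int limit = 0xf;                             /* 4-bit field, illustrative */
        unsigned long perip_end = ((unsigned long)limit << 28) - 1;

        printf("peripheral space ends at %#lx\n", perip_end); /* 0xefffffff */
        return 0;
    }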
118 p_ic->line_len = 8 << ibcr.line_len; in arc_cache_mumbojumbo()
119 p_ic->sz_k = 1 << (ibcr.sz - 1); in arc_cache_mumbojumbo()
120 p_ic->colors = p_ic->sz_k/assoc/TO_KB(PAGE_SIZE); in arc_cache_mumbojumbo()
122 n += scnprintf(buf + n, len - n, in arc_cache_mumbojumbo()
123 "I-Cache\t\t: %uK, %dway/set, %uB Line, VIPT%s%s\n", in arc_cache_mumbojumbo()
124 p_ic->sz_k, assoc, p_ic->line_len, in arc_cache_mumbojumbo()
125 p_ic->colors > 1 ? " aliasing" : "", in arc_cache_mumbojumbo()
137 p_dc->colors = p_dc->sz_k/assoc/TO_KB(PAGE_SIZE); in arc_cache_mumbojumbo()
141 p_dc->colors = 1; /* PIPT so can't VIPT alias */ in arc_cache_mumbojumbo()
144 p_dc->line_len = 16 << dbcr.line_len; in arc_cache_mumbojumbo()
145 p_dc->sz_k = 1 << (dbcr.sz - 1); in arc_cache_mumbojumbo()
147 n += scnprintf(buf + n, len - n, in arc_cache_mumbojumbo()
148 "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n", in arc_cache_mumbojumbo()
149 p_dc->sz_k, assoc, p_dc->line_len, in arc_cache_mumbojumbo()
151 p_dc->colors > 1 ? " aliasing" : "", in arc_cache_mumbojumbo()
156 n += read_decode_cache_bcr_arcv2(c, buf + n, len - n); in arc_cache_mumbojumbo()
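Both decoders above derive geometry the same way: the line length is a small power of two scaled by a BCR field, the size comes from a power-of-two exponent, and the color count is size-per-way divided by page size. A self-contained sketch with hypothetical BCR values:

    #include <stdio.h>

    #define TO_KB(bytes) ((bytes) >> 10)

    int main(void)
    {
        unsigned int bcr_sz = 7, bcr_line_len = 3, assoc = 4;  /* hypothetical BCR fields */
        unsigned int page_sz = 8192;                           /* 8K pages assumed */

        unsigned int line_len = 8 << bcr_line_len;             /* 64B line, as in line 118 */
        unsigned int sz_k = 1 << (bcr_sz - 1);                 /* 64K cache, as in line 119 */
        unsigned int colors = sz_k / assoc / TO_KB(page_sz);

        /* 64K / 4 ways = 16K per way; 16K / 8K pages = 2 colors -> VIPT aliasing */
        printf("%uK, %u-way, %uB line, %u color(s)%s\n", sz_k, assoc, line_len,
               colors, colors > 1 ? " (aliasing)" : "");
        return 0;
    }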
162 * Line Operation on {I,D}-Cache
176 * - vaddr in {I,D}C_IV?L
177 * - paddr in {I,D}C_PTAG
180 * Programming model is different for aliasing vs. non-aliasing I$
181 * - D$ / Non-aliasing I$: only paddr in {I,D}C_IV?L
182 * - Aliasing I$: same as ARC700 above (so MMUv3 routine used for MMUv4 I$)
184 * - If PAE40 is enabled, independent of aliasing considerations, the higher
203 /* Ensure we properly floor/ceil the non-line aligned/sized requests in __cache_line_loop_v3()
204 * and have @paddr aligned to a cache line and an integral @num_lines. in __cache_line_loop_v3()
206 * -@paddr will be cache-line aligned already (being page aligned) in __cache_line_loop_v3()
207 * -@sz will be integral multiple of line size (being page sized). in __cache_line_loop_v3()
218 * if V-P const for loop, PTAG can be written once outside loop in __cache_line_loop_v3()
225 * Special work for HS38 aliasing I-cache configuration with PAE40 in __cache_line_loop_v3()
226 * - upper 8 bits of paddr need to be written into PTAG_HI in __cache_line_loop_v3()
227 * - (and needs to be written before the lower 32 bits) in __cache_line_loop_v3()
233 while (num_lines-- > 0) { in __cache_line_loop_v3()
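The floor/ceil requirement in the comments above amounts to: grow @sz by the offset of @paddr within its first line, floor @paddr to a line boundary, then round the byte count up to whole lines. A minimal sketch of that arithmetic (names and the 64B line size are illustrative):

    #include <stdio.h>

    #define LINE_SZ   64ul
    #define LINE_MASK (~(LINE_SZ - 1))
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long paddr = 0x1234, sz = 100;

        sz += paddr & ~LINE_MASK;   /* grow by offset into the first line */
        paddr &= LINE_MASK;         /* floor start to a line boundary */

        printf("start %#lx, %lu line(s)\n", paddr, DIV_ROUND_UP(sz, LINE_SZ)); /* 0x1200, 3 */
        return 0;
    }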
258 /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */ in __cache_line_loop_v4()
262 /* Ensure we properly floor/ceil the non-line aligned/sized requests in __cache_line_loop_v4()
263 * and have @paddr aligned to a cache line and an integral @num_lines. in __cache_line_loop_v4()
265 * -@paddr will be cache-line aligned already (being page aligned) in __cache_line_loop_v4()
266 * -@sz will be integral multiple of line size (being page sized). in __cache_line_loop_v4()
277 * - upper 8 bits of paddr need to be written into PTAG_HI in __cache_line_loop_v4()
278 * - (and needs to be written before the lower 32 bits) in __cache_line_loop_v4()
283 * Non-aliasing I-cache in HS38, in __cache_line_loop_v4()
284 * aliasing I-cache handled in __cache_line_loop_v3() in __cache_line_loop_v4()
291 while (num_lines-- > 0) { in __cache_line_loop_v4()
308 /* Only for non-aliasing I-cache in HS38 */ in __cache_line_loop_v4()
326 sz += L1_CACHE_BYTES - 1; in __cache_line_loop_v4()
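The PAE40 note repeated in both loops above pins down an ordering: the top 8 bits of the 40-bit physical address must land in the PTAG_HI register before the low 32 bits are written. A sketch of that ordering, with stand-in register names and a stubbed write_aux_reg():

    enum { REG_PTAG_HI, REG_PTAG };                        /* stand-ins, not real aux regs */

    unsigned int aux[2];
    void write_aux_reg(int reg, unsigned int v) { aux[reg] = v; }  /* stub for illustration */

    void tag_write(unsigned long long paddr40)
    {
        write_aux_reg(REG_PTAG_HI, (unsigned int)(paddr40 >> 32)); /* upper 8 bits first */
        write_aux_reg(REG_PTAG, (unsigned int)paddr40);            /* then the low 32 */
    }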
355 * Machine specific helpers for Entire D-Cache or Per Line ops
367 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE in __before_dc_op()
368 * flush-n-inv is achieved by INV cmd but with IM=1 in __before_dc_op()
369 * So toggle INV sub-mode depending on op request and default in __before_dc_op()
390 * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above in __before_dc_op()
408 /* flush / flush-n-inv both wait */ in __after_dc_op()
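Per the comments above, a single INV command covers both discard and flush-n-inv; DC_CTRL.IM selects between them. A minimal sketch of the toggle, with a hypothetical bit position for IM:

    #define DC_CTRL_IM (1u << 6)   /* hypothetical bit position */

    /* Returns the DC_CTRL value to program before issuing the INV command:
     * IM=1 -> flush-before-invalidate, IM=0 -> plain discard. */
    unsigned int before_dc_op(unsigned int ctrl, int flush_n_inv)
    {
        return flush_n_inv ? (ctrl | DC_CTRL_IM) : (ctrl & ~DC_CTRL_IM);
    }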
419 * Operation on Entire D-Cache
430 if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */ in __dc_entire_op()
459 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
523 __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz); in __ic_line_inv_vaddr_helper()
566 * - b'000 (default) is Flush, in slc_op_rgn()
567 * - b'001 is Invalidate if CTRL.IM == 0 in slc_op_rgn()
568 * - b'001 is Flush-n-Invalidate if CTRL.IM == 1 in slc_op_rgn()
579 ctrl |= SLC_CTRL_RGN_OP_INV; /* Inv or flush-n-inv */ in slc_op_rgn()
588 * END can't be same as START, so add (l2_line_sz - 1) to sz in slc_op_rgn()
590 end = paddr + sz + l2_line_sz - 1; in slc_op_rgn()
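Putting the two notes above together for a region-op sketch: set the RGN_OP field for invalidate (CTRL.IM then picks invalidate vs flush-n-invalidate), and pad the end address by one line because END must differ from START. The bit position and addresses below are illustrative:

    #include <stdio.h>

    #define SLC_CTRL_RGN_OP_INV (1u << 14)   /* hypothetical bit position */

    int main(void)
    {
        unsigned long paddr = 0x80001000, sz = 0x40, l2_line_sz = 64;
        unsigned int ctrl = 0;

        ctrl |= SLC_CTRL_RGN_OP_INV;                     /* inv or flush-n-inv */
        unsigned long end = paddr + sz + l2_line_sz - 1; /* END strictly > START */

        printf("ctrl %#x, region [%#lx, %#lx]\n", ctrl, paddr, end); /* end 0x8000107f */
        return 0;
    }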
622 const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1); in slc_op_line()
646 while (num_lines-- > 0) { in slc_op_line()
675 if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */ in slc_entire_op()
708 * writes-to/reads-from
711 * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
712 * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
713 * -In SMP, if hardware caches are coherent
716 * If the U-mapping is not congruent to K-mapping, former needs flushing.
723 clear_bit(PG_dc_clean, &folio->flags); in flush_dcache_folio()
734 * Make a note that K-mapping is dirty in flush_dcache_folio()
737 clear_bit(PG_dc_clean, &folio->flags); in flush_dcache_folio()
739 /* kernel reading from page with U-mapping */ in flush_dcache_folio()
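The PG_dc_clean flag referenced above is the moving part: it records that the kernel (K-)mapping of the folio is clean, and any kernel write clears it so the deferred sync in update_mmu_cache can happen. A simplified, non-atomic sketch (the real code uses clear_bit on folio->flags):

    struct folio_sketch { unsigned long flags; };

    #define PG_DC_CLEAN (1ul << 0)   /* illustrative bit position */

    /* Kernel wrote to the page: K-mapping is now dirty w.r.t. D$,
     * so a later U-mapping must sync before being finalised. */
    void note_kmap_dirty(struct folio_sketch *folio)
    {
        folio->flags &= ~PG_DC_CLEAN;
    }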
761 * DMA ops for systems with L1 cache only
780 * DMA ops for systems with both L1 and L2 caches, but without IOC
802 * Exported DMA API
838 tot_sz = kend - kstart; in flush_icache_range()
849 * given the callers for this case: kprobe/kgdb in built-in in flush_icache_range()
852 __sync_icache_dcache(kstart, kstart, kend - kstart); in flush_icache_range()
872 sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off); in flush_icache_range()
875 tot_sz -= sz; in flush_icache_range()
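The loop excerpted above walks the range a page at a time, never crossing a page boundary, so each chunk has a single consistent vaddr-to-paddr translation. A standalone sketch of the chunking arithmetic (8K pages assumed):

    #include <stdio.h>

    #define PAGE_SIZE 8192u
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        unsigned long kstart = 0x90000f00, tot_sz = 10000;

        while (tot_sz > 0) {
            unsigned int off = kstart & (PAGE_SIZE - 1);
            unsigned int sz = MIN(tot_sz, PAGE_SIZE - off); /* stop at page boundary */

            printf("chunk at %#lx, %u bytes\n", kstart, sz);
            kstart += sz;
            tot_sz -= sz;
        }
        return 0;
    }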
885 * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
887 * builtin kernel page will not have any virtual mappings.
940 if (vma->vm_flags & VM_EXEC) in flush_cache_page()
972 * If SRC page was already mapped in userspace AND its U-mapping is in copy_user_highpage()
973 * not congruent with K-mapping, sync former to physical page so that in copy_user_highpage()
974 * K-mapping in memcpy below, sees the right data in copy_user_highpage()
990 * Mark DST page K-mapping as dirty for a later finalization by in copy_user_highpage()
993 * But update_mmu_cache() already has code to do that for other in copy_user_highpage()
997 clear_bit(PG_dc_clean, &dst->flags); in copy_user_highpage()
1000 * if SRC was already usermapped and non-congruent to kernel mapping in copy_user_highpage()
1007 clear_bit(PG_dc_clean, &src->flags); in copy_user_highpage()
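The congruence test implied above ("U-mapping not congruent with K-mapping") reduces, for a VIPT cache, to comparing the cache color of the two addresses. A sketch, with a page shift and color mask that are purely illustrative:

    #define PAGE_SHIFT 13
    #define CACHE_COLORS_MSK 1   /* hypothetical: 2 colors */

    int not_cache_congruent(unsigned long paddr, unsigned long vaddr)
    {
        return ((paddr >> PAGE_SHIFT) & CACHE_COLORS_MSK) !=
               ((vaddr >> PAGE_SHIFT) & CACHE_COLORS_MSK);
    }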
1018 clear_bit(PG_dc_clean, &folio->flags); in clear_user_page()
1034 * IO-Coherency (IOC) setup rules:
1037 * Non-Masters need not be accessing caches at that time
1038 * - They are either HALT_ON_RESET and kick started much later or
1039 * - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
1045 * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
1056 * re-enabling IOC while DMA might still be active is tricky business. in arc_ioc_setup()
1087 write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2); in arc_ioc_setup()
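The "- 2" in the aperture-size write above matches an encoding where a register value n means an aperture of 2^(n+2) KB. A worked example under that assumption, with a local stand-in for order_base_2():

    #include <stdio.h>

    unsigned int order_base_2(unsigned long v)   /* local stand-in */
    {
        unsigned int o = 0;
        while ((1ul << o) < v)
            o++;
        return o;
    }

    int main(void)
    {
        unsigned long mem_sz = 512ul << 20;              /* 512 MB, illustrative */
        unsigned int n = order_base_2(mem_sz >> 10) - 2; /* log2(512K KB) - 2 = 17 */

        printf("AP0_SIZE = %u -> %lu KB aperture\n", n, 1ul << (n + 2));
        return 0;
    }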
1099 /* Re-enable L1 dcache */ in arc_ioc_setup()
1105 * - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
1108 * - IOC setup / dma callbacks only need to be done once
1115 if (!ic->line_len) in arc_cache_init_master()
1116 panic("cache support enabled but non-existent cache\n"); in arc_cache_init_master()
1118 if (ic->line_len != L1_CACHE_BYTES) in arc_cache_init_master()
1120 ic->line_len, L1_CACHE_BYTES); in arc_cache_init_master()
1126 if (is_isa_arcv2() && ic->colors > 1) in arc_cache_init_master()
1135 if (!dc->line_len) in arc_cache_init_master()
1136 panic("cache support enabled but non-existent cache\n"); in arc_cache_init_master()
1138 if (dc->line_len != L1_CACHE_BYTES) in arc_cache_init_master()
1140 dc->line_len, L1_CACHE_BYTES); in arc_cache_init_master()
1142 /* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */ in arc_cache_init_master()
1146 if (dc->colors > 1) { in arc_cache_init_master()
1149 if (CACHE_COLORS_NUM != dc->colors) in arc_cache_init_master()
1151 } else if (handled && dc->colors == 1) { in arc_cache_init_master()
1186 * called at all for devices using coherent DMA. in arc_cache_init_master()
1187 * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*() in arc_cache_init_master()
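The checks above compare build-time constants against the probed geometry and panic on mismatch, since all line-op arithmetic was compiled in with those constants. A condensed sketch of the pattern, with a stand-in build-time value:

    #include <stdio.h>
    #include <stdlib.h>

    #define L1_CACHE_BYTES 64   /* build-time assumption, illustrative */

    void check_geometry(unsigned int probed_line_len)
    {
        if (!probed_line_len) {
            fprintf(stderr, "cache support enabled but non-existent cache\n");
            exit(1);
        }
        if (probed_line_len != L1_CACHE_BYTES) {
            fprintf(stderr, "line size %u != kernel's %u\n",
                    probed_line_len, L1_CACHE_BYTES);
            exit(1);
        }
    }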
1200 * And even if PAE is not enabled in kernel, the upper 32-bits still need in arc_cache_init()