
Lines Matching +full:has-builtin-dma

1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
6 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
44 if (!(p)->line_len) \ in arc_cache_mumbojumbo()
45 n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \ in arc_cache_mumbojumbo()
47 n += scnprintf(buf + n, len - n, \ in arc_cache_mumbojumbo()
49 (p)->sz_k, (p)->assoc, (p)->line_len, \ in arc_cache_mumbojumbo()
50 (p)->vipt ? "VIPT" : "PIPT", \ in arc_cache_mumbojumbo()
51 (p)->alias ? " aliasing" : "", \ in arc_cache_mumbojumbo()
54 PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache"); in arc_cache_mumbojumbo()
55 PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache"); in arc_cache_mumbojumbo()
58 if (p->line_len) in arc_cache_mumbojumbo()
59 n += scnprintf(buf + n, len - n, in arc_cache_mumbojumbo()
61 p->sz_k, p->line_len, IS_USED_RUN(slc_enable)); in arc_cache_mumbojumbo()
63 n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n", in arc_cache_mumbojumbo()
65 IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) ")); in arc_cache_mumbojumbo()
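
The accumulation idiom above relies on scnprintf() returning the number of characters actually stored, never more than the space left, so chaining `buf + n, len - n` cannot overrun the buffer. A minimal userspace sketch of the same pattern; scnprintf_like() is a hypothetical stand-in built on vsnprintf():

    #include <stdio.h>
    #include <stdarg.h>

    /* clamped vsnprintf(): returns chars actually stored, like scnprintf() */
    static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
    {
            va_list args;
            int i;

            va_start(args, fmt);
            i = vsnprintf(buf, size, fmt, args);
            va_end(args);

            if (i < 0)
                    return 0;
            if ((size_t)i >= size)  /* truncated: report what was stored */
                    return size ? (int)size - 1 : 0;
            return i;
    }

    int main(void)
    {
            char buf[64];
            int n = 0;

            n += scnprintf_like(buf + n, sizeof(buf) - n, "I-Cache\t: %uK\n", 32U);
            n += scnprintf_like(buf + n, sizeof(buf) - n, "D-Cache\t: %uK\n", 64U);
            fputs(buf, stdout);
            return 0;
    }
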
108 p_slc->sz_k = 128 << slc_cfg.sz; in read_decode_cache_bcr_arcv2()
109 l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64; in read_decode_cache_bcr_arcv2()
119 * only ZONE_NORMAL (low mem) and any dma transactions outside this in read_decode_cache_bcr_arcv2()
122 * bounce_buffer to handle dma transactions to HIGHMEM. in read_decode_cache_bcr_arcv2()
136 /* HS 3.0 has limit and strict-ordering fields */ in read_decode_cache_bcr_arcv2()
138 perip_end = (vol.limit << 28) - 1; in read_decode_cache_bcr_arcv2()
162 p_ic->assoc = 2; /* Fixed to 2w set assoc */ in read_decode_cache_bcr()
164 p_ic->assoc = 1 << ibcr.config; /* 1,2,4,8 */ in read_decode_cache_bcr()
167 p_ic->line_len = 8 << ibcr.line_len; in read_decode_cache_bcr()
168 p_ic->sz_k = 1 << (ibcr.sz - 1); in read_decode_cache_bcr()
169 p_ic->vipt = 1; in read_decode_cache_bcr()
170 p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1; in read_decode_cache_bcr()
181 p_dc->assoc = 4; /* Fixed to 4w set assoc */ in read_decode_cache_bcr()
182 p_dc->vipt = 1; in read_decode_cache_bcr()
183 p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1; in read_decode_cache_bcr()
185 p_dc->assoc = 1 << dbcr.config; /* 1,2,4,8 */ in read_decode_cache_bcr()
186 p_dc->vipt = 0; in read_decode_cache_bcr()
187 p_dc->alias = 0; /* PIPT so can't VIPT alias */ in read_decode_cache_bcr()
190 p_dc->line_len = 16 << dbcr.line_len; in read_decode_cache_bcr()
191 p_dc->sz_k = 1 << (dbcr.sz - 1); in read_decode_cache_bcr()
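
A standalone sketch of the geometry decode and alias test above; the BCR field values are hypothetical, but the shifts and the alias predicate mirror lines 164-191 (a cache is VIPT-aliasing iff one way spans more than one page):

    #include <stdio.h>

    #define TO_KB(x)    ((x) >> 10)
    #define PAGE_SIZE   8192        /* ARC Linux default: 8K pages */

    int main(void)
    {
            /* hypothetical BCR fields: 32K, 2-way, 64B lines */
            unsigned int config = 1, line_len_enc = 3, sz_enc = 6;

            unsigned int assoc    = 1u << config;          /* 1,2,4,8 ways */
            unsigned int line_len = 8u << line_len_enc;    /* bytes/line   */
            unsigned int sz_k     = 1u << (sz_enc - 1);    /* total KB     */

            /* aliasing iff way size exceeds page size */
            unsigned int way_k = sz_k / assoc;
            int alias = way_k / TO_KB(PAGE_SIZE) > 1;

            /* prints: 32K 2-way, 64B/line: way=16K -> aliasing */
            printf("%uK %u-way, %uB/line: way=%uK -> %s\n",
                   sz_k, assoc, line_len, way_k,
                   alias ? "aliasing" : "non-aliasing");
            return 0;
    }
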
199 * Line Operation on {I,D}-Cache
208 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
210 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
212 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
213 * In fact for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up fetching
216 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
219 * ------------------
220 * MMU v1/v2 (Fixed Page Size 8k)
221 * ------------------
223 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
228 * represent the offset within cache-line. The advantage of using this "clumsy"
233 * fewer, based on the num-of-aliases possible.
234 * -for 2 alias possibility, only bit 13 needed (32K cache)
235 * -for 4 alias possibility, bits 14:13 needed (64K cache)
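
The arithmetic behind that list, as a small sketch: with 8K pages the color count is way-size / page-size, and log2(colors) extra vaddr bits starting at bit 13 pick the set. The 2-way associativity assumed here matches the fixed 2-way I-cache noted at line 162:

    #include <stdio.h>

    #define PAGE_SHIFT  13    /* 8K page: vaddr/paddr agree in bits [12:0] */

    int main(void)
    {
            unsigned int cache_k[] = { 16, 32, 64 };    /* 2-way caches */

            for (unsigned int i = 0; i < sizeof(cache_k) / sizeof(cache_k[0]); i++) {
                    unsigned int way_bytes = cache_k[i] * 1024 / 2;
                    unsigned int colors = way_bytes >> PAGE_SHIFT;
                    unsigned int bits = 0;

                    while ((1u << bits) < colors)    /* log2(colors) */
                            bits++;

                    if (colors <= 1)
                            printf("%2uK: 1 color, no extra vaddr bits\n", cache_k[i]);
                    else    /* 32K -> bits [13:13], 64K -> bits [14:13] */
                            printf("%2uK: %u colors, vaddr bits [%u:%u]\n",
                                   cache_k[i], colors,
                                   PAGE_SHIFT + bits - 1, PAGE_SHIFT);
            }
            return 0;
    }
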
237 * ------------------
238 * MMU v3
239 * ------------------
240 * This version of the MMU supports variable page sizes (1k-16k): although Linux will
243 * meaning more vaddr bits needed to disambiguate the cache-line-op;
259 /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */ in __cache_line_loop_v2()
263 /* Ensure we properly floor/ceil the non-line aligned/sized requests in __cache_line_loop_v2()
264 * and have @paddr - aligned to cache line and integral @num_lines. in __cache_line_loop_v2()
266 * -@paddr will be cache-line aligned already (being page aligned) in __cache_line_loop_v2()
267 * -@sz will be integral multiple of line size (being page sized). in __cache_line_loop_v2()
280 while (num_lines-- > 0) { in __cache_line_loop_v2()
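
A userspace sketch of the floor/ceil fixup this comment describes for the sub-page case; CACHE_LINE_MASK and the exact statements are assumptions modeled on the comment, not a copy of the elided kernel lines:

    #include <stdio.h>

    #define L1_CACHE_BYTES      64
    #define CACHE_LINE_MASK     (~((unsigned long)L1_CACHE_BYTES - 1))
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long paddr = 0x80000030UL;  /* not line aligned    */
            unsigned long sz = 0x90;             /* not a line multiple */
            unsigned long num_lines;

            /* floor the start address, growing sz by the amount cut off */
            sz += paddr & ~CACHE_LINE_MASK;
            paddr &= CACHE_LINE_MASK;

            /* ceil to a whole number of lines */
            num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

            /* prints: paddr=0x80000000 sz=0xc0 -> 3 lines */
            printf("paddr=%#lx sz=%#lx -> %lu lines\n", paddr, sz, num_lines);
            return 0;
    }
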
287 * For ARC700 MMUv3 I-cache and D-cache flushes
288 * - ARC700 programming model requires paddr and vaddr be passed in separate
291 * - For HS38, only the aliasing I-cache configuration uses the PTAG reg
292 * (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
309 /* Ensure we properly floor/ceil the non-line aligned/sized requests in __cache_line_loop_v3()
310 * and have @paddr - aligned to cache line and integral @num_lines. in __cache_line_loop_v3()
312 * -@paddr will be cache-line aligned already (being page aligned) in __cache_line_loop_v3()
313 * -@sz will be integral multiple of line size (being page sized). in __cache_line_loop_v3()
324 * if V-P const for loop, PTAG can be written once outside loop in __cache_line_loop_v3()
331 * Special work for HS38 aliasing I-cache configuration with PAE40 in __cache_line_loop_v3()
332 * - upper 8 bits of paddr need to be written into PTAG_HI in __cache_line_loop_v3()
333 * - (and needs to be written before the lower 32 bits) in __cache_line_loop_v3()
339 while (num_lines-- > 0) { in __cache_line_loop_v3()
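
The PAE40 ordering rule from the comment above (upper 8 bits into PTAG_HI before the lower 32 bits) can be shown with stubbed aux-register writes; the register names are abbreviated and the stubs are purely illustrative:

    #include <stdio.h>
    #include <stdint.h>

    /* stand-in for the kernel's aux-register write, just logs the access */
    static void write_aux_reg(const char *reg, uint32_t val)
    {
            printf("write %s = %#x\n", reg, val);
    }

    /* tag writes for one line op on a 40-bit paddr (PAE40 case) */
    static void ic_ptag_write(uint64_t paddr, int pae40)
    {
            /* upper 8 bits must be in PTAG_HI before the low word is written */
            if (pae40)
                    write_aux_reg("IC_PTAG_HI", (uint32_t)(paddr >> 32));
            write_aux_reg("IC_PTAG", (uint32_t)paddr);
    }

    int main(void)
    {
            ic_ptag_write(0xab12345678ULL, 1);
            return 0;
    }
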
353 * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
356 * - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
357 * - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
358 * - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
375 /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */ in __cache_line_loop_v4()
379 /* Ensure we properly floor/ceil the non-line aligned/sized requests in __cache_line_loop_v4()
380 * and have @paddr - aligned to cache line and integral @num_lines. in __cache_line_loop_v4()
382 * -@paddr will be cache-line aligned already (being page aligned) in __cache_line_loop_v4()
383 * -@sz will be integral multiple of line size (being page sized). in __cache_line_loop_v4()
394 * - upper 8 bits of paddr need to be written into PTAG_HI in __cache_line_loop_v4()
395 * - (and needs to be written before the lower 32 bits) in __cache_line_loop_v4()
400 * Non aliasing I-cache in HS38, in __cache_line_loop_v4()
401 * aliasing I-cache handled in __cache_line_loop_v3() in __cache_line_loop_v4()
408 while (num_lines-- > 0) { in __cache_line_loop_v4()
425 /* Only for Non aliasing I-cache in HS38 */ in __cache_line_loop_v4()
443 sz += L1_CACHE_BYTES - 1; in __cache_line_loop_v4()
474 * Machine specific helpers for Entire D-Cache or Per Line ops
486 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE in __before_dc_op()
487 * flush-n-inv is achieved by INV cmd but with IM=1 in __before_dc_op()
488 * So toggle INV sub-mode depending on op request and default in __before_dc_op()
509 * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above in __before_dc_op()
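
A stubbed sketch of the IM toggling described above: one invalidate command, with DC_CTRL.IM choosing between plain discard and flush-before-invalidate. The OP_* encodings and the IM bit position are assumptions for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define OP_INV          0x1    /* encodings assumed for illustration */
    #define OP_FLUSH        0x2
    #define OP_FLUSH_N_INV  (OP_FLUSH | OP_INV)
    #define DC_CTRL_IM      (1u << 6)    /* assumed bit position */

    static uint32_t dc_ctrl;    /* stand-in for the DC_CTRL aux register */

    static void before_dc_op(int op)
    {
            /* IM=0: INV discards dirty lines; IM=1: wback first, then inv */
            if (op == OP_FLUSH_N_INV)
                    dc_ctrl |= DC_CTRL_IM;
            else if (op == OP_INV)
                    dc_ctrl &= ~DC_CTRL_IM;

            printf("op=%d -> DC_CTRL.IM=%d\n", op, !!(dc_ctrl & DC_CTRL_IM));
    }

    int main(void)
    {
            before_dc_op(OP_INV);           /* plain discard       */
            before_dc_op(OP_FLUSH_N_INV);   /* wback, then discard */
            return 0;
    }
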
527 /* flush / flush-n-inv both wait */ in __after_dc_op()
538 * Operation on Entire D-Cache
549 if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */ in __dc_entire_op()
578 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
642 __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz); in __ic_line_inv_vaddr_helper()
685 * - b'000 (default) is Flush, in slc_op_rgn()
686 * - b'001 is Invalidate if CTRL.IM == 0 in slc_op_rgn()
687 * - b'001 is Flush-n-Invalidate if CTRL.IM == 1 in slc_op_rgn()
698 ctrl |= SLC_CTRL_RGN_OP_INV; /* Inv or flush-n-inv */ in slc_op_rgn()
707 * END can't be same as START, so add (l2_line_sz - 1) to sz in slc_op_rgn()
709 end = paddr + sz + l2_line_sz - 1; in slc_op_rgn()
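
A stubbed sketch of the region programming around this line; the + (l2_line_sz - 1) mirrors line 709, while the END-before-START ordering and the register names are assumptions based on the surrounding driver, not shown in this excerpt:

    #include <stdio.h>

    static void write_aux_reg(const char *reg, unsigned long val)
    {
            printf("write %s = %#lx\n", reg, val);
    }

    static void slc_rgn_op(unsigned long paddr, unsigned long sz)
    {
            const unsigned long l2_line_sz = 128;

            /*
             * Hardware operates on [START, END] and END may not equal
             * START, so cover the last (possibly partial) line with
             * + l2_line_sz - 1 rather than the usual + sz - 1.
             */
            unsigned long end = paddr + sz + l2_line_sz - 1;

            write_aux_reg("SLC_RGN_END", end);      /* END set up first...    */
            write_aux_reg("SLC_RGN_START", paddr);  /* ...then START triggers */
    }

    int main(void)
    {
            slc_rgn_op(0x80000000UL, 0x100);
            return 0;
    }
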
741 const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1); in slc_op_line()
765 while (num_lines-- > 0) { in slc_op_line()
794 if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */ in slc_entire_op()
827 * writes-to/reads-from
830 * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
831 * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
832 * -In SMP, if hardware caches are coherent
835 * If the U-mapping is not congruent to the K-mapping, the former needs flushing.
842 clear_bit(PG_dc_clean, &page->flags); in flush_dcache_page()
853 * Make a note that K-mapping is dirty in flush_dcache_page()
856 clear_bit(PG_dc_clean, &page->flags); in flush_dcache_page()
859 /* kernel reading from page with U-mapping */ in flush_dcache_page()
861 unsigned long vaddr = page->index << PAGE_SHIFT; in flush_dcache_page()
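
Whether the K-mapping needs syncing against a U-mapping reduces to cache-color congruence: the two vaddrs conflict only if they differ in the alias bits just above PAGE_SHIFT. A minimal sketch of such a check; the kernel uses its own helper for this, so these names are illustrative:

    #include <stdio.h>

    #define PAGE_SHIFT    13
    #define CACHE_COLORS  2    /* e.g. 32K 2-way VIPT: 2 aliases */

    /* color = alias-selecting vaddr bits just above the page offset */
    static unsigned long cache_color(unsigned long addr)
    {
            return (addr >> PAGE_SHIFT) & (CACHE_COLORS - 1);
    }

    static int not_congruent(unsigned long kvaddr, unsigned long uvaddr)
    {
            return cache_color(kvaddr) != cache_color(uvaddr);
    }

    int main(void)
    {
            /* colors differ (0 vs 1): mappings use different sets -> flush */
            printf("%d\n", not_congruent(0x90000000UL, 0x20002000UL));
            /* colors agree: K- and U-mapping are congruent -> no flush */
            printf("%d\n", not_congruent(0x90002000UL, 0x20002000UL));
            return 0;
    }
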
870 * DMA ops for systems with L1 cache only
889 * DMA ops for systems with both L1 and L2 caches, but without IOC
911 * Exported DMA API
947 tot_sz = kend - kstart; in flush_icache_range()
958 * given the callers for this case: kprobe/kgdb in built-in in flush_icache_range()
961 __sync_icache_dcache(kstart, kstart, kend - kstart); in flush_icache_range()
981 sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off); in flush_icache_range()
984 tot_sz -= sz; in flush_icache_range()
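
A sketch of just the chunking arithmetic in this loop: vmalloc ranges are walked a page at a time (contiguous vaddr pages may map to discontiguous paddr pages), with each chunk clamped to the current page boundary. The vaddr-to-paddr translation is stubbed out as a printf:

    #include <stdio.h>

    #define PAGE_SIZE  8192

    #define min_t(t, a, b)  ((t)(a) < (t)(b) ? (t)(a) : (t)(b))

    int main(void)
    {
            unsigned long kstart = 0x70001f00UL;  /* starts mid-page     */
            unsigned long tot_sz = 0x5000;        /* spans several pages */

            while (tot_sz > 0) {
                    unsigned int off = kstart % PAGE_SIZE;
                    /* clamp the chunk to the end of the current page */
                    unsigned int sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);

                    /* the kernel would translate kstart -> paddr here, then flush */
                    printf("flush vaddr=%#lx sz=%#x\n", kstart, sz);

                    kstart += sz;
                    tot_sz -= sz;
            }
            return 0;
    }
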
994 * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
996 * builtin kernel page will not have any virtual mappings.
1049 if (vma->vm_flags & VM_EXEC) in flush_cache_page()
1079 * If SRC page was already mapped in userspace AND its U-mapping is in copy_user_highpage()
1080 * not congruent with K-mapping, sync former to physical page so that in copy_user_highpage()
1081 * K-mapping in memcpy below, sees the right data in copy_user_highpage()
1097 * Mark DST page K-mapping as dirty for a later finalization by in copy_user_highpage()
1100 * But update_mmu_cache() already has code to do that for other in copy_user_highpage()
1104 clear_bit(PG_dc_clean, &to->flags); in copy_user_highpage()
1107 * if SRC was already usermapped and non-congruent to kernel mapping in copy_user_highpage()
1112 set_bit(PG_dc_clean, &from->flags); in copy_user_highpage()
1114 clear_bit(PG_dc_clean, &from->flags); in copy_user_highpage()
1124 clear_bit(PG_dc_clean, &page->flags); in clear_user_page()
1140 * IO-Coherency (IOC) setup rules:
1143 * Non-Masters need not be accessing caches at that time
1144 * - They are either HALT_ON_RESET and kick started much later or
1145 * - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
1151 * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
1162 * re-enabling IOC when DMA might be active is tricky business. in arc_ioc_setup()
1193 write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2); in arc_ioc_setup()
1205 /* Re-enable L1 dcache */ in arc_ioc_setup()
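
Putting the rules above together, the sequence is: masters quiescent and caches clean, L1 dcache disabled, aperture programmed, IOC enabled, dcache re-enabled. A stubbed sketch of that ordering; the size encoding mirrors line 1193, while the base-register granularity and everything else here are illustrative assumptions:

    #include <stdio.h>

    static void write_aux_reg(const char *reg, unsigned long val)
    {
            printf("write %s = %#lx\n", reg, val);
    }

    static unsigned int order_base_2(unsigned long n)
    {
            unsigned int o = 0;

            while ((1UL << o) < n)
                    o++;
            return o;
    }

    int main(void)
    {
            unsigned long mem_base = 0x80000000UL;
            unsigned long mem_sz   = 512UL << 20;    /* 512 MB of DDR */

            /* 1+2. masters quiescent, dirty lines already flushed (elided) */
            /* 3. L1 dcache off while the aperture is (re)programmed */
            printf("disable L1 dcache\n");

            write_aux_reg("IO_COH_AP0_BASE", mem_base >> 12);  /* assumed 4K units */
            write_aux_reg("IO_COH_AP0_SIZE",
                          order_base_2(mem_sz >> 10) - 2);     /* as on line 1193 */

            write_aux_reg("IO_COH_ENABLE", 1);
            printf("re-enable L1 dcache\n");
            return 0;
    }
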
1211 * - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
1214 * - IOC setup / dma callbacks only need to be done once
1223 if (!ic->line_len) in arc_cache_init_master()
1224 panic("cache support enabled but non-existent cache\n"); in arc_cache_init_master()
1226 if (ic->line_len != L1_CACHE_BYTES) in arc_cache_init_master()
1228 ic->line_len, L1_CACHE_BYTES); in arc_cache_init_master()
1234 if (is_isa_arcv2() && ic->alias) in arc_cache_init_master()
1243 if (!dc->line_len) in arc_cache_init_master()
1244 panic("cache support enabled but non-existent cache\n"); in arc_cache_init_master()
1246 if (dc->line_len != L1_CACHE_BYTES) in arc_cache_init_master()
1248 dc->line_len, L1_CACHE_BYTES); in arc_cache_init_master()
1250 /* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */ in arc_cache_init_master()
1253 int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE); in arc_cache_init_master()
1255 if (dc->alias) { in arc_cache_init_master()
1260 } else if (!dc->alias && handled) { in arc_cache_init_master()
1295 * called at all for devices using coherent DMA. in arc_cache_init_master()
1296 * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*() in arc_cache_init_master()
1312 * And even if PAE is not enabled in kernel, the upper 32-bits still need in arc_cache_init()