
Searched full:accesses (Results 1 – 25 of 2712) sorted by relevance


/external/llvm/lib/Transforms/Utils/
MemorySSA.cpp 91 /// disambiguate accesses.
185 AccessList *Accesses = It->second.get(); in renameBlock() local
186 for (MemoryAccess &L : *Accesses) { in renameBlock()
212 AccessList *Accesses = It->second.get(); in renameBlock() local
213 auto *Phi = cast<MemoryPhi>(&Accesses->front()); in renameBlock()
222 /// We walk the dominator tree in preorder, renaming accesses, and then filling
256 /// \brief This handles unreachable block accesses by deleting phi nodes in
274 AccessList *Accesses = It->second.get(); in markUnreachableAsLiveOnEntry() local
275 auto *Phi = cast<MemoryPhi>(&Accesses->front()); in markUnreachableAsLiveOnEntry()
283 auto &Accesses = It->second; in markUnreachableAsLiveOnEntry() local
[all …]
/external/linux-kselftest/tools/testing/selftests/bpf/progs/
user_ringbuf_fail.c 32 /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
54 /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
73 /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
92 /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
113 /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
132 /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
151 /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
168 /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
/external/perfetto/src/trace_processor/perfetto_sql/stdlib/wattson/
arm_dsu.sql 16 -- Converts event counter from count to rate (num of accesses per ns).
23 -- Rate of event accesses in a section (i.e. count / dur).
32 -- accesses in a given duration can be calculated by multiplying the appropriate
40 -- The rate of L3 accesses for each time slice based on the ARM DSU PMU
41 -- counter's l3d_cache event. Units will be in number of DDR accesses per ns.
42 -- The number of accesses in a given duration can be calculated by multiplying
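The arm_dsu.sql comments above reduce to a simple pair of relations (restated here for clarity; the names count, dur, and slice_dur are illustrative, not columns quoted from the file):

    rate = count / dur              -- event accesses per ns within a section
    accesses ≈ rate * slice_dur     -- estimated accesses in a given time slice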
/external/gemmlowp/standalone/
cache_counters.cc 143 void Workload(int accesses, int size, std::uint8_t* buf) { in Workload() argument
186 "subs %w[accesses], %w[accesses], #1\n" in Workload()
188 : [ accesses ] "+r"(accesses), [ dummy ] "+r"(dummy) in Workload()
193 void MeasureCacheCounts(int accesses, int size, std::uint8_t* buf, in MeasureCacheCounts() argument
227 Workload(accesses, size, buf); in MeasureCacheCounts()
242 // How many accesses were recorded, total? The other fields must sum to that.
244 // How many accesses were serviced with the typical cost of a L1 cache hit?
246 // How many accesses were serviced with the typical cost of a L2 cache hit?
248 // How many accesses were serviced with the typical cost of a L3 cache hit?
250 // How many accesses were serviced with the typical cost of a DRAM access?
[all …]
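The cache_counters.cc field comments above pin down one invariant: the per-level counts must add up to the total. Schematically (field names here are illustrative placeholders, not the actual struct members):

    total_accesses == l1_hits + l2_hits + l3_hits + dram_accesses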
/external/swiftshader/third_party/llvm-16.0/llvm/include/llvm/Analysis/
LoopAccessAnalysis.h 53 /// Checks memory dependences among accesses to the same underlying
63 /// on the program order of memory accesses to determine their safety.
64 /// At the moment we will only deem accesses as safe for:
85 /// * Zero distances and all accesses have the same size.
91 /// Set of potential dependent memory accesses.
183 /// Check whether the dependencies between the accesses are safe.
202 /// the accesses safely with.
249 /// Returns an empty ArrayRef if there are no accesses for the location.
251 auto I = Accesses.find({Ptr, IsWrite}); in getOrderForAccess()
252 if (I != Accesses.end()) in getOrderForAccess()
[all …]
/external/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Analysis/
LoopAccessAnalysis.h 58 /// Checks memory dependences among accesses to the same underlying
68 /// on the program order of memory accesses to determine their safety.
69 /// At the moment we will only deem accesses as safe for:
90 /// * Zero distances and all accesses have the same size.
96 /// Set of potential dependent memory accesses.
186 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx); in addAccess()
195 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx); in addAccess()
200 /// Check whether the dependencies between the accesses are safe.
213 /// the accesses safely with.
268 DenseMap<MemAccessInfo, std::vector<unsigned> > Accesses; variable
[all …]
/external/cronet/base/timer/
timer.cc 59 // No more member accesses here: |this| could be deleted after Stop() call. in Stop()
141 // No more member accesses here: |this| could be deleted at this point. in OnScheduledTaskInvoked()
168 // No more member accesses here: |this| could be deleted after freeing in OnStop()
179 // No more member accesses here: |this| could be deleted at this point. in RunUserTask()
218 // No more member accesses here: |this| could be deleted at this point. in RunUserTask()
257 // No more member accesses here: |this| could be deleted at this point. in RunUserTask()
280 // No more member accesses here: |this| could be deleted after freeing in OnStop()
306 // No more member accesses here: |this| could be deleted at this point. in OnScheduledTaskInvoked()
336 // No more member accesses here: |this| could be deleted after freeing in OnStop()
375 // No more member accesses here: |this| could be deleted at this point. in OnScheduledTaskInvoked()
/external/llvm/lib/Analysis/
LoopAccessAnalysis.cpp 30 #define DEBUG_TYPE "loop-accesses"
71 /// accesses in code like the following.
288 // the accesses are safe. in groupChecks()
300 // accesses to the same underlying object. This cannot happen unless in groupChecks()
326 // and add them to the overall solution. We use the order in which accesses in groupChecks()
340 // Because DepCands is constructed by visiting accesses in the order in in groupChecks()
433 OS.indent(Depth) << "Grouped accesses:\n"; in print()
448 /// \brief Analyses memory accesses in a loop.
468 Accesses.insert(MemAccessInfo(Ptr, false)); in addLoad()
477 Accesses.insert(MemAccessInfo(Ptr, true)); in addStore()
[all …]
/external/kernel-headers/original/uapi/asm-arm/asm/
byteorder.h 6 * that byte accesses appear as:
8 * and word accesses (data or instruction) appear as:
11 * When in big endian mode, byte accesses appear as:
13 * and word accesses (data or instruction) appear as:
swab.h 6 * that byte accesses appear as:
8 * and word accesses (data or instruction) appear as:
11 * When in big endian mode, byte accesses appear as:
13 * and word accesses (data or instruction) appear as:
/external/llvm/include/llvm/Analysis/
LoopAccessAnalysis.h 90 /// \brief Checks memory dependences among accesses to the same underlying
100 /// on the program order of memory accesses to determine their safety.
101 /// At the moment we will only deem accesses as safe for:
122 /// * Zero distances and all accesses have the same size.
128 /// \brief Set of potential dependent memory accesses.
205 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx); in addAccess()
214 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx); in addAccess()
219 /// \brief Check whether the dependencies between the accesses are safe.
230 /// the accesses safely with.
278 DenseMap<MemAccessInfo, std::vector<unsigned> > Accesses; variable
[all …]
/external/compiler-rt/lib/tsan/rtl/
tsan_flags.inc 42 "Report races between atomic and plain memory accesses.")
66 "Per-thread history size, controls how many previous memory accesses "
68 "history_size=0 amounts to 32K memory accesses. Each next value doubles "
69 "the amount of memory accesses, up to history_size=7 that amounts to "
70 "4M memory accesses. The default value is 2 (128K memory accesses).")
/external/swiftshader/third_party/llvm-16.0/llvm/lib/Analysis/
LoopAccessAnalysis.cpp 72 #define DEBUG_TYPE "loop-accesses"
113 /// accesses in code like the following.
445 // the accesses are safe. in groupChecks()
457 // accesses to the same underlying object. This cannot happen unless in groupChecks()
485 // and add them to the overall solution. We use the order in which accesses in groupChecks()
499 // Because DepCands is constructed by visiting accesses in the order in in groupChecks()
596 OS.indent(Depth) << "Grouped accesses:\n"; in print()
612 /// Analyses memory accesses in a loop.
634 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy); in addLoad()
643 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy); in addStore()
[all …]
MemorySSA.cpp 478 // We do have accesses that claim they're optimized, but could be optimized in checkClobberSanity()
1012 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
1106 AccessList *Accesses = It->second.get(); in renameSuccessorPhis() local
1107 auto *Phi = cast<MemoryPhi>(&Accesses->front()); in renameSuccessorPhis()
1130 AccessList *Accesses = It->second.get(); in renameBlock() local
1131 for (MemoryAccess &L : *Accesses) { in renameBlock()
1147 /// We walk the dominator tree in preorder, renaming accesses, and then filling
1152 assert(Root && "Trying to rename accesses in an unreachable block"); in renamePass()
1196 /// This handles unreachable block accesses by deleting phi nodes in
1214 AccessList *Accesses = It->second.get(); in markUnreachableAsLiveOnEntry() local
[all …]
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Analysis/
LoopAccessAnalysis.cpp 72 #define DEBUG_TYPE "loop-accesses"
113 /// accesses in code like the following.
332 // the accesses are safe. in groupChecks()
344 // accesses to the same underlying object. This cannot happen unless in groupChecks()
370 // and add them to the overall solution. We use the order in which accesses in groupChecks()
384 // Because DepCands is constructed by visiting accesses in the order in in groupChecks()
477 OS.indent(Depth) << "Grouped accesses:\n"; in print()
493 /// Analyses memory accesses in a loop.
513 Accesses.insert(MemAccessInfo(Ptr, false)); in addLoad()
522 Accesses.insert(MemAccessInfo(Ptr, true)); in addStore()
[all …]
MemorySSA.cpp 477 // We do have accesses that claim they're optimized, but could be optimized in checkClobberSanity()
1010 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
1095 AccessList *Accesses = It->second.get(); in renameSuccessorPhis() local
1096 auto *Phi = cast<MemoryPhi>(&Accesses->front()); in renameSuccessorPhis()
1119 AccessList *Accesses = It->second.get(); in renameBlock() local
1120 for (MemoryAccess &L : *Accesses) { in renameBlock()
1136 /// We walk the dominator tree in preorder, renaming accesses, and then filling
1141 assert(Root && "Trying to rename accesses in an unreachable block"); in renamePass()
1185 /// This handles unreachable block accesses by deleting phi nodes in
1203 AccessList *Accesses = It->second.get(); in markUnreachableAsLiveOnEntry() local
[all …]
/external/rust/crates/pin-project-lite/tests/ui/pin_project/
packed.stderr 13 …ructs are only aligned by one byte, and many modern architectures penalize unaligned field accesses
30 …ructs are only aligned by one byte, and many modern architectures penalize unaligned field accesses
47 …ructs are only aligned by one byte, and many modern architectures penalize unaligned field accesses
64 …ructs are only aligned by one byte, and many modern architectures penalize unaligned field accesses
81 …ructs are only aligned by one byte, and many modern architectures penalize unaligned field accesses
98 …ructs are only aligned by one byte, and many modern architectures penalize unaligned field accesses
/external/vixl/doc/aarch64/topics/
state-trace.md 5 execution, register contents, and memory accesses. The trace is designed to be
29 these represent the register state updates and memory accesses that occurred
46 literal separator (`'`) character. Refer to the "Memory accesses" section
139 Memory accesses
149 Accesses shown in this style are always contiguous, and with little-endian
151 access trace, particularly if the instruction performs non-contiguous accesses.
153 In the case of simple accesses, the `VALUE` is shared with register value trace:
169 (for contiguous accesses) the lowest address ends up at the bottom:
/external/llvm/test/CodeGen/SystemZ/
unaligned-01.ll 1 ; Check that unaligned accesses are allowed in general. We check the
25 ; Check that unaligned 2-byte accesses are allowed.
36 ; Check that unaligned 4-byte accesses are allowed.
50 ; Check that unaligned 8-byte accesses are allowed.
/external/trusty/arm-trusted-firmware/lib/extensions/sys_reg_trace/aarch64/
Dsys_reg_trace.c16 * CPTR_EL3.TTA: Set to zero so that System register accesses to the in sys_reg_trace_enable_per_world()
27 * CPTR_EL3.TTA: Set to one so that System register accesses to the in sys_reg_trace_disable_per_world()
39 * CPTR_EL2.TTA: Set to zero so that Non-secure System register accesses in sys_reg_trace_init_el2_unused()
/external/angle/src/compiler/translator/msl/
AstHelpers.h 75 // Accesses a field for the given node with the given field name.
79 // Accesses a field for the given node with the given field name.
83 // Accesses a field for the given node by its field index.
87 // Accesses an element by index for the given node.
91 // Accesses an element by index for the given node if `index` is non-null.
/external/llvm/test/CodeGen/X86/
slow-unaligned-mem.ll 1 ; Intel chips with slow unaligned memory accesses
15 ; Intel chips with fast unaligned memory accesses
27 ; AMD chips with slow unaligned memory accesses
39 ; AMD chips with fast unaligned memory accesses
50 ; Other chips with slow unaligned memory accesses
58 ; Also verify that SSE4.2 or SSE4a imply fast unaligned accesses.
/external/mesa3d/src/broadcom/common/
v3d_performance_counters.h 62 {"TMU", "TMU-total-text-quads-access", "[TMU] Total texture cache accesses"},
160 {"TMU", "TMU-total-text-quads-access", "[TMU] Total texture cache accesses"},
185 {"TMU", "TMU-total-config-access", "[TMU] Total config accesses"},
188 {"L2T", "L2T-TMU-writes", "[L2T] TMU write accesses"},
192 {"L2T", "L2T-TMU-reads", "[L2T] TMU read accesses"},
193 {"L2T", "L2T-CLE-reads", "[L2T] CLE read accesses"},
194 {"L2T", "L2T-VCD-reads", "[L2T] VCD read accesses"},
195 {"L2T", "L2T-TMU-config-reads", "[L2T] TMU CFG read accesses"},
196 {"L2T", "L2T-SLC0-reads", "[L2T] SLC0 read accesses"},
197 {"L2T", "L2T-SLC1-reads", "[L2T] SLC1 read accesses"},
[all …]
/external/sdv/vsomeip/third_party/boost/thread/doc/
synchronized_value.qbk 41 … a lock, and yet access semantics are still straightforward. For simple accesses, synchronized_val…
62 [section Beyond Simple Accesses]
64 …mple accesses such as assignment and calls to member functions. However, sometimes you need to per…
78 [endsect] [/Beyond Simple Accesses]
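The synchronized_value.qbk hits cover both the simple and the multi-step access styles. A minimal C++ sketch of what they describe, assuming Boost.Thread's boost::synchronized_value as documented in that file (the string example itself is illustrative, not taken from the search results):

    #include <boost/thread/synchronized_value.hpp>
    #include <iostream>
    #include <string>

    // A shared string whose mutex is bundled with the value itself.
    boost::synchronized_value<std::string> shared_text;

    int main() {
      // Simple accesses: assignment and single member calls lock internally.
      shared_text = "simple";
      shared_text->append(" access");

      // Beyond simple accesses: synchronize() returns a strict_lock_ptr that
      // holds the lock across the whole multi-step update.
      {
        auto locked = shared_text.synchronize();
        if (!locked->empty())
          *locked += "!";
      }

      std::cout << *shared_text.synchronize() << "\n";
      return 0;
    }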
/external/okio/okio/src/commonMain/kotlin/okio/
ForwardingFileSystem.kt 50 * You can extend this to verify which files your program accesses. This is a testing file system
51 * that records accesses as they happen:
115 * Subclasses may override this to log accesses, fail on unexpected accesses, or map paths across
134 * Subclasses may override this to log accesses, fail on unexpected path accesses, or map in onPathParameter()
