Searched full:pinned (Results 1 – 25 of 958) sorted by relevance

/kernel/linux/linux-5.10/kernel/events/
hw_breakpoint.c 41 /* Number of pinned cpu breakpoints in a cpu */
45 /* Number of non-pinned cpu/task breakpoints in a cpu */
62 /* Gather the number of total pinned and un-pinned bp in a cpuset */
64 unsigned int pinned; member
85 * Report the maximum number of pinned breakpoints a task
129 * Report the number of pinned/un-pinned breakpoints we have in
149 if (nr > slots->pinned) in fetch_bp_busy_slots()
150 slots->pinned = nr; in fetch_bp_busy_slots()
159 * For now, continue to consider flexible as pinned, until we can
160 * ensure no flexible event can ever be scheduled before a pinned event
[all …]
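
The hw_breakpoint.c hits above are the kernel-side slot accounting that decides whether a pinned breakpoint request fits on a CPU. For orientation, a minimal user-space sketch of the kind of request that accounting serves, opened via perf_event_open() with attr.pinned set; the watched variable and the error handling are illustrative, not taken from the source:

/*
 * Sketch: requesting a pinned hardware breakpoint from user space.
 * The slot accounting in hw_breakpoint.c above is what decides whether
 * this request can be satisfied on the CPU.
 */
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long my_var;	/* illustrative: the variable we pretend to watch */

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type    = PERF_TYPE_BREAKPOINT;
	attr.size    = sizeof(attr);
	attr.bp_type = HW_BREAKPOINT_W;		/* break on write */
	attr.bp_addr = (unsigned long)&my_var;
	attr.bp_len  = HW_BREAKPOINT_LEN_8;
	attr.pinned  = 1;			/* must always be on the PMU */

	int fd = syscall(__NR_perf_event_open, &attr, 0 /* this task */,
			 -1 /* any CPU */, -1 /* no group */, 0);
	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);
	return 0;
}
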
/kernel/linux/linux-6.6/drivers/infiniband/hw/hfi1/
pin_system.c 119 int pinned, cleared; in pin_system_pages() local
138 pinned = hfi1_acquire_user_pages(current->mm, start_address, npages, 0, in pin_system_pages()
141 if (pinned < 0) { in pin_system_pages()
143 SDMA_DBG(req, "pinned %d", pinned); in pin_system_pages()
144 return pinned; in pin_system_pages()
146 if (pinned != npages) { in pin_system_pages()
147 unpin_vector_pages(current->mm, pages, node->npages, pinned); in pin_system_pages()
148 SDMA_DBG(req, "npages %u pinned %d", npages, pinned); in pin_system_pages()
155 atomic_add(pinned, &pq->n_locked); in pin_system_pages()
156 SDMA_DBG(req, "done. pinned %d", pinned); in pin_system_pages()
[all …]
/kernel/linux/linux-6.6/drivers/infiniband/core/
umem.c 140 * @access: IB_ACCESS_xxx flags for memory being pinned
153 int pinned, ret; in ib_umem_get() local
213 pinned = pin_user_pages_fast(cur_base, in ib_umem_get()
218 if (pinned < 0) { in ib_umem_get()
219 ret = pinned; in ib_umem_get()
223 cur_base += pinned * PAGE_SIZE; in ib_umem_get()
224 npages -= pinned; in ib_umem_get()
226 &umem->sgt_append, page_list, pinned, 0, in ib_umem_get()
227 pinned << PAGE_SHIFT, ib_dma_max_seg_size(device), in ib_umem_get()
230 unpin_user_pages_dirty_lock(page_list, pinned, 0); in ib_umem_get()
[all …]
/kernel/linux/linux-5.10/Documentation/core-api/
pin_user_pages.rst 35 In other words, use pin_user_pages*() for DMA-pinned pages, and
90 Tracking dma-pinned pages
93 Some of the key design constraints, and solutions, for tracking dma-pinned
99 * False positives (reporting that a page is dma-pinned, when in fact it is not)
106 the upper bits in that field for a dma-pinned count. "Sort of", means that,
110 on it 1024 times, then it will appear to have a single dma-pinned count.
116 * Callers must specifically request "dma-pinned tracking of pages". In other
147 NOTE: Some pages, such as DAX pages, cannot be pinned with longterm pins. That's
195 The whole point of marking pages as "DMA-pinned" or "gup-pinned" is to be able
196 to query, "is this page DMA-pinned?" That allows code such as page_mkclean()
[all …]
/kernel/linux/linux-6.6/Documentation/core-api/
pin_user_pages.rst 35 In other words, use pin_user_pages*() for DMA-pinned pages, and
89 Tracking dma-pinned pages
92 Some of the key design constraints, and solutions, for tracking dma-pinned
98 * False positives (reporting that a page is dma-pinned, when in fact it is not)
105 the upper bits in that field for a dma-pinned count. "Sort of", means that,
109 on it 1024 times, then it will appear to have a single dma-pinned count.
121 * Callers must specifically request "dma-pinned tracking of pages". In other
152 NOTE: Some pages, such as DAX pages, cannot be pinned with longterm pins. That's
200 The whole point of marking pages as "DMA-pinned" or "gup-pinned" is to be able
201 to query, "is this page DMA-pinned?" That allows code such as page_mkclean()
[all …]
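
The pin_user_pages.rst excerpts above (present in both the 5.10 and 6.6 trees) describe the dma-pinned tracking rules in prose. A minimal kernel-side sketch of the usage pattern they document, modelled on the pin_user_pages_fast() callers that also appear in these results (dfl-afu-dma-region.c, umem.c); the function names and error handling are illustrative, only the pin/unpin calls are the real API:

/*
 * Sketch of the pin/unpin pattern described in pin_user_pages.rst.
 * example_pin_region()/example_unpin_region() are hypothetical names.
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>

static int example_pin_region(unsigned long user_addr, int npages,
			      struct page ***pages_out)
{
	struct page **pages;
	int pinned;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* FOLL_WRITE | FOLL_LONGTERM marks the pages as dma-pinned. */
	pinned = pin_user_pages_fast(user_addr, npages,
				     FOLL_WRITE | FOLL_LONGTERM, pages);
	if (pinned < 0) {
		kfree(pages);
		return pinned;			/* hard error */
	}
	if (pinned != npages) {
		/* Partial pin: release what we got and bail out. */
		unpin_user_pages(pages, pinned);
		kfree(pages);
		return -EFAULT;
	}

	*pages_out = pages;
	return 0;
}

/* When the DMA is done, drop the pins and free the array. */
static void example_unpin_region(struct page **pages, int npages)
{
	unpin_user_pages_dirty_lock(pages, npages, true);
	kfree(pages);
}
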
/kernel/linux/linux-6.6/rust/kernel/init/
macros.rs 72 //! This macro is used to specify which fields are structurally pinned and which fields are not. It
153 //! // Now we only want to implement `Unpin` for `Bar` when every structurally pinned field is
154 //! // `Unpin`. In other words, whether `Bar` is `Unpin` only depends on structurally pinned
178 //! // access to `&mut self` inside of `drop` even if the struct was pinned. This could lead to
394 //! let pinned = unsafe { ::core::pin::Pin::new_unchecked(self) };
398 //! ::kernel::init::PinnedDrop::drop(pinned, token);
526 /// This macro first parses the struct definition such that it separates pinned and not pinned
544 // identify fields marked with `#[pin]`, these fields are the 'pinned fields'. The user
545 // wants these to be structurally pinned. The rest of the fields are the
546 // 'not pinned fields'. Additionally we collect all fields, since we need them in the right
[all …]
/kernel/linux/linux-5.10/tools/testing/selftests/bpf/
test_xdp_veth.sh 103 bpftool map update pinned $BPF_DIR/maps/tx_port key 0 0 0 0 value 122 0 0 0
104 bpftool map update pinned $BPF_DIR/maps/tx_port key 1 0 0 0 value 133 0 0 0
105 bpftool map update pinned $BPF_DIR/maps/tx_port key 2 0 0 0 value 111 0 0 0
106 ip link set dev veth1 xdp pinned $BPF_DIR/progs/redirect_map_0
107 ip link set dev veth2 xdp pinned $BPF_DIR/progs/redirect_map_1
108 ip link set dev veth3 xdp pinned $BPF_DIR/progs/redirect_map_2
/kernel/linux/linux-6.6/tools/testing/selftests/bpf/
test_xdp_veth.sh 106 bpftool map update pinned $BPF_DIR/maps/tx_port key 0 0 0 0 value 122 0 0 0
107 bpftool map update pinned $BPF_DIR/maps/tx_port key 1 0 0 0 value 133 0 0 0
108 bpftool map update pinned $BPF_DIR/maps/tx_port key 2 0 0 0 value 111 0 0 0
109 ip link set dev veth1 xdp pinned $BPF_DIR/progs/xdp_redirect_map_0
110 ip link set dev veth2 xdp pinned $BPF_DIR/progs/xdp_redirect_map_1
111 ip link set dev veth3 xdp pinned $BPF_DIR/progs/xdp_redirect_map_2
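
The two test_xdp_veth.sh excerpts drive pinned maps and programs through bpftool and ip. The same operations can be performed against the pinned objects from C with libbpf; the sketch below assumes a recent libbpf that provides bpf_xdp_attach(), and the pin paths are hypothetical stand-ins for the script's $BPF_DIR:

/*
 * Sketch: operating on BPF objects pinned in the BPF filesystem.
 * Pin paths are placeholders for the script's $BPF_DIR.
 */
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <net/if.h>
#include <stdio.h>

int main(void)
{
	__u32 key = 0, value = 122;	/* same key/value as the script */
	int map_fd, prog_fd, ifindex;

	/* Open objects that were previously pinned in the BPF filesystem. */
	map_fd  = bpf_obj_get("/sys/fs/bpf/example/maps/tx_port");
	prog_fd = bpf_obj_get("/sys/fs/bpf/example/progs/xdp_redirect_map_0");
	if (map_fd < 0 || prog_fd < 0) {
		fprintf(stderr, "failed to open pinned objects\n");
		return 1;
	}

	/* bpftool map update pinned ... key 0 0 0 0 value 122 0 0 0 */
	if (bpf_map_update_elem(map_fd, &key, &value, BPF_ANY))
		return 1;

	/* ip link set dev veth1 xdp pinned .../xdp_redirect_map_0 */
	ifindex = if_nametoindex("veth1");
	if (!ifindex || bpf_xdp_attach(ifindex, prog_fd, 0, NULL))
		return 1;

	return 0;
}
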
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/
i915_gem_userptr.c 448 unsigned long pinned; in __i915_gem_userptr_get_pages_worker() local
453 pinned = 0; in __i915_gem_userptr_get_pages_worker()
466 while (pinned < npages) { in __i915_gem_userptr_get_pages_worker()
473 obj->userptr.ptr + pinned * PAGE_SIZE, in __i915_gem_userptr_get_pages_worker()
474 npages - pinned, in __i915_gem_userptr_get_pages_worker()
476 pvec + pinned, NULL, &locked); in __i915_gem_userptr_get_pages_worker()
480 pinned += ret; in __i915_gem_userptr_get_pages_worker()
492 if (pinned == npages) { in __i915_gem_userptr_get_pages_worker()
496 pinned = 0; in __i915_gem_userptr_get_pages_worker()
507 unpin_user_pages(pvec, pinned); in __i915_gem_userptr_get_pages_worker()
[all …]
/kernel/linux/linux-5.10/Documentation/powerpc/
pmu-ebb.rst 53 existing "pinned" and "exclusive" attributes of perf_events. This means EBB
54 events will be given priority over other events, unless they are also pinned.
55 If an EBB event and a regular event are both pinned, then whichever is enabled
70 An EBB event must be created with the "pinned" and "exclusive" attributes set.
100 This behaviour occurs because the EBB event is pinned and exclusive. When the
101 EBB event is enabled it will force all other non-pinned events off the PMU. In
103 pinned on the PMU then the enable will not be successful.
/kernel/linux/linux-6.6/Documentation/powerpc/
pmu-ebb.rst 53 existing "pinned" and "exclusive" attributes of perf_events. This means EBB
54 events will be given priority over other events, unless they are also pinned.
55 If an EBB event and a regular event are both pinned, then whichever is enabled
70 An EBB event must be created with the "pinned" and "exclusive" attributes set.
100 This behaviour occurs because the EBB event is pinned and exclusive. When the
101 EBB event is enabled it will force all other non-pinned events off the PMU. In
103 pinned on the PMU then the enable will not be successful.
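
The pmu-ebb.rst hits state that an EBB event must be opened with both the "pinned" and "exclusive" attributes set. A minimal sketch of just those attribute bits; the raw config value is a placeholder, since the PMU-specific EBB marker bit is not shown in these results:

/*
 * Sketch: a perf event opened with both "pinned" and "exclusive" set,
 * so it owns the PMU whenever it is scheduled.
 */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_pinned_exclusive_event(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type      = PERF_TYPE_RAW;
	attr.size      = sizeof(attr);
	attr.config    = 0;	/* placeholder: PMU-specific event code + EBB bit */
	attr.pinned    = 1;	/* always scheduled, or put in error state */
	attr.exclusive = 1;	/* no other events on the PMU at the same time */

	/* Bind to the current task, any CPU, no group leader. */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
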
/kernel/linux/linux-6.6/tools/bpf/bpftool/Documentation/
bpftool-map.rst 47 | *MAP* := { **id** *MAP_ID* | **pinned** *FILE* | **name** *MAP_NAME* }
49 | *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* | **name** *PROG_NAME* }
172 Show file names of pinned maps.
222 | **# bpftool map del pinned /sys/fs/bpf/map key 13 00 07 00**
230 processing. Note that the prog array map MUST be pinned into the BPF virtual
244 pinned /sys/fs/bpf/foo/xdp
248 pinned /sys/fs/bpf/foo/process
252 pinned /sys/fs/bpf/foo/debug
264 | **# bpftool map dump pinned /sys/fs/bpf/bar**
271 | **# bpftool map update pinned /sys/fs/bpf/bar key 0 0 0 0 value pinned /sys/fs/bpf/foo/debug**
[all …]
/kernel/linux/linux-5.10/tools/bpf/bpftool/Documentation/
bpftool-map.rst 43 | *MAP* := { **id** *MAP_ID* | **pinned** *FILE* | **name** *MAP_NAME* }
45 | *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* | **name** *PROG_NAME* }
167 Show file names of pinned maps.
217 | **# bpftool map del pinned /sys/fs/bpf/map key 13 00 07 00**
225 processing. Note that the prog array map MUST be pinned into the BPF virtual
239 pinned /sys/fs/bpf/foo/xdp
243 pinned /sys/fs/bpf/foo/process
247 pinned /sys/fs/bpf/foo/debug
259 | **# bpftool map dump pinned /sys/fs/bpf/bar**
266 | **# bpftool map update pinned /sys/fs/bpf/bar key 0 0 0 0 value pinned /sys/fs/bpf/foo/debug**
[all …]
/kernel/linux/linux-5.10/fs/btrfs/
inode-map.c 219 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; in btrfs_return_ino() local
225 __btrfs_add_free_space(fs_info, pinned, objectid, 1, 0); in btrfs_return_ino()
238 __btrfs_add_free_space(fs_info, pinned, objectid, 1, 0); in btrfs_return_ino()
246 * smaller than root->ino_cache_progress from pinned tree to free_ino tree, and
357 * - The pinned tree is only used during the process of caching in pinned_use_bitmap()
372 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; in btrfs_init_free_ino_ctl() local
389 spin_lock_init(&pinned->tree_lock); in btrfs_init_free_ino_ctl()
390 pinned->unit = 1; in btrfs_init_free_ino_ctl()
391 pinned->start = 0; in btrfs_init_free_ino_ctl()
392 pinned->private = NULL; in btrfs_init_free_ino_ctl()
[all …]
/kernel/linux/linux-6.6/drivers/vfio/
iova_bitmap.c 21 * subset of said IOVA space that is pinned by its parent structure (struct
41 /* page offset of the first user page pinned */
44 /* number of pages pinned */
47 /* pinned pages representing the bitmap data */
60 * The bitmap object uses one base page to store all the pinned pages
63 * 2M of bitmap data is pinned at a time. If the iova_bitmap page size is
179 * Bitmap address to be pinned is calculated via pointer arithmetic in iova_bitmap_get()
201 * offset of the page where pinned pages bit 0 is located. in iova_bitmap_get()
296 * the currently pinned bitmap pages.
323 * pinned data can cover. Afterwards, that is capped to in iova_bitmap_mapped_length()
[all …]
/kernel/linux/linux-5.10/tools/bpf/bpftool/bash-completion/
bpftool 281 file|pinned)
344 local PROG_TYPE='id pinned tag name'
345 local MAP_TYPE='id pinned name'
400 pinned)
423 pinned)
449 COMPREPLY=( $( compgen -W "id pinned" -- "$cur" ) )
494 pinned|pinmaps)
524 pinned)
628 pinned)
645 local MAP_TYPE='id pinned name'
[all …]
/kernel/linux/linux-5.10/include/trace/events/
xen.h 287 TP_PROTO(struct mm_struct *mm, unsigned long pfn, unsigned level, bool pinned),
288 TP_ARGS(mm, pfn, level, pinned),
293 __field(bool, pinned)
298 __entry->pinned = pinned),
301 __entry->pinned ? "" : "un")
305 TP_PROTO(unsigned long pfn, unsigned level, bool pinned),
306 TP_ARGS(pfn, level, pinned),
310 __field(bool, pinned)
314 __entry->pinned = pinned),
317 __entry->pinned ? "" : "un")
/kernel/linux/linux-6.6/include/trace/events/
xen.h 287 TP_PROTO(struct mm_struct *mm, unsigned long pfn, unsigned level, bool pinned),
288 TP_ARGS(mm, pfn, level, pinned),
293 __field(bool, pinned)
298 __entry->pinned = pinned),
301 __entry->pinned ? "" : "un")
305 TP_PROTO(unsigned long pfn, unsigned level, bool pinned),
306 TP_ARGS(pfn, level, pinned),
310 __field(bool, pinned)
314 __entry->pinned = pinned),
317 __entry->pinned ? "" : "un")
/kernel/linux/linux-5.10/arch/powerpc/platforms/8xx/
Kconfig 169 bool "Pinned Kernel TLBs"
173 table 4 TLBs can be pinned.
182 bool "Pinned TLB for DATA"
189 bool "Pinned TLB for IMMR"
198 bool "Pinned TLB for TEXT"
/kernel/linux/linux-6.6/kernel/events/
hw_breakpoint.c 54 /* Number of pinned CPU breakpoints in a CPU. */
56 /* Histogram of pinned task breakpoints in a CPU. */
67 /* Number of pinned CPU breakpoints globally. */
69 /* Number of pinned CPU-independent task breakpoints. */
300 * Return the maximum number of pinned breakpoints a task has in this CPU.
366 * Returns the max pinned breakpoint slots in a given
418 * Update the pinned CPU slots, in per-CPU bp_cpuinfo and in the in toggle_bp_slot()
439 * Update the pinned task slots, in per-CPU bp_cpuinfo and in the global in toggle_bp_slot()
485 /* Add this first CPU-pinned task breakpoint. */ in toggle_bp_slot()
488 /* Rebalance global task pinned histogram. */ in toggle_bp_slot()
[all …]
/kernel/linux/linux-6.6/tools/bpf/bpftool/bash-completion/
bpftool 285 file|pinned|-B|--base-btf)
349 local PROG_TYPE='id pinned tag name'
350 local MAP_TYPE='id pinned name'
409 pinned)
434 pinned)
460 COMPREPLY=( $( compgen -W "id pinned" -- "$cur" ) )
505 pinned|pinmaps)
534 pinned)
638 pinned)
655 local MAP_TYPE='id pinned name'
[all …]
/kernel/linux/linux-5.10/drivers/fpga/
dfl-afu-dma-region.c 29 * @region: dma memory region to be pinned
39 int ret, pinned; in afu_dma_pin_pages() local
51 pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE, in afu_dma_pin_pages()
53 if (pinned < 0) { in afu_dma_pin_pages()
54 ret = pinned; in afu_dma_pin_pages()
56 } else if (pinned != npages) { in afu_dma_pin_pages()
61 dev_dbg(dev, "%d pages pinned\n", pinned); in afu_dma_pin_pages()
66 unpin_user_pages(region->pages, pinned); in afu_dma_pin_pages()
/kernel/linux/linux-6.6/drivers/fpga/
dfl-afu-dma-region.c 29 * @region: dma memory region to be pinned
39 int ret, pinned; in afu_dma_pin_pages() local
51 pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE, in afu_dma_pin_pages()
53 if (pinned < 0) { in afu_dma_pin_pages()
54 ret = pinned; in afu_dma_pin_pages()
56 } else if (pinned != npages) { in afu_dma_pin_pages()
61 dev_dbg(dev, "%d pages pinned\n", pinned); in afu_dma_pin_pages()
66 unpin_user_pages(region->pages, pinned); in afu_dma_pin_pages()
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/
intel_gt_buffer_pool.c 107 if (node->pinned) { in pool_retire()
112 node->pinned = false; in pool_retire()
129 if (node->pinned) in intel_gt_buffer_pool_mark_used()
133 /* Hide this pinned object from the shrinker until retired */ in intel_gt_buffer_pool_mark_used()
135 node->pinned = true; in intel_gt_buffer_pool_mark_used()
153 node->pinned = false; in node_create()
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/
intel_context.h 45 * intel_context_lock_pinned - Stablises the 'pinned' status of the HW context
48 * Acquire a lock on the pinned status of the HW context, such that the context
59 * intel_context_is_pinned - Reports the 'pinned' status
63 * tables is pinned into memory and the GTT.
65 * Returns: true if the context is currently pinned for use by the GPU.
74 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
