
Searched refs:memory (Results 1 – 25 of 2893) sorted by relevance


/kernel/linux/linux-5.10/tools/testing/selftests/memory-hotplug/
mem-on-off-test.sh
25 if ! ls $SYSFS/devices/system/memory/memory* > /dev/null 2>&1; then
26 echo $msg memory hotplug is not supported >&2
30 if ! grep -q 1 $SYSFS/devices/system/memory/memory*/removable; then
31 echo $msg no hot-pluggable memory >&2
43 for memory in $SYSFS/devices/system/memory/memory*; do
44 if grep -q 1 $memory/removable &&
45 grep -q $state $memory/state; then
46 echo ${memory##/*/memory}
63 grep -q online $SYSFS/devices/system/memory/memory$1/state
68 grep -q offline $SYSFS/devices/system/memory/memory$1/state
[all …]
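
The selftest excerpted above walks /sys/devices/system/memory looking for removable blocks and their online/offline state. As a standalone illustration, here is a minimal shell sketch of the same check (not the selftest itself; it assumes the standard sysfs layout, with SYSFS defaulting to /sys):

    #!/bin/bash
    # Sketch: list memory blocks that report themselves removable and online.
    SYSFS=${SYSFS:-/sys}

    if ! ls "$SYSFS"/devices/system/memory/memory* > /dev/null 2>&1; then
        echo "memory hotplug is not supported" >&2
        exit 1
    fi

    for memory in "$SYSFS"/devices/system/memory/memory*; do
        if grep -q 1 "$memory/removable" && grep -q online "$memory/state"; then
            echo "${memory##*/} is removable and online"
        fi
    done
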
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/core/
memory.c
30 nvkm_memory_tags_put(struct nvkm_memory *memory, struct nvkm_device *device, in nvkm_memory_tags_put() argument
39 kfree(memory->tags); in nvkm_memory_tags_put()
40 memory->tags = NULL; in nvkm_memory_tags_put()
48 nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device, in nvkm_memory_tags_get() argument
56 if ((tags = memory->tags)) { in nvkm_memory_tags_get()
94 *ptags = memory->tags = tags; in nvkm_memory_tags_get()
101 struct nvkm_memory *memory) in nvkm_memory_ctor() argument
103 memory->func = func; in nvkm_memory_ctor()
104 kref_init(&memory->kref); in nvkm_memory_ctor()
110 struct nvkm_memory *memory = container_of(kref, typeof(*memory), kref); in nvkm_memory_del() local
[all …]
/kernel/linux/linux-5.10/Documentation/admin-guide/mm/
memory-hotplug.rst
10 This document is about memory hotplug including how-to-use and current status.
18 (1) x86_64's has special implementation for memory hotplug.
26 Purpose of memory hotplug
29 Memory Hotplug allows users to increase/decrease the amount of memory.
32 (A) For changing the amount of memory.
38 hardware which supports memory power management.
40 Linux memory hotplug is designed for both purpose.
42 Phases of memory hotplug
51 environment for hotplugged memory. Basically, this phase is necessary
55 When memory is hotplugged, the kernel recognizes new memory, makes new memory
[all …]
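
The admin-guide document excerpted above separates the physical phase of memory hotplug (hardware and firmware make the memory visible) from the logical phase, in which each new memory block is brought online through sysfs. A hedged sketch of that logical step, with memory42 as a placeholder block name:

    # Sketch only: memory42 is a placeholder; pick a real block under
    # /sys/devices/system/memory/ and run as root.
    BLOCK=/sys/devices/system/memory/memory42

    cat "$BLOCK/state"             # current state: online or offline
    echo offline > "$BLOCK/state"  # request logical offlining
    echo online > "$BLOCK/state"   # bring the block back online
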
numaperf.rst
7 Some platforms may have multiple types of memory attached to a compute
8 node. These disparate memory ranges may share some characteristics, such
12 A system supports such heterogeneous memory by grouping each memory type
14 characteristics. Some memory may share the same node as a CPU, and others
15 are provided as memory only nodes. While memory only nodes do not provide
18 nodes with local memory and a memory only node for each of compute node::
29 A "memory initiator" is a node containing one or more devices such as
30 CPUs or separate memory I/O devices that can initiate memory requests.
31 A "memory target" is a node containing one or more physical address
32 ranges accessible from one or more memory initiators.
[all …]
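
numaperf.rst above introduces memory initiators and targets; when the platform describes them (for example via HMAT), the best-case performance attributes are exported under each node's access class directories in sysfs. A hedged sketch of reading them, with node0 and access0 as placeholders:

    # Sketch only: dump the access-class-0 initiator performance attributes
    # for node0, if the platform exported them.
    for f in /sys/devices/system/node/node0/access0/initiators/*_latency \
             /sys/devices/system/node/node0/access0/initiators/*_bandwidth; do
        [ -r "$f" ] && echo "$(basename "$f"): $(cat "$f")"
    done
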
concepts.rst
7 The memory management in Linux is a complex system that evolved over the
9 systems from MMU-less microcontrollers to supercomputers. The memory
21 The physical memory in a computer system is a limited resource and
22 even for systems that support memory hotplug there is a hard limit on
23 the amount of memory that can be installed. The physical memory is not
29 All this makes dealing directly with physical memory quite complex and
30 to avoid this complexity a concept of virtual memory was developed.
32 The virtual memory abstracts the details of physical memory from the
34 physical memory (demand paging) and provides a mechanism for the
37 With virtual memory, each and every memory access uses a virtual
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
mem.c
22 #define nvkm_mem(p) container_of((p), struct nvkm_mem, memory)
31 struct nvkm_memory memory; member
43 nvkm_mem_target(struct nvkm_memory *memory) in nvkm_mem_target() argument
45 return nvkm_mem(memory)->target; in nvkm_mem_target()
49 nvkm_mem_page(struct nvkm_memory *memory) in nvkm_mem_page() argument
55 nvkm_mem_addr(struct nvkm_memory *memory) in nvkm_mem_addr() argument
57 struct nvkm_mem *mem = nvkm_mem(memory); in nvkm_mem_addr()
64 nvkm_mem_size(struct nvkm_memory *memory) in nvkm_mem_size() argument
66 return nvkm_mem(memory)->pages << PAGE_SHIFT; in nvkm_mem_size()
70 nvkm_mem_map_dma(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm, in nvkm_mem_map_dma() argument
[all …]
/kernel/linux/linux-5.10/Documentation/ABI/testing/
sysfs-devices-memory
1 What: /sys/devices/system/memory
5 The /sys/devices/system/memory contains a snapshot of the
6 internal state of the kernel memory blocks. Files could be
9 Users: hotplug memory add/remove tools
12 What: /sys/devices/system/memory/memoryX/removable
16 The file /sys/devices/system/memory/memoryX/removable
17 indicates whether this memory block is removable or not.
19 identify removable sections of the memory before attempting
20 potentially expensive hot-remove memory operation
21 Users: hotplug memory remove tools
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
nv50.c
43 #define nv50_instobj(p) container_of((p), struct nv50_instobj, base.memory)
56 nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data) in nv50_instobj_wr32_slow() argument
58 struct nv50_instobj *iobj = nv50_instobj(memory); in nv50_instobj_wr32_slow()
75 nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset) in nv50_instobj_rd32_slow() argument
77 struct nv50_instobj *iobj = nv50_instobj(memory); in nv50_instobj_rd32_slow()
102 nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data) in nv50_instobj_wr32() argument
104 iowrite32_native(data, nv50_instobj(memory)->map + offset); in nv50_instobj_wr32()
108 nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset) in nv50_instobj_rd32() argument
110 return ioread32_native(nv50_instobj(memory)->map + offset); in nv50_instobj_rd32()
124 struct nvkm_memory *memory = &iobj->base.memory; in nv50_instobj_kmap() local
[all …]
base.c
34 struct nvkm_memory *memory = &iobj->memory; in nvkm_instobj_load() local
35 const u64 size = nvkm_memory_size(memory); in nvkm_instobj_load()
39 if (!(map = nvkm_kmap(memory))) { in nvkm_instobj_load()
41 nvkm_wo32(memory, i, iobj->suspend[i / 4]); in nvkm_instobj_load()
45 nvkm_done(memory); in nvkm_instobj_load()
54 struct nvkm_memory *memory = &iobj->memory; in nvkm_instobj_save() local
55 const u64 size = nvkm_memory_size(memory); in nvkm_instobj_save()
63 if (!(map = nvkm_kmap(memory))) { in nvkm_instobj_save()
65 iobj->suspend[i / 4] = nvkm_ro32(memory, i); in nvkm_instobj_save()
69 nvkm_done(memory); in nvkm_instobj_save()
[all …]
gk20a.c
52 struct nvkm_memory memory; member
59 #define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
116 gk20a_instobj_target(struct nvkm_memory *memory) in gk20a_instobj_target() argument
122 gk20a_instobj_page(struct nvkm_memory *memory) in gk20a_instobj_page() argument
128 gk20a_instobj_addr(struct nvkm_memory *memory) in gk20a_instobj_addr() argument
130 return (u64)gk20a_instobj(memory)->mn->offset << 12; in gk20a_instobj_addr()
134 gk20a_instobj_size(struct nvkm_memory *memory) in gk20a_instobj_size() argument
136 return (u64)gk20a_instobj(memory)->mn->length << 12; in gk20a_instobj_size()
151 imem->vaddr_use -= nvkm_memory_size(&obj->base.memory); in gk20a_instobj_iommu_recycle_vaddr()
174 gk20a_instobj_acquire_dma(struct nvkm_memory *memory) in gk20a_instobj_acquire_dma() argument
[all …]
nv04.c
37 #define nv04_instobj(p) container_of((p), struct nv04_instobj, base.memory)
46 nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data) in nv04_instobj_wr32() argument
48 struct nv04_instobj *iobj = nv04_instobj(memory); in nv04_instobj_wr32()
54 nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset) in nv04_instobj_rd32() argument
56 struct nv04_instobj *iobj = nv04_instobj(memory); in nv04_instobj_rd32()
68 nv04_instobj_release(struct nvkm_memory *memory) in nv04_instobj_release() argument
73 nv04_instobj_acquire(struct nvkm_memory *memory) in nv04_instobj_acquire() argument
75 struct nv04_instobj *iobj = nv04_instobj(memory); in nv04_instobj_acquire()
81 nv04_instobj_size(struct nvkm_memory *memory) in nv04_instobj_size() argument
83 return nv04_instobj(memory)->node->length; in nv04_instobj_size()
[all …]
/kernel/linux/linux-5.10/Documentation/admin-guide/cgroup-v1/
memory.rst
13 memory controller in this document. Do not confuse memory controller
14 used here with the memory controller that is used in hardware.
17 When we mention a cgroup (cgroupfs's directory) with memory controller,
18 we call it "memory cgroup". When you see git-log and source code, you'll
22 Benefits and Purpose of the memory controller
25 The memory controller isolates the memory behaviour of a group of tasks
27 uses of the memory controller. The memory controller can be used to
31 amount of memory.
32 b. Create a cgroup with a limited amount of memory; this can be used
34 c. Virtualization solutions can control the amount of memory they want
[all …]
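
The cgroup-v1 memory controller described above is driven entirely through cgroupfs files. A minimal sketch, assuming the v1 memory controller is mounted at /sys/fs/cgroup/memory and using "demo" as a hypothetical group name:

    # Sketch only: create a memory cgroup, cap it at 256 MiB, and move the
    # current shell into it (run as root).
    CG=/sys/fs/cgroup/memory/demo

    mkdir -p "$CG"
    echo $((256 * 1024 * 1024)) > "$CG/memory.limit_in_bytes"
    echo $$ > "$CG/tasks"
    cat "$CG/memory.usage_in_bytes"   # charged usage for the group
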
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/reserved-memory/
reserved-memory.txt
1 *** Reserved memory regions ***
3 Reserved memory is specified as a node under the /reserved-memory node.
4 The operating system shall exclude reserved memory from normal usage
6 normal use) memory regions. Such memory regions are usually designed for
9 Parameters for each memory region can be encoded into the device tree
12 /reserved-memory node
19 /reserved-memory/ child nodes
21 Each child of the reserved-memory node specifies one or more regions of
22 reserved memory. Each child node may either use a 'reg' property to
23 specify a specific range of reserved memory, or a 'size' property with
[all …]
xen,shared-memory.txt
1 * Xen hypervisor reserved-memory binding
3 Expose one or more memory regions as reserved-memory to the guest
5 to be a shared memory area across multiple virtual machines for
8 For each of these pre-shared memory regions, a range is exposed under
9 the /reserved-memory node as a child node. Each range sub-node is named
13 compatible = "xen,shared-memory-v1"
16 the base guest physical address and size of the shared memory region
20 memory region used for the mapping in the borrower VM.
23 a string that identifies the shared memory region as specified in
/kernel/linux/linux-5.10/drivers/staging/octeon/
ethernet-mem.c
49 char *memory; in cvm_oct_free_hw_skbuff() local
52 memory = cvmx_fpa_alloc(pool); in cvm_oct_free_hw_skbuff()
53 if (memory) { in cvm_oct_free_hw_skbuff()
55 *(struct sk_buff **)(memory - sizeof(void *)); in cvm_oct_free_hw_skbuff()
59 } while (memory); in cvm_oct_free_hw_skbuff()
79 char *memory; in cvm_oct_fill_hw_memory() local
94 memory = kmalloc(size + 256, GFP_ATOMIC); in cvm_oct_fill_hw_memory()
95 if (unlikely(!memory)) { in cvm_oct_fill_hw_memory()
100 fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL); in cvm_oct_fill_hw_memory()
101 *((char **)fpa - 1) = memory; in cvm_oct_fill_hw_memory()
[all …]
/kernel/linux/linux-5.10/Documentation/core-api/
memory-hotplug.rst
12 There are six types of notification defined in ``include/linux/memory.h``:
15 Generated before new memory becomes available in order to be able to
16 prepare subsystems to handle memory. The page allocator is still unable
17 to allocate from the new memory.
23 Generated when memory has successfully brought online. The callback may
24 allocate pages from the new memory.
27 Generated to begin the process of offlining memory. Allocations are no
28 longer possible from the memory but some of the memory to be offlined
29 is still in use. The callback can be used to free memory known to a
30 subsystem from the indicated memory block.
[all …]
/kernel/linux/linux-5.10/Documentation/userspace-api/media/v4l/
dev-mem2mem.rst
9 A V4L2 memory-to-memory device can compress, decompress, transform, or
10 otherwise convert video data from one format into another format, in memory.
11 Such memory-to-memory devices set the ``V4L2_CAP_VIDEO_M2M`` or
12 ``V4L2_CAP_VIDEO_M2M_MPLANE`` capability. Examples of memory-to-memory
16 A memory-to-memory video node acts just like a normal video node, but it
17 supports both output (sending frames from memory to the hardware)
19 memory) stream I/O. An application will have to setup the stream I/O for
23 Memory-to-memory devices function as a shared resource: you can
32 One of the most common memory-to-memory device is the codec. Codecs
35 See :ref:`mpeg-controls`. More details on how to use codec memory-to-memory
/kernel/linux/linux-5.10/Documentation/vm/
memory-model.rst
9 Physical memory in a system may be addressed in different ways. The
10 simplest case is when the physical memory starts at address 0 and
15 different memory banks are attached to different CPUs.
17 Linux abstracts this diversity using one of the three memory models:
19 memory models it supports, what the default memory model is and
26 All the memory models track the status of physical page frames using
29 Regardless of the selected memory model, there exists one-to-one
33 Each memory model defines :c:func:`pfn_to_page` and :c:func:`page_to_pfn`
40 The simplest memory model is FLATMEM. This model is suitable for
42 memory.
[all …]
numa.rst
14 or more CPUs, local memory, and/or IO buses. For brevity and to
28 Coherent NUMA or ccNUMA systems. With ccNUMA systems, all memory is visible
32 Memory access time and effective memory bandwidth varies depending on how far
33 away the cell containing the CPU or IO bus making the memory access is from the
34 cell containing the target memory. For example, access to memory by CPUs
36 bandwidths than accesses to memory on other, remote cells. NUMA platforms
41 memory bandwidth. However, to achieve scalable memory bandwidth, system and
42 application software must arrange for a large majority of the memory references
43 [cache misses] to be to "local" memory--memory on the same cell, if any--or
44 to the closest cell with memory.
[all …]
hmm.rst
7 Provide infrastructure and helpers to integrate non-conventional memory (device
8 memory like GPU on board memory) into regular kernel path, with the cornerstone
9 of this being specialized struct page for such memory (see sections 5 to 7 of
20 related to using device specific memory allocators. In the second section, I
24 fifth section deals with how device memory is represented inside the kernel.
30 Problems of using a device specific memory allocator
33 Devices with a large amount of on board memory (several gigabytes) like GPUs
34 have historically managed their memory through dedicated driver specific APIs.
35 This creates a disconnect between memory allocated and managed by a device
36 driver and regular application memory (private anonymous, shared memory, or
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/fb/
ram.c
24 #define nvkm_vram(p) container_of((p), struct nvkm_vram, memory)
31 struct nvkm_memory memory; member
38 nvkm_vram_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm, in nvkm_vram_map() argument
41 struct nvkm_vram *vram = nvkm_vram(memory); in nvkm_vram_map()
43 .memory = &vram->memory, in nvkm_vram_map()
52 nvkm_vram_size(struct nvkm_memory *memory) in nvkm_vram_size() argument
54 return (u64)nvkm_mm_size(nvkm_vram(memory)->mn) << NVKM_RAM_MM_SHIFT; in nvkm_vram_size()
58 nvkm_vram_addr(struct nvkm_memory *memory) in nvkm_vram_addr() argument
60 struct nvkm_vram *vram = nvkm_vram(memory); in nvkm_vram_addr()
67 nvkm_vram_page(struct nvkm_memory *memory) in nvkm_vram_page() argument
[all …]
/kernel/linux/linux-5.10/drivers/dax/
Kconfig
7 tristate "DAX: direct access to differentiated memory"
18 latency...) memory via an mmap(2) capable character
20 platform memory resource that is differentiated from the
21 baseline memory pool. Mappings of a /dev/daxX.Y device impose
25 tristate "PMEM DAX: direct access to persistent memory"
29 Support raw access to persistent memory. Note that this
30 driver consumes memory ranges allocated and exported by the
36 tristate "HMEM DAX: direct access to 'specific purpose' memory"
42 memory. For example, a high bandwidth memory pool. The
44 memory from typical usage by default. This driver creates
[all …]
/kernel/linux/linux-5.10/Documentation/powerpc/
firmware-assisted-dump.rst
14 - Fadump uses the same firmware interfaces and memory reservation model
16 - Unlike phyp dump, FADump exports the memory dump through /proc/vmcore
21 - Unlike phyp dump, FADump allows user to release all the memory reserved
35 - Once the dump is copied out, the memory that held the dump
44 - The first kernel registers the sections of memory with the
46 These registered sections of memory are reserved by the first
50 low memory regions (boot memory) from source to destination area.
54 The term 'boot memory' means size of the low memory chunk
56 booted with restricted memory. By default, the boot memory
58 Alternatively, user can also specify boot memory size
[all …]
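
firmware-assisted-dump.rst above notes that, unlike phyp dump, FADump lets the user release the reserved memory once the dump has been copied out. A hedged sketch of the corresponding sysfs interaction (paths as documented for this kernel; requires root on a pseries system booted with fadump=on):

    # Sketch only: inspect FADump state and release the reserved dump memory
    # after /proc/vmcore has been saved.
    cat /sys/kernel/fadump_enabled       # 1 if fadump was enabled at boot
    cat /sys/kernel/fadump_registered    # 1 once the dump sections are registered
    echo 1 > /sys/kernel/fadump_release_mem
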
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/engine/fifo/
gv100.c
33 struct nvkm_memory *memory, u32 offset) in gv100_fifo_runlist_chan() argument
39 nvkm_wo32(memory, offset + 0x0, lower_32_bits(user)); in gv100_fifo_runlist_chan()
40 nvkm_wo32(memory, offset + 0x4, upper_32_bits(user)); in gv100_fifo_runlist_chan()
41 nvkm_wo32(memory, offset + 0x8, lower_32_bits(inst) | chan->base.chid); in gv100_fifo_runlist_chan()
42 nvkm_wo32(memory, offset + 0xc, upper_32_bits(inst)); in gv100_fifo_runlist_chan()
47 struct nvkm_memory *memory, u32 offset) in gv100_fifo_runlist_cgrp() argument
49 nvkm_wo32(memory, offset + 0x0, (128 << 24) | (3 << 16) | 0x00000001); in gv100_fifo_runlist_cgrp()
50 nvkm_wo32(memory, offset + 0x4, cgrp->chan_nr); in gv100_fifo_runlist_cgrp()
51 nvkm_wo32(memory, offset + 0x8, cgrp->id); in gv100_fifo_runlist_cgrp()
52 nvkm_wo32(memory, offset + 0xc, 0x00000000); in gv100_fifo_runlist_cgrp()
/kernel/linux/linux-5.10/arch/arm64/boot/dts/renesas/
r8a779a0-falcon-cpu.dtsi
14 memory@48000000 {
15 device_type = "memory";
20 memory@500000000 {
21 device_type = "memory";
25 memory@600000000 {
26 device_type = "memory";
30 memory@700000000 {
31 device_type = "memory";
