| /kernel/linux/linux-6.6/drivers/gpu/drm/ |
| D | drm_gem_shmem_helper.c | 31 * This library provides helpers for GEM objects backed by shmem buffers 55 struct drm_gem_shmem_object *shmem; in __drm_gem_shmem_create() local 65 shmem = to_drm_gem_shmem_obj(obj); in __drm_gem_shmem_create() 67 shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); in __drm_gem_shmem_create() 68 if (!shmem) in __drm_gem_shmem_create() 70 obj = &shmem->base; in __drm_gem_shmem_create() 78 shmem->map_wc = false; /* dma-buf mappings always use writecombine */ in __drm_gem_shmem_create() 91 INIT_LIST_HEAD(&shmem->madv_list); in __drm_gem_shmem_create() 105 return shmem; in __drm_gem_shmem_create() 119 * This function creates a shmem GEM object. [all …]
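For orientation, a minimal sketch of a driver consuming these 6.6 helpers (hypothetical `my_drv_` names; assumes the 6.6 convention that the caller holds the object's reservation lock around vmap/vunmap; error handling abbreviated):

```c
#include <drm/drm_gem_shmem_helper.h>
#include <linux/dma-resv.h>
#include <linux/iosys-map.h>

/* Hypothetical: allocate a shmem-backed BO and zero it through a CPU mapping. */
static int my_drv_alloc_and_clear(struct drm_device *drm, size_t size)
{
	struct drm_gem_shmem_object *shmem;
	struct iosys_map map;
	int ret;

	shmem = drm_gem_shmem_create(drm, size);	/* the creator shown above */
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	ret = dma_resv_lock(shmem->base.resv, NULL);
	if (!ret) {
		ret = drm_gem_shmem_vmap(shmem, &map);	/* kernel virtual mapping */
		if (!ret) {
			iosys_map_memset(&map, 0, 0, size);
			drm_gem_shmem_vunmap(shmem, &map);
		}
		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_put(&shmem->base);	/* final put ends in drm_gem_shmem_free() */
	return ret;
}
```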
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/ |
| D | drm_gem_shmem_helper.c | 23 * This library provides helpers for GEM objects backed by shmem buffers 41 struct drm_gem_shmem_object *shmem; in __drm_gem_shmem_create() local 50 obj = kzalloc(sizeof(*shmem), GFP_KERNEL); in __drm_gem_shmem_create() 68 shmem = to_drm_gem_shmem_obj(obj); in __drm_gem_shmem_create() 69 mutex_init(&shmem->pages_lock); in __drm_gem_shmem_create() 70 mutex_init(&shmem->vmap_lock); in __drm_gem_shmem_create() 71 INIT_LIST_HEAD(&shmem->madv_list); in __drm_gem_shmem_create() 85 return shmem; in __drm_gem_shmem_create() 99 * This function creates a shmem GEM object. 112 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object [all …]
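The 5.10 generation of the same helpers (note the `pages_lock`/`vmap_lock` mutexes initialized above) operates on `struct drm_gem_object` and hands back the mapping directly; a sketch under those assumptions:

```c
/* 5.10-era sketch: vmap returns the address (or an ERR_PTR) and serializes
 * internally on shmem->vmap_lock, so no caller-side locking is needed. */
static int my_drv_clear_510(struct drm_gem_object *obj)
{
	void *vaddr = drm_gem_shmem_vmap(obj);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, 0, obj->size);
	drm_gem_shmem_vunmap(obj, vaddr);
	return 0;
}
```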
|
| /kernel/linux/linux-6.6/include/drm/ |
| D | drm_gem_shmem_helper.h | 21 * struct drm_gem_shmem_object - GEM object backed by shmem 91 * @map_wc: map object write-combined (instead of using shmem defaults). 100 void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem); 102 void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); 103 int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem); 104 void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem); 105 int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, 107 void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, 109 int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma); 111 int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv); [all …]
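Drivers rarely call these prototypes one by one; the usual pattern is to wire the header's wrapper variants into a `drm_gem_object_funcs` table so DRM core dispatches into the shmem library. A sketch, with wrapper names assumed from the 6.6 header:

```c
/* Sketch: route all GEM object operations to the shmem helper library. */
static const struct drm_gem_object_funcs my_gem_shmem_funcs = {
	.free		= drm_gem_shmem_object_free,
	.print_info	= drm_gem_shmem_object_print_info,
	.pin		= drm_gem_shmem_object_pin,
	.unpin		= drm_gem_shmem_object_unpin,
	.get_sg_table	= drm_gem_shmem_object_get_sg_table,
	.vmap		= drm_gem_shmem_object_vmap,
	.vunmap		= drm_gem_shmem_object_vunmap,
	.mmap		= drm_gem_shmem_object_mmap,
	.vm_ops		= &drm_gem_shmem_vm_ops,
};
```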
|
| /kernel/linux/linux-6.6/drivers/firmware/arm_scmi/ |
| D | shmem.c | 35 void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, in shmem_tx_prepare() argument 55 spin_until_cond((ioread32(&shmem->channel_status) & in shmem_tx_prepare() 58 if (!(ioread32(&shmem->channel_status) & in shmem_tx_prepare() 67 iowrite32(0x0, &shmem->channel_status); in shmem_tx_prepare() 69 &shmem->flags); in shmem_tx_prepare() 70 iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length); in shmem_tx_prepare() 71 iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header); in shmem_tx_prepare() 73 memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len); in shmem_tx_prepare() 76 u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem) in shmem_read_header() argument 78 return ioread32(&shmem->msg_header); in shmem_read_header() [all …]
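The accessors above all poke one fixed layout in the shared area. For reference, a sketch of that layout as it appears in drivers/firmware/arm_scmi/shmem.c (field names match the hits; treat the exact bit definitions as an assumption taken from the SCMI shared-memory spec):

```c
#include <linux/bits.h>

/* Layout of the SCMI shared-memory channel manipulated above. */
struct scmi_shared_mem {
	__le32 reserved;
	__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
	__le32 reserved1[2];		/* implementation defined */
	__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED		BIT(0)
	__le32 length;			/* msg_header + payload bytes */
	__le32 msg_header;
	u8 msg_payload[];
};
```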
|
| D | mailbox.c | 25 * @shmem: Transmit/Receive shared memory area 32 struct scmi_shared_mem __iomem *shmem; member 41 shmem_tx_prepare(smbox->shmem, m, smbox->cinfo); in tx_prepare() 57 if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) { in rx_callback() 62 scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL); in rx_callback() 93 * 'mboxes' and 'shmem', then determine which mailbox channel indexes are 105 num_sh = of_count_phandle_with_args(np, "shmem", NULL); in mailbox_chan_validate() 108 /* Bail out if mboxes and shmem descriptors are inconsistent */ in mailbox_chan_validate() 117 /* Bail out if provided shmem descriptors do not refer to distinct areas */ in mailbox_chan_validate() 121 np_tx = of_parse_phandle(np, "shmem", 0); in mailbox_chan_validate() [all …]
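The consistency check above enforces the mbox/shmem combinations listed in the arm,scmi.yaml hit further down. A distillation (not the literal kernel function) of that rule:

```c
#include <linux/of.h>

/* Sketch: valid counts of "mboxes" vs. "shmem" phandles per the binding. */
static bool scmi_mbox_shmem_combo_ok(struct device_node *np)
{
	int num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
	int num_sh = of_count_phandle_with_args(np, "shmem", NULL);

	switch (num_mb) {
	case 1: return num_sh == 1;			/* TX, bidirectional */
	case 2: return num_sh == 1 || num_sh == 2;	/* TX unidir / TX+RX bidir */
	case 3: return num_sh == 2;			/* TX unidir + RX */
	default: return false;
	}
}
```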
|
| D | smc.c | 24 * The shmem address is split into 4K page and offset. 27 * This however limits the shmem address to 44 bit. 45 * @shmem: Transmit/Receive shared memory area 51 * @param_page: 4K page number of the shmem channel 52 * @param_offset: Offset within the 4K page of the shmem channel 58 struct scmi_shared_mem __iomem *shmem; member 59 /* Protect access to shmem area */ 73 shmem_read_header(scmi_info->shmem), NULL); in smc_msg_done_isr() 80 struct device_node *np = of_parse_phandle(of_node, "shmem", 0); in smc_chan_available() 141 np = of_parse_phandle(cdev->of_node, "shmem", 0); in smc_chan_setup() [all …]
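A quick gloss on the split described above: with a 32-bit page-number parameter and a 12-bit in-page offset, the highest addressable shmem byte sits at bit 43, hence the 44-bit limit. A sketch of the arithmetic (macro names assumed, modelled on smc.c):

```c
#define SHMEM_SHIFT	12
#define SHMEM_PAGE(x)	((u32)((x) >> SHMEM_SHIFT))		/* 4K page number */
#define SHMEM_OFFSET(x)	((u32)((x) & ((1UL << SHMEM_SHIFT) - 1)))	/* in-page offset */

/* Example: shmem at 0xFFFFFFFF080 (a 44-bit address) ->
 *   SHMEM_PAGE()   = 0xFFFFFFFF  (the largest page number a u32 holds)
 *   SHMEM_OFFSET() = 0x080
 */
```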
|
| /kernel/linux/linux-5.10/drivers/firmware/arm_scmi/ |
| D | shmem.c | 32 void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, in shmem_tx_prepare() argument 41 spin_until_cond(ioread32(&shmem->channel_status) & in shmem_tx_prepare() 44 iowrite32(0x0, &shmem->channel_status); in shmem_tx_prepare() 46 &shmem->flags); in shmem_tx_prepare() 47 iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length); in shmem_tx_prepare() 48 iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header); in shmem_tx_prepare() 50 memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len); in shmem_tx_prepare() 53 u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem) in shmem_read_header() argument 55 return ioread32(&shmem->msg_header); in shmem_read_header() 58 void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, in shmem_fetch_response() argument [all …]
|
| D | mailbox.c | 24 * @shmem: Transmit/Receive shared memory area 30 struct scmi_shared_mem __iomem *shmem; member 39 shmem_tx_prepare(smbox->shmem, m); in tx_prepare() 46 scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem)); in rx_callback() 61 num_sh = of_count_phandle_with_args(np, "shmem", NULL); in mailbox_chan_validate() 62 /* Bail out if mboxes and shmem descriptors are inconsistent */ in mailbox_chan_validate() 72 np_tx = of_parse_phandle(np, "shmem", 0); in mailbox_chan_validate() 73 np_rx = of_parse_phandle(np, "shmem", 1); in mailbox_chan_validate() 76 dev_warn(cdev, "Invalid shmem descriptor for '%s'\n", in mailbox_chan_validate() 94 struct device_node *shmem; in mailbox_chan_setup() local [all …]
|
| D | smc.c | 23 * @shmem: Transmit/Receive shared memory area 30 struct scmi_shared_mem __iomem *shmem; member 37 struct device_node *np = of_parse_phandle(dev->of_node, "shmem", 0); in smc_chan_available() 63 np = of_parse_phandle(cdev->of_node, "shmem", 0); in smc_chan_setup() 72 scmi_info->shmem = devm_ioremap(dev, res.start, size); in smc_chan_setup() 73 if (!scmi_info->shmem) { in smc_chan_setup() 111 shmem_tx_prepare(scmi_info->shmem, xfer); in smc_send_message() 114 scmi_rx_callback(scmi_info->cinfo, shmem_read_header(scmi_info->shmem)); in smc_send_message() 129 shmem_fetch_response(scmi_info->shmem, xfer); in smc_fetch_response() 137 return shmem_poll_done(scmi_info->shmem, xfer); in smc_poll_done()
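Pulling the quoted pieces together, the 5.10 SMC send path is roughly the following. This is a sketch: field names come from the snippet, `func_id` is the DT-provided SMC function ID, and the real function also serializes on a mutex and maps SMCCC errors properly:

```c
#include <linux/arm-smccc.h>

static int my_smc_send(struct scmi_smc *scmi_info, struct scmi_xfer *xfer,
		       unsigned long func_id)
{
	struct arm_smccc_res res;

	shmem_tx_prepare(scmi_info->shmem, xfer);	/* stage payload in shmem */
	arm_smccc_1_1_invoke(func_id, 0, 0, 0, 0, 0, 0, 0, &res); /* trap to firmware */
	scmi_rx_callback(scmi_info->cinfo,
			 shmem_read_header(scmi_info->shmem)); /* complete the xfer */

	return res.a0 ? -EOPNOTSUPP : 0;	/* real code maps SMCCC errors */
}
```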
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/virtio/ |
| D | virtgpu_object.c | 71 struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); in virtio_gpu_cleanup_object() local 73 if (shmem->pages) { in virtio_gpu_cleanup_object() 74 if (shmem->mapped) { in virtio_gpu_cleanup_object() 76 shmem->pages, DMA_TO_DEVICE, 0); in virtio_gpu_cleanup_object() 77 shmem->mapped = 0; in virtio_gpu_cleanup_object() 80 sg_free_table(shmem->pages); in virtio_gpu_cleanup_object() 81 kfree(shmem->pages); in virtio_gpu_cleanup_object() 82 shmem->pages = NULL; in virtio_gpu_cleanup_object() 126 struct virtio_gpu_object_shmem *shmem; in virtio_gpu_create_object() local 129 shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); in virtio_gpu_create_object() [all …]
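The cleanup hit above follows a common teardown pattern worth isolating: undo the DMA mapping only if one exists, then free the scatter-gather table and its container. A sketch (hypothetical helper; attrs assumed 0 as in the hit):

```c
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static void my_release_sgt(struct device *dma_dev, struct sg_table *sgt,
			   bool mapped)
{
	if (mapped)
		dma_unmap_sgtable(dma_dev, sgt, DMA_TO_DEVICE, 0);
	sg_free_table(sgt);	/* frees the sgl entries */
	kfree(sgt);		/* then the table itself */
}
```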
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/panfrost/ |
| D | panfrost_gem_shrinker.c | 23 struct drm_gem_shmem_object *shmem; in panfrost_gem_shrinker_count() local 29 list_for_each_entry(shmem, &pfdev->shrinker_list, madv_list) { in panfrost_gem_shrinker_count() 30 if (drm_gem_shmem_is_purgeable(shmem)) in panfrost_gem_shrinker_count() 31 count += shmem->base.size >> PAGE_SHIFT; in panfrost_gem_shrinker_count() 41 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); in panfrost_gem_purge() local 51 if (!dma_resv_trylock(shmem->base.resv)) in panfrost_gem_purge() 58 dma_resv_unlock(shmem->base.resv); in panfrost_gem_purge() 70 struct drm_gem_shmem_object *shmem, *tmp; in panfrost_gem_shrinker_scan() local 76 list_for_each_entry_safe(shmem, tmp, &pfdev->shrinker_list, madv_list) { in panfrost_gem_shrinker_scan() 79 if (drm_gem_shmem_is_purgeable(shmem) && in panfrost_gem_shrinker_scan() [all …]
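The count/scan split above is the standard shrinker shape: count reports how many pages could be reclaimed, scan actually purges. A condensed sketch of the counting side (shrinker-list locking omitted; `drm_gem_shmem_is_purgeable()` is the inline shown in the drm_gem_shmem_helper.h hit below):

```c
/* Count reclaimable pages across the driver's madvised-object list. */
static unsigned long my_shrinker_count(struct list_head *shrinker_list)
{
	struct drm_gem_shmem_object *shmem;
	unsigned long count = 0;

	list_for_each_entry(shmem, shrinker_list, madv_list)
		if (drm_gem_shmem_is_purgeable(shmem))
			count += shmem->base.size >> PAGE_SHIFT;

	return count;
}
```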
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/panfrost/ |
| D | panfrost_gem_shrinker.c | 23 struct drm_gem_shmem_object *shmem; in panfrost_gem_shrinker_count() local 29 list_for_each_entry(shmem, &pfdev->shrinker_list, madv_list) { in panfrost_gem_shrinker_count() 30 if (drm_gem_shmem_is_purgeable(shmem)) in panfrost_gem_shrinker_count() 31 count += shmem->base.size >> PAGE_SHIFT; in panfrost_gem_shrinker_count() 41 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); in panfrost_gem_purge() local 51 if (!mutex_trylock(&shmem->pages_lock)) in panfrost_gem_purge() 58 mutex_unlock(&shmem->pages_lock); in panfrost_gem_purge() 70 struct drm_gem_shmem_object *shmem, *tmp; in panfrost_gem_shrinker_scan() local 76 list_for_each_entry_safe(shmem, tmp, &pfdev->shrinker_list, madv_list) { in panfrost_gem_shrinker_scan() 79 if (drm_gem_shmem_is_purgeable(shmem) && in panfrost_gem_shrinker_scan() [all …]
|
| /kernel/linux/linux-6.6/Documentation/devicetree/bindings/firmware/ |
| D | arm,scmi.yaml | 38 with shmem address(4KB-page, offset) as parameters 85 The effective combination in numbers of mboxes and shmem descriptors let 88 1 mbox / 1 shmem => SCMI TX over 1 mailbox bidirectional channel 89 2 mbox / 2 shmem => SCMI TX and RX over 2 mailbox bidirectional channels 90 2 mbox / 1 shmem => SCMI TX over 2 mailbox unidirectional channels 91 3 mbox / 2 shmem => SCMI TX and RX over 3 mailbox unidirectional channels 92 Any other combination of mboxes and shmem is invalid. 96 shmem: 271 shmem: 299 - shmem [all …]
|
| /kernel/linux/linux-5.10/include/drm/ |
| D | drm_gem_shmem_helper.h | 21 * struct drm_gem_shmem_object - GEM object backed by shmem 112 int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem); 113 void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); 121 static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem) in drm_gem_shmem_is_purgeable() argument 123 return (shmem->madv > 0) && in drm_gem_shmem_is_purgeable() 124 !shmem->vmap_use_count && shmem->sgt && in drm_gem_shmem_is_purgeable() 125 !shmem->base.dma_buf && !shmem->base.import_attach; in drm_gem_shmem_is_purgeable() 156 * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations 158 * This macro provides a shortcut for setting the shmem GEM operations in
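`drm_gem_shmem_is_purgeable()` above only reads state that a madvise path has to maintain. A sketch of that path in a 5.10-style driver (list locking omitted, `my_` names hypothetical; the panfrost madvise ioctl does essentially this):

```c
/* Record userspace's hint and keep the shrinker list in sync
 * (madv > 0 means "don't need", i.e. purgeable). */
static void my_gem_madvise(struct drm_gem_shmem_object *shmem,
			   struct list_head *shrinker_list, int madv)
{
	/* Returns false once the backing pages are already purged. */
	if (!drm_gem_shmem_madvise(&shmem->base, madv))
		return;

	if (madv > 0 && list_empty(&shmem->madv_list))
		list_add_tail(&shmem->madv_list, shrinker_list);
	else if (madv == 0)
		list_del_init(&shmem->madv_list);
}
```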
|
| /kernel/linux/linux-6.6/drivers/net/ethernet/microsoft/mana/ |
| D | shm_channel.c | 81 /* shmem reads as 0xFFFFFFFF in the reset case */ in mana_smc_poll_register() 145 u64 *shmem; in mana_smc_setup_hwc() local 185 shmem = (u64 *)ptr; in mana_smc_setup_hwc() 187 *shmem = frame_addr & PAGE_FRAME_L48_MASK; in mana_smc_setup_hwc() 193 shmem = (u64 *)ptr; in mana_smc_setup_hwc() 195 *shmem = frame_addr & PAGE_FRAME_L48_MASK; in mana_smc_setup_hwc() 201 shmem = (u64 *)ptr; in mana_smc_setup_hwc() 203 *shmem = frame_addr & PAGE_FRAME_L48_MASK; in mana_smc_setup_hwc() 209 shmem = (u64 *)ptr; in mana_smc_setup_hwc() 211 *shmem = frame_addr & PAGE_FRAME_L48_MASK; in mana_smc_setup_hwc() [all …]
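Each of the near-identical stanzas above writes one queue's 48-bit page frame number into the next u64 slot of the shared-memory channel. The pattern in isolation (mask value assumed from shm_channel.c):

```c
#define PAGE_FRAME_L48_MASK	0x0000FFFFFFFFFFFFULL

/* Write the low 48 bits of a frame address and advance the cursor. */
static u8 *my_write_frame(u8 *ptr, u64 frame_addr)
{
	u64 *shmem = (u64 *)ptr;

	*shmem = frame_addr & PAGE_FRAME_L48_MASK;
	return ptr + sizeof(u64);
}
```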
|
| /kernel/linux/linux-6.6/drivers/net/arcnet/ |
| D | com90xx.c | 44 * shmem are left in the list at Stage 5, they must correspond to each 58 static int com90xx_found(int ioaddr, int airq, u_long shmem, void __iomem *); 86 static int io; /* use the insmod io= irq= shmem= options */ 88 static int shmem; variable 93 module_param(shmem, int, 0); 107 if (!io && !irq && !shmem && !*device && com90xx_skip_probe) in com90xx_probe() 131 if (shmem) in com90xx_probe() 132 shmems[numshmems++] = shmem; in com90xx_probe() 203 /* Stage 3: abandon any shmem addresses that don't have the signature in com90xx_probe() 243 * sure no "mirror" shmem areas show up - if they occur in com90xx_probe() [all …]
|
| D | arc-rimi.c | 65 * need to be passed a specific shmem address, IRQ, and node ID. 72 pr_info("Given: node %02Xh, shmem %lXh, irq %d\n", in arcrimi_probe() 78 pr_err("No autoprobe for RIM I; you must specify the shmem and irq!\n"); in arcrimi_probe() 126 unsigned long first_mirror, last_mirror, shmem; in arcrimi_found() local 146 shmem = dev->mem_start; in arcrimi_found() 159 check_mirror(shmem - MIRROR_SIZE, MIRROR_SIZE) == 0 && in arcrimi_found() 160 check_mirror(shmem - 2 * MIRROR_SIZE, MIRROR_SIZE) == 1) in arcrimi_found() 163 first_mirror = shmem - mirror_size; in arcrimi_found() 168 last_mirror = shmem + mirror_size; in arcrimi_found() 194 release_mem_region(shmem, MIRROR_SIZE); in arcrimi_found() [all …]
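The mirror arithmetic above deserves a gloss: the RIM I card exposes the same buffer repeatedly through its address window, so the probe walks outward from the user-supplied shmem address until the signature check fails on each side. A sketch of that loop (`check_mirror()` is the file-local probe from arc-rimi.c; `my_` name hypothetical):

```c
/* Find the full span of shmem mirrors around the given start address. */
static void my_mirror_span(unsigned long shmem, int mirror_size,
			   unsigned long *first, unsigned long *last)
{
	unsigned long lo = shmem - mirror_size;
	unsigned long hi = shmem + mirror_size;

	while (check_mirror(lo, mirror_size) == 1)
		lo -= mirror_size;	/* walk down past each copy */
	lo += mirror_size;

	while (check_mirror(hi, mirror_size) == 1)
		hi += mirror_size;	/* walk up past each copy */
	hi -= mirror_size;

	*first = lo;
	*last = hi;
}
```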
|
| /kernel/linux/linux-5.10/drivers/net/arcnet/ |
| D | com90xx.c | 44 * shmem are left in the list at Stage 5, they must correspond to each 58 static int com90xx_found(int ioaddr, int airq, u_long shmem, void __iomem *); 86 static int io; /* use the insmod io= irq= shmem= options */ 88 static int shmem; variable 93 module_param(shmem, int, 0); 107 if (!io && !irq && !shmem && !*device && com90xx_skip_probe) in com90xx_probe() 131 if (shmem) in com90xx_probe() 132 shmems[numshmems++] = shmem; in com90xx_probe() 203 /* Stage 3: abandon any shmem addresses that don't have the signature in com90xx_probe() 243 * sure no "mirror" shmem areas show up - if they occur in com90xx_probe() [all …]
|
| D | arc-rimi.c | 65 * need to be passed a specific shmem address, IRQ, and node ID. 72 pr_info("Given: node %02Xh, shmem %lXh, irq %d\n", in arcrimi_probe() 78 pr_err("No autoprobe for RIM I; you must specify the shmem and irq!\n"); in arcrimi_probe() 126 unsigned long first_mirror, last_mirror, shmem; in arcrimi_found() local 146 shmem = dev->mem_start; in arcrimi_found() 159 check_mirror(shmem - MIRROR_SIZE, MIRROR_SIZE) == 0 && in arcrimi_found() 160 check_mirror(shmem - 2 * MIRROR_SIZE, MIRROR_SIZE) == 1) in arcrimi_found() 163 first_mirror = shmem - mirror_size; in arcrimi_found() 168 last_mirror = shmem + mirror_size; in arcrimi_found() 194 release_mem_region(shmem, MIRROR_SIZE); in arcrimi_found() [all …]
|
| /kernel/linux/linux-5.10/Documentation/devicetree/bindings/mailbox/ |
| D | mailbox.txt | 26 - shmem : List of phandles pointing to the shared memory (SHM) area between the 39 Example with shared memory (shmem): 49 cl_shmem: shmem@0 { 50 compatible = "client-shmem"; 58 shmem = <&cl_shmem>;
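A sketch of the consumer side of this binding: resolve the "shmem" phandle, translate it to a physical range, and map it. This mirrors what the SCMI smc.c/mailbox.c hits elsewhere in these results do (`my_` name hypothetical):

```c
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *my_map_shmem(struct device *dev)
{
	struct device_node *np;
	struct resource res;
	void __iomem *addr = NULL;

	np = of_parse_phandle(dev->of_node, "shmem", 0);
	if (!np)
		return NULL;

	if (!of_address_to_resource(np, 0, &res))	/* 0 == success */
		addr = devm_ioremap(dev, res.start, resource_size(&res));

	of_node_put(np);
	return addr;
}
```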
|
| /kernel/linux/linux-6.6/Documentation/devicetree/bindings/mailbox/ |
| D | mailbox.txt | 26 - shmem : List of phandles pointing to the shared memory (SHM) area between the 39 Example with shared memory (shmem): 49 cl_shmem: shmem@0 { 50 compatible = "client-shmem"; 58 shmem = <&cl_shmem>;
|
| /kernel/linux/linux-6.6/Documentation/devicetree/bindings/reserved-memory/ |
| D | nvidia,tegra264-bpmp-shmem.yaml | 4 $id: http://devicetree.org/schemas/reserved-memory/nvidia,tegra264-bpmp-shmem.yaml# 17 The sub-node is named shmem@<address>. 24 const: nvidia,tegra264-bpmp-shmem 41 dram_cpu_bpmp_mail: shmem@f1be0000 { 42 compatible = "nvidia,tegra264-bpmp-shmem";
|
| /kernel/linux/linux-5.10/Documentation/devicetree/bindings/firmware/ |
| D | nvidia,tegra186-bpmp.txt | 16 - shmem : List of the phandle of the TX and RX shared memory area that 80 cpu_bpmp_tx: shmem@4e000 { 81 compatible = "nvidia,tegra186-bpmp-shmem"; 87 cpu_bpmp_rx: shmem@4f000 { 88 compatible = "nvidia,tegra186-bpmp-shmem"; 98 shmem = <&cpu_bpmp_tx &cpu_bpmp_rx>;
|
| /kernel/linux/linux-5.10/Documentation/admin-guide/cgroup-v1/ |
| D | memcg_test.rst | 108 6. Shmem(tmpfs) Page Cache 111 The best way to understand shmem's page state transition is to read 112 mm/shmem.c. 114 But brief explanation of the behavior of memcg around shmem will be 117 Shmem's page (just leaf page, not direct/indirect block) can be on 119 - radix-tree of shmem's inode. 126 - A new page is added to shmem's radix-tree. 164 9.2 Shmem 167 Historically, memcg's shmem handling was poor and we saw some amount 168 of troubles here. This is because shmem is page-cache but can be [all …]
|
| /kernel/linux/linux-6.6/Documentation/admin-guide/cgroup-v1/ |
| D | memcg_test.rst | 108 6. Shmem(tmpfs) Page Cache 111 The best way to understand shmem's page state transition is to read 112 mm/shmem.c. 114 But brief explanation of the behavior of memcg around shmem will be 117 Shmem's page (just leaf page, not direct/indirect block) can be on 119 - radix-tree of shmem's inode. 126 - A new page is added to shmem's radix-tree. 155 9.2 Shmem 158 Historically, memcg's shmem handling was poor and we saw some amount 159 of troubles here. This is because shmem is page-cache but can be [all …]
|