Searched full:smmu (Results 1 – 25 of 83) sorted by relevance

/kernel/linux/linux-5.10/drivers/iommu/arm/arm-smmu/
arm-smmu.c
3 * IOMMU API for ARM architected SMMU implementations.
13 * - Non-secure access to the SMMU
18 #define pr_fmt(fmt) "arm-smmu: " fmt
44 #include "arm-smmu.h"
47 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
61 …"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' f…
66 …domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
74 static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu) in arm_smmu_rpm_get() argument
76 if (pm_runtime_enabled(smmu->dev)) in arm_smmu_rpm_get()
77 return pm_runtime_resume_and_get(smmu->dev); in arm_smmu_rpm_get()
[all …]
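The arm-smmu.c hits above show arm_smmu_rpm_get() touching runtime PM only when it is actually enabled for the SMMU's struct device. Below is a minimal, self-contained sketch of that guard pattern; the pm_* helpers are stand-ins for the kernel's pm_runtime_enabled()/pm_runtime_resume_and_get(), not the real API, and the struct layouts are illustrative.

/* Standalone model of the arm_smmu_rpm_get() guard: take a runtime-PM
 * reference only when runtime PM is enabled for the device.  The pm_*
 * helpers are stubs, just enough to make the pattern compile on its own. */
#include <stdbool.h>
#include <stdio.h>

struct device {
    bool runtime_pm_enabled;
    int usage_count;
};

struct smmu_device {
    struct device *dev;
};

static bool pm_runtime_enabled(const struct device *dev)
{
    return dev->runtime_pm_enabled;
}

static int pm_runtime_resume_and_get(struct device *dev)
{
    dev->usage_count++;          /* the resume would happen here */
    return 0;                    /* 0 on success, negative errno on failure */
}

static int smmu_rpm_get(struct smmu_device *smmu)
{
    if (pm_runtime_enabled(smmu->dev))
        return pm_runtime_resume_and_get(smmu->dev);
    return 0;                    /* nothing to do when runtime PM is off */
}

int main(void)
{
    struct device dev = { .runtime_pm_enabled = true };
    struct smmu_device smmu = { .dev = &dev };

    if (smmu_rpm_get(&smmu) == 0)
        printf("usage count: %d\n", dev.usage_count);
    return 0;
}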
arm-smmu-impl.c
2 // Miscellaneous Arm SMMU implementation and integration quirks
5 #define pr_fmt(fmt) "arm-smmu: " fmt
10 #include "arm-smmu.h"
28 static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page, in arm_smmu_read_ns() argument
33 return readl_relaxed(arm_smmu_page(smmu, page) + offset); in arm_smmu_read_ns()
36 static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page, in arm_smmu_write_ns() argument
41 writel_relaxed(val, arm_smmu_page(smmu, page) + offset); in arm_smmu_write_ns()
52 struct arm_smmu_device smmu; member
56 static int cavium_cfg_probe(struct arm_smmu_device *smmu) in cavium_cfg_probe() argument
59 struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu); in cavium_cfg_probe()
[all …]
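This file and the NVIDIA/Qualcomm variants below all rely on the same embedding trick: a vendor struct (here cavium_smmu) contains a struct arm_smmu_device as a member, and container_of() recovers the outer struct from a pointer to the inner one. A self-contained sketch of that pattern follows; the struct names and fields are illustrative, not the kernel's.

/* Standalone model of the container_of() embedding used by cavium_cfg_probe():
 * a vendor wrapper embeds the generic device struct, and the outer struct is
 * recovered from a pointer to the embedded member. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct smmu_device {
    int num_context_banks;
};

struct cavium_smmu {
    struct smmu_device smmu;   /* embedded generic device */
    unsigned int id_base;      /* vendor-specific extra state */
};

/* Callbacks receive the generic struct; vendor code widens it back. */
static int cavium_cfg_probe(struct smmu_device *smmu)
{
    struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu);

    cs->id_base = 0x100;       /* pretend probing discovered this */
    return 0;
}

int main(void)
{
    struct cavium_smmu cs = { .smmu = { .num_context_banks = 8 } };

    cavium_cfg_probe(&cs.smmu);
    printf("id_base: %#x\n", cs.id_base);
    return 0;
}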
arm-smmu-nvidia.c
10 #include "arm-smmu.h"
20 * The third instance usage is through standard arm-smmu driver itself and
26 struct arm_smmu_device smmu; member
30 static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu, in nvidia_smmu_page() argument
35 nvidia_smmu = container_of(smmu, struct nvidia_smmu, smmu); in nvidia_smmu_page()
36 return nvidia_smmu->bases[inst] + (page << smmu->pgshift); in nvidia_smmu_page()
39 static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu, in nvidia_smmu_read_reg() argument
42 void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset; in nvidia_smmu_read_reg()
47 static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu, in nvidia_smmu_write_reg() argument
53 void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset; in nvidia_smmu_write_reg()
[all …]
arm-smmu-qcom.c
9 #include "arm-smmu.h"
12 struct arm_smmu_device smmu; member
17 static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu) in to_qcom_smmu() argument
19 return container_of(smmu, struct qcom_smmu, smmu); in to_qcom_smmu()
33 static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu) in qcom_smmu_cfg_probe() argument
35 unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1); in qcom_smmu_cfg_probe()
36 struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); in qcom_smmu_cfg_probe()
50 arm_smmu_gr0_write(smmu, last_s2cr, reg); in qcom_smmu_cfg_probe()
51 reg = arm_smmu_gr0_read(smmu, last_s2cr); in qcom_smmu_cfg_probe()
54 qsmmu->bypass_cbndx = smmu->num_context_banks - 1; in qcom_smmu_cfg_probe()
[all …]
arm-smmu.h
3 * IOMMU API for ARM architected SMMU implementations.
239 /* Maximum number of context banks per SMMU */
364 struct arm_smmu_device *smmu; member
370 struct mutex init_mutex; /* Protects smmu pointer */
376 struct arm_smmu_device *smmu; member
421 u32 (*read_reg)(struct arm_smmu_device *smmu, int page, int offset);
422 void (*write_reg)(struct arm_smmu_device *smmu, int page, int offset,
424 u64 (*read_reg64)(struct arm_smmu_device *smmu, int page, int offset);
425 void (*write_reg64)(struct arm_smmu_device *smmu, int page, int offset,
427 int (*cfg_probe)(struct arm_smmu_device *smmu);
[all …]
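The function pointers at the end of the arm-smmu.h excerpt form a hook table: integrations override register accessors and probe steps through callbacks instead of patching the core driver. A minimal standalone sketch of that shape is below; the read_reg/write_reg/cfg_probe names mirror the hooks listed above, everything else is illustrative.

/* Standalone model of the arm-smmu "impl" hook table: the core driver calls
 * through optional function pointers, and a quirky integration supplies its
 * own register accessor. */
#include <stdint.h>
#include <stdio.h>

struct smmu_device;

struct smmu_impl {
    uint32_t (*read_reg)(struct smmu_device *smmu, int page, int offset);
    void (*write_reg)(struct smmu_device *smmu, int page, int offset,
                      uint32_t val);
    int (*cfg_probe)(struct smmu_device *smmu);
};

struct smmu_device {
    const struct smmu_impl *impl;
    uint32_t regs[4][64];      /* fake register file: [page][offset] */
};

/* Default accessor, used unless an impl override is installed. */
static uint32_t smmu_read(struct smmu_device *smmu, int page, int offset)
{
    if (smmu->impl && smmu->impl->read_reg)
        return smmu->impl->read_reg(smmu, page, offset);
    return smmu->regs[page][offset];
}

/* A vendor override that, say, reads a non-secure alias of the register. */
static uint32_t quirk_read_ns(struct smmu_device *smmu, int page, int offset)
{
    return smmu->regs[page][offset] | 0x80000000u;   /* pretend NS view */
}

static const struct smmu_impl quirk_impl = {
    .read_reg = quirk_read_ns,
};

int main(void)
{
    struct smmu_device smmu = { .impl = &quirk_impl };

    smmu.regs[0][0] = 0x1234;
    printf("read: %#x\n", (unsigned)smmu_read(&smmu, 0, 0));
    return 0;
}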
Makefile
4 arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o arm-smmu-qcom.o
/kernel/linux/linux-5.10/drivers/iommu/
tegra-smmu.c
23 struct tegra_smmu *smmu; member
53 struct tegra_smmu *smmu; member
69 static inline void smmu_writel(struct tegra_smmu *smmu, u32 value, in smmu_writel() argument
72 writel(value, smmu->regs + offset); in smmu_writel()
75 static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset) in smmu_readl() argument
77 return readl(smmu->regs + offset); in smmu_readl()
86 #define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \ argument
87 ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
165 static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr) in smmu_dma_addr_valid() argument
168 return (addr & smmu->pfn_mask) == addr; in smmu_dma_addr_valid()
[all …]
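Two of the tegra-smmu.c hits are easy to misread in isolation: SMMU_TLB_CONFIG_ACTIVE_LINES() clips the SoC's TLB line count with a hardware mask, and smmu_dma_addr_valid() accepts an address only if it has no bits set outside pfn_mask. A standalone sketch of both mask checks; the numeric values are made up for illustration.

/* Standalone model of the tegra-smmu mask checks. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool dma_addr_valid(uint64_t addr, uint64_t pfn_mask)
{
    return (addr & pfn_mask) == addr;   /* no bits outside the mask */
}

static unsigned active_tlb_lines(unsigned num_tlb_lines, unsigned tlb_mask)
{
    return num_tlb_lines & tlb_mask;    /* SoC line count clipped by HW mask */
}

int main(void)
{
    uint64_t pfn_mask = 0x3ffffffffULL;        /* example: 34-bit limit */

    printf("%d\n", dma_addr_valid(0x080000000ULL, pfn_mask));  /* 1: fits */
    printf("%d\n", dma_addr_valid(0x400000000ULL, pfn_mask));  /* 0: too high */
    printf("lines: %u\n", active_tlb_lines(48, 0x3f));
    return 0;
}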
Kconfig
195 bool "NVIDIA Tegra SMMU Support"
201 This driver supports the IOMMU hardware (SMMU) found on NVIDIA Tegra
249 tristate "ARM Ltd. System MMU (SMMU) Support"
259 the ARM SMMU architecture.
267 to the SMMU but does not provide any support via the DMA API.
274 bool "Default to disabling bypass on ARM SMMU v1 and v2"
281 will not be allowed to pass through the SMMU.
295 'arm-smmu.disable_bypass' will continue to override this
/kernel/linux/linux-5.10/drivers/iommu/arm/arm-smmu-v3/
arm-smmu-v3.c
34 #include "arm-smmu-v3.h"
39 …domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
86 struct arm_smmu_device *smmu) in arm_smmu_page1_fixup() argument
89 return smmu->page1 + offset - SZ_64K; in arm_smmu_page1_fixup()
91 return smmu->base + offset; in arm_smmu_page1_fixup()
99 static void parse_driver_options(struct arm_smmu_device *smmu) in parse_driver_options() argument
104 if (of_property_read_bool(smmu->dev->of_node, in parse_driver_options()
106 smmu->options |= arm_smmu_options[i].opt; in parse_driver_options()
107 dev_notice(smmu->dev, "option %s\n", in parse_driver_options()
190 static void queue_poll_init(struct arm_smmu_device *smmu, in queue_poll_init() argument
[all …]
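The arm_smmu_page1_fixup() lines encode the SMMUv3 register layout: offsets past the first 64 KiB register page are rebased onto a separately mapped page1 region, while everything else goes through the base mapping. Here is a self-contained sketch of that remap; the excerpt only shows the two return paths, so the exact boundary test is an assumption, and pointers are replaced by plain addresses so the sketch compiles on its own.

/* Standalone model of the SMMUv3 page1 fixup. */
#include <stdint.h>
#include <stdio.h>

#define SZ_64K 0x10000UL

struct smmu_dev {
    uintptr_t base;    /* mapping of register page 0 */
    uintptr_t page1;   /* separate mapping of register page 1 */
};

static uintptr_t page1_fixup(unsigned long offset, const struct smmu_dev *smmu)
{
    if (offset > SZ_64K)                       /* assumed boundary test */
        return smmu->page1 + offset - SZ_64K;  /* rebase onto page1 */
    return smmu->base + offset;
}

int main(void)
{
    struct smmu_dev smmu = { .base = 0x1000000, .page1 = 0x2000000 };

    printf("%#lx\n", (unsigned long)page1_fixup(0x20, &smmu));          /* page0 */
    printf("%#lx\n", (unsigned long)page1_fixup(SZ_64K + 0xa0, &smmu)); /* page1 */
    return 0;
}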
arm-smmu-v3-sva.c
10 #include "arm-smmu-v3.h"
16 * Check if the CPU ASID is available on the SMMU side. If a private context
25 struct arm_smmu_device *smmu; in arm_smmu_share_asid() local
41 smmu = smmu_domain->smmu; in arm_smmu_share_asid()
44 XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL); in arm_smmu_share_asid()
61 arm_smmu_tlb_inv_asid(smmu, asid); in arm_smmu_share_asid()
158 bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) in arm_smmu_sva_supported() argument
168 if ((smmu->features & feat_mask) != feat_mask) in arm_smmu_sva_supported()
171 if (!(smmu->pgsize_bitmap & PAGE_SIZE)) in arm_smmu_sva_supported()
182 if (smmu->oas < oas) in arm_smmu_sva_supported()
[all …]
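arm_smmu_sva_supported() above is a capability gate: every feature bit in a required mask must be present, the CPU page size must appear in the SMMU's supported page-size bitmap, and the SMMU's output address size must cover what the CPU needs. A standalone sketch of those three checks; the feature bit names and values here are illustrative, not the kernel's.

/* Standalone model of the SVA capability gate. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FEAT_A (1u << 0)        /* illustrative required feature bits */
#define FEAT_B (1u << 1)
#define PAGE_SIZE 4096u

struct smmu_caps {
    uint32_t features;
    uint32_t pgsize_bitmap;     /* one bit per supported page size */
    unsigned int oas;           /* output address size, in bits */
};

static bool sva_supported(const struct smmu_caps *smmu, unsigned int cpu_oas)
{
    const uint32_t feat_mask = FEAT_A | FEAT_B;

    if ((smmu->features & feat_mask) != feat_mask)
        return false;                       /* missing a required feature */
    if (!(smmu->pgsize_bitmap & PAGE_SIZE))
        return false;                       /* CPU page size not supported */
    if (smmu->oas < cpu_oas)
        return false;                       /* cannot cover the CPU's range */
    return true;
}

int main(void)
{
    struct smmu_caps caps = {
        .features = FEAT_A | FEAT_B,
        .pgsize_bitmap = PAGE_SIZE | (1u << 21),
        .oas = 48,
    };

    printf("sva: %d\n", sva_supported(&caps, 48));
    return 0;
}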
Makefile
3 arm_smmu_v3-objs-y += arm-smmu-v3.o
4 arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/iommu/
arm,smmu.yaml
4 $id: http://devicetree.org/schemas/iommu/arm,smmu.yaml#
18 The SMMU may also raise interrupts in response to various fault
26 - description: Qcom SoCs implementing "arm,smmu-v2"
29 - qcom,msm8996-smmu-v2
30 - qcom,msm8998-smmu-v2
31 - qcom,sc7180-smmu-v2
32 - qcom,sdm845-smmu-v2
33 - const: qcom,smmu-v2
38 - qcom,sc7180-smmu-500
39 - qcom,sdm845-smmu-500
[all …]
nvidia,tegra30-smmu.txt
1 NVIDIA Tegra 30 IOMMU H/W, SMMU (System Memory Management Unit)
4 - compatible : "nvidia,tegra30-smmu"
6 of the SMMU register blocks.
10 - nvidia,ahb : phandle to the ahb bus connected to SMMU.
13 smmu {
14 compatible = "nvidia,tegra30-smmu";
arm,smmu-v3.yaml
4 $id: http://devicetree.org/schemas/iommu/arm,smmu-v3.yaml#
23 const: arm,smmu-v3
53 Present if page table walks made by the SMMU are cache coherent with the
56 NOTE: this only applies to the SMMU itself, not masters connected
57 upstream of the SMMU.
63 description: Avoid sending CMD_PREFETCH_* commands to the SMMU.
70 doesn't support SMMU page1 register space.
85 compatible = "arm,smmu-v3";
/kernel/linux/linux-5.10/drivers/memory/tegra/
tegra30.c
43 .smmu = {
57 .smmu = {
71 .smmu = {
85 .smmu = {
99 .smmu = {
113 .smmu = {
127 .smmu = {
141 .smmu = {
155 .smmu = {
169 .smmu = {
[all …]
tegra210.c
19 .smmu = {
33 .smmu = {
47 .smmu = {
61 .smmu = {
75 .smmu = {
89 .smmu = {
103 .smmu = {
117 .smmu = {
131 .smmu = {
145 .smmu = {
[all …]
tegra114.c
22 .smmu = {
36 .smmu = {
50 .smmu = {
64 .smmu = {
78 .smmu = {
92 .smmu = {
106 .smmu = {
120 .smmu = {
134 .smmu = {
148 .smmu = {
[all …]
tegra124.c
22 .smmu = {
36 .smmu = {
50 .smmu = {
64 .smmu = {
78 .smmu = {
92 .smmu = {
106 .smmu = {
120 .smmu = {
134 .smmu = {
148 .smmu = {
[all …]
/kernel/linux/linux-5.10/arch/arm64/boot/dts/marvell/
armada-8040.dtsi
20 <0x0 &smmu 0x480 0x20>,
21 <0x100 &smmu 0x4a0 0x20>,
22 <0x200 &smmu 0x4c0 0x20>;
36 iommus = <&smmu 0x444>;
40 iommus = <&smmu 0x445>;
44 iommus = <&smmu 0x440>;
48 iommus = <&smmu 0x441>;
52 iommus = <&smmu 0x454>;
56 iommus = <&smmu 0x450>;
60 iommus = <&smmu 0x451>;
armada-7040.dtsi
20 <0x0 &smmu 0x480 0x20>,
21 <0x100 &smmu 0x4a0 0x20>,
22 <0x200 &smmu 0x4c0 0x20>;
27 iommus = <&smmu 0x444>;
31 iommus = <&smmu 0x445>;
35 iommus = <&smmu 0x440>;
39 iommus = <&smmu 0x441>;
/kernel/linux/linux-5.10/drivers/acpi/arm64/
iort.c
407 struct acpi_iort_smmu_v3 *smmu; in iort_get_id_mapping_index() local
419 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in iort_get_id_mapping_index()
424 if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv in iort_get_id_mapping_index()
425 && smmu->sync_gsiv) in iort_get_id_mapping_index()
428 if (smmu->id_mapping_index >= node->mapping_count) { in iort_get_id_mapping_index()
434 return smmu->id_mapping_index; in iort_get_id_mapping_index()
527 * as NC (named component) -> SMMU -> ITS. If the type is matched, in iort_node_map_platform_id()
547 * device (such as SMMU, PMCG),its iort node already cached in iort_find_dev_node()
799 struct acpi_iort_smmu_v3 *smmu; in iort_get_msi_resv_iommu() local
801 smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data; in iort_get_msi_resv_iommu()
[all …]
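The iort.c hits show how the ID mapping index of an ACPI IORT SMMUv3 node is validated: when every interrupt (event, PRI, GERR, sync) already has a GSIV there is no MSI to map, and an index outside the node's mapping array is rejected. The standalone sketch below follows the field names in the excerpt; the return conventions and struct layout are assumptions.

/* Standalone model of the IORT SMMUv3 id-mapping-index validation. */
#include <stdint.h>
#include <stdio.h>

struct iort_smmu_v3 {
    uint32_t event_gsiv;
    uint32_t pri_gsiv;
    uint32_t gerr_gsiv;
    uint32_t sync_gsiv;
    uint32_t id_mapping_index;
};

/* Returns the mapping index, or -1 when no MSI mapping is needed or valid. */
static int get_id_mapping_index(const struct iort_smmu_v3 *smmu,
                                uint32_t mapping_count)
{
    if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv &&
        smmu->sync_gsiv)
        return -1;                       /* all interrupts are GSIV-based */

    if (smmu->id_mapping_index >= mapping_count)
        return -1;                       /* index points outside the array */

    return smmu->id_mapping_index;
}

int main(void)
{
    struct iort_smmu_v3 smmu = { .event_gsiv = 39, .id_mapping_index = 1 };

    printf("index: %d\n", get_id_mapping_index(&smmu, 2));
    return 0;
}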
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/display/
arm,komeda.txt
15 devicetree/bindings/iommu/arm,smmu-v3.txt,
49 iommus = <&smmu 0>, <&smmu 1>, <&smmu 2>, <&smmu 3>,
50 <&smmu 4>, <&smmu 5>, <&smmu 6>, <&smmu 7>,
51 <&smmu 8>, <&smmu 9>;
/kernel/linux/linux-5.10/include/linux/
adreno-smmu-priv.h
12 * struct adreno_smmu_priv - private interface between adreno-smmu and GPU
14 * @cookie: An opque token provided by adreno-smmu and passed
21 * The GPU driver (drm/msm) and adreno-smmu work together for controlling
22 * the GPU's SMMU instance. This is by necessity, as the GPU is directly
23 * updating the SMMU for context switches, while on the other hand we do
24 * not want to duplicate all of the initial setup logic from arm-smmu.
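The adreno-smmu-priv.h excerpt only shows the @cookie documentation, but the contract it describes is a cookie-plus-callbacks interface: arm-smmu hands the GPU driver an opaque token, and the GPU driver passes that token back into callbacks instead of reaching into SMMU internals. A self-contained sketch of that contract; the callback names, signatures, and types here are illustrative only and are not the kernel's adreno_smmu_priv layout.

/* Standalone model of a cookie-plus-callbacks private interface: one side
 * hands out an opaque token, the other passes it back into callbacks
 * without ever looking inside it. */
#include <stdio.h>

struct pgtable_cfg {
    unsigned long ttbr;     /* illustrative page-table base */
};

struct gpu_smmu_priv {
    void *cookie;           /* opaque token owned by the SMMU side */
    const struct pgtable_cfg *(*get_cfg)(void *cookie);
    int (*set_cfg)(void *cookie, const struct pgtable_cfg *cfg);
};

/* SMMU-side state hidden behind the cookie. */
struct smmu_ctx {
    struct pgtable_cfg cfg;
};

static const struct pgtable_cfg *smmu_get_cfg(void *cookie)
{
    return &((struct smmu_ctx *)cookie)->cfg;
}

static int smmu_set_cfg(void *cookie, const struct pgtable_cfg *cfg)
{
    ((struct smmu_ctx *)cookie)->cfg = *cfg;
    return 0;
}

int main(void)
{
    struct smmu_ctx ctx = { .cfg = { .ttbr = 0x1000 } };
    struct gpu_smmu_priv priv = {
        .cookie = &ctx,
        .get_cfg = smmu_get_cfg,
        .set_cfg = smmu_set_cfg,
    };

    /* GPU-driver side: use only the cookie and the callbacks. */
    struct pgtable_cfg new_cfg = { .ttbr = 0x2000 };
    priv.set_cfg(priv.cookie, &new_cfg);
    printf("ttbr: %#lx\n", priv.get_cfg(priv.cookie)->ttbr);
    return 0;
}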
/kernel/linux/linux-5.10/include/soc/tegra/
mc.h
43 struct tegra_smmu_enable smmu; member
84 void tegra_smmu_remove(struct tegra_smmu *smmu);
93 static inline void tegra_smmu_remove(struct tegra_smmu *smmu) in tegra_smmu_remove() argument
156 const struct tegra_smmu_soc *smmu; member
167 struct tegra_smmu *smmu; member
/kernel/linux/linux-5.10/drivers/iommu/arm/
Makefile
2 obj-y += arm-smmu/ arm-smmu-v3/
