/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"

#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA resides in the 3rd page of the CSA */
#define AMDGPU_CSA_SDMA_OFFSET (4096 * 2)

/*
 * GPU SDMA IP block helper functions.
 */

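/* Return the SDMA instance that owns @ring, matching either its kernel
 * ring or its page queue ring, or NULL if @ring is not an SDMA ring.
 */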
struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (ring == &adev->sdma.instance[i].ring ||
		    ring == &adev->sdma.instance[i].page)
			return &adev->sdma.instance[i];

	return NULL;
}

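/* Look up the instance index of @ring. Writes the index to *index and
 * returns 0 on success, or -EINVAL if @ring is not an SDMA ring.
 */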
int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (ring == &adev->sdma.instance[i].ring ||
		    ring == &adev->sdma.instance[i].page) {
			*index = i;
			return 0;
		}
	}

	return -EINVAL;
}

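/* Compute the CSA MC address for @ring: the per-instance 64-byte slot
 * inside the SDMA page of the CSA, used for mid-command-buffer preemption
 * state. Returns 0 (preemption disabled) under SR-IOV, for VMID 0, when
 * MCBP is disabled, or when the instance index cannot be resolved.
 */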
uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
				     unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t csa_mc_addr;
	uint32_t index = 0;
	int r;

	/* don't enable OS preemption on SDMA under SRIOV */
	if (amdgpu_sriov_vf(adev) || vmid == 0 || !amdgpu_mcbp)
		return 0;

	r = amdgpu_sdma_get_index_from_ring(ring, &index);

	if (r || index > 31)
		csa_mc_addr = 0;
	else
		csa_mc_addr = amdgpu_csa_vaddr(adev) +
			AMDGPU_CSA_SDMA_OFFSET +
			index * AMDGPU_CSA_SDMA_SIZE;

	return csa_mc_addr;
}

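/* Late init for SDMA RAS: allocate the RAS context on first use, register
 * it with the RAS framework, and enable the ECC interrupt on every SDMA
 * instance. The context is freed again if RAS is unsupported or a step
 * fails.
 */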
int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
			      void *ras_ih_info)
{
	int r, i;
	struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info;
	struct ras_fs_if fs_info = {
		.sysfs_name = "sdma_err_count",
	};

	if (!ih_info)
		return -EINVAL;

	if (!adev->sdma.ras_if) {
		adev->sdma.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->sdma.ras_if)
			return -ENOMEM;
		adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA;
		adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->sdma.ras_if->sub_block_index = 0;
		strcpy(adev->sdma.ras_if->name, "sdma");
	}
	fs_info.head = ih_info->head = *adev->sdma.ras_if;

	r = amdgpu_ras_late_init(adev, adev->sdma.ras_if,
				 &fs_info, ih_info);
	if (r)
		goto free;

	if (amdgpu_ras_is_supported(adev, adev->sdma.ras_if->block)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
					   AMDGPU_SDMA_IRQ_INSTANCE0 + i);
			if (r)
				goto late_fini;
		}
	} else {
		r = 0;
		goto free;
	}

	return 0;

late_fini:
	amdgpu_ras_late_fini(adev, adev->sdma.ras_if, ih_info);
free:
	kfree(adev->sdma.ras_if);
	adev->sdma.ras_if = NULL;
	return r;
}

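/* Tear down SDMA RAS: unregister the RAS context from the framework and
 * free it.
 */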
void amdgpu_sdma_ras_fini(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
	    adev->sdma.ras_if) {
		struct ras_common_if *ras_if = adev->sdma.ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
			/* the cb member is not used by
			 * amdgpu_ras_interrupt_remove_handler; initialize it
			 * only to pass the check in amdgpu_ras_late_fini
			 */
			.cb = amdgpu_sdma_process_ras_data_cb,
		};

		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
		kfree(ras_if);
	}
}

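/* RAS error callback for SDMA: flag the SRAM ECC error to KFD and trigger
 * a GPU reset to recover from the uncorrectable error.
 */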
int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
				    void *err_data,
				    struct amdgpu_iv_entry *entry)
{
	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	amdgpu_ras_reset_gpu(adev);

	return AMDGPU_RAS_SUCCESS;
}

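/* ECC interrupt handler for SDMA: forward the IV entry to the RAS
 * framework, which dispatches it to the registered handler.
 */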
int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->sdma.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}