/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"

struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

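/* Recover the wrapped amdgpu_device from the opaque cgs_device handle. */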
#define CGS_FUNC_ADEV							\
	struct amdgpu_device *adev =					\
		((struct amdgpu_cgs_device *)cgs_device)->adev

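/*
 * Allocate a buffer object in the requested domain (VRAM or GTT), restricted
 * to the [min_offset, max_offset] range, and return the amdgpu_bo pointer
 * cast to an opaque cgs_handle_t.
 */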
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint16_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;
	struct ttm_placement placement;
	struct ttm_place place;

	if (WARN_ON(min_offset > max_offset))
		return -EINVAL;

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (max_offset > adev->mc.real_vram_size)
			return -EINVAL;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			place.fpfn =
				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
			place.lpfn =
				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM;
		}
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
			TTM_PL_FLAG_UNCACHED;
		break;
	default:
		return -EINVAL;
	}

	*handle = 0;

	placement.placement = &place;
	placement.num_placement = 1;
	placement.busy_placement = &place;
	placement.num_busy_placement = 1;

	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
					  true, domain, flags,
					  NULL, &placement, NULL,
					  0, &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}

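/* Unmap, unpin and drop the reference on a buffer allocated above. */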
static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, true);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

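/*
 * Pin the buffer in its preferred domain and return the GPU (MC) address
 * through @mcaddr; the restriction range is taken from the single placement
 * set up at allocation time.
 */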
static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	u64 min_offset, max_offset;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	/* shift in 64 bits so large page frame numbers do not overflow */
	min_offset = (u64)obj->placements[0].fpfn << PAGE_SHIFT;
	max_offset = (u64)obj->placements[0].lpfn << PAGE_SHIFT;

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains,
				     min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

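/* Direct MMIO register accessors. */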
static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}

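/*
 * Indirect register accessors: dispatch to the per-space indexed read/write
 * helpers (MMIO index/data, PCIE, SMC, UVD context, DIDT, CAC).
 */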
static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG_GC_CAC:
		return RREG32_GC_CAC(index);
	case CGS_IND_REG_SE_CAC:
		return RREG32_SE_CAC(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG_GC_CAC:
		return WREG32_GC_CAC(index, value);
	case CGS_IND_REG_SE_CAC:
		return WREG32_SE_CAC(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}

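/*
 * Report the bus address of a PCI BAR-backed resource; only the MMIO
 * register aperture and the doorbell aperture are supported.
 */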
static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
				       enum cgs_resource_type resource_type,
				       uint64_t size,
				       uint64_t offset,
				       uint64_t *resource_base)
{
	CGS_FUNC_ADEV;

	if (resource_base == NULL)
		return -EINVAL;

	switch (resource_type) {
	case CGS_RESOURCE_TYPE_MMIO:
		if (adev->rmmio_size == 0)
			return -ENOENT;
		if ((offset + size) > adev->rmmio_size)
			return -EINVAL;
		*resource_base = adev->rmmio_base;
		return 0;
	case CGS_RESOURCE_TYPE_DOORBELL:
		if (adev->doorbell.size == 0)
			return -ENOENT;
		if ((offset + size) > adev->doorbell.size)
			return -EINVAL;
		*resource_base = adev->doorbell.base;
		return 0;
	case CGS_RESOURCE_TYPE_FB:
	case CGS_RESOURCE_TYPE_IO:
	case CGS_RESOURCE_TYPE_ROM:
	default:
		return -EINVAL;
	}
}

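/* AtomBIOS helpers: look up data tables and execute command tables. */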
static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}

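/*
 * Per-source state that bridges amdgpu's IRQ framework to the CGS client's
 * set/handler callbacks; stored in amdgpu_irq_src::data.
 */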
struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};

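/*
 * Register a new interrupt source with amdgpu on behalf of a CGS client,
 * wiring the client's callbacks in through cgs_irq_funcs.
 */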
static int amdgpu_cgs_add_irq_source(void *cgs_device,
				     unsigned client_id,
				     unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);

	if (!source)
		return -ENOMEM;

	irq_params = kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, client_id, src_id, source);
	if (ret) {
		kfree(irq_params);
		kfree(source);
	}

	return ret;
}

static int amdgpu_cgs_irq_get(void *cgs_device, unsigned client_id,
			      unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;

	if (!adev->irq.client[client_id].sources)
		return -EINVAL;

	return amdgpu_irq_get(adev, adev->irq.client[client_id].sources[src_id], type);
}

static int amdgpu_cgs_irq_put(void *cgs_device, unsigned client_id,
			      unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;

	if (!adev->irq.client[client_id].sources)
		return -EINVAL;

	return amdgpu_irq_put(adev, adev->irq.client[client_id].sources[src_id], type);
}

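/*
 * Forward clock- and powergating requests to the matching IP block's
 * amd_ip_funcs callback.
 */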
static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
				  enum amd_ip_block_type block_type,
				  enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}

static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
				  enum amd_ip_block_type block_type,
				  enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_powergating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}

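/* Map a CGS ucode id onto the corresponding AMDGPU_UCODE_ID. */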
static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		/* for VI, JT2 should be the same as JT1, because:
		 * 1. MEC2 and MEC1 use exactly the same FW.
		 * 2. JT2 is not patched but JT1 is.
		 */
		if (adev->asic_type >= CHIP_TOPAZ)
			result = AMDGPU_UCODE_ID_CP_MEC1;
		else
			result = AMDGPU_UCODE_ID_CP_MEC2;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	case CGS_UCODE_ID_STORAGE:
		result = AMDGPU_UCODE_ID_STORAGE;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}

static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;

	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
		return 0;
	}
	/* cannot release other firmware because it was not created by cgs */
	return -EINVAL;
}

static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
					enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;
	uint16_t fw_version = 0;

	switch (type) {
	case CGS_UCODE_ID_SDMA0:
		fw_version = adev->sdma.instance[0].fw_version;
		break;
	case CGS_UCODE_ID_SDMA1:
		fw_version = adev->sdma.instance[1].fw_version;
		break;
	case CGS_UCODE_ID_CP_CE:
		fw_version = adev->gfx.ce_fw_version;
		break;
	case CGS_UCODE_ID_CP_PFP:
		fw_version = adev->gfx.pfp_fw_version;
		break;
	case CGS_UCODE_ID_CP_ME:
		fw_version = adev->gfx.me_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
	case CGS_UCODE_ID_CP_MEC_JT2:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_RLC_G:
		fw_version = adev->gfx.rlc_fw_version;
		break;
	case CGS_UCODE_ID_STORAGE:
		break;
	default:
		DRM_ERROR("firmware type %d does not have a version\n", type);
		break;
	}
	return fw_version;
}

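/* Enter/exit RLC safe mode, if the ASIC's RLC functions implement it. */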
static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
					bool en)
{
	CGS_FUNC_ADEV;

	if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
	    adev->gfx.rlc.funcs->exit_safe_mode == NULL)
		return 0;

	if (en)
		adev->gfx.rlc.funcs->enter_safe_mode(adev);
	else
		adev->gfx.rlc.funcs->exit_safe_mode(adev);

	return 0;
}

static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device,
					bool lock)
{
	CGS_FUNC_ADEV;

	if (lock)
		mutex_lock(&adev->grbm_idx_mutex);
	else
		mutex_unlock(&adev->grbm_idx_mutex);
}

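/*
 * Fill in location, size and version info for a firmware image.  For non-SMU
 * ucode the image must already be loaded; for SMU ucode this also picks the
 * per-ASIC firmware file name and requests it from userspace on first use.
 */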
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
			data_size = le32_to_cpu(header->jt_size) << 2;
		}

		info->kptr = ucode->kaddr;
		info->image_size = data_size;
		info->mc_addr = gpu_addr;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);

		if (CGS_UCODE_ID_CP_MEC == type)
			info->image_size = le32_to_cpu(header->jt_offset) << 2;

		info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;
		const struct common_firmware_header *header;
		struct amdgpu_firmware_info *ucode = NULL;

		if (!adev->pm.fw) {
			switch (adev->asic_type) {
			case CHIP_TOPAZ:
				if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
				} else
					strcpy(fw_name, "amdgpu/topaz_smc.bin");
				break;
			case CHIP_TONGA:
				if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
				    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
				} else
					strcpy(fw_name, "amdgpu/tonga_smc.bin");
				break;
			case CHIP_FIJI:
				strcpy(fw_name, "amdgpu/fiji_smc.bin");
				break;
			case CHIP_POLARIS11:
				if (type == CGS_UCODE_ID_SMU) {
					if (((adev->pdev->device == 0x67ef) &&
					     ((adev->pdev->revision == 0xe0) ||
					      (adev->pdev->revision == 0xe2) ||
					      (adev->pdev->revision == 0xe5))) ||
					    ((adev->pdev->device == 0x67ff) &&
					     ((adev->pdev->revision == 0xcf) ||
					      (adev->pdev->revision == 0xef) ||
					      (adev->pdev->revision == 0xff)))) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
					} else
						strcpy(fw_name, "amdgpu/polaris11_smc.bin");
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS10:
				if (type == CGS_UCODE_ID_SMU) {
					if (((adev->pdev->device == 0x67df) &&
					     ((adev->pdev->revision == 0xe0) ||
					      (adev->pdev->revision == 0xe3) ||
					      (adev->pdev->revision == 0xe4) ||
					      (adev->pdev->revision == 0xe5) ||
					      (adev->pdev->revision == 0xe7) ||
					      (adev->pdev->revision == 0xef))) ||
					    ((adev->pdev->device == 0x6fdf) &&
					     ((adev->pdev->revision == 0xef) ||
					      (adev->pdev->revision == 0xff)))) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
					} else
						strcpy(fw_name, "amdgpu/polaris10_smc.bin");
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS12:
				strcpy(fw_name, "amdgpu/polaris12_smc.bin");
				break;
			case CHIP_VEGA10:
				if ((adev->pdev->device == 0x687f) &&
				    ((adev->pdev->revision == 0xc0) ||
				     (adev->pdev->revision == 0xc1) ||
				     (adev->pdev->revision == 0xc3)))
					strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
				else
					strcpy(fw_name, "amdgpu/vega10_smc.bin");
				break;
			default:
				DRM_ERROR("SMC firmware not supported\n");
				return -EINVAL;
			}

			err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
			if (err) {
				DRM_ERROR("Failed to request firmware\n");
				return err;
			}

			err = amdgpu_ucode_validate(adev->pm.fw);
			if (err) {
				DRM_ERROR("Failed to load firmware \"%s\"\n", fw_name);
				release_firmware(adev->pm.fw);
				adev->pm.fw = NULL;
				return err;
			}

			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
				ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
				ucode->fw = adev->pm.fw;
				header = (const struct common_firmware_header *)ucode->fw->data;
				adev->firmware.fw_size +=
					ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
			}
		}

		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(&hdr->header);
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
		       le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->ucode_start_address = ucode_start_address;
		info->kptr = (void *)src;
	}
	return 0;
}

static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
{
	CGS_FUNC_ADEV;
	return amdgpu_sriov_vf(adev);
}

static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
					struct cgs_system_info *sys_info)
{
	CGS_FUNC_ADEV;

	if (NULL == sys_info)
		return -ENODEV;

	if (sizeof(struct cgs_system_info) != sys_info->size)
		return -ENODEV;

	switch (sys_info->info_id) {
	case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
		sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
		break;
	case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
		sys_info->value = adev->pm.pcie_gen_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_MLW:
		sys_info->value = adev->pm.pcie_mlw_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_DEV:
		sys_info->value = adev->pdev->device;
		break;
	case CGS_SYSTEM_INFO_PCIE_REV:
		sys_info->value = adev->pdev->revision;
		break;
	case CGS_SYSTEM_INFO_CG_FLAGS:
		sys_info->value = adev->cg_flags;
		break;
	case CGS_SYSTEM_INFO_PG_FLAGS:
		sys_info->value = adev->pg_flags;
		break;
	case CGS_SYSTEM_INFO_GFX_CU_INFO:
		sys_info->value = adev->gfx.cu_info.number;
		break;
	case CGS_SYSTEM_INFO_GFX_SE_INFO:
		sys_info->value = adev->gfx.config.max_shader_engines;
		break;
	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
		sys_info->value = adev->pdev->subsystem_device;
		break;
	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
		sys_info->value = adev->pdev->subsystem_vendor;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}

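/*
 * Report the active CRTC mask and, for the first enabled CRTC with a valid
 * mode, the vblank timing and refresh rate used by the power management code.
 */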
static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
					  struct cgs_display_info *info)
{
	CGS_FUNC_ADEV;
	struct amdgpu_crtc *amdgpu_crtc;
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	uint32_t line_time_us, vblank_lines;
	struct cgs_mode_info *mode_info;

	if (info == NULL)
		return -EINVAL;

	mode_info = info->mode_info;
	if (mode_info) {
		/* if the displays are off, vblank time is max */
		mode_info->vblank_time_us = 0xffffffff;
		/* always set the reference clock */
		mode_info->ref_clock = adev->clock.spll.reference_freq;
	}

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				&ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled) {
				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
				info->display_count++;
			}
			if (mode_info != NULL &&
				crtc->enabled && amdgpu_crtc->enabled &&
				amdgpu_crtc->hw_mode.clock) {
				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
							amdgpu_crtc->hw_mode.clock;
				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
							amdgpu_crtc->hw_mode.crtc_vdisplay +
							(amdgpu_crtc->v_border * 2);
				mode_info->vblank_time_us = vblank_lines * line_time_us;
				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				mode_info->ref_clock = adev->clock.spll.reference_freq;
				mode_info = NULL;
			}
		}
	}

	return 0;
}

static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
{
	CGS_FUNC_ADEV;

	adev->pm.dpm_enabled = enabled;

	return 0;
}

/** \brief Evaluate an ACPI namespace object; the handle or pathname must be valid.
 *  \param cgs_device
 *  \param info input/output arguments for the control method
 *  \return status
 */
#if defined(CONFIG_ACPI)
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				    struct cgs_acpi_method_info *info)
{
	CGS_FUNC_ADEV;
	acpi_handle handle;
	struct acpi_object_list input;
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *params, *obj;
	uint8_t name[5] = {'\0'};
	struct cgs_acpi_method_argument *argument;
	uint32_t i, count;
	acpi_status status;
	int result;

	handle = ACPI_HANDLE(&adev->pdev->dev);
	if (!handle)
		return -ENODEV;

	memset(&input, 0, sizeof(struct acpi_object_list));

	/* validate input info */
	if (info->size != sizeof(struct cgs_acpi_method_info))
		return -EINVAL;

	input.count = info->input_count;
	if (info->input_count > 0) {
		if (info->pinput_argument == NULL)
			return -EINVAL;
		argument = info->pinput_argument;
		for (i = 0; i < info->input_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER)) &&
			    (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	if (info->output_count > 0) {
		if (info->poutput_argument == NULL)
			return -EINVAL;
		argument = info->poutput_argument;
		for (i = 0; i < info->output_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER)) &&
			    (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	/* The path name passed to acpi_evaluate_object should be null terminated */
	if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
		strncpy(name, (char *)&(info->name), sizeof(uint32_t));
		name[4] = '\0';
	}

	/* parse input parameters */
	if (input.count > 0) {
		input.pointer = params =
				kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
		if (params == NULL)
			return -EINVAL;

		argument = info->pinput_argument;

		for (i = 0; i < input.count; i++) {
			params->type = argument->type;
			switch (params->type) {
			case ACPI_TYPE_INTEGER:
				params->integer.value = argument->value;
				break;
			case ACPI_TYPE_STRING:
				params->string.length = argument->data_length;
				params->string.pointer = argument->pointer;
				break;
			case ACPI_TYPE_BUFFER:
				params->buffer.length = argument->data_length;
				params->buffer.pointer = argument->pointer;
				break;
			default:
				break;
			}
			params++;
			argument++;
		}
	}

	/* parse output info */
	count = info->output_count;
	argument = info->poutput_argument;

	/* evaluate the acpi method */
	status = acpi_evaluate_object(handle, name, &input, &output);

	if (ACPI_FAILURE(status)) {
		result = -EIO;
		goto free_input;
	}

	/* return the output info */
	obj = output.pointer;

	if (count > 1) {
		if ((obj->type != ACPI_TYPE_PACKAGE) ||
			(obj->package.count != count)) {
			result = -EIO;
			goto free_obj;
		}
		params = obj->package.elements;
	} else
		params = obj;

	if (params == NULL) {
		result = -EIO;
		goto free_obj;
	}

	for (i = 0; i < count; i++) {
		if (argument->type != params->type) {
			result = -EIO;
			goto free_obj;
		}
		switch (params->type) {
		case ACPI_TYPE_INTEGER:
			argument->value = params->integer.value;
			break;
		case ACPI_TYPE_STRING:
			if ((params->string.length != argument->data_length) ||
				(params->string.pointer == NULL)) {
				result = -EIO;
				goto free_obj;
			}
			strncpy(argument->pointer,
				params->string.pointer,
				params->string.length);
			break;
		case ACPI_TYPE_BUFFER:
			if (params->buffer.pointer == NULL) {
				result = -EIO;
				goto free_obj;
			}
			memcpy(argument->pointer,
				params->buffer.pointer,
				argument->data_length);
			break;
		default:
			break;
		}
		argument++;
		params++;
	}

	result = 0;
free_obj:
	kfree(obj);
free_input:
	kfree((void *)input.pointer);
	return result;
}
#else
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				struct cgs_acpi_method_info *info)
{
	return -EIO;
}
#endif

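/*
 * Convenience wrapper around amdgpu_cgs_acpi_eval_object(): evaluates
 * @acpi_method with an integer function code plus an input buffer, and
 * an optional output buffer.
 */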
static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
					uint32_t acpi_method,
					uint32_t acpi_function,
					void *pinput, void *poutput,
					uint32_t output_count,
					uint32_t input_size,
					uint32_t output_size)
{
	struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
	struct cgs_acpi_method_argument acpi_output = {0};
	struct cgs_acpi_method_info info = {0};

	acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
	acpi_input[0].data_length = sizeof(uint32_t);
	acpi_input[0].value = acpi_function;

	acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
	acpi_input[1].data_length = input_size;
	acpi_input[1].pointer = pinput;

	acpi_output.type = CGS_ACPI_TYPE_BUFFER;
	acpi_output.data_length = output_size;
	acpi_output.pointer = poutput;

	info.size = sizeof(struct cgs_acpi_method_info);
	info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
	info.input_count = 2;
	info.name = acpi_method;
	info.pinput_argument = acpi_input;
	info.output_count = output_count;
	info.poutput_argument = &acpi_output;

	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
}

static const struct cgs_ops amdgpu_cgs_ops = {
	.alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
	.free_gpu_mem = amdgpu_cgs_free_gpu_mem,
	.gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem,
	.gunmap_gpu_mem = amdgpu_cgs_gunmap_gpu_mem,
	.kmap_gpu_mem = amdgpu_cgs_kmap_gpu_mem,
	.kunmap_gpu_mem = amdgpu_cgs_kunmap_gpu_mem,
	.read_register = amdgpu_cgs_read_register,
	.write_register = amdgpu_cgs_write_register,
	.read_ind_register = amdgpu_cgs_read_ind_register,
	.write_ind_register = amdgpu_cgs_write_ind_register,
	.get_pci_resource = amdgpu_cgs_get_pci_resource,
	.atom_get_data_table = amdgpu_cgs_atom_get_data_table,
	.atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
	.atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
	.get_firmware_info = amdgpu_cgs_get_firmware_info,
	.rel_firmware = amdgpu_cgs_rel_firmware,
	.set_powergating_state = amdgpu_cgs_set_powergating_state,
	.set_clockgating_state = amdgpu_cgs_set_clockgating_state,
	.get_active_displays_info = amdgpu_cgs_get_active_displays_info,
	.notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
	.call_acpi_method = amdgpu_cgs_call_acpi_method,
	.query_system_info = amdgpu_cgs_query_system_info,
	.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
	.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	.add_irq_source = amdgpu_cgs_add_irq_source,
	.irq_get = amdgpu_cgs_irq_get,
	.irq_put = amdgpu_cgs_irq_put
};

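/*
 * A minimal usage sketch (hypothetical caller; CGS clients such as powerplay
 * normally go through the cgs_* wrapper macros from cgs_common.h rather than
 * calling the ops table directly):
 *
 *	struct cgs_device *cgs_dev = amdgpu_cgs_create_device(adev);
 *	cgs_handle_t handle;
 *	uint64_t mc_addr;
 *
 *	cgs_dev->ops->alloc_gpu_mem(cgs_dev, CGS_GPU_MEM_TYPE__GART_CACHEABLE,
 *				    PAGE_SIZE, PAGE_SIZE, 0, 0, &handle);
 *	cgs_dev->ops->gmap_gpu_mem(cgs_dev, handle, &mc_addr);
 *	...
 *	cgs_dev->ops->gunmap_gpu_mem(cgs_dev, handle);
 *	cgs_dev->ops->free_gpu_mem(cgs_dev, handle);
 *	amdgpu_cgs_destroy_device(cgs_dev);
 */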
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return (struct cgs_device *)cgs_device;
}

void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
	kfree(cgs_device);
}