/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"

/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)

#define GFX_OFF_NO_DELAY 0

/*
 * GFX IP block helper functions.
 */

int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
				int pipe, int queue)
{
	int bit = 0;

	bit += mec * adev->gfx.mec.num_pipe_per_mec
		* adev->gfx.mec.num_queue_per_pipe;
	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
	bit += queue;

	return bit;
}

void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
					int *mec, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
		% adev->gfx.mec.num_pipe_per_mec;
	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
	       / adev->gfx.mec.num_pipe_per_mec;
}
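
/*
 * Illustration (hypothetical topology, not queried from any real ASIC):
 * with num_pipe_per_mec = 4 and num_queue_per_pipe = 8, the triple
 * (mec 1, pipe 2, queue 3) encodes to bit = 1 * 4 * 8 + 2 * 8 + 3 = 43,
 * and the div/mod arithmetic above decodes bit 43 back to the same triple.
 */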

bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
				     int mec, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
			adev->gfx.mec.queue_bitmap);
}

int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
			       int me, int pipe, int queue)
{
	int bit = 0;

	bit += me * adev->gfx.me.num_pipe_per_me
		* adev->gfx.me.num_queue_per_pipe;
	bit += pipe * adev->gfx.me.num_queue_per_pipe;
	bit += queue;

	return bit;
}

void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
				int *me, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.me.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.me.num_queue_per_pipe)
		% adev->gfx.me.num_pipe_per_me;
	*me = (bit / adev->gfx.me.num_queue_per_pipe)
		/ adev->gfx.me.num_pipe_per_me;
}

bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
				    int me, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
			adev->gfx.me.queue_bitmap);
}

/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader-array disable masks will be stored
 * @max_se: number of shader engines (SEs)
 * @max_sh: number of shader arrays (SHs) per SE
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
 */
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
{
	unsigned se, sh, cu;
	const char *p;

	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
		return;

	p = amdgpu_disable_cu;
	for (;;) {
		char *next;
		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
		if (ret < 3) {
			DRM_ERROR("amdgpu: could not parse disable_cu\n");
			return;
		}

		if (se < max_se && sh < max_sh && cu < 16) {
			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
			mask[se * max_sh + sh] |= 1u << cu;
		} else {
			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
				  se, sh, cu);
		}

		next = strchr(p, ',');
		if (!next)
			break;
		p = next + 1;
	}
}
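
/*
 * For illustration only: the parser above expects a comma-separated list of
 * se.sh.cu triples, so a hypothetical kernel command line such as
 * "amdgpu.disable_cu=1.0.3,1.0.4" would disable CUs 3 and 4 in shader array 0
 * of shader engine 1 (the exact CU numbering depends on the ASIC).
 */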

static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
{
	return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
}

static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
{
	if (amdgpu_compute_multipipe != -1) {
		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
			 amdgpu_compute_multipipe);
		return amdgpu_compute_multipipe == 1;
	}

	if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
		return true;

	/* FIXME: spreading the queues across pipes causes perf regressions
	 * on POLARIS11 compute workloads */
	if (adev->asic_type == CHIP_POLARIS11)
		return false;

	return adev->gfx.mec.num_mec > 1;
}

bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
						struct amdgpu_ring *ring)
{
	int queue = ring->queue;
	int pipe = ring->pipe;

	/* Policy: use pipe1 queue0 as high priority graphics queue if we
	 * have more than one gfx pipe.
	 */
	if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
	    adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
		int me = ring->me;
		int bit;

		bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
		if (ring == &adev->gfx.gfx_ring[bit])
			return true;
	}

	return false;
}

bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
					       struct amdgpu_ring *ring)
{
	/* Policy: use 1st queue as high priority compute queue if we
	 * have more than one compute queue.
	 */
	if (adev->gfx.num_compute_rings > 1 &&
	    ring == &adev->gfx.compute_ring[0])
		return true;

	return false;
}

void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
	int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
				     adev->gfx.mec.num_queue_per_pipe,
				     adev->gfx.num_compute_rings);

	if (multipipe_policy) {
		/* policy: spread the queues evenly across all pipes on MEC1 only */
		for (i = 0; i < max_queues_per_mec; i++) {
			pipe = i % adev->gfx.mec.num_pipe_per_mec;
			queue = (i / adev->gfx.mec.num_pipe_per_mec) %
				adev->gfx.mec.num_queue_per_pipe;

			set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
				adev->gfx.mec.queue_bitmap);
		}
	} else {
		/* policy: amdgpu owns all queues in the given pipe */
		for (i = 0; i < max_queues_per_mec; ++i)
			set_bit(i, adev->gfx.mec.queue_bitmap);
	}

	dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
}
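
/*
 * Worked example (hypothetical topology): with num_pipe_per_mec = 4,
 * num_queue_per_pipe = 8 and num_compute_rings = 8, the multipipe policy
 * above sets bits 0, 8, 16, 24 (queue 0 of pipes 0-3) followed by bits
 * 1, 9, 17, 25 (queue 1 of pipes 0-3), i.e. the queues are interleaved
 * across the pipes of MEC1 rather than packed into a single pipe.
 */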

void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
	int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
					adev->gfx.me.num_queue_per_pipe;

	if (multipipe_policy) {
		/* policy: amdgpu owns the first queue per pipe at this stage;
		 * this will be extended to multiple queues per pipe later */
		for (i = 0; i < max_queues_per_me; i++) {
			pipe = i % adev->gfx.me.num_pipe_per_me;
			queue = (i / adev->gfx.me.num_pipe_per_me) %
				adev->gfx.me.num_queue_per_pipe;

			set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
				adev->gfx.me.queue_bitmap);
		}
	} else {
		for (i = 0; i < max_queues_per_me; ++i)
			set_bit(i, adev->gfx.me.queue_bitmap);
	}

	/* update the number of active graphics rings */
	adev->gfx.num_gfx_rings =
		bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}

static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring)
{
	int queue_bit;
	int mec, pipe, queue;

	queue_bit = adev->gfx.mec.num_mec
		    * adev->gfx.mec.num_pipe_per_mec
		    * adev->gfx.mec.num_queue_per_pipe;

	while (--queue_bit >= 0) {
		if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
			continue;

		amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

		/*
		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
		 *    can only be issued on queue 0.
		 */
		if ((mec == 1 && pipe > 1) || queue != 0)
			continue;

		ring->me = mec + 1;
		ring->pipe = pipe;
		ring->queue = queue;

		return 0;
	}

	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
	return -EINVAL;
}
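
/*
 * Example of the search above (hypothetical 2 MEC x 4 pipe x 8 queue layout):
 * the scan starts at the highest bit and walks down, skipping queues already
 * claimed for KCQs, any queue id other than 0, and pipes 2/3 of the second
 * MEC. With the default KCQ policy, which only claims MEC1 queues, the KIQ
 * therefore typically ends up on MEC2, pipe 1, queue 0.
 */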

int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring,
			     struct amdgpu_irq_src *irq)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	int r = 0;

	spin_lock_init(&kiq->ring_lock);

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.kiq;

	r = amdgpu_gfx_kiq_acquire(adev, ring);
	if (r)
		return r;

	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	ring->no_scheduler = true;
	sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
	amdgpu_ring_fini(ring);
}

void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}

int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned hpd_size)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, hpd_size);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}

/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
			   unsigned mqd_size)
{
	struct amdgpu_ring *ring = NULL;
	int r, i;

	/* create MQD for KIQ */
	ring = &adev->gfx.kiq.ring;
	if (!adev->enable_mes_kiq && !ring->mqd_obj) {
		/* Originally the KIQ MQD was placed in the GTT domain, but for SRIOV
		 * the VRAM domain is a must; otherwise the hypervisor's SAVE_VF fails
		 * after the driver is unloaded, because by then the MQD has been
		 * deallocated and gart-unbound. To avoid that divergence, use the
		 * VRAM domain for the KIQ MQD on both SRIOV and bare metal.
		 */
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
	}

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		/* create MQD for each KGQ */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (!ring->mqd_obj) {
				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
							    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
				if (r) {
					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
					return r;
				}

				/* prepare MQD backup */
				adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
				if (!adev->gfx.me.mqd_backup[i])
					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
			}
		}
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
				return r;
			}

			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[i])
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
		}
	}

	return 0;
}

void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int i;

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			kfree(adev->gfx.me.mqd_backup[i]);
			amdgpu_bo_free_kernel(&ring->mqd_obj,
					      &ring->mqd_gpu_addr,
					      &ring->mqd_ptr);
		}
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		kfree(adev->gfx.mec.mqd_backup[i]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &adev->gfx.kiq.ring;
	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}

int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i, r = 0;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock(&adev->gfx.kiq.ring_lock);
	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
					adev->gfx.num_compute_rings)) {
		spin_unlock(&adev->gfx.kiq.ring_lock);
		return -ENOMEM;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
					   RESET_QUEUES, 0, 0);

	if (adev->gfx.kiq.ring.sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);

	return r;
}

int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					      int queue_bit)
{
	int mec, pipe, queue;
	int set_resource_bit = 0;

	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

	set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;

	return set_resource_bit;
}
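
/*
 * For illustration: the hard-coded 4 * 8 above mirrors the per-MEC layout
 * (4 pipes x 8 queues) expected by the KIQ SET_RESOURCES queue mask, so a
 * driver queue bit that decodes to, e.g., mec 1 / pipe 2 / queue 3 becomes
 * resource bit 1 * 32 + 2 * 8 + 3 = 51. When the driver topology matches
 * that layout the two numberings coincide; the conversion only matters
 * when they differ.
 */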

int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	uint64_t queue_mask = 0;
	int r, i;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
		return -EINVAL;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i >= (sizeof(queue_mask) * 8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
	}

	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
		 kiq_ring->queue);
	spin_lock(&adev->gfx.kiq.ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
					adev->gfx.num_compute_rings +
					kiq->pmf->set_resources_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		return r;
	}

	if (adev->enable_mes)
		queue_mask = ~0ULL;

	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);

	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);
	if (r)
		DRM_ERROR("KCQ enable failed\n");

	return r;
}

/* amdgpu_gfx_off_ctrl - Handle GFXOFF feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the GFXOFF feature, false to disable it
 *
 * 1. The GFXOFF feature is enabled by the GFX IP after GFX CG/PG is enabled.
 * 2. Other clients can send a request to disable GFXOFF; such requests should
 *    be honored.
 * 3. Other clients can cancel their request to disable GFXOFF.
 * 4. Other clients should not request to enable GFXOFF before having
 *    requested to disable it.
 */

void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	unsigned long delay = GFX_OFF_DELAY_ENABLE;

	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (enable) {
		/* If the count is already 0, it means there's an imbalance bug somewhere.
		 * Note that the bug may be in a different caller than the one which triggers the
		 * WARN_ON_ONCE.
		 */
		if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
			goto unlock;

		adev->gfx.gfx_off_req_count--;

		if (adev->gfx.gfx_off_req_count == 0 &&
		    !adev->gfx.gfx_off_state) {
			/* If going to s2idle, no need to wait */
			if (adev->in_s0ix) {
				if (!amdgpu_dpm_set_powergating_by_smu(adev,
						AMD_IP_BLOCK_TYPE_GFX, true))
					adev->gfx.gfx_off_state = true;
			} else {
				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
						      delay);
			}
		}
	} else {
		if (adev->gfx.gfx_off_req_count == 0) {
			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);

			if (adev->gfx.gfx_off_state &&
			    !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
				adev->gfx.gfx_off_state = false;

				if (adev->gfx.funcs->init_spm_golden) {
					dev_dbg(adev->dev,
						"GFXOFF is disabled, re-init SPM golden settings\n");
					amdgpu_gfx_init_spm_golden(adev);
				}
			}
		}

		adev->gfx.gfx_off_req_count++;
	}

unlock:
	mutex_unlock(&adev->gfx.gfx_off_mutex);
}
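
/*
 * Typical usage (sketch, not tied to a specific caller): code that needs the
 * GFX block to stay powered, e.g. around direct register access, brackets
 * that access with a matched pair of calls:
 *
 *	amdgpu_gfx_off_ctrl(adev, false);	<- request GFXOFF to stay disabled
 *	... touch GFX registers ...
 *	amdgpu_gfx_off_ctrl(adev, true);	<- drop the request again
 *
 * Requests are reference counted through gfx_off_req_count, so nested pairs
 * are fine as long as every disable is eventually balanced by an enable.
 */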

int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_set_residency_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_residency_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_status_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		if (!amdgpu_persistent_edc_harvesting_supported(adev))
			amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);

		r = amdgpu_ras_block_late_init(adev, ras_block);
		if (r)
			return r;

		if (adev->gfx.cp_ecc_error_irq.funcs) {
			r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
			if (r)
				goto late_fini;
		}
	} else {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
	}

	return 0;
late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry)
{
	/* TODO: a UE will trigger an interrupt.
	 *
	 * When "Full RAS" is enabled, the per-IP interrupt sources should
	 * be disabled and the driver should only look for the aggregated
	 * interrupt via sync flood.
	 */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
		if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
		    adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
			adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
		amdgpu_ras_reset_gpu(adev);
	}
	return AMDGPU_RAS_SUCCESS;
}

int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gfx.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	DRM_ERROR("CP ECC ERROR IRQ\n");
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq, reg_val_offs = 0, value = 0;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (adev->mes.ring.sched.ready)
		return amdgpu_mes_rreg(adev, reg);

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
		pr_err("critical bug! too many kiq readers\n");
		goto failed_unlock;
	}
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't wait any longer in the GPU reset case, because that may
	 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_rreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * Also don't wait any longer when in IRQ context.
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_read;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	mb();
	value = adev->wb.wb[reg_val_offs];
	amdgpu_device_wb_free(adev, reg_val_offs);
	return value;

failed_undo:
	amdgpu_ring_undo(ring);
failed_unlock:
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
	if (reg_val_offs)
		amdgpu_device_wb_free(adev, reg_val_offs);
	dev_err(adev->dev, "failed to read reg:%x\n", reg);
	return ~0;
}

void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (adev->mes.ring.sched.ready) {
		amdgpu_mes_wreg(adev, reg, v);
		return;
	}

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't wait any longer in the GPU reset case, because that may
	 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_rreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * Also don't wait any longer when in IRQ context.
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_write;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_write;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
	dev_err(adev->dev, "failed to write reg:%x\n", reg);
}

int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{
	if (amdgpu_num_kcq == -1) {
		return 8;
	} else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
		return 8;
	}
	return amdgpu_num_kcq;
}

void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
				  uint32_t ucode_id)
{
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
	struct amdgpu_firmware_info *info = NULL;
	const struct firmware *ucode_fw;
	unsigned int fw_size;

	switch (ucode_id) {
	case AMDGPU_UCODE_ID_CP_PFP:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.pfp_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_PFP:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.pfp_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.me_fw->data;
		adev->gfx.me_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.me_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_ME:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		adev->gfx.me_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.me_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.ce_fw->data;
		adev->gfx.ce_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.ce_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.ce_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			  le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1_JT:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.mec2_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			  le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2_JT:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		ucode_fw = adev->gfx.mec2_fw;
		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.mec_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	default:
		/* Unknown ucode id: bail out, ucode_fw/fw_size were never set. */
		return;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[ucode_id];
		info->ucode_id = ucode_id;
		info->fw = ucode_fw;
		adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
	}
}