1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright 2022 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: AMD
24 *
25 */
26
27 #include <drm/drm_atomic_helper.h>
28 #include <drm/drm_blend.h>
29 #include <drm/drm_gem_atomic_helper.h>
30 #include <drm/drm_plane_helper.h>
31 #include <drm/drm_fourcc.h>
32
33 #include "amdgpu.h"
34 #include "dal_asic_id.h"
35 #include "amdgpu_display.h"
36 #include "amdgpu_dm_trace.h"
37 #include "amdgpu_dm_plane.h"
38 #include "gc/gc_11_0_0_offset.h"
39 #include "gc/gc_11_0_0_sh_mask.h"
40
41 /*
42 * TODO: these are currently initialized to RGB formats only.
43 * For future use cases we should either initialize them dynamically based on
44 * plane capabilities, or initialize this array to all formats, so the internal
45 * DRM check will succeed, and let DC implement the proper check.
46 */
47 static const uint32_t rgb_formats[] = {
48 DRM_FORMAT_XRGB8888,
49 DRM_FORMAT_ARGB8888,
50 DRM_FORMAT_RGBA8888,
51 DRM_FORMAT_XRGB2101010,
52 DRM_FORMAT_XBGR2101010,
53 DRM_FORMAT_ARGB2101010,
54 DRM_FORMAT_ABGR2101010,
55 DRM_FORMAT_XRGB16161616,
56 DRM_FORMAT_XBGR16161616,
57 DRM_FORMAT_ARGB16161616,
58 DRM_FORMAT_ABGR16161616,
59 DRM_FORMAT_XBGR8888,
60 DRM_FORMAT_ABGR8888,
61 DRM_FORMAT_RGB565,
62 };
63
64 static const uint32_t overlay_formats[] = {
65 DRM_FORMAT_XRGB8888,
66 DRM_FORMAT_ARGB8888,
67 DRM_FORMAT_RGBA8888,
68 DRM_FORMAT_XBGR8888,
69 DRM_FORMAT_ABGR8888,
70 DRM_FORMAT_RGB565
71 };
72
73 static const u32 cursor_formats[] = {
74 DRM_FORMAT_ARGB8888
75 };
76
77 enum dm_micro_swizzle {
78 MICRO_SWIZZLE_Z = 0,
79 MICRO_SWIZZLE_S = 1,
80 MICRO_SWIZZLE_D = 2,
81 MICRO_SWIZZLE_R = 3
82 };
83
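/*
 * Return the modifier-aware format info for a framebuffer creation request,
 * based on its pixel format and the modifier supplied for plane 0.
 */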
84 const struct drm_format_info *amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
85 {
86 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
87 }
88
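/*
 * Derive the DC blending settings (per-pixel alpha, premultiplied alpha and
 * global/plane alpha) from the DRM plane state. Pixel blend modes are only
 * honored on overlay planes and only for formats with an alpha channel.
 */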
89 void fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
90 bool *per_pixel_alpha, bool *pre_multiplied_alpha,
91 bool *global_alpha, int *global_alpha_value)
92 {
93 *per_pixel_alpha = false;
94 *pre_multiplied_alpha = true;
95 *global_alpha = false;
96 *global_alpha_value = 0xff;
97
98 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
99 return;
100
101 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
102 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
103 static const uint32_t alpha_formats[] = {
104 DRM_FORMAT_ARGB8888,
105 DRM_FORMAT_RGBA8888,
106 DRM_FORMAT_ABGR8888,
107 };
108 uint32_t format = plane_state->fb->format->format;
109 unsigned int i;
110
111 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
112 if (format == alpha_formats[i]) {
113 *per_pixel_alpha = true;
114 break;
115 }
116 }
117
118 if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
119 *pre_multiplied_alpha = false;
120 }
121
122 if (plane_state->alpha < 0xffff) {
123 *global_alpha = true;
124 *global_alpha_value = plane_state->alpha >> 8;
125 }
126 }
127
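/*
 * Append one modifier to a dynamically grown array. The array doubles in
 * capacity when full; on allocation failure the array is freed and *mods is
 * set to NULL so the caller can report -ENOMEM.
 */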
128 static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
129 {
130 if (!*mods)
131 return;
132
133 if (*cap - *size < 1) {
134 uint64_t new_cap = *cap * 2;
135 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
136
137 if (!new_mods) {
138 kfree(*mods);
139 *mods = NULL;
140 return;
141 }
142
143 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
144 kfree(*mods);
145 *mods = new_mods;
146 *cap = new_cap;
147 }
148
149 (*mods)[*size] = mod;
150 *size += 1;
151 }
152
153 static bool modifier_has_dcc(uint64_t modifier)
154 {
155 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
156 }
157
158 static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
159 {
160 if (modifier == DRM_FORMAT_MOD_LINEAR)
161 return 0;
162
163 return AMD_FMT_MOD_GET(TILE, modifier);
164 }
165
166 static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
167 uint64_t tiling_flags)
168 {
169 /* Fill GFX8 params */
170 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
171 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
172
173 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
174 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
175 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
176 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
177 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
178
179 /* XXX fix me for VI */
180 tiling_info->gfx8.num_banks = num_banks;
181 tiling_info->gfx8.array_mode =
182 DC_ARRAY_2D_TILED_THIN1;
183 tiling_info->gfx8.tile_split = tile_split;
184 tiling_info->gfx8.bank_width = bankw;
185 tiling_info->gfx8.bank_height = bankh;
186 tiling_info->gfx8.tile_aspect = mtaspect;
187 tiling_info->gfx8.tile_mode =
188 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
189 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
190 == DC_ARRAY_1D_TILED_THIN1) {
191 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
192 }
193
194 tiling_info->gfx8.pipe_config =
195 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
196 }
197
198 static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
199 union dc_tiling_info *tiling_info)
200 {
201 /* Fill GFX9 params */
202 tiling_info->gfx9.num_pipes =
203 adev->gfx.config.gb_addr_config_fields.num_pipes;
204 tiling_info->gfx9.num_banks =
205 adev->gfx.config.gb_addr_config_fields.num_banks;
206 tiling_info->gfx9.pipe_interleave =
207 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
208 tiling_info->gfx9.num_shader_engines =
209 adev->gfx.config.gb_addr_config_fields.num_se;
210 tiling_info->gfx9.max_compressed_frags =
211 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
212 tiling_info->gfx9.num_rb_per_se =
213 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
214 tiling_info->gfx9.shaderEnable = 1;
215 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
216 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
217 }
218
219 static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
220 union dc_tiling_info *tiling_info,
221 uint64_t modifier)
222 {
223 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
224 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
225 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
226 unsigned int pipes_log2;
227
228 pipes_log2 = min(5u, mod_pipe_xor_bits);
229
230 fill_gfx9_tiling_info_from_device(adev, tiling_info);
231
232 if (!IS_AMD_FMT_MOD(modifier))
233 return;
234
235 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
236 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
237
238 if (adev->family >= AMDGPU_FAMILY_NV) {
239 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
240 } else {
241 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
242
243 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
244 }
245 }
246
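/*
 * Ask DC whether the requested DCC configuration is supported for the given
 * format, surface size and swizzle mode. Returns 0 if DCC is disabled or
 * supported, -EINVAL otherwise.
 */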
247 static int validate_dcc(struct amdgpu_device *adev,
248 const enum surface_pixel_format format,
249 const enum dc_rotation_angle rotation,
250 const union dc_tiling_info *tiling_info,
251 const struct dc_plane_dcc_param *dcc,
252 const struct dc_plane_address *address,
253 const struct plane_size *plane_size)
254 {
255 struct dc *dc = adev->dm.dc;
256 struct dc_dcc_surface_param input;
257 struct dc_surface_dcc_cap output;
258
259 memset(&input, 0, sizeof(input));
260 memset(&output, 0, sizeof(output));
261
262 if (!dcc->enable)
263 return 0;
264
265 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
266 !dc->cap_funcs.get_dcc_compression_cap)
267 return -EINVAL;
268
269 input.format = format;
270 input.surface_size.width = plane_size->surface_size.width;
271 input.surface_size.height = plane_size->surface_size.height;
272 input.swizzle_mode = tiling_info->gfx9.swizzle;
273
274 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
275 input.scan = SCAN_DIRECTION_HORIZONTAL;
276 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
277 input.scan = SCAN_DIRECTION_VERTICAL;
278
279 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
280 return -EINVAL;
281
282 if (!output.capable)
283 return -EINVAL;
284
285 if (dcc->independent_64b_blks == 0 &&
286 output.grph.rgb.independent_64b_blks != 0)
287 return -EINVAL;
288
289 return 0;
290 }
291
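/*
 * Translate the framebuffer's format modifier into GFX9+ tiling info and DCC
 * parameters (including the DCC metadata address), then validate the result
 * against the DC DCC caps.
 */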
292 static int fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
293 const struct amdgpu_framebuffer *afb,
294 const enum surface_pixel_format format,
295 const enum dc_rotation_angle rotation,
296 const struct plane_size *plane_size,
297 union dc_tiling_info *tiling_info,
298 struct dc_plane_dcc_param *dcc,
299 struct dc_plane_address *address,
300 const bool force_disable_dcc)
301 {
302 const uint64_t modifier = afb->base.modifier;
303 int ret = 0;
304
305 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
306 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
307
308 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
309 uint64_t dcc_address = afb->address + afb->base.offsets[1];
310 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
311 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
312
313 dcc->enable = 1;
314 dcc->meta_pitch = afb->base.pitches[1];
315 dcc->independent_64b_blks = independent_64b_blks;
316 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
317 if (independent_64b_blks && independent_128b_blks)
318 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
319 else if (independent_128b_blks)
320 dcc->dcc_ind_blk = hubp_ind_block_128b;
321 else if (independent_64b_blks && !independent_128b_blks)
322 dcc->dcc_ind_blk = hubp_ind_block_64b;
323 else
324 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
325 } else {
326 if (independent_64b_blks)
327 dcc->dcc_ind_blk = hubp_ind_block_64b;
328 else
329 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
330 }
331
332 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
333 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
334 }
335
336 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
337 if (ret)
338 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
339
340 return ret;
341 }
342
343 static void add_gfx10_1_modifiers(const struct amdgpu_device *adev,
344 uint64_t **mods, uint64_t *size, uint64_t *capacity)
345 {
346 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
347
348 add_modifier(mods, size, capacity, AMD_FMT_MOD |
349 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
350 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
351 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
352 AMD_FMT_MOD_SET(DCC, 1) |
353 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
354 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
355 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
356
357 add_modifier(mods, size, capacity, AMD_FMT_MOD |
358 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
359 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
360 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
361 AMD_FMT_MOD_SET(DCC, 1) |
362 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
363 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
364 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
365 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
366
367 add_modifier(mods, size, capacity, AMD_FMT_MOD |
368 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
369 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
370 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
371
372 add_modifier(mods, size, capacity, AMD_FMT_MOD |
373 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
374 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
375 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
376
377
378 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
379 add_modifier(mods, size, capacity, AMD_FMT_MOD |
380 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
381 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
382
383 add_modifier(mods, size, capacity, AMD_FMT_MOD |
384 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
385 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
386 }
387
388 static void add_gfx9_modifiers(const struct amdgpu_device *adev,
389 uint64_t **mods, uint64_t *size, uint64_t *capacity)
390 {
391 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
392 int pipe_xor_bits = min(8, pipes +
393 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
394 int bank_xor_bits = min(8 - pipe_xor_bits,
395 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
396 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
397 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
398
399
400 if (adev->family == AMDGPU_FAMILY_RV) {
401 /* Raven2 and later */
402 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
403
404 /*
405 * No _D DCC swizzles yet because we only allow 32bpp, which
406 * doesn't support _D on DCN
407 */
408
409 if (has_constant_encode) {
410 add_modifier(mods, size, capacity, AMD_FMT_MOD |
411 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
412 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
413 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
414 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
415 AMD_FMT_MOD_SET(DCC, 1) |
416 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
417 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
418 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
419 }
420
421 add_modifier(mods, size, capacity, AMD_FMT_MOD |
422 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
423 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
424 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
425 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
426 AMD_FMT_MOD_SET(DCC, 1) |
427 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
428 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
429 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
430
431 if (has_constant_encode) {
432 add_modifier(mods, size, capacity, AMD_FMT_MOD |
433 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
434 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
435 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
436 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
437 AMD_FMT_MOD_SET(DCC, 1) |
438 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
439 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
440 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
441
442 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
443 AMD_FMT_MOD_SET(RB, rb) |
444 AMD_FMT_MOD_SET(PIPE, pipes));
445 }
446
447 add_modifier(mods, size, capacity, AMD_FMT_MOD |
448 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
449 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
450 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
451 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
452 AMD_FMT_MOD_SET(DCC, 1) |
453 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
454 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
455 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
456 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
457 AMD_FMT_MOD_SET(RB, rb) |
458 AMD_FMT_MOD_SET(PIPE, pipes));
459 }
460
461 /*
462 * Only supported for 64bpp on Raven, will be filtered on format in
463 * dm_plane_format_mod_supported.
464 */
465 add_modifier(mods, size, capacity, AMD_FMT_MOD |
466 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
467 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
468 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
469 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
470
471 if (adev->family == AMDGPU_FAMILY_RV) {
472 add_modifier(mods, size, capacity, AMD_FMT_MOD |
473 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
474 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
475 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
476 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
477 }
478
479 /*
480 * Only supported for 64bpp on Raven, will be filtered on format in
481 * dm_plane_format_mod_supported.
482 */
483 add_modifier(mods, size, capacity, AMD_FMT_MOD |
484 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
485 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
486
487 if (adev->family == AMDGPU_FAMILY_RV) {
488 add_modifier(mods, size, capacity, AMD_FMT_MOD |
489 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
490 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
491 }
492 }
493
494 static void add_gfx10_3_modifiers(const struct amdgpu_device *adev,
495 uint64_t **mods, uint64_t *size, uint64_t *capacity)
496 {
497 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
498 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
499
500 add_modifier(mods, size, capacity, AMD_FMT_MOD |
501 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
502 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
503 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
504 AMD_FMT_MOD_SET(PACKERS, pkrs) |
505 AMD_FMT_MOD_SET(DCC, 1) |
506 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
507 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
508 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
509 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
510
511 add_modifier(mods, size, capacity, AMD_FMT_MOD |
512 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
513 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
514 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
515 AMD_FMT_MOD_SET(PACKERS, pkrs) |
516 AMD_FMT_MOD_SET(DCC, 1) |
517 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
518 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
519 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
520
521 add_modifier(mods, size, capacity, AMD_FMT_MOD |
522 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
523 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
524 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
525 AMD_FMT_MOD_SET(PACKERS, pkrs) |
526 AMD_FMT_MOD_SET(DCC, 1) |
527 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
528 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
529 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
530 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
531 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
532
533 add_modifier(mods, size, capacity, AMD_FMT_MOD |
534 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
535 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
536 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
537 AMD_FMT_MOD_SET(PACKERS, pkrs) |
538 AMD_FMT_MOD_SET(DCC, 1) |
539 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
540 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
541 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
542 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
543
544 add_modifier(mods, size, capacity, AMD_FMT_MOD |
545 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
546 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
547 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
548 AMD_FMT_MOD_SET(PACKERS, pkrs));
549
550 add_modifier(mods, size, capacity, AMD_FMT_MOD |
551 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
552 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
553 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
554 AMD_FMT_MOD_SET(PACKERS, pkrs));
555
556 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
557 add_modifier(mods, size, capacity, AMD_FMT_MOD |
558 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
559 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
560
561 add_modifier(mods, size, capacity, AMD_FMT_MOD |
562 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
563 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
564 }
565
566 static void add_gfx11_modifiers(struct amdgpu_device *adev,
567 uint64_t **mods, uint64_t *size, uint64_t *capacity)
568 {
569 int num_pipes = 0;
570 int pipe_xor_bits = 0;
571 int num_pkrs = 0;
572 int pkrs = 0;
573 u32 gb_addr_config;
574 u8 i = 0;
575 unsigned swizzle_r_x;
576 uint64_t modifier_r_x;
577 uint64_t modifier_dcc_best;
578 uint64_t modifier_dcc_4k;
579
580 /* TODO: GFX11 IP HW init hasn't finished yet and we get zero if we read from
581 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes}
582 */
583 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
584 ASSERT(gb_addr_config != 0);
585
586 num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
587 pkrs = ilog2(num_pkrs);
588 num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
589 pipe_xor_bits = ilog2(num_pipes);
590
591 for (i = 0; i < 2; i++) {
592 /* Insert the best one first. */
593 /* R_X swizzle modes are the best for rendering and DCC requires them. */
594 if (num_pipes > 16)
595 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
596 else
597 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
598
599 modifier_r_x = AMD_FMT_MOD |
600 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
601 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
602 AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
603 AMD_FMT_MOD_SET(PACKERS, pkrs);
604
605 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
606 modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
607 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
608 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
609 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
610
611 /* DCC settings for 4K and greater resolutions. (required by display hw) */
612 modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
613 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
614 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
615 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
616
617 add_modifier(mods, size, capacity, modifier_dcc_best);
618 add_modifier(mods, size, capacity, modifier_dcc_4k);
619
620 add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
621 add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
622
623 add_modifier(mods, size, capacity, modifier_r_x);
624 }
625
626 add_modifier(mods, size, capacity, AMD_FMT_MOD |
627 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
628 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
629 }
630
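/*
 * Build the list of format modifiers advertised for a plane, based on the GPU
 * family. The list is terminated with DRM_FORMAT_MOD_INVALID; cursor planes
 * only get LINEAR.
 */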
631 static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
632 {
633 uint64_t size = 0, capacity = 128;
634 *mods = NULL;
635
636 /* We have not hooked up any pre-GFX9 modifiers. */
637 if (adev->family < AMDGPU_FAMILY_AI)
638 return 0;
639
640 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
641
642 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
643 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
644 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
645 return *mods ? 0 : -ENOMEM;
646 }
647
648 switch (adev->family) {
649 case AMDGPU_FAMILY_AI:
650 case AMDGPU_FAMILY_RV:
651 add_gfx9_modifiers(adev, mods, &size, &capacity);
652 break;
653 case AMDGPU_FAMILY_NV:
654 case AMDGPU_FAMILY_VGH:
655 case AMDGPU_FAMILY_YC:
656 case AMDGPU_FAMILY_GC_10_3_6:
657 case AMDGPU_FAMILY_GC_10_3_7:
658 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
659 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
660 else
661 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
662 break;
663 case AMDGPU_FAMILY_GC_11_0_0:
664 case AMDGPU_FAMILY_GC_11_0_1:
665 add_gfx11_modifiers(adev, mods, &size, &capacity);
666 break;
667 }
668
669 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
670
671 /* INVALID marks the end of the list. */
672 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
673
674 if (!*mods)
675 return -ENOMEM;
676
677 return 0;
678 }
679
680 static int get_plane_formats(const struct drm_plane *plane,
681 const struct dc_plane_cap *plane_cap,
682 uint32_t *formats, int max_formats)
683 {
684 int i, num_formats = 0;
685
686 /*
687 * TODO: Query support for each group of formats directly from
688 * DC plane caps. This will require adding more formats to the
689 * caps list.
690 */
691
692 switch (plane->type) {
693 case DRM_PLANE_TYPE_PRIMARY:
694 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
695 if (num_formats >= max_formats)
696 break;
697
698 formats[num_formats++] = rgb_formats[i];
699 }
700
701 if (plane_cap && plane_cap->pixel_format_support.nv12)
702 formats[num_formats++] = DRM_FORMAT_NV12;
703 if (plane_cap && plane_cap->pixel_format_support.p010)
704 formats[num_formats++] = DRM_FORMAT_P010;
705 if (plane_cap && plane_cap->pixel_format_support.fp16) {
706 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
707 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
708 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
709 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
710 }
711 break;
712
713 case DRM_PLANE_TYPE_OVERLAY:
714 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
715 if (num_formats >= max_formats)
716 break;
717
718 formats[num_formats++] = overlay_formats[i];
719 }
720 break;
721
722 case DRM_PLANE_TYPE_CURSOR:
723 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
724 if (num_formats >= max_formats)
725 break;
726
727 formats[num_formats++] = cursor_formats[i];
728 }
729 break;
730 }
731
732 return num_formats;
733 }
734
735 #ifdef CONFIG_DRM_AMD_DC_HDR
736 static int attach_color_mgmt_properties(struct amdgpu_display_manager *dm, struct drm_plane *plane)
737 {
738 drm_object_attach_property(&plane->base,
739 dm->degamma_lut_property,
740 0);
741 drm_object_attach_property(&plane->base,
742 dm->degamma_lut_size_property,
743 MAX_COLOR_LUT_ENTRIES);
744 drm_object_attach_property(&plane->base, dm->ctm_property,
745 0);
746 drm_object_attach_property(&plane->base, dm->sdr_boost_property,
747 DEFAULT_SDR_BOOST);
748
749 return 0;
750 }
751 #endif
752
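/*
 * Fill DC plane size, tiling, DCC and address information from an amdgpu
 * framebuffer. Packed RGB formats use a single graphics address, while video
 * formats are programmed as separate luma/chroma addresses.
 */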
753 int fill_plane_buffer_attributes(struct amdgpu_device *adev,
754 const struct amdgpu_framebuffer *afb,
755 const enum surface_pixel_format format,
756 const enum dc_rotation_angle rotation,
757 const uint64_t tiling_flags,
758 union dc_tiling_info *tiling_info,
759 struct plane_size *plane_size,
760 struct dc_plane_dcc_param *dcc,
761 struct dc_plane_address *address,
762 bool tmz_surface,
763 bool force_disable_dcc)
764 {
765 const struct drm_framebuffer *fb = &afb->base;
766 int ret;
767
768 memset(tiling_info, 0, sizeof(*tiling_info));
769 memset(plane_size, 0, sizeof(*plane_size));
770 memset(dcc, 0, sizeof(*dcc));
771 memset(address, 0, sizeof(*address));
772
773 address->tmz_surface = tmz_surface;
774
775 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
776 uint64_t addr = afb->address + fb->offsets[0];
777
778 plane_size->surface_size.x = 0;
779 plane_size->surface_size.y = 0;
780 plane_size->surface_size.width = fb->width;
781 plane_size->surface_size.height = fb->height;
782 plane_size->surface_pitch =
783 fb->pitches[0] / fb->format->cpp[0];
784
785 address->type = PLN_ADDR_TYPE_GRAPHICS;
786 address->grph.addr.low_part = lower_32_bits(addr);
787 address->grph.addr.high_part = upper_32_bits(addr);
788 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
789 uint64_t luma_addr = afb->address + fb->offsets[0];
790 uint64_t chroma_addr = afb->address + fb->offsets[1];
791
792 plane_size->surface_size.x = 0;
793 plane_size->surface_size.y = 0;
794 plane_size->surface_size.width = fb->width;
795 plane_size->surface_size.height = fb->height;
796 plane_size->surface_pitch =
797 fb->pitches[0] / fb->format->cpp[0];
798
799 plane_size->chroma_size.x = 0;
800 plane_size->chroma_size.y = 0;
801 /* TODO: set these based on surface format */
802 plane_size->chroma_size.width = fb->width / 2;
803 plane_size->chroma_size.height = fb->height / 2;
804
805 plane_size->chroma_pitch =
806 fb->pitches[1] / fb->format->cpp[1];
807
808 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
809 address->video_progressive.luma_addr.low_part =
810 lower_32_bits(luma_addr);
811 address->video_progressive.luma_addr.high_part =
812 upper_32_bits(luma_addr);
813 address->video_progressive.chroma_addr.low_part =
814 lower_32_bits(chroma_addr);
815 address->video_progressive.chroma_addr.high_part =
816 upper_32_bits(chroma_addr);
817 }
818
819 if (adev->family >= AMDGPU_FAMILY_AI) {
820 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
821 rotation, plane_size,
822 tiling_info, dcc,
823 address,
824 force_disable_dcc);
825 if (ret)
826 return ret;
827 } else {
828 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
829 }
830
831 return 0;
832 }
833
834 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
835 struct drm_plane_state *new_state)
836 {
837 struct amdgpu_framebuffer *afb;
838 struct drm_gem_object *obj;
839 struct amdgpu_device *adev;
840 struct amdgpu_bo *rbo;
841 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
842 uint32_t domain;
843 int r;
844
845 if (!new_state->fb) {
846 DRM_DEBUG_KMS("No FB bound\n");
847 return 0;
848 }
849
850 afb = to_amdgpu_framebuffer(new_state->fb);
851 obj = new_state->fb->obj[0];
852 rbo = gem_to_amdgpu_bo(obj);
853 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
854
855 r = amdgpu_bo_reserve(rbo, true);
856 if (r) {
857 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
858 return r;
859 }
860
861 r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
862 if (r) {
863 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
864 goto error_unlock;
865 }
866
867 if (plane->type != DRM_PLANE_TYPE_CURSOR)
868 domain = amdgpu_display_supported_domains(adev, rbo->flags);
869 else
870 domain = AMDGPU_GEM_DOMAIN_VRAM;
871
872 r = amdgpu_bo_pin(rbo, domain);
873 if (unlikely(r != 0)) {
874 if (r != -ERESTARTSYS)
875 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
876 goto error_unlock;
877 }
878
879 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
880 if (unlikely(r != 0)) {
881 DRM_ERROR("%p bind failed\n", rbo);
882 goto error_unpin;
883 }
884
885 r = drm_gem_plane_helper_prepare_fb(plane, new_state);
886 if (unlikely(r != 0))
887 goto error_unpin;
888
889 amdgpu_bo_unreserve(rbo);
890
891 afb->address = amdgpu_bo_gpu_offset(rbo);
892
893 amdgpu_bo_ref(rbo);
894
895 /*
896 * We don't do surface updates on planes that have been newly created,
897 * but we also don't have the afb->address during atomic check.
898 *
899 * Fill in buffer attributes depending on the address here, but only on
900 * newly created planes since they're not being used by DC yet and this
901 * won't modify global state.
902 */
903 dm_plane_state_old = to_dm_plane_state(plane->state);
904 dm_plane_state_new = to_dm_plane_state(new_state);
905
906 if (dm_plane_state_new->dc_state &&
907 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
908 struct dc_plane_state *plane_state =
909 dm_plane_state_new->dc_state;
910 bool force_disable_dcc = !plane_state->dcc.enable;
911
912 fill_plane_buffer_attributes(
913 adev, afb, plane_state->format, plane_state->rotation,
914 afb->tiling_flags,
915 &plane_state->tiling_info, &plane_state->plane_size,
916 &plane_state->dcc, &plane_state->address,
917 afb->tmz_surface, force_disable_dcc);
918 }
919
920 return 0;
921
922 error_unpin:
923 amdgpu_bo_unpin(rbo);
924
925 error_unlock:
926 amdgpu_bo_unreserve(rbo);
927 return r;
928 }
929
930 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
931 struct drm_plane_state *old_state)
932 {
933 struct amdgpu_bo *rbo;
934 int r;
935
936 if (!old_state->fb)
937 return;
938
939 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
940 r = amdgpu_bo_reserve(rbo, false);
941 if (unlikely(r)) {
942 DRM_ERROR("failed to reserve rbo before unpin\n");
943 return;
944 }
945
946 amdgpu_bo_unpin(rbo);
947 amdgpu_bo_unreserve(rbo);
948 amdgpu_bo_unref(&rbo);
949 }
950
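/*
 * Look up the per-format scaling limits from the DC plane caps. A factor of 1
 * (no scaling allowed) is normalized to 1000, i.e. a 1.0 scaling factor.
 */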
951 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
952 struct drm_framebuffer *fb,
953 int *min_downscale, int *max_upscale)
954 {
955 struct amdgpu_device *adev = drm_to_adev(dev);
956 struct dc *dc = adev->dm.dc;
957 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
958 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
959
960 switch (fb->format->format) {
961 case DRM_FORMAT_P010:
962 case DRM_FORMAT_NV12:
963 case DRM_FORMAT_NV21:
964 *max_upscale = plane_cap->max_upscale_factor.nv12;
965 *min_downscale = plane_cap->max_downscale_factor.nv12;
966 break;
967
968 case DRM_FORMAT_XRGB16161616F:
969 case DRM_FORMAT_ARGB16161616F:
970 case DRM_FORMAT_XBGR16161616F:
971 case DRM_FORMAT_ABGR16161616F:
972 *max_upscale = plane_cap->max_upscale_factor.fp16;
973 *min_downscale = plane_cap->max_downscale_factor.fp16;
974 break;
975
976 default:
977 *max_upscale = plane_cap->max_upscale_factor.argb8888;
978 *min_downscale = plane_cap->max_downscale_factor.argb8888;
979 break;
980 }
981
982 /*
983 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
984 * scaling factor of 1.0 == 1000 units.
985 */
986 if (*max_upscale == 1)
987 *max_upscale = 1000;
988
989 if (*min_downscale == 1)
990 *min_downscale = 1000;
991 }
992
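/*
 * Validate the plane state against the new CRTC state: check that the visible
 * viewport is large enough and convert the DC scaling limits into the 16.16
 * fixed-point min/max scale expected by drm_atomic_helper_check_plane_state().
 */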
993 int dm_plane_helper_check_state(struct drm_plane_state *state,
994 struct drm_crtc_state *new_crtc_state)
995 {
996 struct drm_framebuffer *fb = state->fb;
997 int min_downscale, max_upscale;
998 int min_scale = 0;
999 int max_scale = INT_MAX;
1000
1001 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
1002 if (fb && state->crtc) {
1003 /* Validate viewport to cover the case when only the position changes */
1004 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
1005 int viewport_width = state->crtc_w;
1006 int viewport_height = state->crtc_h;
1007
1008 if (state->crtc_x < 0)
1009 viewport_width += state->crtc_x;
1010 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
1011 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
1012
1013 if (state->crtc_y < 0)
1014 viewport_height += state->crtc_y;
1015 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
1016 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
1017
1018 if (viewport_width < 0 || viewport_height < 0) {
1019 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
1020 return -EINVAL;
1021 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
1022 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
1023 return -EINVAL;
1024 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
1025 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
1026 return -EINVAL;
1027 }
1028
1029 }
1030
1031 /* Get min/max allowed scaling factors from plane caps. */
1032 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
1033 &min_downscale, &max_upscale);
1034 /*
1035 * Convert to drm convention: 16.16 fixed point, instead of dc's
1036 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
1037 * dst/src, so min_scale = 1.0 / max_upscale, etc.
1038 */
1039 min_scale = (1000 << 16) / max_upscale;
1040 max_scale = (1000 << 16) / min_downscale;
1041 }
1042
1043 return drm_atomic_helper_check_plane_state(
1044 state, new_crtc_state, min_scale, max_scale, true, true);
1045 }
1046
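/*
 * Convert the DRM source/destination rectangles into DC scaling info and
 * reject configurations that exceed the per-format scaling limits.
 */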
1047 int fill_dc_scaling_info(struct amdgpu_device *adev,
1048 const struct drm_plane_state *state,
1049 struct dc_scaling_info *scaling_info)
1050 {
1051 int scale_w, scale_h, min_downscale, max_upscale;
1052
1053 memset(scaling_info, 0, sizeof(*scaling_info));
1054
1055 /* Source is 16.16 fixed point, but we ignore the fractional part for now... */
1056 scaling_info->src_rect.x = state->src_x >> 16;
1057 scaling_info->src_rect.y = state->src_y >> 16;
1058
1059 /*
1060 * For reasons we don't (yet) fully understand, a non-zero
1061 * src_y coordinate into an NV12 buffer can cause a
1062 * system hang on DCN1x.
1063 * To avoid hangs (and maybe be overly cautious)
1064 * let's reject both non-zero src_x and src_y.
1065 *
1066 * We currently know of only one use-case to reproduce a
1067 * scenario with non-zero src_x and src_y for NV12, which
1068 * is to gesture the YouTube Android app into full screen
1069 * on ChromeOS.
1070 */
1071 if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
1072 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
1073 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
1074 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
1075 return -EINVAL;
1076
1077 scaling_info->src_rect.width = state->src_w >> 16;
1078 if (scaling_info->src_rect.width == 0)
1079 return -EINVAL;
1080
1081 scaling_info->src_rect.height = state->src_h >> 16;
1082 if (scaling_info->src_rect.height == 0)
1083 return -EINVAL;
1084
1085 scaling_info->dst_rect.x = state->crtc_x;
1086 scaling_info->dst_rect.y = state->crtc_y;
1087
1088 if (state->crtc_w == 0)
1089 return -EINVAL;
1090
1091 scaling_info->dst_rect.width = state->crtc_w;
1092
1093 if (state->crtc_h == 0)
1094 return -EINVAL;
1095
1096 scaling_info->dst_rect.height = state->crtc_h;
1097
1098 /* DRM doesn't specify clipping on destination output. */
1099 scaling_info->clip_rect = scaling_info->dst_rect;
1100
1101 /* Validate scaling per-format with DC plane caps */
1102 if (state->plane && state->plane->dev && state->fb) {
1103 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
1104 &min_downscale, &max_upscale);
1105 } else {
1106 min_downscale = 250;
1107 max_upscale = 16000;
1108 }
1109
1110 scale_w = scaling_info->dst_rect.width * 1000 /
1111 scaling_info->src_rect.width;
1112
1113 if (scale_w < min_downscale || scale_w > max_upscale)
1114 return -EINVAL;
1115
1116 scale_h = scaling_info->dst_rect.height * 1000 /
1117 scaling_info->src_rect.height;
1118
1119 if (scale_h < min_downscale || scale_h > max_upscale)
1120 return -EINVAL;
1121
1122 /*
1123 * The "scaling_quality" can be ignored for now; quality = 0 makes DC
1124 * assume reasonable defaults based on the format.
1125 */
1126
1127 return 0;
1128 }
1129
1130 static int dm_plane_atomic_check(struct drm_plane *plane,
1131 struct drm_atomic_state *state)
1132 {
1133 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
1134 plane);
1135 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1136 struct dc *dc = adev->dm.dc;
1137 struct dm_plane_state *dm_plane_state;
1138 struct dc_scaling_info scaling_info;
1139 struct drm_crtc_state *new_crtc_state;
1140 int ret;
1141
1142 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
1143
1144 dm_plane_state = to_dm_plane_state(new_plane_state);
1145
1146 if (!dm_plane_state->dc_state)
1147 return 0;
1148
1149 new_crtc_state =
1150 drm_atomic_get_new_crtc_state(state,
1151 new_plane_state->crtc);
1152 if (!new_crtc_state)
1153 return -EINVAL;
1154
1155 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
1156 if (ret)
1157 return ret;
1158
1159 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
1160 if (ret)
1161 return ret;
1162
1163 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
1164 return 0;
1165
1166 return -EINVAL;
1167 }
1168
1169 static int dm_plane_atomic_async_check(struct drm_plane *plane,
1170 struct drm_atomic_state *state)
1171 {
1172 /* Only support async updates on cursor planes. */
1173 if (plane->type != DRM_PLANE_TYPE_CURSOR)
1174 return -EINVAL;
1175
1176 return 0;
1177 }
1178
1179 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
1180 struct dc_cursor_position *position)
1181 {
1182 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1183 int x, y;
1184 int xorigin = 0, yorigin = 0;
1185
1186 if (!crtc || !plane->state->fb)
1187 return 0;
1188
1189 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
1190 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
1191 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
1192 __func__,
1193 plane->state->crtc_w,
1194 plane->state->crtc_h);
1195 return -EINVAL;
1196 }
1197
1198 x = plane->state->crtc_x;
1199 y = plane->state->crtc_y;
1200
1201 if (x <= -amdgpu_crtc->max_cursor_width ||
1202 y <= -amdgpu_crtc->max_cursor_height)
1203 return 0;
1204
1205 if (x < 0) {
1206 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
1207 x = 0;
1208 }
1209 if (y < 0) {
1210 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
1211 y = 0;
1212 }
1213 position->enable = true;
1214 position->translate_by_source = true;
1215 position->x = x;
1216 position->y = y;
1217 position->x_hotspot = xorigin;
1218 position->y_hotspot = yorigin;
1219
1220 return 0;
1221 }
1222
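/*
 * Program the DC cursor attributes and position for a cursor plane update,
 * or disable the cursor when it has no framebuffer or is entirely off screen.
 */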
1223 void handle_cursor_update(struct drm_plane *plane,
1224 struct drm_plane_state *old_plane_state)
1225 {
1226 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1227 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
1228 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
1229 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
1230 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1231 uint64_t address = afb ? afb->address : 0;
1232 struct dc_cursor_position position = {0};
1233 struct dc_cursor_attributes attributes;
1234 int ret;
1235
1236 if (!plane->state->fb && !old_plane_state->fb)
1237 return;
1238
1239 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
1240 __func__,
1241 amdgpu_crtc->crtc_id,
1242 plane->state->crtc_w,
1243 plane->state->crtc_h);
1244
1245 ret = get_cursor_position(plane, crtc, &position);
1246 if (ret)
1247 return;
1248
1249 if (!position.enable) {
1250 /* turn off cursor */
1251 if (crtc_state && crtc_state->stream) {
1252 mutex_lock(&adev->dm.dc_lock);
1253 dc_stream_set_cursor_position(crtc_state->stream,
1254 &position);
1255 mutex_unlock(&adev->dm.dc_lock);
1256 }
1257 return;
1258 }
1259
1260 amdgpu_crtc->cursor_width = plane->state->crtc_w;
1261 amdgpu_crtc->cursor_height = plane->state->crtc_h;
1262
1263 memset(&attributes, 0, sizeof(attributes));
1264 attributes.address.high_part = upper_32_bits(address);
1265 attributes.address.low_part = lower_32_bits(address);
1266 attributes.width = plane->state->crtc_w;
1267 attributes.height = plane->state->crtc_h;
1268 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
1269 attributes.rotation_angle = 0;
1270 attributes.attribute_flags.value = 0;
1271
1272 /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
1273 * legacy gamma setup.
1274 */
1275 if (crtc_state->cm_is_degamma_srgb &&
1276 adev->dm.dc->caps.color.dpp.gamma_corr)
1277 attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
1278
1279 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
1280
1281 if (crtc_state->stream) {
1282 mutex_lock(&adev->dm.dc_lock);
1283 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
1284 &attributes))
1285 DRM_ERROR("DC failed to set cursor attributes\n");
1286
1287 if (!dc_stream_set_cursor_position(crtc_state->stream,
1288 &position))
1289 DRM_ERROR("DC failed to set cursor position\n");
1290 mutex_unlock(&adev->dm.dc_lock);
1291 }
1292 }
1293
1294 static void dm_plane_atomic_async_update(struct drm_plane *plane,
1295 struct drm_atomic_state *state)
1296 {
1297 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
1298 plane);
1299 struct drm_plane_state *old_state =
1300 drm_atomic_get_old_plane_state(state, plane);
1301
1302 trace_amdgpu_dm_atomic_update_cursor(new_state);
1303
1304 swap(plane->state->fb, new_state->fb);
1305
1306 plane->state->src_x = new_state->src_x;
1307 plane->state->src_y = new_state->src_y;
1308 plane->state->src_w = new_state->src_w;
1309 plane->state->src_h = new_state->src_h;
1310 plane->state->crtc_x = new_state->crtc_x;
1311 plane->state->crtc_y = new_state->crtc_y;
1312 plane->state->crtc_w = new_state->crtc_w;
1313 plane->state->crtc_h = new_state->crtc_h;
1314
1315 handle_cursor_update(plane, old_state);
1316 }
1317
1318 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
1319 .prepare_fb = dm_plane_helper_prepare_fb,
1320 .cleanup_fb = dm_plane_helper_cleanup_fb,
1321 .atomic_check = dm_plane_atomic_check,
1322 .atomic_async_check = dm_plane_atomic_async_check,
1323 .atomic_async_update = dm_plane_atomic_async_update
1324 };
1325
1326 static void dm_drm_plane_reset(struct drm_plane *plane)
1327 {
1328 struct dm_plane_state *amdgpu_state = NULL;
1329
1330 if (plane->state)
1331 plane->funcs->atomic_destroy_state(plane, plane->state);
1332
1333 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
1334 WARN_ON(amdgpu_state == NULL);
1335
1336 if (amdgpu_state)
1337 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
1338 #ifdef CONFIG_DRM_AMD_DC_HDR
1339 if (amdgpu_state)
1340 amdgpu_state->sdr_boost = DEFAULT_SDR_BOOST;
1341 #endif
1342 }
1343
1344 static struct drm_plane_state *
1345 dm_drm_plane_duplicate_state(struct drm_plane *plane)
1346 {
1347 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
1348
1349 old_dm_plane_state = to_dm_plane_state(plane->state);
1350 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
1351 if (!dm_plane_state)
1352 return NULL;
1353
1354 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
1355
1356 if (old_dm_plane_state->dc_state) {
1357 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
1358 dc_plane_state_retain(dm_plane_state->dc_state);
1359 }
1360
1361 #ifdef CONFIG_DRM_AMD_DC_HDR
1362 if (dm_plane_state->degamma_lut)
1363 drm_property_blob_get(dm_plane_state->degamma_lut);
1364 if (dm_plane_state->ctm)
1365 drm_property_blob_get(dm_plane_state->ctm);
1366
1367 dm_plane_state->sdr_boost = old_dm_plane_state->sdr_boost;
1368 #endif
1369
1370 return &dm_plane_state->base;
1371 }
1372
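/*
 * Check whether a format/modifier pair can be displayed. LINEAR and INVALID
 * are always accepted; otherwise the modifier must be on the plane's list and
 * must not use a D swizzle or DCC layout the display hardware can't handle
 * for this bpp.
 */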
1373 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
1374 uint32_t format,
1375 uint64_t modifier)
1376 {
1377 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1378 const struct drm_format_info *info = drm_format_info(format);
1379 int i;
1380
1381 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
1382
1383 if (!info)
1384 return false;
1385
1386 /*
1387 * We always have to allow these modifiers:
1388 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
1389 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
1390 */
1391 if (modifier == DRM_FORMAT_MOD_LINEAR ||
1392 modifier == DRM_FORMAT_MOD_INVALID) {
1393 return true;
1394 }
1395
1396 /* Check that the modifier is on the list of the plane's supported modifiers. */
1397 for (i = 0; i < plane->modifier_count; i++) {
1398 if (modifier == plane->modifiers[i])
1399 break;
1400 }
1401 if (i == plane->modifier_count)
1402 return false;
1403
1404 /*
1405 * For D swizzle the canonical modifier depends on the bpp, so check
1406 * it here.
1407 */
1408 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
1409 adev->family >= AMDGPU_FAMILY_NV) {
1410 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
1411 return false;
1412 }
1413
1414 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
1415 info->cpp[0] < 8)
1416 return false;
1417
1418 if (modifier_has_dcc(modifier)) {
1419 /* Per radeonsi comments, 16/64 bpp are more complicated. */
1420 if (info->cpp[0] != 4)
1421 return false;
1422 /* We support multi-planar formats, but not when combined with
1423 * additional DCC metadata planes.
1424 */
1425 if (info->num_planes > 1)
1426 return false;
1427 }
1428
1429 return true;
1430 }
1431
1432 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
1433 struct drm_plane_state *state)
1434 {
1435 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1436
1437 #ifdef CONFIG_DRM_AMD_DC_HDR
1438 drm_property_blob_put(dm_plane_state->degamma_lut);
1439 drm_property_blob_put(dm_plane_state->ctm);
1440 #endif
1441 if (dm_plane_state->dc_state)
1442 dc_plane_state_release(dm_plane_state->dc_state);
1443
1444 drm_atomic_helper_plane_destroy_state(plane, state);
1445 }
1446
1447 #ifdef CONFIG_DRM_AMD_DC_HDR
1448 /* copied from drm_atomic_uapi.c */
1449 static int atomic_replace_property_blob_from_id(struct drm_device *dev,
1450 struct drm_property_blob **blob,
1451 uint64_t blob_id,
1452 ssize_t expected_size,
1453 ssize_t expected_elem_size,
1454 bool *replaced)
1455 {
1456 struct drm_property_blob *new_blob = NULL;
1457
1458 if (blob_id != 0) {
1459 new_blob = drm_property_lookup_blob(dev, blob_id);
1460 if (new_blob == NULL)
1461 return -EINVAL;
1462
1463 if (expected_size > 0 &&
1464 new_blob->length != expected_size) {
1465 drm_property_blob_put(new_blob);
1466 return -EINVAL;
1467 }
1468 if (expected_elem_size > 0 &&
1469 new_blob->length % expected_elem_size != 0) {
1470 drm_property_blob_put(new_blob);
1471 return -EINVAL;
1472 }
1473 }
1474
1475 *replaced |= drm_property_replace_blob(blob, new_blob);
1476 drm_property_blob_put(new_blob);
1477
1478 return 0;
1479 }
1480
1481 int dm_drm_plane_set_property(struct drm_plane *plane,
1482 struct drm_plane_state *state,
1483 struct drm_property *property,
1484 uint64_t val)
1485 {
1486 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1487 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1488 int ret = 0;
1489 bool replaced = false;
1490
1491 if (property == adev->dm.degamma_lut_property) {
1492 ret = atomic_replace_property_blob_from_id(adev_to_drm(adev),
1493 &dm_plane_state->degamma_lut,
1494 val, -1, sizeof(struct drm_color_lut),
1495 &replaced);
1496 } else if (property == adev->dm.ctm_property) {
1497 ret = atomic_replace_property_blob_from_id(adev_to_drm(adev),
1498 &dm_plane_state->ctm,
1499 val,
1500 sizeof(struct drm_color_ctm), -1,
1501 &replaced);
1502 } else if (property == adev->dm.sdr_boost_property) {
1503 dm_plane_state->sdr_boost = val;
1504 } else {
1505 return -EINVAL;
1506 }
1507
1508 return ret;
1509 }
1510
1511 int dm_drm_plane_get_property(struct drm_plane *plane,
1512 const struct drm_plane_state *state,
1513 struct drm_property *property,
1514 uint64_t *val)
1515 {
1516 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1517 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1518
1519 if (property == adev->dm.degamma_lut_property) {
1520 *val = (dm_plane_state->degamma_lut) ?
1521 dm_plane_state->degamma_lut->base.id : 0;
1522 } else if (property == adev->dm.ctm_property) {
1523 *val = (dm_plane_state->ctm) ? dm_plane_state->ctm->base.id : 0;
1524 } else if (property == adev->dm.sdr_boost_property) {
1525 *val = dm_plane_state->sdr_boost;
1526 } else {
1527 return -EINVAL;
1528 }
1529
1530 return 0;
1531 }
1532 #endif
1533
1534 static const struct drm_plane_funcs dm_plane_funcs = {
1535 .update_plane = drm_atomic_helper_update_plane,
1536 .disable_plane = drm_atomic_helper_disable_plane,
1537 .destroy = drm_plane_helper_destroy,
1538 .reset = dm_drm_plane_reset,
1539 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
1540 .atomic_destroy_state = dm_drm_plane_destroy_state,
1541 .format_mod_supported = dm_plane_format_mod_supported,
1542 #ifdef CONFIG_DRM_AMD_DC_HDR
1543 .atomic_set_property = dm_drm_plane_set_property,
1544 .atomic_get_property = dm_drm_plane_get_property,
1545 #endif
1546 };
1547
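/*
 * Initialize a DRM plane for the display manager: register the supported
 * formats and modifiers and attach blending, color-encoding, rotation and
 * damage-clip properties where the hardware and DC plane caps allow.
 */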
1548 int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
1549 struct drm_plane *plane,
1550 unsigned long possible_crtcs,
1551 const struct dc_plane_cap *plane_cap)
1552 {
1553 uint32_t formats[32];
1554 int num_formats;
1555 int res = -EPERM;
1556 unsigned int supported_rotations;
1557 uint64_t *modifiers = NULL;
1558
1559 num_formats = get_plane_formats(plane, plane_cap, formats,
1560 ARRAY_SIZE(formats));
1561
1562 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
1563 if (res)
1564 return res;
1565
1566 if (modifiers == NULL)
1567 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
1568
1569 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
1570 &dm_plane_funcs, formats, num_formats,
1571 modifiers, plane->type, NULL);
1572 kfree(modifiers);
1573 if (res)
1574 return res;
1575
1576 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
1577 plane_cap && plane_cap->per_pixel_alpha) {
1578 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
1579 BIT(DRM_MODE_BLEND_PREMULTI) |
1580 BIT(DRM_MODE_BLEND_COVERAGE);
1581
1582 drm_plane_create_alpha_property(plane);
1583 drm_plane_create_blend_mode_property(plane, blend_caps);
1584 }
1585
1586 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
1587 plane_cap &&
1588 (plane_cap->pixel_format_support.nv12 ||
1589 plane_cap->pixel_format_support.p010)) {
1590 /* This only affects YUV formats. */
1591 drm_plane_create_color_properties(
1592 plane,
1593 BIT(DRM_COLOR_YCBCR_BT601) |
1594 BIT(DRM_COLOR_YCBCR_BT709) |
1595 BIT(DRM_COLOR_YCBCR_BT2020),
1596 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
1597 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
1598 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
1599 }
1600
1601 supported_rotations =
1602 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
1603 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
1604
1605 if (dm->adev->asic_type >= CHIP_BONAIRE &&
1606 plane->type != DRM_PLANE_TYPE_CURSOR)
1607 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
1608 supported_rotations);
1609
1610 if (dm->adev->ip_versions[DCE_HWIP][0] > IP_VERSION(3, 0, 1) &&
1611 plane->type != DRM_PLANE_TYPE_CURSOR)
1612 drm_plane_enable_fb_damage_clips(plane);
1613
1614 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
1615
1616 #ifdef CONFIG_DRM_AMD_DC_HDR
1617 attach_color_mgmt_properties(dm, plane);
1618 #endif
1619 /* Create (reset) the plane state */
1620 if (plane->funcs->reset)
1621 plane->funcs->reset(plane);
1622
1623 return 0;
1624 }
1625
1626