/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/**
 * This file defines external dependencies of Display Core.
 */

#ifndef __DM_SERVICES_H__

#define __DM_SERVICES_H__

#include "amdgpu_dm_trace.h"

/* TODO: remove when DC is complete. */
#include "dm_services_types.h"
#include "logger_interface.h"
#include "link_service_types.h"

#undef DEPRECATED

struct dmub_srv;
struct dc_dmub_srv;

irq_handler_idx dm_register_interrupt(
	struct dc_context *ctx,
	struct dc_interrupt_params *int_params,
	interrupt_handler ih,
	void *handler_args);


/*
 *
 * GPU registers access
 *
 */
uint32_t dm_read_reg_func(
	const struct dc_context *ctx,
	uint32_t address,
	const char *func_name);
/* enable for debugging new code, this adds 50k to the driver size. */
/* #define DM_CHECK_ADDR_0 */

#define dm_read_reg(ctx, address)	\
		dm_read_reg_func(ctx, address, __func__)



#define dm_write_reg(ctx, address, value)	\
	dm_write_reg_func(ctx, address, value, __func__)

static inline void dm_write_reg_func(
	const struct dc_context *ctx,
	uint32_t address,
	uint32_t value,
	const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

static inline uint32_t dm_read_index_reg(
	const struct dc_context *ctx,
	enum cgs_ind_reg addr_space,
	uint32_t index)
{
	return cgs_read_ind_register(ctx->cgs_device, addr_space, index);
}

static inline void dm_write_index_reg(
	const struct dc_context *ctx,
	enum cgs_ind_reg addr_space,
	uint32_t index,
	uint32_t value)
{
	cgs_write_ind_register(ctx->cgs_device, addr_space, index, value);
}

static inline uint32_t get_reg_field_value_ex(
	uint32_t reg_value,
	uint32_t mask,
	uint8_t shift)
{
	return (mask & reg_value) >> shift;
}

#define get_reg_field_value(reg_value, reg_name, reg_field)\
	get_reg_field_value_ex(\
		(reg_value),\
		reg_name ## __ ## reg_field ## _MASK,\
		reg_name ## __ ## reg_field ## __SHIFT)

static inline uint32_t set_reg_field_value_ex(
	uint32_t reg_value,
	uint32_t value,
	uint32_t mask,
	uint8_t shift)
{
	ASSERT(mask != 0);
	return (reg_value & ~mask) | (mask & (value << shift));
}

#define set_reg_field_value(reg_value, value, reg_name, reg_field)\
	(reg_value) = set_reg_field_value_ex(\
		(reg_value),\
		(value),\
		reg_name ## __ ## reg_field ## _MASK,\
		reg_name ## __ ## reg_field ## __SHIFT)
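
/*
 * Illustrative sketch (not part of this interface): a typical
 * read-modify-write of a single register field using dm_read_reg(),
 * set_reg_field_value() and dm_write_reg(). The register and field names
 * (FOO_CONTROL / FOO_ENABLE) are hypothetical placeholders, not real SoC
 * definitions; real callers use names from the generated register headers.
 *
 *	uint32_t value = dm_read_reg(ctx, mmFOO_CONTROL);
 *
 *	set_reg_field_value(value, 1, FOO_CONTROL, FOO_ENABLE);
 *	dm_write_reg(ctx, mmFOO_CONTROL, value);
 */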

uint32_t generic_reg_set_ex(const struct dc_context *ctx,
		uint32_t addr, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);

uint32_t generic_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);

struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub);
void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv);

void reg_sequence_start_gather(const struct dc_context *ctx);
void reg_sequence_start_execute(const struct dc_context *ctx);
void reg_sequence_wait_done(const struct dc_context *ctx);

#define FD(reg_field)	reg_field ## __SHIFT, \
						reg_field ## _MASK

/*
 * Poll the masked/shifted register field at 'addr' until it reads
 * condition_value, waiting delay_between_poll_us between reads and giving
 * up after time_out_num_tries attempts.
 */
void generic_reg_wait(const struct dc_context *ctx,
	uint32_t addr, uint32_t mask, uint32_t shift, uint32_t condition_value,
	unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
	const char *func_name, int line);
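
/*
 * Illustrative sketch (not part of this interface): waiting for a status
 * bit to assert via generic_reg_wait(). The register and field names are
 * hypothetical placeholders. The call below polls the FOO_DONE field every
 * 10 us, for up to 1000 tries, until it reads 1:
 *
 *	generic_reg_wait(ctx, mmFOO_STATUS,
 *		FOO_STATUS__FOO_DONE_MASK, FOO_STATUS__FOO_DONE__SHIFT, 1,
 *		10, 1000, __func__, __LINE__);
 */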

unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...);

/* These macros need to be used with soc15 registers in order to retrieve
 * the actual offset.
 */
#define dm_write_reg_soc15(ctx, reg, inst_offset, value)	\
		dm_write_reg_func(ctx, reg + DCE_BASE.instance[0].segment[reg##_BASE_IDX] + inst_offset, value, __func__)

#define dm_read_reg_soc15(ctx, reg, inst_offset)	\
		dm_read_reg_func(ctx, reg + DCE_BASE.instance[0].segment[reg##_BASE_IDX] + inst_offset, __func__)

#define generic_reg_update_soc15(ctx, inst_offset, reg_name, n, ...)\
		generic_reg_update_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] +  mm##reg_name + inst_offset, \
		n, __VA_ARGS__)

#define generic_reg_set_soc15(ctx, inst_offset, reg_name, n, ...)\
		generic_reg_set_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, 0, \
		n, __VA_ARGS__)

#define get_reg_field_value_soc15(reg_value, block, reg_num, reg_name, reg_field)\
	get_reg_field_value_ex(\
		(reg_value),\
		block ## reg_num ## _ ## reg_name ## __ ## reg_field ## _MASK,\
		block ## reg_num ## _ ## reg_name ## __ ## reg_field ## __SHIFT)

#define set_reg_field_value_soc15(reg_value, value, block, reg_num, reg_name, reg_field)\
	(reg_value) = set_reg_field_value_ex(\
		(reg_value),\
		(value),\
		block ## reg_num ## _ ## reg_name ## __ ## reg_field ## _MASK,\
		block ## reg_num ## _ ## reg_name ## __ ## reg_field ## __SHIFT)
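
/*
 * Illustrative sketch (not part of this interface): using the soc15
 * variants, which add the per-IP base-address segment to the relative
 * register offset. Register/field names (FOO_CONTROL, FOO_ENABLE, block
 * FOO, instance 0) are hypothetical placeholders, not real register
 * definitions.
 *
 *	uint32_t value = dm_read_reg_soc15(ctx, mmFOO_CONTROL, 0);
 *	uint32_t enabled = get_reg_field_value_soc15(value, FOO, 0, FOO_CONTROL, FOO_ENABLE);
 *
 *	set_reg_field_value_soc15(value, 1, FOO, 0, FOO_CONTROL, FOO_ENABLE);
 *	dm_write_reg_soc15(ctx, mmFOO_CONTROL, 0, value);
 */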

/**************************************
 * Power Play (PP) interfaces
 **************************************/

/* Gets valid clock levels from pplib
 *
 * input: clk_type - display clk / sclk / mem clk
 *
 * output: array of valid clock levels for the given type in ascending order,
 * with invalid levels filtered out
 *
 */
bool dm_pp_get_clock_levels_by_type(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels *clk_level_info);
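
/*
 * Illustrative sketch (not part of this interface): querying display clock
 * levels. This assumes the dm_pp_clock_levels layout from
 * dm_services_types.h (num_levels plus a clocks_in_khz array) and the
 * DM_PP_CLOCK_TYPE_DISPLAY_CLK / DC_LOG_DEBUG names from the DC headers.
 *
 *	struct dm_pp_clock_levels levels = {0};
 *	unsigned int i;
 *
 *	if (dm_pp_get_clock_levels_by_type(ctx, DM_PP_CLOCK_TYPE_DISPLAY_CLK, &levels))
 *		for (i = 0; i < levels.num_levels; i++)
 *			DC_LOG_DEBUG("DISPCLK level %u: %u kHz", i, levels.clocks_in_khz[i]);
 */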

bool dm_pp_get_clock_levels_by_type_with_latency(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_latency *clk_level_info);

bool dm_pp_get_clock_levels_by_type_with_voltage(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_voltage *clk_level_info);

bool dm_pp_notify_wm_clock_changes(
	const struct dc_context *ctx,
	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges);

void dm_pp_get_funcs(struct dc_context *ctx,
		struct pp_smu_funcs *funcs);

/* DAL calls this function to notify PP about completion of Mode Set.
 * For PP it means that current DCE clocks are those which were returned
 * by dc_service_pp_pre_dce_clock_change(), in the 'output' parameter.
 *
 * If the clocks are higher than before, then PP does nothing.
 *
 * If the clocks are lower than before, then PP reduces the voltage.
 *
 * \returns	true - call is successful
 *		false - call failed
 */
bool dm_pp_apply_display_requirements(
	const struct dc_context *ctx,
	const struct dm_pp_display_configuration *pp_display_cfg);
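
/*
 * Illustrative sketch (not part of this interface): notifying PP after a
 * mode set completes. The display_count field is shown only as an
 * assumption for illustration; the DM normally fills the full
 * dm_pp_display_configuration from the current display state before
 * calling in.
 *
 *	struct dm_pp_display_configuration pp_display_cfg = {0};
 *
 *	pp_display_cfg.display_count = 1;
 *	if (!dm_pp_apply_display_requirements(ctx, &pp_display_cfg))
 *		DC_LOG_WARNING("%s: PP rejected display requirements", __func__);
 */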

bool dm_pp_apply_power_level_change_request(
	const struct dc_context *ctx,
	struct dm_pp_power_level_change_request *level_change_req);

bool dm_pp_apply_clock_for_voltage_request(
	const struct dc_context *ctx,
	struct dm_pp_clock_for_voltage_req *clock_for_voltage_req);

bool dm_pp_get_static_clocks(
	const struct dc_context *ctx,
	struct dm_pp_static_clock_info *static_clk_info);

/****** end of PP interfaces ******/

struct persistent_data_flag {
	bool save_per_link;
	bool save_per_edid;
};

bool dm_query_extended_brightness_caps
	(struct dc_context *ctx, enum dm_acpi_display_type display,
			struct dm_acpi_atif_backlight_caps *pCaps);

bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned int controller_id);

/*
 *
 * print-out services
 *
 */
#define dm_log_to_buffer(buffer, size, fmt, args)\
	vsnprintf(buffer, size, fmt, args)

static inline unsigned long long dm_get_timestamp(struct dc_context *ctx)
{
	return ktime_get_raw_ns();
}

unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
		unsigned long long current_time_stamp,
		unsigned long long last_time_stamp);
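
/*
 * Illustrative sketch (not part of this interface): timing an operation
 * with dm_get_timestamp() and dm_get_elapse_time_in_ns(). do_something()
 * is a hypothetical placeholder for the work being measured.
 *
 *	unsigned long long start = dm_get_timestamp(ctx);
 *
 *	do_something();
 *	DC_LOG_DEBUG("%s took %llu ns", __func__,
 *		dm_get_elapse_time_in_ns(ctx, dm_get_timestamp(ctx), start));
 */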

/*
 * performance tracing
 */
#define PERF_TRACE()	trace_amdgpu_dc_performance(CTX->perf_trace->read_count,\
		CTX->perf_trace->write_count, &CTX->perf_trace->last_entry_read,\
		&CTX->perf_trace->last_entry_write, __func__, __LINE__)
#define PERF_TRACE_CTX(__CTX)	trace_amdgpu_dc_performance(__CTX->perf_trace->read_count,\
		__CTX->perf_trace->write_count, &__CTX->perf_trace->last_entry_read,\
		&__CTX->perf_trace->last_entry_write, __func__, __LINE__)
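
/*
 * Note (illustrative, not part of this interface): PERF_TRACE() expands
 * against an identifier named CTX, so it can only be used where the
 * calling code provides CTX (typically a local macro naming the current
 * dc_context); PERF_TRACE_CTX() takes the context explicitly, e.g. with
 * a struct dc pointer in scope:
 *
 *	PERF_TRACE_CTX(dc->ctx);
 */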


/*
 * Debug and verification hooks
 */

void dm_dtn_log_begin(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx);
void dm_dtn_log_append_v(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx,
	const char *msg, ...);
void dm_dtn_log_end(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx);
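
/*
 * Illustrative sketch (not part of this interface): the expected
 * begin/append/end sequence when emitting DTN log output. pipe_idx and
 * enabled are hypothetical placeholders.
 *
 *	dm_dtn_log_begin(ctx, log_ctx);
 *	dm_dtn_log_append_v(ctx, log_ctx, "pipe %d: enabled=%d\n", pipe_idx, enabled);
 *	dm_dtn_log_end(ctx, log_ctx);
 */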

#endif /* __DM_SERVICES_H__ */