/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp5_kms.h"
#include "mdp5_smp.h"

/* SMP - Shared Memory Pool
 *
 * These are shared between all the clients, where each plane in a
 * scanout buffer is an SMP client.  Ie. scanout of 3 plane I420 on
 * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
 *
 * Based on the size of the attached scanout buffer, a certain # of
 * blocks must be allocated to that client out of the shared pool.
 *
 * In some hw, some blocks are statically allocated for certain pipes
 * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
 *
 * Each block that can be dynamically allocated is in one of four states:
 *     free:
 *     The block is free.
 *
 *     pending:
 *     The block is allocated to some client and not free.
 *
 *     configured:
 *     The block is allocated to some client, and assigned to that
 *     client in MDP5_SMP_ALLOC registers.
 *
 *     inuse:
 *     The block is being actively used by a client.
 *
 * The updates happen in the following steps:
 *
 *  1) mdp5_smp_request():
 *     When plane scanout is set up, calculate the required number of
 *     blocks needed per client, and request them.  Blocks neither inuse,
 *     configured, nor pending by any other client are added to the
 *     client's pending set.
 *     For shrinking, blocks in pending but not in configured can be freed
 *     directly, but those already in configured will be freed later by
 *     mdp5_smp_commit().
 *
 *  2) mdp5_smp_configure():
 *     As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
 *     are configured for the union(pending, inuse).
 *     Current pending is copied to configured.
 *     It is assumed that mdp5_smp_request() and mdp5_smp_configure() are
 *     not run concurrently for the same pipe.
 *
 *  3) mdp5_smp_commit():
 *     After next vblank, copy configured -> inuse.  Optionally update
 *     MDP5_SMP_ALLOC registers if there are newly unused blocks.
 *
 *  4) mdp5_smp_release():
 *     Must be called after the pipe is disabled and no longer uses any SMB.
 *
 * On the next vblank after changes have been committed to hw, the
 * client's pending blocks become its in-use blocks (and no-longer-used
 * blocks become available to other clients).
 *
 * btw, hurray for confusing overloaded acronyms!  :-/
 *
 * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
 * should happen at (or before?) atomic->check().  And we'd need
 * an API to discard previous requests if the update is aborted or
 * test-only.
 *
 * TODO it would perhaps be nice to have debugfs to dump out kernel
 * inuse and pending state of all clients..
 */
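
/* For illustration, a hypothetical walk-through of the states above:
 * suppose client VIG0_Y holds 4 blocks inuse and the scanout config
 * changes so that it now needs 6:
 *
 *   mdp5_smp_request():   2 free blocks are reserved; pending = 6 blocks
 *   mdp5_smp_configure(): configured = pending; ALLOC regs are written
 *                         for union(pending, inuse)
 *   mdp5_smp_commit():    after vblank, inuse = configured (6 blocks)
 *
 * Shrinking follows the same steps, except that blocks dropped from the
 * configured set are only returned to the pool in mdp5_smp_commit(),
 * once scanout no longer touches them.
 */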

struct mdp5_smp {
	struct drm_device *dev;

	uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */

	int blk_cnt;
	int blk_size;

	spinlock_t state_lock;
	mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */

	struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};

static void update_smp_state(struct mdp5_smp *smp,
		u32 cid, mdp5_smp_state_t *assigned);

static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
	struct msm_drm_private *priv = smp->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
{
#define CID_UNUSED	0

	if (WARN_ON(plane >= pipe2nclients(pipe)))
		return CID_UNUSED;

	/*
	 * Note on SMP clients:
	 * For ViG pipes, the Y/Cr/Cb-component fetch clients are always
	 * consecutive, and in that order.
	 *
	 * e.g.:
	 * if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
	 *	Y  plane's client ID is N
	 *	Cr plane's client ID is N + 1
	 *	Cb plane's client ID is N + 2
	 */

	return mdp5_cfg->smp.clients[pipe] + plane;
}

/* step #1: update # of blocks pending for the client: */
static int smp_request_block(struct mdp5_smp *smp,
		u32 cid, int nblks)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct mdp5_client_smp_state *ps = &smp->client_state[cid];
	int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
	uint8_t reserved;
	unsigned long flags;

	reserved = smp->reserved[cid];

	spin_lock_irqsave(&smp->state_lock, flags);

	if (reserved) {
		nblks = max(0, nblks - reserved);
		DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
	}

	avail = cnt - bitmap_weight(smp->state, cnt);
	if (nblks > avail) {
		dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
				nblks, avail);
		ret = -ENOSPC;
		goto fail;
	}

	cur_nblks = bitmap_weight(ps->pending, cnt);
	if (nblks > cur_nblks) {
		/* grow the existing pending reservation: */
		for (i = cur_nblks; i < nblks; i++) {
			int blk = find_first_zero_bit(smp->state, cnt);
			set_bit(blk, ps->pending);
			set_bit(blk, smp->state);
		}
	} else {
		/* shrink the existing pending reservation: */
		for (i = cur_nblks; i > nblks; i--) {
			int blk = find_first_bit(ps->pending, cnt);
			clear_bit(blk, ps->pending);

			/* clear in global smp_state if not in configured,
			 * otherwise defer the release until _commit()
			 */
			if (!test_bit(blk, ps->configured))
				clear_bit(blk, smp->state);
		}
	}

	ret = 0;
fail:
	spin_unlock_irqrestore(&smp->state_lock, flags);
	return ret;
}

static void set_fifo_thresholds(struct mdp5_smp *smp,
		enum mdp5_pipe pipe, int nblks)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
	u32 val;

	/* 1/4 of SMP pool that is being fetched */
	val = (nblks * smp_entries_per_blk) / 4;
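
	/* e.g. with hypothetical numbers: for blk_size = 4096 bytes and
	 * 128-bit SMP entries, smp_entries_per_blk = 256; fetching
	 * nblks = 4 blocks gives val = 256, so the watermarks land at
	 * 1/4, 2/4 and 3/4 of the fetched pool: 256, 512 and 768 entries.
	 */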

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
}

/*
 * NOTE: looks like if horizontal decimation is used (if we supported that)
 * then the width used to calculate SMP block requirements is the post-
 * decimated width.  Ie. SMP buffering sits downstream of decimation (which
 * presumably happens during the dma from scanout buffer).
 */
int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
		const struct mdp_format *format, u32 width, bool hdecim)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct drm_device *dev = mdp5_kms->dev;
	int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
	int i, hsub, nplanes, nlines, nblks, ret;
	u32 fmt = format->base.pixel_format;

	nplanes = drm_format_num_planes(fmt);
	hsub = drm_format_horz_chroma_subsampling(fmt);

	/* different if BWC (compressed framebuffer?) enabled: */
	nlines = 2;

	/* Newer MDPs have split/packing logic, which fetches sub-sampled
	 * U and V components (splits them from Y if necessary) and packs
	 * them together, writes to SMP using a single client.
	 */
	if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
		fmt = DRM_FORMAT_NV24;
		nplanes = 2;

		/* if decimation is enabled, HW decimates less on the
		 * sub-sampled chroma components
		 */
		if (hdecim && (hsub > 1))
			hsub = 1;
	}

	for (i = 0, nblks = 0; i < nplanes; i++) {
		int n, fetch_stride, cpp;

		cpp = drm_format_plane_cpp(fmt, i);
		fetch_stride = width * cpp / (i ? hsub : 1);

		n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
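
		/* e.g. with hypothetical numbers: a 1920 pixel wide
		 * ARGB8888 plane (cpp = 4) and blk_size = 4096 bytes:
		 * fetch_stride = 7680, so n = DIV_ROUND_UP(7680 * 2, 4096)
		 * = 4 blocks for that client.
		 */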

		/* for hw rev v1.00 */
		if (rev == 0)
			n = roundup_pow_of_two(n);

		DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
		ret = smp_request_block(smp, pipe2client(pipe, i), n);
		if (ret) {
			dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
					n, ret);
			return ret;
		}

		nblks += n;
	}

	set_fifo_thresholds(smp, pipe, nblks);

	return 0;
}

/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int i;
	unsigned long flags;
	int cnt = smp->blk_cnt;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		mdp5_smp_state_t assigned;
		u32 cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		spin_lock_irqsave(&smp->state_lock, flags);

		/* clear hw assignment */
		bitmap_or(assigned, ps->inuse, ps->configured, cnt);
		update_smp_state(smp, CID_UNUSED, &assigned);

		/* free to global pool */
		bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
		bitmap_andnot(smp->state, smp->state, assigned, cnt);

		/* clear client's info */
		bitmap_zero(ps->pending, cnt);
		bitmap_zero(ps->configured, cnt);
		bitmap_zero(ps->inuse, cnt);

		spin_unlock_irqrestore(&smp->state_lock, flags);
	}

	set_fifo_thresholds(smp, pipe, 0);
}

static void update_smp_state(struct mdp5_smp *smp,
		u32 cid, mdp5_smp_state_t *assigned)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int cnt = smp->blk_cnt;
	u32 blk, val;

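	/* Each SMP_ALLOC register packs three client-ID fields, so block
	 * blk is programmed via register blk / 3, field blk % 3: */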
	for_each_set_bit(blk, *assigned, cnt) {
		int idx = blk / 3;
		int fld = blk % 3;

		val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));

		switch (fld) {
		case 0:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
			break;
		case 1:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
			break;
		case 2:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
			break;
		}

		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
	}
}

/* step #2: configure hw for union(pending, inuse): */
void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int cnt = smp->blk_cnt;
	mdp5_smp_state_t assigned;
	int i;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		/*
		 * if vblank has not happened since the last smp_configure,
		 * skip this configure for now
		 */
		if (!bitmap_equal(ps->inuse, ps->configured, cnt))
			continue;

		bitmap_copy(ps->configured, ps->pending, cnt);
		bitmap_or(assigned, ps->inuse, ps->configured, cnt);
		update_smp_state(smp, cid, &assigned);
	}
}

/* step #3: after vblank, copy configured -> inuse: */
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int cnt = smp->blk_cnt;
	mdp5_smp_state_t released;
	int i;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		/*
		 * Figure out if there are any blocks we were previously
		 * using, which can be released and made available to other
		 * clients:
		 */
		if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
			unsigned long flags;

			spin_lock_irqsave(&smp->state_lock, flags);
			/* clear released blocks: */
			bitmap_andnot(smp->state, smp->state, released, cnt);
			spin_unlock_irqrestore(&smp->state_lock, flags);

			update_smp_state(smp, CID_UNUSED, &released);
		}

		bitmap_copy(ps->inuse, ps->configured, cnt);
	}
}

void mdp5_smp_destroy(struct mdp5_smp *smp)
{
	kfree(smp);
}

struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
{
	struct mdp5_smp *smp = NULL;
	int ret;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (unlikely(!smp)) {
		ret = -ENOMEM;
		goto fail;
	}

	smp->dev = dev;
	smp->blk_cnt = cfg->mmb_count;
	smp->blk_size = cfg->mmb_size;

	/* statically tied MMBs cannot be re-allocated: */
	bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
	memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
	spin_lock_init(&smp->state_lock);

	return smp;
fail:
	if (smp)
		mdp5_smp_destroy(smp);

	return ERR_PTR(ret);
}