/* backing_ops.c - query/set operations on saved SPU context.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * These register operations allow SPUFS to operate on saved
 * SPU contexts rather than hardware.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/poll.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_info.h>
#include <asm/mmu_context.h>
#include "spufs.h"

/*
 * Reads/writes to various problem and priv2 registers require
 * state changes, i.e. generate SPU events, modify channel
 * counts, etc.
 */

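/*
 * Raise an event in the saved SPU event state.  Channel 0 holds the
 * event status and channel 1 the event mask; the count on channel 0 is
 * set to 1 only when the event is newly raised and enabled in the mask,
 * so that a pending channel read becomes satisfiable.
 */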
static void gen_spu_event(struct spu_context *ctx, u32 event)
{
	u64 ch0_cnt;
	u64 ch0_data;
	u64 ch1_data;

	ch0_cnt = ctx->csa.spu_chnlcnt_RW[0];
	ch0_data = ctx->csa.spu_chnldata_RW[0];
	ch1_data = ctx->csa.spu_chnldata_RW[1];
	ctx->csa.spu_chnldata_RW[0] |= event;
	if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) {
		ctx->csa.spu_chnlcnt_RW[0] = 1;
	}
}

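/*
 * Pop one word from the saved SPU-to-PU mailbox.  Returns the number of
 * bytes read (4) when mb_stat_R shows data available, or 0 when the
 * mailbox is empty.
 */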
static int spu_backing_mbox_read(struct spu_context *ctx, u32 *data)
{
	u32 mbox_stat;
	int ret = 0;

	spin_lock(&ctx->csa.register_lock);
	mbox_stat = ctx->csa.prob.mb_stat_R;
	if (mbox_stat & 0x0000ff) {
		/* Read the first available word.
		 * Implementation note: the depth
		 * of pu_mb_R is currently 1.
		 */
		*data = ctx->csa.prob.pu_mb_R;
		ctx->csa.prob.mb_stat_R &= ~(0x0000ff);
		ctx->csa.spu_chnlcnt_RW[28] = 1;
		gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT);
		ret = 4;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
{
	return ctx->csa.prob.mb_stat_R;
}

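/*
 * Poll the saved mailbox state: POLLIN reports data pending in the
 * interrupting mailbox, POLLOUT reports free space in the SPU mailbox.
 * For whichever direction is not ready, the class 2 interrupt is armed
 * so the waiter is woken when the state changes.
 */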
static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
					  unsigned int events)
{
	unsigned int ret;
	u32 stat;

	ret = 0;
	spin_lock_irq(&ctx->csa.register_lock);
	stat = ctx->csa.prob.mb_stat_R;

	/* if the requested event is there, return the poll
	   mask, otherwise enable the interrupt to get notified,
	   but first mark any pending interrupts as done so
	   we don't get woken up unnecessarily */

	if (events & (POLLIN | POLLRDNORM)) {
		if (stat & 0xff0000)
			ret |= POLLIN | POLLRDNORM;
		else {
			ctx->csa.priv1.int_stat_class2_RW &=
				~CLASS2_MAILBOX_INTR;
			ctx->csa.priv1.int_mask_class2_RW |=
				CLASS2_ENABLE_MAILBOX_INTR;
		}
	}
	if (events & (POLLOUT | POLLWRNORM)) {
		if (stat & 0x00ff00)
			ret |= POLLOUT | POLLWRNORM;
		else {
			ctx->csa.priv1.int_stat_class2_RW &=
				~CLASS2_MAILBOX_THRESHOLD_INTR;
			ctx->csa.priv1.int_mask_class2_RW |=
				CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
		}
	}
	spin_unlock_irq(&ctx->csa.register_lock);
	return ret;
}

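/*
 * Pop one word from the saved interrupting (PU interrupt) mailbox, or
 * arm the class 2 mailbox interrupt and return 0 when it is empty so
 * the caller can sleep until data arrives.
 */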
static int spu_backing_ibox_read(struct spu_context *ctx, u32 *data)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.prob.mb_stat_R & 0xff0000) {
		/* Read the first available word.
		 * Implementation note: the depth
		 * of puint_mb_R is currently 1.
		 */
		*data = ctx->csa.priv2.puint_mb_R;
		ctx->csa.prob.mb_stat_R &= ~(0xff0000);
		ctx->csa.spu_chnlcnt_RW[30] = 1;
		gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt */
		ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR;
		ret = 0;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

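/*
 * Push one word into the saved PU-to-SPU mailbox.  The count on channel
 * 29 tracks the slots already in use, while mb_stat_R advertises the
 * space remaining.  Returns 4 on success, or 0 (with the threshold
 * interrupt armed) when the mailbox is full.
 */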
static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) {
		int slot = ctx->csa.spu_chnlcnt_RW[29];
		int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;

		/* We have space to write wbox_data.
		 * Implementation note: the depth
		 * of spu_mb_W is currently 4.
		 */
		BUG_ON(avail != (4 - slot));
		ctx->csa.spu_mailbox_data[slot] = data;
		ctx->csa.spu_chnlcnt_RW[29] = ++slot;
		ctx->csa.prob.mb_stat_R &= ~(0x00ff00);
		ctx->csa.prob.mb_stat_R |= (((4 - slot) & 0xff) << 8);
		gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt when space
		   becomes available */
		ctx->csa.priv1.int_mask_class2_RW |=
			CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
		ret = 0;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

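/*
 * Signal notification registers.  Reads return the saved channel data
 * for channels 3 and 4; writes either overwrite or OR in the new value,
 * depending on the logical-OR mode configured in spu_cfg_RW, and make
 * the channel count nonzero so a read sees the signal as pending.
 */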
static u32 spu_backing_signal1_read(struct spu_context *ctx)
{
	return ctx->csa.spu_chnldata_RW[3];
}

static void spu_backing_signal1_write(struct spu_context *ctx, u32 data)
{
	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.priv2.spu_cfg_RW & 0x1)
		ctx->csa.spu_chnldata_RW[3] |= data;
	else
		ctx->csa.spu_chnldata_RW[3] = data;
	ctx->csa.spu_chnlcnt_RW[3] = 1;
	gen_spu_event(ctx, MFC_SIGNAL_1_EVENT);
	spin_unlock(&ctx->csa.register_lock);
}

static u32 spu_backing_signal2_read(struct spu_context *ctx)
{
	return ctx->csa.spu_chnldata_RW[4];
}

static void spu_backing_signal2_write(struct spu_context *ctx, u32 data)
{
	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.priv2.spu_cfg_RW & 0x2)
		ctx->csa.spu_chnldata_RW[4] |= data;
	else
		ctx->csa.spu_chnldata_RW[4] = data;
	ctx->csa.spu_chnlcnt_RW[4] = 1;
	gen_spu_event(ctx, MFC_SIGNAL_2_EVENT);
	spin_unlock(&ctx->csa.register_lock);
}

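/*
 * Signal type controls: bit 0 of spu_cfg_RW selects logical-OR mode for
 * signal 1 and bit 1 does the same for signal 2; a clear bit means
 * overwrite mode.
 */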
static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val)
{
	u64 tmp;

	spin_lock(&ctx->csa.register_lock);
	tmp = ctx->csa.priv2.spu_cfg_RW;
	if (val)
		tmp |= 1;
	else
		tmp &= ~1;
	ctx->csa.priv2.spu_cfg_RW = tmp;
	spin_unlock(&ctx->csa.register_lock);
}

static u64 spu_backing_signal1_type_get(struct spu_context *ctx)
{
	return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0);
}

static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val)
{
	u64 tmp;

	spin_lock(&ctx->csa.register_lock);
	tmp = ctx->csa.priv2.spu_cfg_RW;
	if (val)
		tmp |= 2;
	else
		tmp &= ~2;
	ctx->csa.priv2.spu_cfg_RW = tmp;
	spin_unlock(&ctx->csa.register_lock);
}

static u64 spu_backing_signal2_type_get(struct spu_context *ctx)
{
	return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0);
}

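/*
 * Simple accessors that mirror problem-state and priv2 registers, plus
 * the local store image, kept in the saved context.
 */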
static u32 spu_backing_npc_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_npc_RW;
}

static void spu_backing_npc_write(struct spu_context *ctx, u32 val)
{
	ctx->csa.prob.spu_npc_RW = val;
}

static u32 spu_backing_status_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_status_R;
}

static char *spu_backing_get_ls(struct spu_context *ctx)
{
	return ctx->csa.lscsa->ls;
}

static void spu_backing_privcntl_write(struct spu_context *ctx, u64 val)
{
	ctx->csa.priv2.spu_privcntl_RW = val;
}

static u32 spu_backing_runcntl_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_runcntl_RW;
}

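/*
 * Update the saved run-control register and keep the status register
 * consistent with it: starting the SPU clears the stop-reason bits and
 * marks it running; stopping it clears the running bit.
 */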
static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
{
	spin_lock(&ctx->csa.register_lock);
	ctx->csa.prob.spu_runcntl_RW = val;
	if (val & SPU_RUNCNTL_RUNNABLE) {
		ctx->csa.prob.spu_status_R &=
			~SPU_STATUS_STOPPED_BY_STOP &
			~SPU_STATUS_STOPPED_BY_HALT &
			~SPU_STATUS_SINGLE_STEP &
			~SPU_STATUS_INVALID_INSTR &
			~SPU_STATUS_INVALID_CH;
		ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
	} else {
		ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
	}
	spin_unlock(&ctx->csa.register_lock);
}

static void spu_backing_runcntl_stop(struct spu_context *ctx)
{
	spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
}

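/*
 * Master run control: set or clear the master run control bit in the
 * saved copy of MFC state register 1.
 */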
static void spu_backing_master_start(struct spu_context *ctx)
{
	struct spu_state *csa = &ctx->csa;
	u64 sr1;

	spin_lock(&csa->register_lock);
	sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	csa->priv1.mfc_sr1_RW = sr1;
	spin_unlock(&csa->register_lock);
}

static void spu_backing_master_stop(struct spu_context *ctx)
{
	struct spu_state *csa = &ctx->csa;
	u64 sr1;

	spin_lock(&csa->register_lock);
	sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	csa->priv1.mfc_sr1_RW = sr1;
	spin_unlock(&csa->register_lock);
}

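/*
 * Set up a saved MFC tag-group query.  Fails with -EAGAIN if a query is
 * already pending; otherwise records the query mask and type and trims
 * dma_tagstatus_R down to the requested tag groups.
 */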
static int spu_backing_set_mfc_query(struct spu_context *ctx, u32 mask,
					u32 mode)
{
	struct spu_problem_collapsed *prob = &ctx->csa.prob;
	int ret;

	spin_lock(&ctx->csa.register_lock);
	ret = -EAGAIN;
	if (prob->dma_querytype_RW)
		goto out;
	ret = 0;
	/* FIXME: what are the side-effects of this? */
	prob->dma_querymask_RW = mask;
	prob->dma_querytype_RW = mode;
	/* In the current implementation, the SPU context is always
	 * acquired in runnable state when new bits are added to the
	 * mask (tagwait), so it's sufficient just to mask
	 * dma_tagstatus_R with the 'mask' parameter here.
	 */
	ctx->csa.prob.dma_tagstatus_R &= mask;
out:
	spin_unlock(&ctx->csa.register_lock);

	return ret;
}

static u32 spu_backing_read_mfc_tagstatus(struct spu_context *ctx)
{
	return ctx->csa.prob.dma_tagstatus_R;
}

static u32 spu_backing_get_mfc_free_elements(struct spu_context *ctx)
{
	return ctx->csa.prob.dma_qstatus_R;
}

static int spu_backing_send_mfc_command(struct spu_context *ctx,
					struct mfc_dma_command *cmd)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	ret = -EAGAIN;
	/* FIXME: set up priv2->puq */
	spin_unlock(&ctx->csa.register_lock);

	return ret;
}

static void spu_backing_restart_dma(struct spu_context *ctx)
{
	ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
}

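/*
 * Operations vector used while a context lives in its saved state
 * rather than on a physical SPU; spufs file operations go through this
 * table or its hardware counterpart (spu_hw_ops) without needing to
 * know where the context currently resides.
 */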
struct spu_context_ops spu_backing_ops = {
	.mbox_read = spu_backing_mbox_read,
	.mbox_stat_read = spu_backing_mbox_stat_read,
	.mbox_stat_poll = spu_backing_mbox_stat_poll,
	.ibox_read = spu_backing_ibox_read,
	.wbox_write = spu_backing_wbox_write,
	.signal1_read = spu_backing_signal1_read,
	.signal1_write = spu_backing_signal1_write,
	.signal2_read = spu_backing_signal2_read,
	.signal2_write = spu_backing_signal2_write,
	.signal1_type_set = spu_backing_signal1_type_set,
	.signal1_type_get = spu_backing_signal1_type_get,
	.signal2_type_set = spu_backing_signal2_type_set,
	.signal2_type_get = spu_backing_signal2_type_get,
	.npc_read = spu_backing_npc_read,
	.npc_write = spu_backing_npc_write,
	.status_read = spu_backing_status_read,
	.get_ls = spu_backing_get_ls,
	.privcntl_write = spu_backing_privcntl_write,
	.runcntl_read = spu_backing_runcntl_read,
	.runcntl_write = spu_backing_runcntl_write,
	.runcntl_stop = spu_backing_runcntl_stop,
	.master_start = spu_backing_master_start,
	.master_stop = spu_backing_master_stop,
	.set_mfc_query = spu_backing_set_mfc_query,
	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
	.send_mfc_command = spu_backing_send_mfc_command,
	.restart_dma = spu_backing_restart_dma,
};