• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * proc.c
3  *
4  * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5  *
6  * Processor interface at the driver level.
7  *
8  * Copyright (C) 2005-2006 Texas Instruments, Inc.
9  *
10  * This package is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  *
14  * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17  */
18 
19 #include <linux/types.h>
20 /* ------------------------------------ Host OS */
21 #include <linux/dma-mapping.h>
22 #include <linux/scatterlist.h>
23 #include <dspbridge/host_os.h>
24 
25 /*  ----------------------------------- DSP/BIOS Bridge */
26 #include <dspbridge/dbdefs.h>
27 
28 /*  ----------------------------------- OS Adaptation Layer */
29 #include <dspbridge/ntfy.h>
30 #include <dspbridge/sync.h>
31 /*  ----------------------------------- Bridge Driver */
32 #include <dspbridge/dspdefs.h>
33 #include <dspbridge/dspdeh.h>
34 /*  ----------------------------------- Platform Manager */
35 #include <dspbridge/cod.h>
36 #include <dspbridge/dev.h>
37 #include <dspbridge/procpriv.h>
38 #include <dspbridge/dmm.h>
39 
40 /*  ----------------------------------- Resource Manager */
41 #include <dspbridge/mgr.h>
42 #include <dspbridge/node.h>
43 #include <dspbridge/nldr.h>
44 #include <dspbridge/rmm.h>
45 
46 /*  ----------------------------------- Others */
47 #include <dspbridge/dbdcd.h>
48 #include <dspbridge/msg.h>
49 #include <dspbridge/dspioctl.h>
50 #include <dspbridge/drv.h>
51 
52 /*  ----------------------------------- This */
53 #include <dspbridge/proc.h>
54 #include <dspbridge/pwr.h>
55 
56 #include <dspbridge/resourcecleanup.h>
57 /*  ----------------------------------- Defines, Data Structures, Typedefs */
58 #define MAXCMDLINELEN       255
59 #define PROC_ENVPROCID      "PROC_ID=%d"
60 #define MAXPROCIDLEN	(8 + 5)
61 #define PROC_DFLT_TIMEOUT   10000	/* Time out in milliseconds */
62 #define PWR_TIMEOUT	 500	/* Sleep/wake timout in msec */
63 #define EXTEND	      "_EXT_END"	/* Extmem end addr in DSP binary */
64 
65 #define DSP_CACHE_LINE 128
66 
67 #define BUFMODE_MASK	(3 << 14)
68 
69 /* Buffer modes from DSP perspective */
70 #define RBUF		0x4000		/* Input buffer */
71 #define WBUF		0x8000		/* Output Buffer */
72 
73 extern struct device *bridge;
74 
75 /*  ----------------------------------- Globals */
76 
77 /* The proc_object structure. */
/*
 * The proc_object structure: per-attach state for one DSP processor.
 * Allocated in proc_attach()/proc_auto_start(), freed in proc_detach().
 */
struct proc_object {
	struct list_head link;	/* Link to next proc_object */
	struct dev_object *dev_obj;	/* Device this PROC represents */
	u32 process;		/* Process owning this Processor (TGID) */
	struct mgr_object *mgr_obj;	/* Manager Object Handle */
	u32 attach_count;	/* Processor attach count */
	u32 processor_id;	/* Processor number */
	u32 timeout;		/* Time out count */
	enum dsp_procstate proc_state;	/* Processor state */
	u32 unit;		/* DDSP unit number */
	bool is_already_attached;	/*
					 * True if the Device below has
					 * GPP Client attached
					 */
	struct ntfy_object *ntfy_obj;	/* Manages  notifications */
	/* Bridge Context Handle */
	struct bridge_dev_context *bridge_context;
	/* Function interface to Bridge driver */
	struct bridge_drv_interface *intf_fxns;
	char *last_coff;	/* Name of last loaded COFF (kmalloc'd) */
	struct list_head proc_list;
};
100 
/* Serializes DMM map lookups in proc_begin_dma()/proc_end_dma() */
DEFINE_MUTEX(proc_lock);	/* For critical sections */

/*  ----------------------------------- Function Prototypes */
static int proc_monitor(struct proc_object *proc_obj);
static s32 get_envp_count(char **envp);
static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
			   s32 cnew_envp, char *sz_var);
108 
/*
 * remember mapping information
 *
 * Record a completed DMM mapping (mpu_addr -> dsp_addr, 'size' bytes) on
 * the per-process dmm_map_list so later DMA and unmap requests can find it.
 * Returns the new tracking object, or NULL on allocation failure.
 *
 * NOTE(review): assumes 'size' is a multiple of PG_SIZE4K; the integer
 * division below would otherwise undercount pages -- confirm with callers.
 */
static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
				u32 mpu_addr, u32 dsp_addr, u32 size)
{
	struct dmm_map_object *map_obj;

	/* One page-pointer slot per 4K page covered by the mapping */
	u32 num_usr_pgs = size / PG_SIZE4K;

	pr_debug("%s: adding map info: mpu_addr 0x%x virt 0x%x size 0x%x\n",
						__func__, mpu_addr,
						dsp_addr, size);

	map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL);
	if (!map_obj)
		return NULL;

	INIT_LIST_HEAD(&map_obj->link);

	map_obj->pages = kcalloc(num_usr_pgs, sizeof(struct page *),
				 GFP_KERNEL);
	if (!map_obj->pages) {
		/* Undo the object allocation; caller sees a plain failure */
		kfree(map_obj);
		return NULL;
	}

	map_obj->mpu_addr = mpu_addr;
	map_obj->dsp_addr = dsp_addr;
	map_obj->size = size;
	map_obj->num_usr_pgs = num_usr_pgs;

	/* Publish the fully-initialized entry under the per-process lock */
	spin_lock(&pr_ctxt->dmm_map_lock);
	list_add(&map_obj->link, &pr_ctxt->dmm_map_list);
	spin_unlock(&pr_ctxt->dmm_map_lock);

	return map_obj;
}
145 
/*
 * Return non-zero iff map_obj describes exactly the region
 * (dsp_addr, size).  A DSP-address match with a mismatched size is
 * logged, since it usually indicates a bogus unmap request.
 */
static int match_exact_map_obj(struct dmm_map_object *map_obj,
					u32 dsp_addr, u32 size)
{
	int addr_matches = (map_obj->dsp_addr == dsp_addr);
	int size_matches = (map_obj->size == size);

	if (addr_matches && !size_matches)
		pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n",
				__func__, dsp_addr, map_obj->size, size);

	return addr_matches && size_matches;
}
156 
/*
 * Find the dmm_map_object recorded for (dsp_addr, size) on the
 * per-process list, unlink it, and free it together with its page
 * array and any leftover scatter-gather table.  Logs an error when
 * no exact match exists; nothing is freed in that case.
 */
static void remove_mapping_information(struct process_context *pr_ctxt,
						u32 dsp_addr, u32 size)
{
	struct dmm_map_object *map_obj;

	pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
							dsp_addr, size);

	spin_lock(&pr_ctxt->dmm_map_lock);
	list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
		pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
							__func__,
							map_obj->mpu_addr,
							map_obj->dsp_addr,
							map_obj->size);

		if (match_exact_map_obj(map_obj, dsp_addr, size)) {
			pr_debug("%s: match, deleting map info\n", __func__);
			/* Unlink before freeing; we hold dmm_map_lock */
			list_del(&map_obj->link);
			/* sg may be non-NULL if userspace skipped end-of-DMA */
			kfree(map_obj->dma_info.sg);
			kfree(map_obj->pages);
			kfree(map_obj);
			goto out;
		}
		pr_debug("%s: candidate didn't match\n", __func__);
	}

	pr_err("%s: failed to find given map info\n", __func__);
out:
	spin_unlock(&pr_ctxt->dmm_map_lock);
}
188 
/*
 * Return non-zero iff the user range [mpu_addr, mpu_addr + size) lies
 * entirely inside the region described by map_obj.
 */
static int match_containing_map_obj(struct dmm_map_object *map_obj,
					u32 mpu_addr, u32 size)
{
	u32 region_start = map_obj->mpu_addr;
	u32 region_end = region_start + map_obj->size;

	return mpu_addr >= region_start && mpu_addr + size <= region_end;
}
197 
/*
 * Walk the per-process dmm_map_list and return the first mapping that
 * fully contains the user range [mpu_addr, mpu_addr + size), or NULL
 * if no recorded mapping covers it.  Traversal is protected by
 * dmm_map_lock; the returned pointer is NOT reference-counted, so the
 * caller must ensure the mapping cannot be removed while it is used
 * (proc_begin_dma/proc_end_dma rely on proc_lock for this).
 */
static struct dmm_map_object *find_containing_mapping(
				struct process_context *pr_ctxt,
				u32 mpu_addr, u32 size)
{
	struct dmm_map_object *map_obj;
	pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__,
						mpu_addr, size);

	spin_lock(&pr_ctxt->dmm_map_lock);
	list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
		pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
						__func__,
						map_obj->mpu_addr,
						map_obj->dsp_addr,
						map_obj->size);
		if (match_containing_map_obj(map_obj, mpu_addr, size)) {
			pr_debug("%s: match!\n", __func__);
			goto out;
		}

		pr_debug("%s: no match!\n", __func__);
	}

	/* Fell off the list: report "not found" */
	map_obj = NULL;
out:
	spin_unlock(&pr_ctxt->dmm_map_lock);
	return map_obj;
}
226 
find_first_page_in_cache(struct dmm_map_object * map_obj,unsigned long mpu_addr)227 static int find_first_page_in_cache(struct dmm_map_object *map_obj,
228 					unsigned long mpu_addr)
229 {
230 	u32 mapped_base_page = map_obj->mpu_addr >> PAGE_SHIFT;
231 	u32 requested_base_page = mpu_addr >> PAGE_SHIFT;
232 	int pg_index = requested_base_page - mapped_base_page;
233 
234 	if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) {
235 		pr_err("%s: failed (got %d)\n", __func__, pg_index);
236 		return -1;
237 	}
238 
239 	pr_debug("%s: first page is %d\n", __func__, pg_index);
240 	return pg_index;
241 }
242 
get_mapping_page(struct dmm_map_object * map_obj,int pg_i)243 static inline struct page *get_mapping_page(struct dmm_map_object *map_obj,
244 								int pg_i)
245 {
246 	pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__,
247 					pg_i, map_obj->num_usr_pgs);
248 
249 	if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) {
250 		pr_err("%s: requested pg_i %d is out of mapped range\n",
251 				__func__, pg_i);
252 		return NULL;
253 	}
254 
255 	return map_obj->pages[pg_i];
256 }
257 
258 /*
259  *  ======== proc_attach ========
260  *  Purpose:
261  *      Prepare for communication with a particular DSP processor, and return
262  *      a handle to the processor object.
263  */
/*
 * Attach the calling process to the processor identified by
 * processor_id: allocate and initialize a proc_object, register it
 * with the device, and hand the handle back through ph_processor and
 * pr_ctxt->processor.  A second attach from the same context simply
 * returns the existing handle.
 */
int
proc_attach(u32 processor_id,
	    const struct dsp_processorattrin *attr_in,
	    void **ph_processor, struct process_context *pr_ctxt)
{
	int status = 0;
	struct dev_object *hdev_obj;
	struct proc_object *p_proc_object = NULL;
	struct mgr_object *hmgr_obj = NULL;
	struct drv_object *hdrv_obj = NULL;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	u8 dev_type;

	/* Already attached in this context: reuse the existing handle */
	if (pr_ctxt->processor) {
		*ph_processor = pr_ctxt->processor;
		return status;
	}

	/* Get the Driver and Manager Object Handles */
	if (!drv_datap || !drv_datap->drv_object || !drv_datap->mgr_object) {
		status = -ENODATA;
		pr_err("%s: Failed to get object handles\n", __func__);
	} else {
		hdrv_obj = drv_datap->drv_object;
		hmgr_obj = drv_datap->mgr_object;
	}

	if (!status) {
		/* Get the Device Object */
		status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj);
	}
	if (!status)
		status = dev_get_dev_type(hdev_obj, &dev_type);

	if (status)
		goto func_end;

	/* If we made it this far, create the Processor object: */
	p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
	/* Fill out the Processor Object: */
	if (p_proc_object == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	p_proc_object->dev_obj = hdev_obj;
	p_proc_object->mgr_obj = hmgr_obj;
	/* NOTE(review): processor_id is populated from the device type,
	 * not from the processor_id argument -- confirm this is intended. */
	p_proc_object->processor_id = dev_type;
	/* Store TGID instead of process handle */
	p_proc_object->process = current->tgid;

	INIT_LIST_HEAD(&p_proc_object->proc_list);

	if (attr_in)
		p_proc_object->timeout = attr_in->timeout;
	else
		p_proc_object->timeout = PROC_DFLT_TIMEOUT;

	status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
	if (!status) {
		status = dev_get_bridge_context(hdev_obj,
					     &p_proc_object->bridge_context);
		if (status)
			kfree(p_proc_object);
	} else
		kfree(p_proc_object);

	if (status)
		goto func_end;

	/* Create the Notification Object */
	/* This is created with no event mask, no notify mask
	 * and no valid handle to the notification. They all get
	 * filled up when proc_register_notify is called */
	p_proc_object->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
	if (p_proc_object->ntfy_obj)
		ntfy_init(p_proc_object->ntfy_obj);
	else
		status = -ENOMEM;

	if (!status) {
		/* Insert the Processor Object into the DEV List.
		 * Return handle to this Processor Object:
		 * Find out if the Device is already attached to a
		 * Processor. If so, return AlreadyAttached status */
		status = dev_insert_proc_object(p_proc_object->dev_obj,
						(u32) p_proc_object,
						&p_proc_object->
						is_already_attached);
		if (!status) {
			/* "Already attached" is not treated as an error */
			if (p_proc_object->is_already_attached)
				status = 0;
		} else {
			/* Insert failed: tear down the notification object
			 * and the processor object in reverse order */
			if (p_proc_object->ntfy_obj) {
				ntfy_delete(p_proc_object->ntfy_obj);
				kfree(p_proc_object->ntfy_obj);
			}

			kfree(p_proc_object);
		}
		if (!status) {
			/* Success: publish the handle and notify clients */
			*ph_processor = (void *)p_proc_object;
			pr_ctxt->processor = *ph_processor;
			(void)proc_notify_clients(p_proc_object,
						  DSP_PROCESSORATTACH);
		}
	} else {
		/* Don't leak memory if status is failed */
		kfree(p_proc_object);
	}
func_end:
	return status;
}
377 
/*
 * Copy the name of the default DSP base image into exec_file.
 *
 * Returns 0 on success, -EFAULT when the output buffer or driver data
 * is missing, -EINVAL when the image name does not fit in 'size'
 * bytes, and -ENOENT for devices that are not DSP units.
 */
static int get_exec_file(struct cfg_devnode *dev_node_obj,
				struct dev_object *hdev_obj,
				u32 size, char *exec_file)
{
	u8 dev_type;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	dev_get_dev_type(hdev_obj, (u8 *) &dev_type);

	if (!exec_file)
		return -EFAULT;

	/* Only DSP units have a default base image */
	if (dev_type != DSP_UNIT)
		return -ENOENT;

	if (!drv_datap || !drv_datap->base_img)
		return -EFAULT;

	/* Leave room for the terminating NUL */
	if (strlen(drv_datap->base_img) >= size)
		return -EINVAL;

	strcpy(exec_file, drv_datap->base_img);
	return 0;
}
404 
405 /*
406  *  ======== proc_auto_start ======== =
407  *  Purpose:
408  *      A Particular device gets loaded with the default image
409  *      if the AutoStart flag is set.
410  *  Parameters:
411  *      hdev_obj:     Handle to the Device
412  *  Returns:
413  *      0:   On Successful Loading
414  *      -EPERM  General Failure
415  *  Requires:
416  *      hdev_obj != NULL
417  *  Ensures:
418  */
int proc_auto_start(struct cfg_devnode *dev_node_obj,
			   struct dev_object *hdev_obj)
{
	int status = -EPERM;
	struct proc_object *p_proc_object;
	char sz_exec_file[MAXCMDLINELEN];
	char *argv[2];
	struct mgr_object *hmgr_obj = NULL;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	u8 dev_type;

	/* Create a Dummy PROC Object */
	if (!drv_datap || !drv_datap->mgr_object) {
		status = -ENODATA;
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
		goto func_end;
	} else {
		hmgr_obj = drv_datap->mgr_object;
	}

	/* Temporary proc object; freed again at func_cont below */
	p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
	if (p_proc_object == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	p_proc_object->dev_obj = hdev_obj;
	p_proc_object->mgr_obj = hmgr_obj;
	status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
	if (!status)
		status = dev_get_bridge_context(hdev_obj,
					     &p_proc_object->bridge_context);
	if (status)
		goto func_cont;

	/* Stop the Device, put it into standby mode */
	status = proc_stop(p_proc_object);

	if (status)
		goto func_cont;

	/* Get the default executable for this board... */
	dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
	p_proc_object->processor_id = dev_type;
	status = get_exec_file(dev_node_obj, hdev_obj, sizeof(sz_exec_file),
			       sz_exec_file);
	if (!status) {
		argv[0] = sz_exec_file;
		argv[1] = NULL;
		/* ...and try to load it: */
		status = proc_load(p_proc_object, 1, (const char **)argv, NULL);
		if (!status)
			status = proc_start(p_proc_object);
	}
	/* proc_load may have cached the COFF name; release it */
	kfree(p_proc_object->last_coff);
	p_proc_object->last_coff = NULL;
func_cont:
	kfree(p_proc_object);
func_end:
	return status;
}
479 
480 /*
481  *  ======== proc_ctrl ========
482  *  Purpose:
483  *      Pass control information to the GPP device driver managing the
484  *      DSP processor.
485  *
486  *      This will be an OEM-only function, and not part of the DSP/BIOS Bridge
487  *      application developer's API.
488  *      Call the bridge_dev_ctrl fxn with the Argument. This is a Synchronous
489  *      Operation. arg can be null.
490  */
int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata *arg)
{
	int status = 0;
	struct proc_object *p_proc_object = hprocessor;
	u32 timeout = 0;

	if (!p_proc_object)
		return -EFAULT;

	/* intercept PWR deep sleep command */
	if (dw_cmd == BRDIOCTL_DEEPSLEEP) {
		/* The header comment allows arg to be NULL, but these PWR
		 * commands carry their timeout in arg->cb_data; reject a
		 * missing argument instead of dereferencing NULL. */
		if (!arg)
			return -EFAULT;
		timeout = arg->cb_data;
		status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
	}
	/* intercept PWR emergency sleep command */
	else if (dw_cmd == BRDIOCTL_EMERGENCYSLEEP) {
		if (!arg)
			return -EFAULT;
		timeout = arg->cb_data;
		status = pwr_sleep_dsp(PWR_EMERGENCYDEEPSLEEP, timeout);
	} else if (dw_cmd == PWR_DEEPSLEEP) {
		/* timeout = arg->cb_data; */
		status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
	}
	/* intercept PWR wake commands */
	else if (dw_cmd == BRDIOCTL_WAKEUP) {
		if (!arg)
			return -EFAULT;
		timeout = arg->cb_data;
		status = pwr_wake_dsp(timeout);
	} else if (dw_cmd == PWR_WAKEUP) {
		/* timeout = arg->cb_data; */
		status = pwr_wake_dsp(timeout);
	} else
	    if (!((*p_proc_object->intf_fxns->dev_cntrl)
			      (p_proc_object->bridge_context, dw_cmd,
			       arg))) {
		/* Pass everything else straight to the bridge driver */
		status = 0;
	} else {
		status = -EPERM;
	}

	return status;
}
532 
533 /*
534  *  ======== proc_detach ========
535  *  Purpose:
536  *      Destroys the  Processor Object. Removes the notification from the Dev
537  *      List.
538  */
proc_detach(struct process_context * pr_ctxt)539 int proc_detach(struct process_context *pr_ctxt)
540 {
541 	int status = 0;
542 	struct proc_object *p_proc_object = NULL;
543 
544 	p_proc_object = (struct proc_object *)pr_ctxt->processor;
545 
546 	if (p_proc_object) {
547 		/* Notify the Client */
548 		ntfy_notify(p_proc_object->ntfy_obj, DSP_PROCESSORDETACH);
549 		/* Remove the notification memory */
550 		if (p_proc_object->ntfy_obj) {
551 			ntfy_delete(p_proc_object->ntfy_obj);
552 			kfree(p_proc_object->ntfy_obj);
553 		}
554 
555 		kfree(p_proc_object->last_coff);
556 		p_proc_object->last_coff = NULL;
557 		/* Remove the Proc from the DEV List */
558 		(void)dev_remove_proc_object(p_proc_object->dev_obj,
559 					     (u32) p_proc_object);
560 		/* Free the Processor Object */
561 		kfree(p_proc_object);
562 		pr_ctxt->processor = NULL;
563 	} else {
564 		status = -EFAULT;
565 	}
566 
567 	return status;
568 }
569 
570 /*
571  *  ======== proc_enum_nodes ========
572  *  Purpose:
573  *      Enumerate and get configuration information about nodes allocated
574  *      on a DSP processor.
575  */
/*
 * Enumerate the nodes currently allocated on the processor via its
 * node manager.  Returns -EFAULT for a NULL handle, -EPERM when no
 * node manager is available, otherwise the node_enum_nodes() result.
 */
int proc_enum_nodes(void *hprocessor, void **node_tab,
			   u32 node_tab_size, u32 *pu_num_nodes,
			   u32 *pu_allocated)
{
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct node_mgr *hnode_mgr = NULL;

	if (!p_proc_object)
		return -EFAULT;

	if (dev_get_node_manager(p_proc_object->dev_obj, &hnode_mgr))
		return -EPERM;

	if (!hnode_mgr)
		return -EPERM;

	return node_enum_nodes(hnode_mgr, node_tab, node_tab_size,
			       pu_num_nodes, pu_allocated);
}
600 
601 /* Cache operation against kernel address instead of users */
/* Cache operation against kernel address instead of users */
/*
 * Populate map_obj->dma_info.sg with one entry per page of the user
 * range [start, start + len), starting at cached page index pg_i.
 * The sg table must already be sized to dma_info.num_pages; a count
 * mismatch at the end is reported as -EFAULT.
 */
static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start,
						ssize_t len, int pg_i)
{
	struct page *page;
	unsigned long offset;
	ssize_t rest;
	int ret = 0, i = 0;
	struct scatterlist *sg = map_obj->dma_info.sg;

	while (len) {
		page = get_mapping_page(map_obj, pg_i);
		if (!page) {
			pr_err("%s: no page for %08lx\n", __func__, start);
			ret = -EINVAL;
			goto out;
		} else if (IS_ERR(page)) {
			/* Defensive: get_mapping_page() currently returns
			 * NULL or a valid page, never an ERR_PTR */
			pr_err("%s: err page for %08lx(%lu)\n", __func__, start,
			       PTR_ERR(page));
			ret = PTR_ERR(page);
			goto out;
		}

		/* First page may start mid-page; clamp to what remains */
		offset = start & ~PAGE_MASK;
		rest = min_t(ssize_t, PAGE_SIZE - offset, len);

		sg_set_page(&sg[i], page, rest, offset);

		len -= rest;
		start += rest;
		pg_i++, i++;
	}

	if (i != map_obj->dma_info.num_pages) {
		pr_err("%s: bad number of sg iterations\n", __func__);
		ret = -EFAULT;
		goto out;
	}

out:
	return ret;
}
643 
/*
 * End a DMA transaction over [start, start + len): unmap the scatter
 * list set up by memory_give_ownership() and release it, returning
 * buffer ownership to the CPU.  A range/direction mismatch with the
 * recorded dma_info is rejected with -EINVAL; a missing sg table
 * (no DMA in progress) is treated as success.
 */
static int memory_regain_ownership(struct dmm_map_object *map_obj,
		unsigned long start, ssize_t len, enum dma_data_direction dir)
{
	int ret = 0;
	unsigned long first_data_page = start >> PAGE_SHIFT;
	unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
	/* calculating the number of pages this area spans */
	unsigned long num_pages = last_data_page - first_data_page + 1;
	struct bridge_dma_map_info *dma_info = &map_obj->dma_info;

	if (!dma_info->sg)
		goto out;

	if (dma_info->dir != dir || dma_info->num_pages != num_pages) {
		pr_err("%s: dma info doesn't match given params\n", __func__);
		return -EINVAL;
	}

	dma_unmap_sg(bridge, dma_info->sg, num_pages, dma_info->dir);

	pr_debug("%s: dma_map_sg unmapped\n", __func__);

	kfree(dma_info->sg);

	/* Mark no DMA in progress for this mapping */
	map_obj->dma_info.sg = NULL;

out:
	return ret;
}
673 
674 /* Cache operation against kernel address instead of users */
/* Cache operation against kernel address instead of users */
/*
 * Begin a DMA transaction over [start, start + len): build a scatter
 * list from the pages cached in map_obj and dma_map_sg() it in the
 * given direction, transferring buffer ownership to the device.
 * The sg table is stored in map_obj->dma_info for the matching
 * memory_regain_ownership() call.  Returns 0 or a negative errno.
 */
static int memory_give_ownership(struct dmm_map_object *map_obj,
		unsigned long start, ssize_t len, enum dma_data_direction dir)
{
	int pg_i, ret, sg_num;
	struct scatterlist *sg;
	unsigned long first_data_page = start >> PAGE_SHIFT;
	unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
	/* calculating the number of pages this area spans */
	unsigned long num_pages = last_data_page - first_data_page + 1;

	pg_i = find_first_page_in_cache(map_obj, start);
	if (pg_i < 0) {
		pr_err("%s: failed to find first page in cache\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	sg_init_table(sg, num_pages);

	/* cleanup a previous sg allocation */
	/* this may happen if application doesn't signal for e/o DMA */
	kfree(map_obj->dma_info.sg);

	map_obj->dma_info.sg = sg;
	map_obj->dma_info.dir = dir;
	map_obj->dma_info.num_pages = num_pages;

	ret = build_dma_sg(map_obj, start, len, pg_i);
	if (ret)
		goto kfree_sg;

	sg_num = dma_map_sg(bridge, sg, num_pages, dir);
	if (sg_num < 1) {
		pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num);
		ret = -EFAULT;
		goto kfree_sg;
	}

	pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num);
	map_obj->dma_info.sg_num = sg_num;

	return 0;

kfree_sg:
	kfree(sg);
	map_obj->dma_info.sg = NULL;
out:
	return ret;
}
730 
/*
 * Start a DSP DMA transaction on a previously mapped user buffer:
 * look up the mapping that contains [pmpu_addr, pmpu_addr + ul_size)
 * and hand ownership of its pages to the device in direction 'dir'.
 * proc_lock serializes this against proc_end_dma() and unmap.
 */
int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
				enum dma_data_direction dir)
{
	/* Keep STATUS here for future additions to this function */
	int status = 0;
	struct process_context *pr_ctxt = (struct process_context *) hprocessor;
	struct dmm_map_object *map_obj;

	if (!pr_ctxt) {
		status = -EFAULT;
		goto err_out;
	}

	pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
							(u32)pmpu_addr,
							ul_size, dir);

	mutex_lock(&proc_lock);

	/* find requested memory are in cached mapping information */
	map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
	if (!map_obj) {
		pr_err("%s: find_containing_mapping failed\n", __func__);
		status = -EFAULT;
		goto no_map;
	}

	if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
		pr_err("%s: InValid address parameters %p %x\n",
			       __func__, pmpu_addr, ul_size);
		status = -EFAULT;
	}

no_map:
	mutex_unlock(&proc_lock);
err_out:

	return status;
}
770 
/*
 * Finish a DSP DMA transaction started by proc_begin_dma(): look up
 * the mapping containing [pmpu_addr, pmpu_addr + ul_size) and return
 * ownership of its pages to the CPU.  'dir' must match the direction
 * used when the DMA began.  Serialized by proc_lock.
 */
int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
			enum dma_data_direction dir)
{
	/* Keep STATUS here for future additions to this function */
	int status = 0;
	struct process_context *pr_ctxt = (struct process_context *) hprocessor;
	struct dmm_map_object *map_obj;

	if (!pr_ctxt) {
		status = -EFAULT;
		goto err_out;
	}

	pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
							(u32)pmpu_addr,
							ul_size, dir);

	mutex_lock(&proc_lock);

	/* find requested memory are in cached mapping information */
	map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
	if (!map_obj) {
		pr_err("%s: find_containing_mapping failed\n", __func__);
		status = -EFAULT;
		goto no_map;
	}

	if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
		pr_err("%s: InValid address parameters %p %x\n",
		       __func__, pmpu_addr, ul_size);
		status = -EFAULT;
	}

no_map:
	mutex_unlock(&proc_lock);
err_out:
	return status;
}
809 
810 /*
811  *  ======== proc_flush_memory ========
812  *  Purpose:
813  *     Flush cache
814  */
/*
 * Flush (writeback + invalidate) the cache for a mapped user buffer
 * by starting a bidirectional DMA transaction over it.  ul_flags is
 * currently unused.
 */
int proc_flush_memory(void *hprocessor, void *pmpu_addr,
			     u32 ul_size, u32 ul_flags)
{
	return proc_begin_dma(hprocessor, pmpu_addr, ul_size,
			      DMA_BIDIRECTIONAL);
}
822 
823 /*
824  *  ======== proc_invalidate_memory ========
825  *  Purpose:
826  *     Invalidates the memory specified
827  */
/*
 * Invalidate the cache for a mapped user buffer by starting a
 * device-to-CPU DMA transaction over it.
 */
int proc_invalidate_memory(void *hprocessor, void *pmpu_addr, u32 size)
{
	return proc_begin_dma(hprocessor, pmpu_addr, size, DMA_FROM_DEVICE);
}
834 
835 /*
836  *  ======== proc_get_resource_info ========
837  *  Purpose:
838  *      Enumerate the resources currently available on a processor.
839  */
int proc_get_resource_info(void *hprocessor, u32 resource_type,
				  struct dsp_resourceinfo *resource_info,
				  u32 resource_info_size)
{
	int status = -EPERM;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct node_mgr *hnode_mgr = NULL;
	struct nldr_object *nldr_obj = NULL;
	struct rmm_target_obj *rmm = NULL;
	struct io_mgr *hio_mgr = NULL;	/* IO manager handle */

	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	switch (resource_type) {
	/* Memory statistics come from the remote memory manager (RMM),
	 * reached via node manager -> node loader */
	case DSP_RESOURCE_DYNDARAM:
	case DSP_RESOURCE_DYNSARAM:
	case DSP_RESOURCE_DYNEXTERNAL:
	case DSP_RESOURCE_DYNSRAM:
		status = dev_get_node_manager(p_proc_object->dev_obj,
					      &hnode_mgr);
		if (!hnode_mgr) {
			status = -EFAULT;
			goto func_end;
		}

		status = node_get_nldr_obj(hnode_mgr, &nldr_obj);
		if (!status) {
			status = nldr_get_rmm_manager(nldr_obj, &rmm);
			if (rmm) {
				/* rmm_stat returns true on success */
				if (!rmm_stat(rmm,
					      (enum dsp_memtype)resource_type,
					      (struct dsp_memstat *)
					      &(resource_info->result.
						mem_stat)))
					status = -EINVAL;
			} else {
				status = -EFAULT;
			}
		}
		break;
	/* Processor load statistics come from the IO manager */
	case DSP_RESOURCE_PROCLOAD:
		status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr);
		if (hio_mgr)
			status =
			    p_proc_object->intf_fxns->
			    io_get_proc_load(hio_mgr,
						 (struct dsp_procloadstat *)
						 &(resource_info->result.
						   proc_load_stat));
		else
			status = -EFAULT;
		break;
	default:
		status = -EPERM;
		break;
	}
func_end:
	return status;
}
901 
902 /*
903  *  ======== proc_get_dev_object ========
904  *  Purpose:
905  *      Return the Dev Object handle for a given Processor.
906  *
907  */
proc_get_dev_object(void * hprocessor,struct dev_object ** device_obj)908 int proc_get_dev_object(void *hprocessor,
909 			       struct dev_object **device_obj)
910 {
911 	int status = -EPERM;
912 	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
913 
914 	if (p_proc_object) {
915 		*device_obj = p_proc_object->dev_obj;
916 		status = 0;
917 	} else {
918 		*device_obj = NULL;
919 		status = -EFAULT;
920 	}
921 
922 	return status;
923 }
924 
925 /*
926  *  ======== proc_get_state ========
927  *  Purpose:
928  *      Report the state of the specified DSP processor.
929  */
int proc_get_state(void *hprocessor,
			  struct dsp_processorstate *proc_state_obj,
			  u32 state_info_size)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	int brd_status;

	if (p_proc_object) {
		/* First, retrieve BRD state information */
		status = (*p_proc_object->intf_fxns->brd_status)
		    (p_proc_object->bridge_context, &brd_status);
		if (!status) {
			/* Map the bridge board state onto the public
			 * processor state enumeration */
			switch (brd_status) {
			case BRD_STOPPED:
				proc_state_obj->proc_state = PROC_STOPPED;
				break;
			/* Sleep/hibernation states are reported as RUNNING */
			case BRD_SLEEP_TRANSITION:
			case BRD_DSP_HIBERNATION:
				/* Fall through */
			case BRD_RUNNING:
				proc_state_obj->proc_state = PROC_RUNNING;
				break;
			case BRD_LOADED:
				proc_state_obj->proc_state = PROC_LOADED;
				break;
			case BRD_ERROR:
				proc_state_obj->proc_state = PROC_ERROR;
				break;
			default:
				/* Unknown board state: flag and fail */
				proc_state_obj->proc_state = 0xFF;
				status = -EPERM;
				break;
			}
		}
	} else {
		status = -EFAULT;
	}
	dev_dbg(bridge, "%s, results: status: 0x%x proc_state_obj: 0x%x\n",
		__func__, status, proc_state_obj->proc_state);
	return status;
}
972 
973 /*
974  *  ======== proc_get_trace ========
975  *  Purpose:
976  *      Retrieve the current contents of the trace buffer, located on the
977  *      Processor.  Predefined symbols for the trace buffer must have been
978  *      configured into the DSP executable.
979  *  Details:
980  *      We support using the symbols SYS_PUTCBEG and SYS_PUTCEND to define a
981  *      trace buffer, only.  Treat it as an undocumented feature.
982  *      This call is destructive, meaning the processor is placed in the monitor
983  *      state as a result of this function.
984  */
/*
 * Trace buffer retrieval is not implemented in this driver; always
 * report -ENOSYS regardless of the arguments.
 */
int proc_get_trace(void *hprocessor, u8 *pbuf, u32 max_size)
{
	return -ENOSYS;
}
991 
992 /*
993  *  ======== proc_load ========
994  *  Purpose:
995  *      Reset a processor and load a new base program image.
996  *      This will be an OEM-only function, and not part of the DSP/BIOS Bridge
997  *      application developer's API.
998  */
int proc_load(void *hprocessor, const s32 argc_index,
		     const char **user_args, const char **user_envp)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct io_mgr *hio_mgr;	/* IO manager handle */
	struct msg_mgr *hmsg_mgr;	/* Message manager handle */
	struct cod_manager *cod_mgr;	/* Code manager handle */
	char *pargv0;		/* temp argv[0] ptr, restored before return */
	char **new_envp;	/* Updated envp[] array. */
	char sz_proc_id[MAXPROCIDLEN];	/* Holds "PROC_ID=<n>" */
	s32 envp_elems;		/* Num elements in envp[]. */
	s32 cnew_envp;		/* "  " in new_envp[] */
	s32 nproc_id = 0;	/* Anticipate MP version. */
	struct dcd_manager *hdcd_handle;
	struct dmm_object *dmm_mgr;
	u32 dw_ext_end;
	u32 proc_id;
	int brd_state;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	struct timeval tv1;
	struct timeval tv2;
#endif

#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;
#endif

#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	do_gettimeofday(&tv1);
#endif
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr);
	if (!cod_mgr) {
		status = -EPERM;
		goto func_end;
	}
	/* Stop whatever is currently running before reloading. */
	status = proc_stop(hprocessor);
	if (status)
		goto func_end;

	/* Place the board in the monitor state. */
	status = proc_monitor(hprocessor);
	if (status)
		goto func_end;

	/* Save ptr to original argv[0]. */
	pargv0 = (char *)user_args[0];
	/* Prepend "PROC_ID=<nproc_id>" to the envp array for the target.
	 * envp_elems already includes the terminating NULL entry (see
	 * get_envp_count()); when envp is empty, two slots are needed:
	 * the new string plus a NULL terminator. */
	envp_elems = get_envp_count((char **)user_envp);
	cnew_envp = (envp_elems ? (envp_elems + 1) : (envp_elems + 2));
	new_envp = kzalloc(cnew_envp * sizeof(char **), GFP_KERNEL);
	if (new_envp) {
		status = snprintf(sz_proc_id, MAXPROCIDLEN, PROC_ENVPROCID,
				  nproc_id);
		/* NOTE(review): the kernel's snprintf() returns the
		 * would-be length on truncation, never -1, so this
		 * overflow check appears ineffective — confirm. */
		if (status == -1) {
			dev_dbg(bridge, "%s: Proc ID string overflow\n",
				__func__);
			status = -EPERM;
		} else {
			new_envp =
			    prepend_envp(new_envp, (char **)user_envp,
					 envp_elems, cnew_envp, sz_proc_id);
			/* Get the DCD Handle */
			status = mgr_get_dcd_handle(p_proc_object->mgr_obj,
						    (u32 *) &hdcd_handle);
			if (!status) {
				/*  Before proceeding with new load,
				 *  check if a previously registered COFF
				 *  exists.
				 *  If yes, unregister nodes in previously
				 *  registered COFF.  If any error occurred,
				 *  set previously registered COFF to NULL. */
				if (p_proc_object->last_coff != NULL) {
					status =
					    dcd_auto_unregister(hdcd_handle,
								p_proc_object->
								last_coff);
					/* Regardless of auto unregister status,
					 *  free previously allocated
					 *  memory. */
					kfree(p_proc_object->last_coff);
					p_proc_object->last_coff = NULL;
				}
			}
			/* On success, do cod_open_base() */
			/* NOTE(review): any error from the DCD lookup /
			 * auto-unregister above is overwritten here —
			 * confirm that is intended. */
			status = cod_open_base(cod_mgr, (char *)user_args[0],
					       COD_SYMB);
		}
	} else {
		status = -ENOMEM;
	}
	if (!status) {
		/* Auto-register data base */
		/* Get the DCD Handle */
		status = mgr_get_dcd_handle(p_proc_object->mgr_obj,
					    (u32 *) &hdcd_handle);
		if (!status) {
			/*  Auto register nodes in specified COFF
			 *  file.  If registration did not fail,
			 *  (status = 0 or -EACCES)
			 *  save the name of the COFF file for
			 *  de-registration in the future. */
			status =
			    dcd_auto_register(hdcd_handle,
					      (char *)user_args[0]);
			if (status == -EACCES)
				status = 0;

			if (status) {
				status = -EPERM;
			} else {
				/* Allocate memory for pszLastCoff */
				p_proc_object->last_coff =
						kzalloc((strlen(user_args[0]) +
						1), GFP_KERNEL);
				/* If memory allocated, save COFF file name
				 * (buffer is sized strlen+1, so the copy
				 * below includes the NUL terminator). */
				if (p_proc_object->last_coff) {
					strncpy(p_proc_object->last_coff,
						(char *)user_args[0],
						(strlen((char *)user_args[0]) +
						 1));
				}
			}
		}
	}
	/* Update shared memory address and size */
	if (!status) {
		/*  Create the message manager. This must be done
		 *  before calling the IOOnLoaded function. */
		dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr);
		if (!hmsg_mgr) {
			status = msg_create(&hmsg_mgr, p_proc_object->dev_obj,
					    (msg_onexit) node_on_exit);
			dev_set_msg_mgr(p_proc_object->dev_obj, hmsg_mgr);
		}
	}
	if (!status) {
		/* Notify the IO manager that a new image is about to be
		 * loaded, so it can recompute its shared memory layout. */
		status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr);
		if (hio_mgr)
			status = (*p_proc_object->intf_fxns->io_on_loaded)
								(hio_mgr);
		else
			status = -EFAULT;
	}
	if (!status) {
		/* Now, attempt to load an exec: */

		/* Boost the OPP level to Maximum level supported by baseport */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP5]);
#endif
		status = cod_load_base(cod_mgr, argc_index, (char **)user_args,
				       dev_brd_write_fxn,
				       p_proc_object->dev_obj, NULL);
		if (status) {
			if (status == -EBADF) {
				dev_dbg(bridge, "%s: Failure to Load the EXE\n",
					__func__);
			}
			if (status == -ESPIPE) {
				pr_err("%s: Couldn't parse the file\n",
				       __func__);
			}
		}
		/* Requesting the lowest opp supported */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
#endif

	}
	if (!status) {
		/* Update the Processor status to loaded */
		status = (*p_proc_object->intf_fxns->brd_set_state)
		    (p_proc_object->bridge_context, BRD_LOADED);
		if (!status) {
			p_proc_object->proc_state = PROC_LOADED;
			if (p_proc_object->ntfy_obj)
				proc_notify_clients(p_proc_object,
						    DSP_PROCESSORSTATECHANGE);
		}
	}
	if (!status) {
		status = proc_get_processor_id(hprocessor, &proc_id);
		if (proc_id == DSP_UNIT) {
			/* Use all available DSP address space after EXTMEM
			 * for DMM */
			if (!status)
				status = cod_get_sym_value(cod_mgr, EXTEND,
							   &dw_ext_end);

			/* Reset DMM structs and add an initial free chunk */
			if (!status) {
				status =
				    dev_get_dmm_mgr(p_proc_object->dev_obj,
						    &dmm_mgr);
				if (dmm_mgr) {
					/* Set dw_ext_end to DMM START u8
					 * address */
					dw_ext_end =
					    (dw_ext_end + 1) * DSPWORDSIZE;
					/* DMM memory is from EXT_END */
					status = dmm_create_tables(dmm_mgr,
								   dw_ext_end,
								   DMMPOOLSIZE);
				} else {
					status = -EFAULT;
				}
			}
		}
	}
	/* Restore the original argv[0] */
	kfree(new_envp);
	user_args[0] = pargv0;
	if (!status) {
		if (!((*p_proc_object->intf_fxns->brd_status)
				(p_proc_object->bridge_context, &brd_state))) {
			pr_info("%s: Processor Loaded %s\n", __func__, pargv0);
			/* Remember the image path for future re-loads. */
			kfree(drv_datap->base_img);
			drv_datap->base_img = kstrdup(pargv0, GFP_KERNEL);
			if (!drv_datap->base_img)
				status = -ENOMEM;
		}
	}

func_end:
	if (status) {
		pr_err("%s: Processor failed to load\n", __func__);
		proc_stop(p_proc_object);
	}
#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	do_gettimeofday(&tv2);
	if (tv2.tv_usec < tv1.tv_usec) {
		tv2.tv_usec += 1000000;
		tv2.tv_sec--;
	}
	dev_dbg(bridge, "%s: time to load %d sec and %d usec\n", __func__,
		tv2.tv_sec - tv1.tv_sec, tv2.tv_usec - tv1.tv_usec);
#endif
	return status;
}
1249 
1250 /*
1251  *  ======== proc_map ========
1252  *  Purpose:
1253  *      Maps a MPU buffer to DSP address space.
1254  */
int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
		    void *req_addr, void **pp_map_addr, u32 ul_map_attr,
		    struct process_context *pr_ctxt)
{
	u32 va_align;
	u32 pa_align;
	struct dmm_object *dmm_mgr;
	u32 size_align;
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct dmm_map_object *map_obj;
	u32 tmp_addr = 0;

#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
	/* Non-RBUF buffers must be cache-line aligned in both address
	 * and size. */
	if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
		if (!IS_ALIGNED((u32)pmpu_addr, DSP_CACHE_LINE) ||
		    !IS_ALIGNED(ul_size, DSP_CACHE_LINE)) {
			pr_err("%s: not aligned: 0x%x (%d)\n", __func__,
						(u32)pmpu_addr, ul_size);
			return -EFAULT;
		}
	}
#endif

	/* Calculate the page-aligned PA, VA and size */
	va_align = PG_ALIGN_LOW((u32) req_addr, PG_SIZE4K);
	pa_align = PG_ALIGN_LOW((u32) pmpu_addr, PG_SIZE4K);
	size_align = PG_ALIGN_HIGH(ul_size + (u32) pmpu_addr - pa_align,
				   PG_SIZE4K);

	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* Critical section: DMM bookkeeping + page-table update must be
	 * atomic with respect to proc_un_map(). */
	mutex_lock(&proc_lock);
	dmm_get_handle(p_proc_object, &dmm_mgr);
	if (dmm_mgr)
		status = dmm_map_memory(dmm_mgr, va_align, size_align);
	else
		status = -EFAULT;

	/* Add mapping to the page tables. */
	if (!status) {
		/* Mapped address = MSB of VA | LSB of PA */
		tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
		/* mapped memory resource tracking */
		map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr,
						size_align);
		if (!map_obj)
			status = -ENOMEM;
		else
			status = (*p_proc_object->intf_fxns->brd_mem_map)
			    (p_proc_object->bridge_context, pa_align, va_align,
			     size_align, ul_map_attr, map_obj->pages);
	}
	if (!status) {
		/* Mapped address = MSB of VA | LSB of PA */
		*pp_map_addr = (void *) tmp_addr;
	} else {
		/* Roll back tracking and DMM state. tmp_addr is still 0
		 * when dmm_map_memory() itself failed; NOTE(review):
		 * remove_mapping_information()/dmm_un_map_memory() are
		 * assumed to tolerate that (and a NULL dmm_mgr) —
		 * confirm. */
		remove_mapping_information(pr_ctxt, tmp_addr, size_align);
		dmm_un_map_memory(dmm_mgr, va_align, &size_align);
	}
	mutex_unlock(&proc_lock);

	/* Fix: the original had "if (status) goto func_end;" here, a
	 * dead jump to the immediately following label — removed. */
func_end:
	dev_dbg(bridge, "%s: hprocessor %p, pmpu_addr %p, ul_size %x, "
		"req_addr %p, ul_map_attr %x, pp_map_addr %p, va_align %x, "
		"pa_align %x, size_align %x status 0x%x\n", __func__,
		hprocessor, pmpu_addr, ul_size, req_addr, ul_map_attr,
		pp_map_addr, va_align, pa_align, size_align, status);

	return status;
}
1333 
1334 /*
1335  *  ======== proc_register_notify ========
1336  *  Purpose:
1337  *      Register to be notified of specific processor events.
1338  */
int proc_register_notify(void *hprocessor, u32 event_mask,
				u32 notify_type, struct dsp_notification
				*hnotification)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct deh_mgr *hdeh_mgr;

	/* Check processor handle */
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* Check if event mask is a valid processor related event */
	if (event_mask & ~(DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH |
			DSP_PROCESSORDETACH | DSP_PROCESSORRESTART |
			DSP_MMUFAULT | DSP_SYSERROR | DSP_PWRERROR |
			DSP_WDTOVERFLOW))
		status = -EINVAL;

	/* Check if notify type is valid */
	if (notify_type != DSP_SIGNALEVENT)
		status = -EINVAL;

	if (!status) {
		/* Ordinary processor events are registered with the
		 * processor's own notification object; error events
		 * (SYSERROR/MMUFAULT/PWRERROR/WDTOVERFLOW) go to the
		 * DEH manager. */
		if (event_mask &
		    ~(DSP_SYSERROR | DSP_MMUFAULT | DSP_PWRERROR |
				DSP_WDTOVERFLOW)) {
			status = ntfy_register(p_proc_object->ntfy_obj,
					       hnotification, event_mask,
					       notify_type);
			/* Special case alert, special case alert!
			 * If we're trying to *deregister* (i.e. event_mask
			 * is 0), a DSP_SYSERROR or DSP_MMUFAULT notification,
			 * we have to deregister with the DEH manager.
			 * There's no way to know, based on event_mask which
			 * manager the notification event was registered with,
			 * so if we're trying to deregister and ntfy_register
			 * failed, we'll give the deh manager a shot.
			 */
			if ((event_mask == 0) && status) {
				/* Fix: only use hdeh_mgr if the lookup
				 * succeeded; it was previously passed on
				 * uninitialized when dev_get_deh_mgr()
				 * failed, with its error discarded. */
				status =
				    dev_get_deh_mgr(p_proc_object->dev_obj,
						    &hdeh_mgr);
				if (!status)
					status = bridge_deh_register_notify(
							hdeh_mgr,
							event_mask,
							notify_type,
							hnotification);
			}
		} else {
			status = dev_get_deh_mgr(p_proc_object->dev_obj,
						 &hdeh_mgr);
			if (!status)
				status = bridge_deh_register_notify(
						hdeh_mgr,
						event_mask,
						notify_type,
						hnotification);
		}
	}
func_end:
	return status;
}
1405 
1406 /*
1407  *  ======== proc_reserve_memory ========
1408  *  Purpose:
1409  *      Reserve a virtually contiguous region of DSP address space.
1410  */
int proc_reserve_memory(void *hprocessor, u32 ul_size,
			       void **pp_rsv_addr,
			       struct process_context *pr_ctxt)
{
	struct dmm_object *dmm_mgr;
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct dmm_rsv_object *rsv_obj;

	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}

	status = dmm_get_handle(p_proc_object, &dmm_mgr);
	if (!dmm_mgr) {
		status = -EFAULT;
		goto func_end;
	}

	status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
	if (status != 0)
		goto func_end;

	/*
	 * A successful reserve must be recorded in dmm_rsv_list so that
	 * reserved memory resource tracking remains up to date.  If the
	 * tracking entry cannot be allocated, roll the reservation back
	 * and fail rather than leave an untracked region behind (the
	 * allocation failure was previously ignored silently).
	 */
	rsv_obj = kmalloc(sizeof(*rsv_obj), GFP_KERNEL);
	if (!rsv_obj) {
		dmm_un_reserve_memory(dmm_mgr, (u32) *pp_rsv_addr);
		status = -ENOMEM;
		goto func_end;
	}
	rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
	spin_lock(&pr_ctxt->dmm_rsv_lock);
	list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
	spin_unlock(&pr_ctxt->dmm_rsv_lock);

func_end:
	dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
		"status 0x%x\n", __func__, hprocessor,
		ul_size, pp_rsv_addr, status);
	return status;
}
1454 
1455 /*
1456  *  ======== proc_start ========
1457  *  Purpose:
1458  *      Start a processor running.
1459  */
proc_start(void * hprocessor)1460 int proc_start(void *hprocessor)
1461 {
1462 	int status = 0;
1463 	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1464 	struct cod_manager *cod_mgr;	/* Code manager handle */
1465 	u32 dw_dsp_addr;	/* Loaded code's entry point. */
1466 	int brd_state;
1467 
1468 	if (!p_proc_object) {
1469 		status = -EFAULT;
1470 		goto func_end;
1471 	}
1472 	/* Call the bridge_brd_start */
1473 	if (p_proc_object->proc_state != PROC_LOADED) {
1474 		status = -EBADR;
1475 		goto func_end;
1476 	}
1477 	status = dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr);
1478 	if (!cod_mgr) {
1479 		status = -EFAULT;
1480 		goto func_cont;
1481 	}
1482 
1483 	status = cod_get_entry(cod_mgr, &dw_dsp_addr);
1484 	if (status)
1485 		goto func_cont;
1486 
1487 	status = (*p_proc_object->intf_fxns->brd_start)
1488 	    (p_proc_object->bridge_context, dw_dsp_addr);
1489 	if (status)
1490 		goto func_cont;
1491 
1492 	/* Call dev_create2 */
1493 	status = dev_create2(p_proc_object->dev_obj);
1494 	if (!status) {
1495 		p_proc_object->proc_state = PROC_RUNNING;
1496 		/* Deep sleep switces off the peripheral clocks.
1497 		 * we just put the DSP CPU in idle in the idle loop.
1498 		 * so there is no need to send a command to DSP */
1499 
1500 		if (p_proc_object->ntfy_obj) {
1501 			proc_notify_clients(p_proc_object,
1502 					    DSP_PROCESSORSTATECHANGE);
1503 		}
1504 	} else {
1505 		/* Failed to Create Node Manager and DISP Object
1506 		 * Stop the Processor from running. Put it in STOPPED State */
1507 		(void)(*p_proc_object->intf_fxns->
1508 		       brd_stop) (p_proc_object->bridge_context);
1509 		p_proc_object->proc_state = PROC_STOPPED;
1510 	}
1511 func_cont:
1512 	if (!status) {
1513 		if (!((*p_proc_object->intf_fxns->brd_status)
1514 				(p_proc_object->bridge_context, &brd_state))) {
1515 			pr_info("%s: dsp in running state\n", __func__);
1516 		}
1517 	} else {
1518 		pr_err("%s: Failed to start the dsp\n", __func__);
1519 		proc_stop(p_proc_object);
1520 	}
1521 
1522 func_end:
1523 	return status;
1524 }
1525 
1526 /*
1527  *  ======== proc_stop ========
1528  *  Purpose:
1529  *      Stop a processor running.
1530  */
proc_stop(void * hprocessor)1531 int proc_stop(void *hprocessor)
1532 {
1533 	int status = 0;
1534 	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1535 	struct msg_mgr *hmsg_mgr;
1536 	struct node_mgr *hnode_mgr;
1537 	void *hnode;
1538 	u32 node_tab_size = 1;
1539 	u32 num_nodes = 0;
1540 	u32 nodes_allocated = 0;
1541 
1542 	if (!p_proc_object) {
1543 		status = -EFAULT;
1544 		goto func_end;
1545 	}
1546 	/* check if there are any running nodes */
1547 	status = dev_get_node_manager(p_proc_object->dev_obj, &hnode_mgr);
1548 	if (!status && hnode_mgr) {
1549 		status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size,
1550 					 &num_nodes, &nodes_allocated);
1551 		if ((status == -EINVAL) || (nodes_allocated > 0)) {
1552 			pr_err("%s: Can't stop device, active nodes = %d\n",
1553 				__func__, nodes_allocated);
1554 			return -EBADR;
1555 		}
1556 	}
1557 	/* Call the bridge_brd_stop */
1558 	/* It is OK to stop a device that does n't have nodes OR not started */
1559 	status =
1560 	    (*p_proc_object->intf_fxns->
1561 	     brd_stop) (p_proc_object->bridge_context);
1562 	if (!status) {
1563 		dev_dbg(bridge, "%s: processor in standby mode\n", __func__);
1564 		p_proc_object->proc_state = PROC_STOPPED;
1565 		/* Destroy the Node Manager, msg_ctrl Manager */
1566 		if (!(dev_destroy2(p_proc_object->dev_obj))) {
1567 			/* Destroy the msg_ctrl by calling msg_delete */
1568 			dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr);
1569 			if (hmsg_mgr) {
1570 				msg_delete(hmsg_mgr);
1571 				dev_set_msg_mgr(p_proc_object->dev_obj, NULL);
1572 			}
1573 		}
1574 	} else {
1575 		pr_err("%s: Failed to stop the processor\n", __func__);
1576 	}
1577 func_end:
1578 
1579 	return status;
1580 }
1581 
1582 /*
1583  *  ======== proc_un_map ========
1584  *  Purpose:
1585  *      Removes a MPU buffer mapping from the DSP address space.
1586  */
proc_un_map(void * hprocessor,void * map_addr,struct process_context * pr_ctxt)1587 int proc_un_map(void *hprocessor, void *map_addr,
1588 		       struct process_context *pr_ctxt)
1589 {
1590 	int status = 0;
1591 	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1592 	struct dmm_object *dmm_mgr;
1593 	u32 va_align;
1594 	u32 size_align;
1595 
1596 	va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
1597 	if (!p_proc_object) {
1598 		status = -EFAULT;
1599 		goto func_end;
1600 	}
1601 
1602 	status = dmm_get_handle(hprocessor, &dmm_mgr);
1603 	if (!dmm_mgr) {
1604 		status = -EFAULT;
1605 		goto func_end;
1606 	}
1607 
1608 	/* Critical section */
1609 	mutex_lock(&proc_lock);
1610 	/*
1611 	 * Update DMM structures. Get the size to unmap.
1612 	 * This function returns error if the VA is not mapped
1613 	 */
1614 	status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
1615 	/* Remove mapping from the page tables. */
1616 	if (!status) {
1617 		status = (*p_proc_object->intf_fxns->brd_mem_un_map)
1618 		    (p_proc_object->bridge_context, va_align, size_align);
1619 	}
1620 
1621 	if (status)
1622 		goto unmap_failed;
1623 
1624 	/*
1625 	 * A successful unmap should be followed by removal of map_obj
1626 	 * from dmm_map_list, so that mapped memory resource tracking
1627 	 * remains uptodate
1628 	 */
1629 	remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
1630 
1631 unmap_failed:
1632 	mutex_unlock(&proc_lock);
1633 
1634 func_end:
1635 	dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
1636 		__func__, hprocessor, map_addr, status);
1637 	return status;
1638 }
1639 
1640 /*
1641  *  ======== proc_un_reserve_memory ========
1642  *  Purpose:
1643  *      Frees a previously reserved region of DSP address space.
1644  */
proc_un_reserve_memory(void * hprocessor,void * prsv_addr,struct process_context * pr_ctxt)1645 int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
1646 				  struct process_context *pr_ctxt)
1647 {
1648 	struct dmm_object *dmm_mgr;
1649 	int status = 0;
1650 	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1651 	struct dmm_rsv_object *rsv_obj;
1652 
1653 	if (!p_proc_object) {
1654 		status = -EFAULT;
1655 		goto func_end;
1656 	}
1657 
1658 	status = dmm_get_handle(p_proc_object, &dmm_mgr);
1659 	if (!dmm_mgr) {
1660 		status = -EFAULT;
1661 		goto func_end;
1662 	}
1663 
1664 	status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
1665 	if (status != 0)
1666 		goto func_end;
1667 
1668 	/*
1669 	 * A successful unreserve should be followed by removal of rsv_obj
1670 	 * from dmm_rsv_list, so that reserved memory resource tracking
1671 	 * remains uptodate
1672 	 */
1673 	spin_lock(&pr_ctxt->dmm_rsv_lock);
1674 	list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
1675 		if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
1676 			list_del(&rsv_obj->link);
1677 			kfree(rsv_obj);
1678 			break;
1679 		}
1680 	}
1681 	spin_unlock(&pr_ctxt->dmm_rsv_lock);
1682 
1683 func_end:
1684 	dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
1685 		__func__, hprocessor, prsv_addr, status);
1686 	return status;
1687 }
1688 
1689 /*
 *  ======== proc_monitor ========
1691  *  Purpose:
1692  *      Place the Processor in Monitor State. This is an internal
1693  *      function and a requirement before Processor is loaded.
1694  *      This does a bridge_brd_stop, dev_destroy2 and bridge_brd_monitor.
1695  *      In dev_destroy2 we delete the node manager.
1696  *  Parameters:
1697  *      p_proc_object:    Pointer to Processor Object
1698  *  Returns:
1699  *      0:	Processor placed in monitor mode.
1700  *      !0:       Failed to place processor in monitor mode.
1701  *  Requires:
1702  *      Valid Processor Handle
1703  *  Ensures:
1704  *      Success:	ProcObject state is PROC_IDLE
1705  */
proc_monitor(struct proc_object * proc_obj)1706 static int proc_monitor(struct proc_object *proc_obj)
1707 {
1708 	int status = -EPERM;
1709 	struct msg_mgr *hmsg_mgr;
1710 
1711 	/* This is needed only when Device is loaded when it is
1712 	 * already 'ACTIVE' */
1713 	/* Destroy the Node Manager, msg_ctrl Manager */
1714 	if (!dev_destroy2(proc_obj->dev_obj)) {
1715 		/* Destroy the msg_ctrl by calling msg_delete */
1716 		dev_get_msg_mgr(proc_obj->dev_obj, &hmsg_mgr);
1717 		if (hmsg_mgr) {
1718 			msg_delete(hmsg_mgr);
1719 			dev_set_msg_mgr(proc_obj->dev_obj, NULL);
1720 		}
1721 	}
1722 	/* Place the Board in the Monitor State */
1723 	if (!((*proc_obj->intf_fxns->brd_monitor)
1724 			  (proc_obj->bridge_context))) {
1725 		status = 0;
1726 	}
1727 
1728 	return status;
1729 }
1730 
1731 /*
1732  *  ======== get_envp_count ========
1733  *  Purpose:
1734  *      Return the number of elements in the envp array, including the
1735  *      terminating NULL element.
1736  */
static s32 get_envp_count(char **envp)
{
	s32 count = 0;

	/* A NULL array counts as zero elements, not one. */
	if (!envp)
		return 0;

	while (envp[count])
		count++;

	/* +1 so the terminating NULL entry is included in the count. */
	return count + 1;
}
1749 
1750 /*
1751  *  ======== prepend_envp ========
1752  *  Purpose:
1753  *      Prepend an environment variable=value pair to the new envp array, and
1754  *      copy in the existing var=value pairs in the old envp array.
1755  */
static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
			   s32 cnew_envp, char *sz_var)
{
	char **pp_envp = new_envp;

	/* Prepend new environ var=value string */
	*new_envp++ = sz_var;

	if (envp_elems > 0) {
		/* Copy the caller's environment; envp_elems already
		 * includes the terminating NULL entry (get_envp_count()
		 * counts it), so the result is NULL-terminated too. */
		while (envp_elems--)
			*new_envp++ = *envp++;
	} else {
		/* No caller environment: terminate explicitly.  The
		 * caller allocates cnew_envp == 2 slots for this case.
		 * Fix: the previous "if (envp_elems == 0)" check sat
		 * AFTER the post-decrementing loop, where envp_elems is
		 * always -1, so the terminator was never written and
		 * the code silently relied on a zeroed buffer. */
		*new_envp = NULL;
	}

	return pp_envp;
}
1774 
1775 /*
1776  *  ======== proc_notify_clients ========
1777  *  Purpose:
1778  *      Notify the processor the events.
1779  */
int proc_notify_clients(void *proc, u32 events)
{
	struct proc_object *proc_obj = (struct proc_object *)proc;

	/* Reject a NULL processor handle. */
	if (!proc_obj)
		return -EFAULT;

	/* Forward the events to every client registered on this
	 * processor's notification object. */
	ntfy_notify(proc_obj->ntfy_obj, events);
	return 0;
}
1794 
1795 /*
1796  *  ======== proc_notify_all_clients ========
1797  *  Purpose:
1798  *      Notify the processor the events. This includes notifying all clients
 *      attached to a particular DSP.
1800  */
int proc_notify_all_clients(void *proc, u32 events)
{
	struct proc_object *proc_obj = (struct proc_object *)proc;

	/* Reject a NULL processor handle. */
	if (!proc_obj)
		return -EFAULT;

	/* Broadcast the events to every client attached to the device. */
	dev_notify_clients(proc_obj->dev_obj, events);
	return 0;
}
1816 
1817 /*
1818  *  ======== proc_get_processor_id ========
1819  *  Purpose:
1820  *      Retrieves the processor ID.
1821  */
int proc_get_processor_id(void *proc, u32 *proc_id)
{
	struct proc_object *proc_obj = (struct proc_object *)proc;

	/* Reject a NULL processor handle. */
	if (!proc_obj)
		return -EFAULT;

	*proc_id = proc_obj->processor_id;
	return 0;
}
1834