1 /*
2  * node.c
3  *
4  * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5  *
6  * DSP/BIOS Bridge Node Manager.
7  *
8  * Copyright (C) 2005-2006 Texas Instruments, Inc.
9  *
10  * This package is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  *
14  * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17  */
18 
19 #include <linux/types.h>
20 #include <linux/bitmap.h>
21 #include <linux/list.h>
22 
23 /*  ----------------------------------- Host OS */
24 #include <dspbridge/host_os.h>
25 
26 /*  ----------------------------------- DSP/BIOS Bridge */
27 #include <dspbridge/dbdefs.h>
28 
29 /*  ----------------------------------- OS Adaptation Layer */
30 #include <dspbridge/memdefs.h>
31 #include <dspbridge/proc.h>
32 #include <dspbridge/strm.h>
33 #include <dspbridge/sync.h>
34 #include <dspbridge/ntfy.h>
35 
36 /*  ----------------------------------- Platform Manager */
37 #include <dspbridge/cmm.h>
38 #include <dspbridge/cod.h>
39 #include <dspbridge/dev.h>
40 #include <dspbridge/msg.h>
41 
42 /*  ----------------------------------- Resource Manager */
43 #include <dspbridge/dbdcd.h>
44 #include <dspbridge/disp.h>
45 #include <dspbridge/rms_sh.h>
46 
47 /*  ----------------------------------- Link Driver */
48 #include <dspbridge/dspdefs.h>
49 #include <dspbridge/dspioctl.h>
50 
51 /*  ----------------------------------- Others */
52 #include <dspbridge/uuidutil.h>
53 
54 /*  ----------------------------------- This */
55 #include <dspbridge/nodepriv.h>
56 #include <dspbridge/node.h>
57 #include <dspbridge/dmm.h>
58 
59 /* Static/Dynamic Loader includes */
60 #include <dspbridge/dbll.h>
61 #include <dspbridge/nldr.h>
62 
63 #include <dspbridge/drv.h>
64 #include <dspbridge/resourcecleanup.h>
65 #include <_tiomap.h>
66 
67 #include <dspbridge/dspdeh.h>
68 
69 #define HOSTPREFIX	  "/host"
70 #define PIPEPREFIX	  "/dbpipe"
71 
72 #define MAX_INPUTS(h)  \
73 		((h)->dcd_props.obj_data.node_obj.ndb_props.num_input_streams)
74 #define MAX_OUTPUTS(h) \
75 		((h)->dcd_props.obj_data.node_obj.ndb_props.num_output_streams)
76 
77 #define NODE_GET_PRIORITY(h) ((h)->prio)
78 #define NODE_SET_PRIORITY(hnode, prio) ((hnode)->prio = prio)
79 #define NODE_SET_STATE(hnode, state) ((hnode)->node_state = state)
80 
81 #define MAXPIPES	100	/* Max # of /pipe connections (CSL limit) */
82 #define MAXDEVSUFFIXLEN 2	/* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */
83 
84 #define PIPENAMELEN     (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN)
85 #define HOSTNAMELEN     (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN)
86 
87 #define MAXDEVNAMELEN	32	/* dsp_ndbprops.ac_name size */
88 #define CREATEPHASE	1
89 #define EXECUTEPHASE	2
90 #define DELETEPHASE	3
91 
92 /* Define default STRM parameters */
93 /*
94  *  TBD: Put in header file, make global DSP_STRMATTRS with defaults,
95  *  or make defaults configurable.
96  */
97 #define DEFAULTBUFSIZE		32
98 #define DEFAULTNBUFS		2
99 #define DEFAULTSEGID		0
100 #define DEFAULTALIGNMENT	0
101 #define DEFAULTTIMEOUT		10000
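
/*
 * Sketch (illustrative only; the dsp_strmattr field names below are
 * assumed from dbdefs.h, not defined in this file): when a connection
 * supplies no stream attributes, fill_stream_def() falls back to values
 * roughly equivalent to:
 *
 *	struct dsp_strmattr attrs = {
 *		.seg_id		= DEFAULTSEGID,
 *		.buf_size	= DEFAULTBUFSIZE,
 *		.num_bufs	= DEFAULTNBUFS,
 *		.buf_alignment	= DEFAULTALIGNMENT,
 *		.timeout	= DEFAULTTIMEOUT,
 *		.strm_mode	= STRMMODE_PROCCOPY,
 *	};
 */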
102 
103 #define RMSQUERYSERVER		0
104 #define RMSCONFIGURESERVER	1
105 #define RMSCREATENODE		2
106 #define RMSEXECUTENODE		3
107 #define RMSDELETENODE		4
108 #define RMSCHANGENODEPRIORITY	5
109 #define RMSREADMEMORY		6
110 #define RMSWRITEMEMORY		7
111 #define RMSCOPY			8
112 #define MAXTIMEOUT		2000
113 
114 #define NUMRMSFXNS		9
115 
116 #define PWR_TIMEOUT		500	/* default PWR timeout in msec */
117 
118 #define STACKSEGLABEL "L1DSRAM_HEAP"	/* Label for DSP Stack Segment Addr */
119 
120 /*
121  *  ======== node_mgr ========
122  */
123 struct node_mgr {
124 	struct dev_object *dev_obj;	/* Device object */
125 	/* Function interface to Bridge driver */
126 	struct bridge_drv_interface *intf_fxns;
127 	struct dcd_manager *dcd_mgr;	/* Proc/Node data manager */
128 	struct disp_object *disp_obj;	/* Node dispatcher */
129 	struct list_head node_list;	/* List of all allocated nodes */
130 	u32 num_nodes;		/* Number of nodes in node_list */
131 	u32 num_created;	/* Number of nodes *created* on DSP */
132 	DECLARE_BITMAP(pipe_map, MAXPIPES); /* Pipe connection bitmap */
133 	DECLARE_BITMAP(pipe_done_map, MAXPIPES); /* Pipes that are half free */
134 	/* Channel allocation bitmap */
135 	DECLARE_BITMAP(chnl_map, CHNL_MAXCHANNELS);
136 	/* DMA Channel allocation bitmap */
137 	DECLARE_BITMAP(dma_chnl_map, CHNL_MAXCHANNELS);
138 	/* Zero-Copy Channel alloc bitmap */
139 	DECLARE_BITMAP(zc_chnl_map, CHNL_MAXCHANNELS);
140 	struct ntfy_object *ntfy_obj;	/* Manages registered notifications */
141 	struct mutex node_mgr_lock;	/* For critical sections */
142 	u32 fxn_addrs[NUMRMSFXNS];	/* RMS function addresses */
143 	struct msg_mgr *msg_mgr_obj;
144 
145 	/* Processor properties needed by Node Dispatcher */
146 	u32 num_chnls;		/* Total number of channels */
147 	u32 chnl_offset;	/* Offset of chnl ids rsvd for RMS */
148 	u32 chnl_buf_size;	/* Buffer size for data to RMS */
149 	int proc_family;	/* eg, 5000 */
150 	int proc_type;		/* eg, 5510 */
151 	u32 dsp_word_size;	/* Size of DSP word in host bytes */
152 	u32 dsp_data_mau_size;	/* Size of DSP data MAU */
153 	u32 dsp_mau_size;	/* Size of MAU */
154 	s32 min_pri;		/* Minimum runtime priority for node */
155 	s32 max_pri;		/* Maximum runtime priority for node */
156 
157 	struct strm_mgr *strm_mgr_obj;	/* STRM manager */
158 
159 	/* Loader properties */
160 	struct nldr_object *nldr_obj;	/* Handle to loader */
161 	struct node_ldr_fxns nldr_fxns;	/* Handle to loader functions */
162 };
163 
164 /*
165  *  ======== connecttype ========
166  */
167 enum connecttype {
168 	NOTCONNECTED = 0,
169 	NODECONNECT,
170 	HOSTCONNECT,
171 	DEVICECONNECT,
172 };
173 
174 /*
175  *  ======== stream_chnl ========
176  */
177 struct stream_chnl {
178 	enum connecttype type;	/* Type of stream connection */
179 	u32 dev_id;		/* pipe or channel id */
180 };
181 
182 /*
183  *  ======== node_object ========
184  */
185 struct node_object {
186 	struct list_head list_elem;
187 	struct node_mgr *node_mgr;	/* The manager of this node */
188 	struct proc_object *processor;	/* Back pointer to processor */
189 	struct dsp_uuid node_uuid;	/* Node's ID */
190 	s32 prio;		/* Node's current priority */
191 	u32 timeout;		/* Timeout for blocking NODE calls */
192 	u32 heap_size;		/* Heap Size */
193 	u32 dsp_heap_virt_addr;	/* Heap virtual address on the DSP */
194 	u32 gpp_heap_virt_addr;	/* Heap virtual address on the GPP */
195 	enum node_type ntype;	/* Type of node: message, task, etc */
196 	enum node_state node_state;	/* NODE_ALLOCATED, NODE_CREATED, ... */
197 	u32 num_inputs;		/* Current number of inputs */
198 	u32 num_outputs;	/* Current number of outputs */
199 	u32 max_input_index;	/* Current max input stream index */
200 	u32 max_output_index;	/* Current max output stream index */
201 	struct stream_chnl *inputs;	/* Node's input streams */
202 	struct stream_chnl *outputs;	/* Node's output streams */
203 	struct node_createargs create_args;	/* Args for node create func */
204 	nodeenv node_env;	/* Environment returned by RMS */
205 	struct dcd_genericobj dcd_props;	/* Node properties from DCD */
206 	struct dsp_cbdata *args;	/* Optional args to pass to node */
207 	struct ntfy_object *ntfy_obj;	/* Manages registered notifications */
208 	char *str_dev_name;	/* device name, if device node */
209 	struct sync_object *sync_done;	/* Synchronize node_terminate */
210 	s32 exit_status;	/* execute function return status */
211 
212 	/* Information needed for node_get_attr() */
213 	void *device_owner;	/* If dev node, task that owns it */
214 	u32 num_gpp_inputs;	/* Current # of from GPP streams */
215 	u32 num_gpp_outputs;	/* Current # of to GPP streams */
216 	/* Current stream connections */
217 	struct dsp_streamconnect *stream_connect;
218 
219 	/* Message queue */
220 	struct msg_queue *msg_queue_obj;
221 
222 	/* These fields used for SM messaging */
223 	struct cmm_xlatorobject *xlator;	/* Node's SM addr translator */
224 
225 	/* Handle to pass to dynamic loader */
226 	struct nldr_nodeobject *nldr_node_obj;
227 	bool loaded;		/* Code is (dynamically) loaded */
228 	bool phase_split;	/* Phases split in many libs or ovly */
229 
230 };
231 
232 /* Default buffer attributes */
233 static struct dsp_bufferattr node_dfltbufattrs = {
234 	.cb_struct = 0,
235 	.segment_id = 1,
236 	.buf_alignment = 0,
237 };
238 
239 static void delete_node(struct node_object *hnode,
240 			struct process_context *pr_ctxt);
241 static void delete_node_mgr(struct node_mgr *hnode_mgr);
242 static void fill_stream_connect(struct node_object *node1,
243 				struct node_object *node2, u32 stream1,
244 				u32 stream2);
245 static void fill_stream_def(struct node_object *hnode,
246 			    struct node_strmdef *pstrm_def,
247 			    struct dsp_strmattr *pattrs);
248 static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream);
249 static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
250 				  u32 phase);
251 static int get_node_props(struct dcd_manager *hdcd_mgr,
252 				 struct node_object *hnode,
253 				 const struct dsp_uuid *node_uuid,
254 				 struct dcd_genericobj *dcd_prop);
255 static int get_proc_props(struct node_mgr *hnode_mgr,
256 				 struct dev_object *hdev_obj);
257 static int get_rms_fxns(struct node_mgr *hnode_mgr);
258 static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
259 		u32 ul_num_bytes, u32 mem_space);
260 static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
261 		     u32 ul_num_bytes, u32 mem_space);
262 
263 /* Dynamic loader functions. */
264 static struct node_ldr_fxns nldr_fxns = {
265 	nldr_allocate,
266 	nldr_create,
267 	nldr_delete,
268 	nldr_get_fxn_addr,
269 	nldr_load,
270 	nldr_unload,
271 };
272 
273 enum node_state node_get_state(void *hnode)
274 {
275 	struct node_object *pnode = (struct node_object *)hnode;
276 	if (!pnode)
277 		return -1;
278 	return pnode->node_state;
279 }
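
/*
 * Illustrative use only (not part of the driver): callers typically check
 * the state returned above before issuing a state-changing request, e.g.
 *
 *	if (node_get_state(hnode) == NODE_ALLOCATED)
 *		status = node_create(hnode);
 *
 * node_create() repeats this check under node_mgr_lock, so the early test
 * is merely an optimization.
 */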
280 
281 /*
282  *  ======== node_allocate ========
283  *  Purpose:
284  *      Allocate GPP resources to manage a node on the DSP.
285  */
286 int node_allocate(struct proc_object *hprocessor,
287 			const struct dsp_uuid *node_uuid,
288 			const struct dsp_cbdata *pargs,
289 			const struct dsp_nodeattrin *attr_in,
290 			struct node_res_object **noderes,
291 			struct process_context *pr_ctxt)
292 {
293 	struct node_mgr *hnode_mgr;
294 	struct dev_object *hdev_obj;
295 	struct node_object *pnode = NULL;
296 	enum node_type node_type = NODE_TASK;
297 	struct node_msgargs *pmsg_args;
298 	struct node_taskargs *ptask_args;
299 	u32 num_streams;
300 	struct bridge_drv_interface *intf_fxns;
301 	int status = 0;
302 	struct cmm_object *hcmm_mgr = NULL;	/* Shared memory manager hndl */
303 	u32 proc_id;
304 	u32 pul_value;
305 	u32 dynext_base;
306 	u32 off_set = 0;
307 	u32 ul_stack_seg_val;
308 	struct cfg_hostres *host_res;
309 	struct bridge_dev_context *pbridge_context;
310 	u32 mapped_addr = 0;
311 	u32 map_attrs = 0x0;
312 	struct dsp_processorstate proc_state;
313 #ifdef DSP_DMM_DEBUG
314 	struct dmm_object *dmm_mgr;
315 	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
316 #endif
317 
318 	void *node_res;
319 
320 	*noderes = NULL;
321 
322 	status = proc_get_processor_id(hprocessor, &proc_id);
323 
324 	if (proc_id != DSP_UNIT)
325 		goto func_end;
326 
327 	status = proc_get_dev_object(hprocessor, &hdev_obj);
328 	if (!status) {
329 		status = dev_get_node_manager(hdev_obj, &hnode_mgr);
330 		if (hnode_mgr == NULL)
331 			status = -EPERM;
332 
333 	}
334 
335 	if (status)
336 		goto func_end;
337 
338 	status = dev_get_bridge_context(hdev_obj, &pbridge_context);
339 	if (!pbridge_context) {
340 		status = -EFAULT;
341 		goto func_end;
342 	}
343 
344 	status = proc_get_state(hprocessor, &proc_state,
345 				sizeof(struct dsp_processorstate));
346 	if (status)
347 		goto func_end;
348 	/* If processor is in error state then don't attempt
349 	   to allocate the node */
350 	if (proc_state.proc_state == PROC_ERROR) {
351 		status = -EPERM;
352 		goto func_end;
353 	}
354 
355 	/* Assuming that 0 is not a valid function address */
356 	if (hnode_mgr->fxn_addrs[0] == 0) {
357 		/* No RMS on target - we currently can't handle this */
358 		pr_err("%s: Failed, no RMS in base image\n", __func__);
359 		status = -EPERM;
360 	} else {
361 		/* Validate attr_in fields, if non-NULL */
362 		if (attr_in) {
363 			/* Check if attr_in->prio is within range */
364 			if (attr_in->prio < hnode_mgr->min_pri ||
365 			    attr_in->prio > hnode_mgr->max_pri)
366 				status = -EDOM;
367 		}
368 	}
369 	/* Allocate node object and fill in */
370 	if (status)
371 		goto func_end;
372 
373 	pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
374 	if (pnode == NULL) {
375 		status = -ENOMEM;
376 		goto func_end;
377 	}
378 	pnode->node_mgr = hnode_mgr;
379 	/* This critical section protects get_node_props */
380 	mutex_lock(&hnode_mgr->node_mgr_lock);
381 
382 	/* Get dsp_ndbprops from node database */
383 	status = get_node_props(hnode_mgr->dcd_mgr, pnode, node_uuid,
384 				&(pnode->dcd_props));
385 	if (status)
386 		goto func_cont;
387 
388 	pnode->node_uuid = *node_uuid;
389 	pnode->processor = hprocessor;
390 	pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
391 	pnode->timeout = pnode->dcd_props.obj_data.node_obj.ndb_props.timeout;
392 	pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;
393 
394 	/* Currently only C64 DSP builds support Node Dynamic heaps */
395 	/* Allocate memory for node heap */
396 	pnode->create_args.asa.task_arg_obj.heap_size = 0;
397 	pnode->create_args.asa.task_arg_obj.dsp_heap_addr = 0;
398 	pnode->create_args.asa.task_arg_obj.dsp_heap_res_addr = 0;
399 	pnode->create_args.asa.task_arg_obj.gpp_heap_addr = 0;
400 	if (!attr_in)
401 		goto func_cont;
402 
403 	/* Check if we have a user allocated node heap */
404 	if (!(attr_in->pgpp_virt_addr))
405 		goto func_cont;
406 
407 	/* check for page aligned Heap size */
408 	if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
409 		pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
410 		       __func__, attr_in->heap_size);
411 		status = -EINVAL;
412 	} else {
413 		pnode->create_args.asa.task_arg_obj.heap_size =
414 		    attr_in->heap_size;
415 		pnode->create_args.asa.task_arg_obj.gpp_heap_addr =
416 		    (u32) attr_in->pgpp_virt_addr;
417 	}
418 	if (status)
419 		goto func_cont;
420 
421 	status = proc_reserve_memory(hprocessor,
422 				     pnode->create_args.asa.task_arg_obj.
423 				     heap_size + PAGE_SIZE,
424 				     (void **)&(pnode->create_args.asa.
425 					task_arg_obj.dsp_heap_res_addr),
426 				     pr_ctxt);
427 	if (status) {
428 		pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
429 		       __func__, status);
430 		goto func_cont;
431 	}
432 #ifdef DSP_DMM_DEBUG
433 	status = dmm_get_handle(p_proc_object, &dmm_mgr);
434 	if (!dmm_mgr) {
435 		status = DSP_EHANDLE;
436 		goto func_cont;
437 	}
438 
439 	dmm_mem_map_dump(dmm_mgr);
440 #endif
441 
442 	map_attrs |= DSP_MAPLITTLEENDIAN;
443 	map_attrs |= DSP_MAPELEMSIZE32;
444 	map_attrs |= DSP_MAPVIRTUALADDR;
445 	status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
446 			  pnode->create_args.asa.task_arg_obj.heap_size,
447 			  (void *)pnode->create_args.asa.task_arg_obj.
448 			  dsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
449 			  pr_ctxt);
450 	if (status)
451 		pr_err("%s: Failed to map memory for Heap: 0x%x\n",
452 		       __func__, status);
453 	else
454 		pnode->create_args.asa.task_arg_obj.dsp_heap_addr =
455 		    (u32) mapped_addr;
456 
457 func_cont:
458 	mutex_unlock(&hnode_mgr->node_mgr_lock);
459 	if (attr_in != NULL) {
460 		/* Overrides of NDB properties */
461 		pnode->timeout = attr_in->timeout;
462 		pnode->prio = attr_in->prio;
463 	}
464 	/* Create object to manage notifications */
465 	if (!status) {
466 		pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
467 							GFP_KERNEL);
468 		if (pnode->ntfy_obj)
469 			ntfy_init(pnode->ntfy_obj);
470 		else
471 			status = -ENOMEM;
472 	}
473 
474 	if (!status) {
475 		node_type = node_get_type(pnode);
476 		/*  Allocate dsp_streamconnect array for device, task, and
477 		 *  dais socket nodes. */
478 		if (node_type != NODE_MESSAGE) {
479 			num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
480 			pnode->stream_connect = kzalloc(num_streams *
481 					sizeof(struct dsp_streamconnect),
482 					GFP_KERNEL);
483 			if (num_streams > 0 && pnode->stream_connect == NULL)
484 				status = -ENOMEM;
485 
486 		}
487 		if (!status && (node_type == NODE_TASK ||
488 					      node_type == NODE_DAISSOCKET)) {
489 			/* Allocate arrays for maintaining stream connections */
490 			pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
491 					sizeof(struct stream_chnl), GFP_KERNEL);
492 			pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
493 					sizeof(struct stream_chnl), GFP_KERNEL);
494 			ptask_args = &(pnode->create_args.asa.task_arg_obj);
495 			ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
496 						sizeof(struct node_strmdef),
497 						GFP_KERNEL);
498 			ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
499 						sizeof(struct node_strmdef),
500 						GFP_KERNEL);
501 			if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
502 						       ptask_args->strm_in_def
503 						       == NULL))
504 			    || (MAX_OUTPUTS(pnode) > 0
505 				&& (pnode->outputs == NULL
506 				    || ptask_args->strm_out_def == NULL)))
507 				status = -ENOMEM;
508 		}
509 	}
510 	if (!status && (node_type != NODE_DEVICE)) {
511 		/* Create an event that will be posted when RMS_EXIT is
512 		 * received. */
513 		pnode->sync_done = kzalloc(sizeof(struct sync_object),
514 								GFP_KERNEL);
515 		if (pnode->sync_done)
516 			sync_init_event(pnode->sync_done);
517 		else
518 			status = -ENOMEM;
519 
520 		if (!status) {
521 			/* Get the shared mem mgr for this node's dev object */
522 			status = cmm_get_handle(hprocessor, &hcmm_mgr);
523 			if (!status) {
524 				/* Allocate a SM addr translator for this node
525 				 * w/ deflt attr */
526 				status = cmm_xlator_create(&pnode->xlator,
527 							   hcmm_mgr, NULL);
528 			}
529 		}
530 		if (!status) {
531 			/* Fill in message args */
532 			if ((pargs != NULL) && (pargs->cb_data > 0)) {
533 				pmsg_args =
534 				    &(pnode->create_args.asa.node_msg_args);
535 				pmsg_args->pdata = kzalloc(pargs->cb_data,
536 								GFP_KERNEL);
537 				if (pmsg_args->pdata == NULL) {
538 					status = -ENOMEM;
539 				} else {
540 					pmsg_args->arg_length = pargs->cb_data;
541 					memcpy(pmsg_args->pdata,
542 					       pargs->node_data,
543 					       pargs->cb_data);
544 				}
545 			}
546 		}
547 	}
548 
549 	if (!status && node_type != NODE_DEVICE) {
550 		/* Create a message queue for this node */
551 		intf_fxns = hnode_mgr->intf_fxns;
552 		status =
553 		    (*intf_fxns->msg_create_queue) (hnode_mgr->msg_mgr_obj,
554 							&pnode->msg_queue_obj,
555 							0,
556 							pnode->create_args.asa.
557 							node_msg_args.max_msgs,
558 							pnode);
559 	}
560 
561 	if (!status) {
562 		/* Create object for dynamic loading */
563 
564 		status = hnode_mgr->nldr_fxns.allocate(hnode_mgr->nldr_obj,
565 							   (void *)pnode,
566 							   &pnode->dcd_props.
567 							   obj_data.node_obj,
568 							   &pnode->
569 							   nldr_node_obj,
570 							   &pnode->phase_split);
571 	}
572 
573 	/* Compare the value read from the node properties with STACKSEGLABEL;
574 	 * if they match, read the address of STACKSEGLABEL, calculate the
575 	 * GPP address, read the value at that address, and override the
576 	 * stack_seg value in the task args */
577 	if (!status &&
578 	    (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
579 	    stack_seg_name != NULL) {
580 		if (strcmp((char *)
581 			   pnode->dcd_props.obj_data.node_obj.ndb_props.
582 			   stack_seg_name, STACKSEGLABEL) == 0) {
583 			void __iomem *stack_seg;
584 			u32 stack_seg_pa;
585 
586 			status =
587 			    hnode_mgr->nldr_fxns.
588 			    get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
589 					     &dynext_base);
590 			if (status)
591 				pr_err("%s: Failed to get addr for DYNEXT_BEG"
592 				       " status = 0x%x\n", __func__, status);
593 
594 			status =
595 			    hnode_mgr->nldr_fxns.
596 			    get_fxn_addr(pnode->nldr_node_obj,
597 					     "L1DSRAM_HEAP", &pul_value);
598 
599 			if (status)
600 				pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
601 				       " status = 0x%x\n", __func__, status);
602 
603 			host_res = pbridge_context->resources;
604 			if (!host_res)
605 				status = -EPERM;
606 
607 			if (status) {
608 				pr_err("%s: Failed to get host resource, status"
609 				       " = 0x%x\n", __func__, status);
610 				goto func_end;
611 			}
612 
613 			off_set = pul_value - dynext_base;
614 			stack_seg_pa = host_res->mem_phys[1] + off_set;
615 			stack_seg = ioremap(stack_seg_pa, SZ_32);
616 			if (!stack_seg) {
617 				status = -ENOMEM;
618 				goto func_end;
619 			}
620 
621 			ul_stack_seg_val = readl(stack_seg);
622 
623 			iounmap(stack_seg);
624 
625 			dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
626 				" 0x%x\n", __func__, ul_stack_seg_val,
627 				host_res->mem_base[1] + off_set);
628 
629 			pnode->create_args.asa.task_arg_obj.stack_seg =
630 			    ul_stack_seg_val;
631 
632 		}
633 	}
634 
635 	if (!status) {
636 		/* Add the node to the node manager's list of allocated
637 		 * nodes. */
638 		NODE_SET_STATE(pnode, NODE_ALLOCATED);
639 
640 		mutex_lock(&hnode_mgr->node_mgr_lock);
641 
642 		list_add_tail(&pnode->list_elem, &hnode_mgr->node_list);
643 		++(hnode_mgr->num_nodes);
644 
645 		/* Exit critical section */
646 		mutex_unlock(&hnode_mgr->node_mgr_lock);
647 
648 		/* Preset this to assume phases are split
649 		 * (for overlay and dll) */
650 		pnode->phase_split = true;
651 
652 		/* Notify all clients registered for DSP_NODESTATECHANGE. */
653 		proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
654 	} else {
655 		/* Cleanup */
656 		if (pnode)
657 			delete_node(pnode, pr_ctxt);
658 
659 	}
660 
661 	if (!status) {
662 		status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
663 		if (status) {
664 			delete_node(pnode, pr_ctxt);
665 			goto func_end;
666 		}
667 
668 		*noderes = (struct node_res_object *)node_res;
669 		drv_proc_node_update_heap_status(node_res, true);
670 		drv_proc_node_update_status(node_res, true);
671 	}
672 func_end:
673 	dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
674 		"node_res: %p status: 0x%x\n", __func__, hprocessor,
675 		node_uuid, pargs, attr_in, noderes, status);
676 	return status;
677 }
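
/*
 * Sketch (illustrative only, not part of the driver): the usual GPP-side
 * node life cycle built from the entry points in this file. It assumes
 * hprocessor and pr_ctxt came from an earlier processor attach on behalf
 * of the client, and that node_run()/node_terminate() counterparts (not
 * shown in this excerpt) handle the execute phase:
 *
 *	struct node_res_object *res;
 *	int exit_status;
 *
 *	status = node_allocate(hprocessor, &uuid, NULL, NULL, &res, pr_ctxt);
 *	if (!status)
 *		status = node_create(res->node);
 *	if (!status)
 *		status = node_run(res->node);
 *	...
 *	node_terminate(res->node, &exit_status);
 *	node_delete(res, pr_ctxt);
 */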
678 
679 /*
680  *  ======== node_alloc_msg_buf ========
681  *  Purpose:
682  *      Allocates buffer for zero copy messaging.
683  */
684 DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
685 			 struct dsp_bufferattr *pattr,
686 			 u8 **pbuffer)
687 {
688 	struct node_object *pnode = (struct node_object *)hnode;
689 	int status = 0;
690 	bool va_flag = false;
691 	bool set_info;
692 	u32 proc_id;
693 
694 	if (!pnode)
695 		status = -EFAULT;
696 	else if (node_get_type(pnode) == NODE_DEVICE)
697 		status = -EPERM;
698 
699 	if (status)
700 		goto func_end;
701 
702 	if (pattr == NULL)
703 		pattr = &node_dfltbufattrs;	/* set defaults */
704 
705 	status = proc_get_processor_id(pnode->processor, &proc_id);
706 	if (proc_id != DSP_UNIT) {
707 		goto func_end;
708 	}
709 	/*  If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
710 	 *  virt address, so set this info in this node's translator
711 	 *  object for future ref. If MEM_GETVIRTUALSEGID then retrieve
712 	 *  virtual address from node's translator. */
713 	if ((pattr->segment_id & MEM_SETVIRTUALSEGID) ||
714 	    (pattr->segment_id & MEM_GETVIRTUALSEGID)) {
715 		va_flag = true;
716 		set_info = (pattr->segment_id & MEM_SETVIRTUALSEGID) ?
717 		    true : false;
718 		/* Clear mask bits */
719 		pattr->segment_id &= ~MEM_MASKVIRTUALSEGID;
720 		/* Set/get this node's translators virtual address base/size */
721 		status = cmm_xlator_info(pnode->xlator, pbuffer, usize,
722 					 pattr->segment_id, set_info);
723 	}
724 	if (!status && (!va_flag)) {
725 		if (pattr->segment_id != 1) {
726 			/* Node supports single SM segment only. */
727 			status = -EBADR;
728 		}
729 		/*  Arbitrary SM buffer alignment not supported for host side
730 		 *  allocs, but guaranteed for the following alignment
731 		 *  values. */
732 		switch (pattr->buf_alignment) {
733 		case 0:
734 		case 1:
735 		case 2:
736 		case 4:
737 			break;
738 		default:
739 			/* alignment value not supported */
740 			status = -EPERM;
741 			break;
742 		}
743 		if (!status) {
744 			/* allocate physical buffer from seg_id in node's
745 			 * translator */
746 			(void)cmm_xlator_alloc_buf(pnode->xlator, pbuffer,
747 						   usize);
748 			if (*pbuffer == NULL) {
749 				pr_err("%s: error - Out of shared memory\n",
750 				       __func__);
751 				status = -ENOMEM;
752 			}
753 		}
754 	}
755 func_end:
756 	return status;
757 }
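
/*
 * Illustrative only: with pattr == NULL the node_dfltbufattrs defaults
 * above apply (segment_id 1, no extra alignment), so a minimal
 * shared-memory message buffer round trip could look like:
 *
 *	u8 *buf = NULL;
 *
 *	status = node_alloc_msg_buf(hnode, 64, NULL, &buf);
 *	...
 *	if (!status)
 *		node_free_msg_buf(hnode, buf, NULL);
 */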
758 
759 /*
760  *  ======== node_change_priority ========
761  *  Purpose:
762  *      Change the priority of a node in the allocated state, or that is
763  *      currently running or paused on the target.
764  */
765 int node_change_priority(struct node_object *hnode, s32 prio)
766 {
767 	struct node_object *pnode = (struct node_object *)hnode;
768 	struct node_mgr *hnode_mgr = NULL;
769 	enum node_type node_type;
770 	enum node_state state;
771 	int status = 0;
772 	u32 proc_id;
773 
774 	if (!hnode || !hnode->node_mgr) {
775 		status = -EFAULT;
776 	} else {
777 		hnode_mgr = hnode->node_mgr;
778 		node_type = node_get_type(hnode);
779 		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
780 			status = -EPERM;
781 		else if (prio < hnode_mgr->min_pri || prio > hnode_mgr->max_pri)
782 			status = -EDOM;
783 	}
784 	if (status)
785 		goto func_end;
786 
787 	/* Enter critical section */
788 	mutex_lock(&hnode_mgr->node_mgr_lock);
789 
790 	state = node_get_state(hnode);
791 	if (state == NODE_ALLOCATED || state == NODE_PAUSED) {
792 		NODE_SET_PRIORITY(hnode, prio);
793 	} else {
794 		if (state != NODE_RUNNING) {
795 			status = -EBADR;
796 			goto func_cont;
797 		}
798 		status = proc_get_processor_id(pnode->processor, &proc_id);
799 		if (proc_id == DSP_UNIT) {
800 			status =
801 			    disp_node_change_priority(hnode_mgr->disp_obj,
802 						      hnode,
803 						      hnode_mgr->fxn_addrs
804 						      [RMSCHANGENODEPRIORITY],
805 						      hnode->node_env, prio);
806 		}
807 		if (status >= 0)
808 			NODE_SET_PRIORITY(hnode, prio);
809 
810 	}
811 func_cont:
812 	/* Leave critical section */
813 	mutex_unlock(&hnode_mgr->node_mgr_lock);
814 func_end:
815 	return status;
816 }
817 
818 /*
819  *  ======== node_connect ========
820  *  Purpose:
821  *      Connect two nodes on the DSP, or a node on the DSP to the GPP.
822  */
823 int node_connect(struct node_object *node1, u32 stream1,
824 			struct node_object *node2,
825 			u32 stream2, struct dsp_strmattr *pattrs,
826 			struct dsp_cbdata *conn_param)
827 {
828 	struct node_mgr *hnode_mgr;
829 	char *pstr_dev_name = NULL;
830 	enum node_type node1_type = NODE_TASK;
831 	enum node_type node2_type = NODE_TASK;
832 	enum dsp_strmmode strm_mode;
833 	struct node_strmdef *pstrm_def;
834 	struct node_strmdef *input = NULL;
835 	struct node_strmdef *output = NULL;
836 	struct node_object *dev_node_obj;
837 	struct node_object *hnode;
838 	struct stream_chnl *pstream;
839 	u32 pipe_id;
840 	u32 chnl_id;
841 	s8 chnl_mode;
842 	u32 dw_length;
843 	int status = 0;
844 
845 	if (!node1 || !node2)
846 		return -EFAULT;
847 
848 	/* The two nodes must be on the same processor */
849 	if (node1 != (struct node_object *)DSP_HGPPNODE &&
850 			node2 != (struct node_object *)DSP_HGPPNODE &&
851 			node1->node_mgr != node2->node_mgr)
852 		return -EPERM;
853 
854 	/* Cannot connect a node to itself */
855 	if (node1 == node2)
856 		return -EPERM;
857 
858 	/* node_get_type() will return NODE_GPP if hnode == DSP_HGPPNODE. */
859 	node1_type = node_get_type(node1);
860 	node2_type = node_get_type(node2);
861 	/* Check stream indices ranges */
862 	if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE &&
863 				stream1 >= MAX_OUTPUTS(node1)) ||
864 			(node2_type != NODE_GPP && node2_type != NODE_DEVICE &&
865 			 stream2 >= MAX_INPUTS(node2)))
866 		return -EINVAL;
867 
868 	/*
869 	 *  Only the following types of connections are allowed:
870 	 *      task/dais socket < == > task/dais socket
871 	 *      task/dais socket < == > device
872 	 *      task/dais socket < == > GPP
873 	 *
874 	 *  ie, no message nodes, and at least one task or dais
875 	 *  socket node.
876 	 */
877 	if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE ||
878 			(node1_type != NODE_TASK &&
879 			 node1_type != NODE_DAISSOCKET &&
880 			 node2_type != NODE_TASK &&
881 			 node2_type != NODE_DAISSOCKET))
882 		return -EPERM;
883 	/*
884 	 * Check stream mode. Default is STRMMODE_PROCCOPY.
885 	 */
886 	if (pattrs && pattrs->strm_mode != STRMMODE_PROCCOPY)
887 		return -EPERM;	/* illegal stream mode */
888 
889 	if (node1_type != NODE_GPP) {
890 		hnode_mgr = node1->node_mgr;
891 	} else {
892 		hnode_mgr = node2->node_mgr;
893 	}
894 
895 	/* Enter critical section */
896 	mutex_lock(&hnode_mgr->node_mgr_lock);
897 
898 	/* Nodes must be in the allocated state */
899 	if (node1_type != NODE_GPP &&
900 			node_get_state(node1) != NODE_ALLOCATED) {
901 		status = -EBADR;
902 		goto out_unlock;
903 	}
904 
905 	if (node2_type != NODE_GPP &&
906 			node_get_state(node2) != NODE_ALLOCATED) {
907 		status = -EBADR;
908 		goto out_unlock;
909 	}
910 
911 	/*
912 	 *  Check that stream indices for task and dais socket nodes
913 	 *  are not already in use. (Device nodes checked later)
914 	 */
915 	if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
916 		output = &(node1->create_args.asa.
917 				task_arg_obj.strm_out_def[stream1]);
918 		if (output->sz_device) {
919 			status = -EISCONN;
920 			goto out_unlock;
921 		}
922 
923 	}
924 	if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
925 		input = &(node2->create_args.asa.
926 				task_arg_obj.strm_in_def[stream2]);
927 		if (input->sz_device) {
928 			status = -EISCONN;
929 			goto out_unlock;
930 		}
931 
932 	}
933 	/* Connecting two task nodes? */
934 	if ((node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) &&
935 				(node2_type == NODE_TASK ||
936 				 node2_type == NODE_DAISSOCKET)) {
937 		/* Find available pipe */
938 		pipe_id = find_first_zero_bit(hnode_mgr->pipe_map, MAXPIPES);
939 		if (pipe_id == MAXPIPES) {
940 			status = -ECONNREFUSED;
941 			goto out_unlock;
942 		}
943 		set_bit(pipe_id, hnode_mgr->pipe_map);
944 		node1->outputs[stream1].type = NODECONNECT;
945 		node2->inputs[stream2].type = NODECONNECT;
946 		node1->outputs[stream1].dev_id = pipe_id;
947 		node2->inputs[stream2].dev_id = pipe_id;
948 		output->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
949 		input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
950 		if (!output->sz_device || !input->sz_device) {
951 			/* Undo the connection */
952 			kfree(output->sz_device);
953 			kfree(input->sz_device);
954 			clear_bit(pipe_id, hnode_mgr->pipe_map);
955 			status = -ENOMEM;
956 			goto out_unlock;
957 		}
958 		/* Copy "/dbpipe<pipId>" name to device names */
959 		sprintf(output->sz_device, "%s%d", PIPEPREFIX, pipe_id);
960 		strcpy(input->sz_device, output->sz_device);
961 	}
962 	/* Connecting task node to host? */
963 	if (node1_type == NODE_GPP || node2_type == NODE_GPP) {
964 		pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL);
965 		if (!pstr_dev_name) {
966 			status = -ENOMEM;
967 			goto out_unlock;
968 		}
969 
970 		chnl_mode = (node1_type == NODE_GPP) ?
971 			CHNL_MODETODSP : CHNL_MODEFROMDSP;
972 
973 		/*
974 		 *  Reserve a channel id. We need to put the name "/host<id>"
975 		 *  in the node's create_args, but the host
976 		 *  side channel will not be opened until DSPStream_Open is
977 		 *  called for this node.
978 		 */
979 		strm_mode = pattrs ? pattrs->strm_mode : STRMMODE_PROCCOPY;
980 		switch (strm_mode) {
981 		case STRMMODE_RDMA:
982 			chnl_id = find_first_zero_bit(hnode_mgr->dma_chnl_map,
983 					CHNL_MAXCHANNELS);
984 			if (chnl_id < CHNL_MAXCHANNELS) {
985 				set_bit(chnl_id, hnode_mgr->dma_chnl_map);
986 				/* dma chans are 2nd transport chnl set
987 				 * ids (e.g. 16-31) */
988 				chnl_id = chnl_id + hnode_mgr->num_chnls;
989 			}
990 			break;
991 		case STRMMODE_ZEROCOPY:
992 			chnl_id = find_first_zero_bit(hnode_mgr->zc_chnl_map,
993 					CHNL_MAXCHANNELS);
994 			if (chnl_id < CHNL_MAXCHANNELS) {
995 				set_bit(chnl_id, hnode_mgr->zc_chnl_map);
996 				/* zero-copy chans are 3rd transport set
997 				 * (e.g. 32-47) */
998 				chnl_id = chnl_id +
999 					(2 * hnode_mgr->num_chnls);
1000 			}
1001 			break;
1002 		case STRMMODE_PROCCOPY:
1003 			chnl_id = find_first_zero_bit(hnode_mgr->chnl_map,
1004 					CHNL_MAXCHANNELS);
1005 			if (chnl_id < CHNL_MAXCHANNELS)
1006 				set_bit(chnl_id, hnode_mgr->chnl_map);
1007 			break;
1008 		default:
1009 			status = -EINVAL;
1010 			goto out_unlock;
1011 		}
1012 		if (chnl_id == CHNL_MAXCHANNELS) {
1013 			status = -ECONNREFUSED;
1014 			goto out_unlock;
1015 		}
1016 
1017 		if (node1 == (struct node_object *)DSP_HGPPNODE) {
1018 			node2->inputs[stream2].type = HOSTCONNECT;
1019 			node2->inputs[stream2].dev_id = chnl_id;
1020 			input->sz_device = pstr_dev_name;
1021 		} else {
1022 			node1->outputs[stream1].type = HOSTCONNECT;
1023 			node1->outputs[stream1].dev_id = chnl_id;
1024 			output->sz_device = pstr_dev_name;
1025 		}
1026 		sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id);
1027 	}
1028 	/* Connecting task node to device node? */
1029 	if ((node1_type == NODE_DEVICE) || (node2_type == NODE_DEVICE)) {
1030 		if (node2_type == NODE_DEVICE) {
1031 			/* node1 == > device */
1032 			dev_node_obj = node2;
1033 			hnode = node1;
1034 			pstream = &(node1->outputs[stream1]);
1035 			pstrm_def = output;
1036 		} else {
1037 			/* device == > node2 */
1038 			dev_node_obj = node1;
1039 			hnode = node2;
1040 			pstream = &(node2->inputs[stream2]);
1041 			pstrm_def = input;
1042 		}
1043 		/* Set up create args */
1044 		pstream->type = DEVICECONNECT;
1045 		dw_length = strlen(dev_node_obj->str_dev_name);
1046 		if (conn_param)
1047 			pstrm_def->sz_device = kzalloc(dw_length + 1 +
1048 					conn_param->cb_data,
1049 					GFP_KERNEL);
1050 		else
1051 			pstrm_def->sz_device = kzalloc(dw_length + 1,
1052 					GFP_KERNEL);
1053 		if (!pstrm_def->sz_device) {
1054 			status = -ENOMEM;
1055 			goto out_unlock;
1056 		}
1057 		/* Copy device name */
1058 		strncpy(pstrm_def->sz_device,
1059 				dev_node_obj->str_dev_name, dw_length);
1060 		if (conn_param)
1061 			strncat(pstrm_def->sz_device,
1062 					(char *)conn_param->node_data,
1063 					(u32) conn_param->cb_data);
1064 		dev_node_obj->device_owner = hnode;
1065 	}
1066 	/* Fill in create args */
1067 	if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
1068 		node1->create_args.asa.task_arg_obj.num_outputs++;
1069 		fill_stream_def(node1, output, pattrs);
1070 	}
1071 	if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
1072 		node2->create_args.asa.task_arg_obj.num_inputs++;
1073 		fill_stream_def(node2, input, pattrs);
1074 	}
1075 	/* Update node1 and node2 stream_connect */
1076 	if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) {
1077 		node1->num_outputs++;
1078 		if (stream1 > node1->max_output_index)
1079 			node1->max_output_index = stream1;
1080 
1081 	}
1082 	if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) {
1083 		node2->num_inputs++;
1084 		if (stream2 > node2->max_input_index)
1085 			node2->max_input_index = stream2;
1086 
1087 	}
1088 	fill_stream_connect(node1, node2, stream1, stream2);
1089 	/* end of sync_enter_cs */
1090 	/* Exit critical section */
1091 out_unlock:
1092 	if (status && pstr_dev_name)
1093 		kfree(pstr_dev_name);
1094 	mutex_unlock(&hnode_mgr->node_mgr_lock);
1095 	dev_dbg(bridge, "%s: node1: %p stream1: %d node2: %p stream2: %d "
1096 			"pattrs: %p status: 0x%x\n", __func__, node1,
1097 			stream1, node2, stream2, pattrs, status);
1098 	return status;
1099 }
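
/*
 * Illustrative only: connecting output stream 0 of a task node to the
 * GPP with default (STRMMODE_PROCCOPY) attributes reserves a host
 * channel id and records the device name "/host<chnl_id>" in the node's
 * create args; the channel itself is opened later, when the stream is
 * opened for this node:
 *
 *	status = node_connect(hnode, 0,
 *			      (struct node_object *)DSP_HGPPNODE, 0,
 *			      NULL, NULL);
 */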
1100 
1101 /*
1102  *  ======== node_create ========
1103  *  Purpose:
1104  *      Create a node on the DSP by remotely calling the node's create function.
1105  */
1106 int node_create(struct node_object *hnode)
1107 {
1108 	struct node_object *pnode = (struct node_object *)hnode;
1109 	struct node_mgr *hnode_mgr;
1110 	struct bridge_drv_interface *intf_fxns;
1111 	u32 ul_create_fxn;
1112 	enum node_type node_type;
1113 	int status = 0;
1114 	int status1 = 0;
1115 	struct dsp_cbdata cb_data;
1116 	u32 proc_id = 255;
1117 	struct dsp_processorstate proc_state;
1118 	struct proc_object *hprocessor;
1119 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1120 	struct dspbridge_platform_data *pdata =
1121 	    omap_dspbridge_dev->dev.platform_data;
1122 #endif
1123 
1124 	if (!pnode) {
1125 		status = -EFAULT;
1126 		goto func_end;
1127 	}
1128 	hprocessor = hnode->processor;
1129 	status = proc_get_state(hprocessor, &proc_state,
1130 				sizeof(struct dsp_processorstate));
1131 	if (status)
1132 		goto func_end;
1133 	/* If processor is in error state then don't attempt to create
1134 	   new node */
1135 	if (proc_state.proc_state == PROC_ERROR) {
1136 		status = -EPERM;
1137 		goto func_end;
1138 	}
1139 	/* create struct dsp_cbdata struct for PWR calls */
1140 	cb_data.cb_data = PWR_TIMEOUT;
1141 	node_type = node_get_type(hnode);
1142 	hnode_mgr = hnode->node_mgr;
1143 	intf_fxns = hnode_mgr->intf_fxns;
1144 	/* Get access to node dispatcher */
1145 	mutex_lock(&hnode_mgr->node_mgr_lock);
1146 
1147 	/* Check node state */
1148 	if (node_get_state(hnode) != NODE_ALLOCATED)
1149 		status = -EBADR;
1150 
1151 	if (!status)
1152 		status = proc_get_processor_id(pnode->processor, &proc_id);
1153 
1154 	if (status)
1155 		goto func_cont2;
1156 
1157 	if (proc_id != DSP_UNIT)
1158 		goto func_cont2;
1159 
1160 	/* Make sure streams are properly connected */
1161 	if ((hnode->num_inputs && hnode->max_input_index >
1162 	     hnode->num_inputs - 1) ||
1163 	    (hnode->num_outputs && hnode->max_output_index >
1164 	     hnode->num_outputs - 1))
1165 		status = -ENOTCONN;
1166 
1167 	if (!status) {
1168 		/* If node's create function is not loaded, load it */
1169 		/* Boost the OPP level to max level that DSP can be requested */
1170 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1171 		if (pdata->cpu_set_freq)
1172 			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]);
1173 #endif
1174 		status = hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
1175 						       NLDR_CREATE);
1176 		/* Get address of node's create function */
1177 		if (!status) {
1178 			hnode->loaded = true;
1179 			if (node_type != NODE_DEVICE) {
1180 				status = get_fxn_address(hnode, &ul_create_fxn,
1181 							 CREATEPHASE);
1182 			}
1183 		} else {
1184 			pr_err("%s: failed to load create code: 0x%x\n",
1185 			       __func__, status);
1186 		}
1187 		/* Request the lowest OPP level */
1188 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1189 		if (pdata->cpu_set_freq)
1190 			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
1191 #endif
1192 		/* Get address of iAlg functions, if socket node */
1193 		if (!status) {
1194 			if (node_type == NODE_DAISSOCKET) {
1195 				status = hnode_mgr->nldr_fxns.get_fxn_addr
1196 				    (hnode->nldr_node_obj,
1197 				     hnode->dcd_props.obj_data.node_obj.
1198 				     str_i_alg_name,
1199 				     &hnode->create_args.asa.
1200 				     task_arg_obj.dais_arg);
1201 			}
1202 		}
1203 	}
1204 	if (!status) {
1205 		if (node_type != NODE_DEVICE) {
1206 			status = disp_node_create(hnode_mgr->disp_obj, hnode,
1207 						  hnode_mgr->fxn_addrs
1208 						  [RMSCREATENODE],
1209 						  ul_create_fxn,
1210 						  &(hnode->create_args),
1211 						  &(hnode->node_env));
1212 			if (status >= 0) {
1213 				/* Set the message queue id to the node env
1214 				 * pointer */
1215 				intf_fxns = hnode_mgr->intf_fxns;
1216 				(*intf_fxns->msg_set_queue_id) (hnode->
1217 							msg_queue_obj,
1218 							hnode->node_env);
1219 			}
1220 		}
1221 	}
1222 	/*  Phase II/Overlays: Create, execute, delete phases  possibly in
1223 	 *  different files/sections. */
1224 	if (hnode->loaded && hnode->phase_split) {
1225 		/* If create code was dynamically loaded, we can now unload
1226 		 * it. */
1227 		status1 = hnode_mgr->nldr_fxns.unload(hnode->nldr_node_obj,
1228 							  NLDR_CREATE);
1229 		hnode->loaded = false;
1230 	}
1231 	if (status1)
1232 		pr_err("%s: Failed to unload create code: 0x%x\n",
1233 		       __func__, status1);
1234 func_cont2:
1235 	/* Update node state and node manager state */
1236 	if (status >= 0) {
1237 		NODE_SET_STATE(hnode, NODE_CREATED);
1238 		hnode_mgr->num_created++;
1239 		goto func_cont;
1240 	}
1241 	if (status != -EBADR) {
1242 		/* Put back in NODE_ALLOCATED state if error occurred */
1243 		NODE_SET_STATE(hnode, NODE_ALLOCATED);
1244 	}
1245 func_cont:
1246 	/* Free access to node dispatcher */
1247 	mutex_unlock(&hnode_mgr->node_mgr_lock);
1248 func_end:
1249 	if (status >= 0) {
1250 		proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
1251 		ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
1252 	}
1253 
1254 	dev_dbg(bridge, "%s: hnode: %p status: 0x%x\n", __func__,
1255 		hnode, status);
1256 	return status;
1257 }
1258 
1259 /*
1260  *  ======== node_create_mgr ========
1261  *  Purpose:
1262  *      Create a NODE Manager object.
1263  */
1264 int node_create_mgr(struct node_mgr **node_man,
1265 			   struct dev_object *hdev_obj)
1266 {
1267 	u32 i;
1268 	struct node_mgr *node_mgr_obj = NULL;
1269 	struct disp_attr disp_attr_obj;
1270 	char *sz_zl_file = "";
1271 	struct nldr_attrs nldr_attrs_obj;
1272 	int status = 0;
1273 	u8 dev_type;
1274 
1275 	*node_man = NULL;
1276 	/* Allocate Node manager object */
1277 	node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
1278 	if (!node_mgr_obj)
1279 		return -ENOMEM;
1280 
1281 	node_mgr_obj->dev_obj = hdev_obj;
1282 
1283 	node_mgr_obj->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
1284 			GFP_KERNEL);
1285 	if (!node_mgr_obj->ntfy_obj) {
1286 		status = -ENOMEM;
1287 		goto out_err;
1288 	}
1289 	ntfy_init(node_mgr_obj->ntfy_obj);
1290 
1291 	INIT_LIST_HEAD(&node_mgr_obj->node_list);
1292 
1293 	dev_get_dev_type(hdev_obj, &dev_type);
1294 
1295 	status = dcd_create_manager(sz_zl_file, &node_mgr_obj->dcd_mgr);
1296 	if (status)
1297 		goto out_err;
1298 
1299 	status = get_proc_props(node_mgr_obj, hdev_obj);
1300 	if (status)
1301 		goto out_err;
1302 
1303 	/* Create NODE Dispatcher */
1304 	disp_attr_obj.chnl_offset = node_mgr_obj->chnl_offset;
1305 	disp_attr_obj.chnl_buf_size = node_mgr_obj->chnl_buf_size;
1306 	disp_attr_obj.proc_family = node_mgr_obj->proc_family;
1307 	disp_attr_obj.proc_type = node_mgr_obj->proc_type;
1308 
1309 	status = disp_create(&node_mgr_obj->disp_obj, hdev_obj, &disp_attr_obj);
1310 	if (status)
1311 		goto out_err;
1312 
1313 	/* Create a STRM Manager */
1314 	status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj);
1315 	if (status)
1316 		goto out_err;
1317 
1318 	dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns);
1319 	/* Get msg_ctrl queue manager */
1320 	dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj);
1321 	mutex_init(&node_mgr_obj->node_mgr_lock);
1322 
1323 	/* Block out reserved channels */
1324 	for (i = 0; i < node_mgr_obj->chnl_offset; i++)
1325 		set_bit(i, node_mgr_obj->chnl_map);
1326 
1327 	/* Block out channels reserved for RMS */
1328 	set_bit(node_mgr_obj->chnl_offset, node_mgr_obj->chnl_map);
1329 	set_bit(node_mgr_obj->chnl_offset + 1, node_mgr_obj->chnl_map);
1330 
1331 	/* NO RM Server on the IVA */
1332 	if (dev_type != IVA_UNIT) {
1333 		/* Get addresses of any RMS functions loaded */
1334 		status = get_rms_fxns(node_mgr_obj);
1335 		if (status)
1336 			goto out_err;
1337 	}
1338 
1339 	/* Get loader functions and create loader */
1340 	node_mgr_obj->nldr_fxns = nldr_fxns;	/* Dyn loader funcs */
1341 
1342 	nldr_attrs_obj.ovly = ovly;
1343 	nldr_attrs_obj.write = mem_write;
1344 	nldr_attrs_obj.dsp_word_size = node_mgr_obj->dsp_word_size;
1345 	nldr_attrs_obj.dsp_mau_size = node_mgr_obj->dsp_mau_size;
1346 	status = node_mgr_obj->nldr_fxns.create(&node_mgr_obj->nldr_obj,
1347 			hdev_obj,
1348 			&nldr_attrs_obj);
1349 	if (status)
1350 		goto out_err;
1351 
1352 	*node_man = node_mgr_obj;
1353 
1354 	return status;
1355 out_err:
1356 	delete_node_mgr(node_mgr_obj);
1357 	return status;
1358 }
1359 
1360 /*
1361  *  ======== node_delete ========
1362  *  Purpose:
1363  *      Delete a node on the DSP by remotely calling the node's delete function.
1364  *      Loads the node's delete function if necessary. Free GPP side resources
1365  *      after node's delete function returns.
1366  */
1367 int node_delete(struct node_res_object *noderes,
1368 		       struct process_context *pr_ctxt)
1369 {
1370 	struct node_object *pnode = noderes->node;
1371 	struct node_mgr *hnode_mgr;
1372 	struct proc_object *hprocessor;
1373 	struct disp_object *disp_obj;
1374 	u32 ul_delete_fxn;
1375 	enum node_type node_type;
1376 	enum node_state state;
1377 	int status = 0;
1378 	int status1 = 0;
1379 	struct dsp_cbdata cb_data;
1380 	u32 proc_id;
1381 	struct bridge_drv_interface *intf_fxns;
1382 
1383 	void *node_res = noderes;
1384 
1385 	struct dsp_processorstate proc_state;
1386 
1387 	if (!pnode) {
1388 		status = -EFAULT;
1389 		goto func_end;
1390 	}
1391 	/* create struct dsp_cbdata struct for PWR call */
1392 	cb_data.cb_data = PWR_TIMEOUT;
1393 	hnode_mgr = pnode->node_mgr;
1394 	hprocessor = pnode->processor;
1395 	disp_obj = hnode_mgr->disp_obj;
1396 	node_type = node_get_type(pnode);
1397 	intf_fxns = hnode_mgr->intf_fxns;
1398 	/* Enter critical section */
1399 	mutex_lock(&hnode_mgr->node_mgr_lock);
1400 
1401 	state = node_get_state(pnode);
1402 	/*  Execute delete phase code for non-device node in all cases
1403 	 *  except when the node was only allocated. Delete phase must be
1404 	 *  executed even if create phase was executed, but failed.
1405 	 *  If the node environment pointer is non-NULL, the delete phase
1406 	 *  code must be  executed. */
1407 	if (!(state == NODE_ALLOCATED && pnode->node_env == (u32) NULL) &&
1408 	    node_type != NODE_DEVICE) {
1409 		status = proc_get_processor_id(pnode->processor, &proc_id);
1410 		if (status)
1411 			goto func_cont1;
1412 
1413 		if (proc_id == DSP_UNIT || proc_id == IVA_UNIT) {
1414 			/*  If node has terminated, execute phase code will
1415 			 *  have already been unloaded in node_on_exit(). If the
1416 			 *  node is PAUSED, the execute phase is loaded, and it
1417 			 *  is now ok to unload it. If the node is running, we
1418 			 *  will unload the execute phase only after deleting
1419 			 *  the node. */
1420 			if (state == NODE_PAUSED && pnode->loaded &&
1421 			    pnode->phase_split) {
1422 				/* Ok to unload execute code as long as node
1423 				 * is not running */
1424 				status1 =
1425 				    hnode_mgr->nldr_fxns.
1426 				    unload(pnode->nldr_node_obj,
1427 					       NLDR_EXECUTE);
1428 				pnode->loaded = false;
1429 				NODE_SET_STATE(pnode, NODE_DONE);
1430 			}
1431 			/* Load delete phase code if not loaded or if we haven't
1432 			 * unloaded the EXECUTE phase */
1433 			if ((!(pnode->loaded) || (state == NODE_RUNNING)) &&
1434 			    pnode->phase_split) {
1435 				status =
1436 				    hnode_mgr->nldr_fxns.
1437 				    load(pnode->nldr_node_obj, NLDR_DELETE);
1438 				if (!status)
1439 					pnode->loaded = true;
1440 				else
1441 					pr_err("%s: fail - load delete code:"
1442 					       " 0x%x\n", __func__, status);
1443 			}
1444 		}
1445 func_cont1:
1446 		if (!status) {
1447 			/* Unblock a thread trying to terminate the node */
1448 			(void)sync_set_event(pnode->sync_done);
1449 			if (proc_id == DSP_UNIT) {
1450 				/* ul_delete_fxn = address of node's delete
1451 				 * function */
1452 				status = get_fxn_address(pnode, &ul_delete_fxn,
1453 							 DELETEPHASE);
1454 			} else if (proc_id == IVA_UNIT)
1455 				ul_delete_fxn = (u32) pnode->node_env;
1456 			if (!status) {
1457 				status = proc_get_state(hprocessor,
1458 						&proc_state,
1459 						sizeof(struct
1460 						       dsp_processorstate));
1461 				if (proc_state.proc_state != PROC_ERROR) {
1462 					status =
1463 					    disp_node_delete(disp_obj, pnode,
1464 							     hnode_mgr->
1465 							     fxn_addrs
1466 							     [RMSDELETENODE],
1467 							     ul_delete_fxn,
1468 							     pnode->node_env);
1469 				} else
1470 					NODE_SET_STATE(pnode, NODE_DONE);
1471 
1472 				/* Unload execute, if not unloaded, and delete
1473 				 * function */
1474 				if (state == NODE_RUNNING &&
1475 				    pnode->phase_split) {
1476 					status1 =
1477 					    hnode_mgr->nldr_fxns.
1478 					    unload(pnode->nldr_node_obj,
1479 						       NLDR_EXECUTE);
1480 				}
1481 				if (status1)
1482 					pr_err("%s: fail - unload execute code:"
1483 					       " 0x%x\n", __func__, status1);
1484 
1485 				status1 =
1486 				    hnode_mgr->nldr_fxns.unload(pnode->
1487 							    nldr_node_obj,
1488 							    NLDR_DELETE);
1489 				pnode->loaded = false;
1490 				if (status1)
1491 					pr_err("%s: fail - unload delete code: "
1492 					       "0x%x\n", __func__, status1);
1493 			}
1494 		}
1495 	}
1496 	/* Free host side resources even if a failure occurred */
1497 	/* Remove node from hnode_mgr->node_list */
1498 	list_del(&pnode->list_elem);
1499 	hnode_mgr->num_nodes--;
1500 	/* Decrement count of nodes created on DSP */
1501 	if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) &&
1502 					  (pnode->node_env != (u32) NULL)))
1503 		hnode_mgr->num_created--;
1504 	/*  Free host-side resources allocated by node_create()
1505 	 *  delete_node() fails if SM buffers not freed by client! */
1506 	drv_proc_node_update_status(node_res, false);
1507 	delete_node(pnode, pr_ctxt);
1508 
1509 	/*
1510 	 * Release all Node resources and its context
1511 	 */
1512 	idr_remove(pr_ctxt->node_id, ((struct node_res_object *)node_res)->id);
1513 	kfree(node_res);
1514 
1515 	/* Exit critical section */
1516 	mutex_unlock(&hnode_mgr->node_mgr_lock);
1517 	proc_notify_clients(hprocessor, DSP_NODESTATECHANGE);
1518 func_end:
1519 	dev_dbg(bridge, "%s: pnode: %p status 0x%x\n", __func__, pnode, status);
1520 	return status;
1521 }
1522 
1523 /*
1524  *  ======== node_delete_mgr ========
1525  *  Purpose:
1526  *      Delete the NODE Manager.
1527  */
1528 int node_delete_mgr(struct node_mgr *hnode_mgr)
1529 {
1530 	if (!hnode_mgr)
1531 		return -EFAULT;
1532 
1533 	delete_node_mgr(hnode_mgr);
1534 
1535 	return 0;
1536 }
1537 
1538 /*
1539  *  ======== node_enum_nodes ========
1540  *  Purpose:
1541  *      Enumerate currently allocated nodes.
1542  */
1543 int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
1544 			   u32 node_tab_size, u32 *pu_num_nodes,
1545 			   u32 *pu_allocated)
1546 {
1547 	struct node_object *hnode;
1548 	u32 i = 0;
1549 	int status = 0;
1550 
1551 	if (!hnode_mgr) {
1552 		status = -EFAULT;
1553 		goto func_end;
1554 	}
1555 	/* Enter critical section */
1556 	mutex_lock(&hnode_mgr->node_mgr_lock);
1557 
1558 	if (hnode_mgr->num_nodes > node_tab_size) {
1559 		*pu_allocated = hnode_mgr->num_nodes;
1560 		*pu_num_nodes = 0;
1561 		status = -EINVAL;
1562 	} else {
1563 		list_for_each_entry(hnode, &hnode_mgr->node_list, list_elem)
1564 			node_tab[i++] = hnode;
1565 		*pu_allocated = *pu_num_nodes = hnode_mgr->num_nodes;
1566 	}
1567 	/* end of sync_enter_cs */
1568 	/* Exit critical section */
1569 	mutex_unlock(&hnode_mgr->node_mgr_lock);
1570 func_end:
1571 	return status;
1572 }
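
/*
 * Illustrative only: when node_tab_size is too small the call above
 * fails with -EINVAL but still reports the required size through
 * pu_allocated, so a caller can probe and retry, for example:
 *
 *	u32 num, alloc;
 *	void *tab[8];
 *
 *	status = node_enum_nodes(hnode_mgr, tab, ARRAY_SIZE(tab),
 *				 &num, &alloc);
 *	if (status == -EINVAL)
 *		pr_info("need room for %u nodes\n", alloc);
 */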
1573 
1574 /*
1575  *  ======== node_free_msg_buf ========
1576  *  Purpose:
1577  *      Frees the message buffer.
1578  */
1579 int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
1580 			     struct dsp_bufferattr *pattr)
1581 {
1582 	struct node_object *pnode = (struct node_object *)hnode;
1583 	int status = 0;
1584 	u32 proc_id;
1585 
1586 	if (!hnode) {
1587 		status = -EFAULT;
1588 		goto func_end;
1589 	}
1590 	status = proc_get_processor_id(pnode->processor, &proc_id);
1591 	if (proc_id == DSP_UNIT) {
1592 		if (!status) {
1593 			if (pattr == NULL) {
1594 				/* set defaults */
1595 				pattr = &node_dfltbufattrs;
1596 			}
1597 			/* Node supports single SM segment only */
1598 			if (pattr->segment_id != 1)
1599 				status = -EBADR;
1600 
1601 			/* pbuffer is clients Va. */
1602 			status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
1603 		}
1604 	} else {
1605 	}
1606 func_end:
1607 	return status;
1608 }
1609 
1610 /*
1611  *  ======== node_get_attr ========
1612  *  Purpose:
1613  *      Copy the current attributes of the specified node into a dsp_nodeattr
1614  *      structure.
1615  */
1616 int node_get_attr(struct node_object *hnode,
1617 			 struct dsp_nodeattr *pattr, u32 attr_size)
1618 {
1619 	struct node_mgr *hnode_mgr;
1620 
1621 	if (!hnode)
1622 		return -EFAULT;
1623 
1624 	hnode_mgr = hnode->node_mgr;
1625 	/* Enter hnode_mgr critical section since we're accessing
1626 	 * data that could be changed by node_change_priority() and
1627 	 * node_connect(). */
1628 	mutex_lock(&hnode_mgr->node_mgr_lock);
1629 	pattr->cb_struct = sizeof(struct dsp_nodeattr);
1630 	/* dsp_nodeattrin */
1631 	pattr->in_node_attr_in.cb_struct =
1632 		sizeof(struct dsp_nodeattrin);
1633 	pattr->in_node_attr_in.prio = hnode->prio;
1634 	pattr->in_node_attr_in.timeout = hnode->timeout;
1635 	pattr->in_node_attr_in.heap_size =
1636 		hnode->create_args.asa.task_arg_obj.heap_size;
1637 	pattr->in_node_attr_in.pgpp_virt_addr = (void *)
1638 		hnode->create_args.asa.task_arg_obj.gpp_heap_addr;
1639 	pattr->node_attr_inputs = hnode->num_gpp_inputs;
1640 	pattr->node_attr_outputs = hnode->num_gpp_outputs;
1641 	/* dsp_nodeinfo */
1642 	get_node_info(hnode, &(pattr->node_info));
1643 	/* end of sync_enter_cs */
1644 	/* Exit critical section */
1645 	mutex_unlock(&hnode_mgr->node_mgr_lock);
1646 
1647 	return 0;
1648 }
1649 
1650 /*
1651  *  ======== node_get_channel_id ========
1652  *  Purpose:
1653  *      Get the channel index reserved for a stream connection between the
1654  *      host and a node.
1655  */
1656 int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
1657 			       u32 *chan_id)
1658 {
1659 	enum node_type node_type;
1660 	int status = -EINVAL;
1661 
1662 	if (!hnode) {
1663 		status = -EFAULT;
1664 		return status;
1665 	}
1666 	node_type = node_get_type(hnode);
1667 	if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) {
1668 		status = -EPERM;
1669 		return status;
1670 	}
1671 	if (dir == DSP_TONODE) {
1672 		if (index < MAX_INPUTS(hnode)) {
1673 			if (hnode->inputs[index].type == HOSTCONNECT) {
1674 				*chan_id = hnode->inputs[index].dev_id;
1675 				status = 0;
1676 			}
1677 		}
1678 	} else {
1679 		if (index < MAX_OUTPUTS(hnode)) {
1680 			if (hnode->outputs[index].type == HOSTCONNECT) {
1681 				*chan_id = hnode->outputs[index].dev_id;
1682 				status = 0;
1683 			}
1684 		}
1685 	}
1686 	return status;
1687 }
1688 
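/*
 *  Illustrative sketch (not in the original source): resolving the channel
 *  reserved for a host-to-node stream. Only TASK and XDAIS socket nodes
 *  carry channels, and only HOSTCONNECT streams map to one.
 *
 *	u32 chan_id;
 *
 *	if (!node_get_channel_id(hnode, DSP_TONODE, 0, &chan_id))
 *		pr_debug("input stream 0 uses channel %u\n", chan_id);
 */
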
1689 /*
1690  *  ======== node_get_message ========
1691  *  Purpose:
1692  *      Retrieve a message from a node on the DSP.
1693  */
1694 int node_get_message(struct node_object *hnode,
1695 			    struct dsp_msg *message, u32 utimeout)
1696 {
1697 	struct node_mgr *hnode_mgr;
1698 	enum node_type node_type;
1699 	struct bridge_drv_interface *intf_fxns;
1700 	int status = 0;
1701 	void *tmp_buf;
1702 	struct dsp_processorstate proc_state;
1703 	struct proc_object *hprocessor;
1704 
1705 	if (!hnode) {
1706 		status = -EFAULT;
1707 		goto func_end;
1708 	}
1709 	hprocessor = hnode->processor;
1710 	status = proc_get_state(hprocessor, &proc_state,
1711 				sizeof(struct dsp_processorstate));
1712 	if (status)
1713 		goto func_end;
1714 	/* If processor is in error state then don't attempt to get the
1715 	   message */
1716 	if (proc_state.proc_state == PROC_ERROR) {
1717 		status = -EPERM;
1718 		goto func_end;
1719 	}
1720 	hnode_mgr = hnode->node_mgr;
1721 	node_type = node_get_type(hnode);
1722 	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
1723 	    node_type != NODE_DAISSOCKET) {
1724 		status = -EPERM;
1725 		goto func_end;
1726 	}
1727 	/*  This function will block unless a message is available. Since
1728 	 *  DSPNode_RegisterNotify() allows notification when a message
1729 	 *  is available, the system can be designed so that
1730 	 *  DSPNode_GetMessage() is only called when a message is
1731 	 *  available. */
1732 	intf_fxns = hnode_mgr->intf_fxns;
1733 	status =
1734 	    (*intf_fxns->msg_get) (hnode->msg_queue_obj, message, utimeout);
1735 	/* Check if message contains SM descriptor */
1736 	if (status || !(message->cmd & DSP_RMSBUFDESC))
1737 		goto func_end;
1738 
1739 	/* Translate DSP byte addr to GPP Va. */
1740 	tmp_buf = cmm_xlator_translate(hnode->xlator,
1741 				       (void *)(message->arg1 *
1742 						hnode->node_mgr->
1743 						dsp_word_size), CMM_DSPPA2PA);
1744 	if (tmp_buf != NULL) {
1745 		/* now convert this GPP Pa to Va */
1746 		tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf,
1747 					       CMM_PA2VA);
1748 		if (tmp_buf != NULL) {
1749 			/* Adjust SM size in msg */
1750 			message->arg1 = (u32) tmp_buf;
1751 			message->arg2 *= hnode->node_mgr->dsp_word_size;
1752 		} else {
1753 			status = -ESRCH;
1754 		}
1755 	} else {
1756 		status = -ESRCH;
1757 	}
1758 func_end:
1759 	dev_dbg(bridge, "%s: hnode: %p message: %p utimeout: 0x%x\n", __func__,
1760 		hnode, message, utimeout);
1761 	return status;
1762 }
1763 
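/*
 *  Note on the DSP_RMSBUFDESC handling above (descriptive comment, not part
 *  of the original file): the DSP reports a shared-memory buffer by its DSP
 *  word address in arg1 and its length in DSP words in arg2. Multiplying by
 *  dsp_word_size turns arg1 into a DSP byte address, which the CMM
 *  translator maps first to a GPP physical address (CMM_DSPPA2PA) and then
 *  to a GPP virtual address (CMM_PA2VA); arg2 is scaled to bytes the same
 *  way. As a hypothetical example, with a dsp_word_size of 2, arg1 = 0x1000
 *  words becomes byte address 0x2000 before translation and arg2 = 64 words
 *  becomes 128 bytes.
 */
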
1764 /*
1765  *   ======== node_get_nldr_obj ========
1766  */
1767 int node_get_nldr_obj(struct node_mgr *hnode_mgr,
1768 			     struct nldr_object **nldr_ovlyobj)
1769 {
1770 	int status = 0;
1771 	struct node_mgr *node_mgr_obj = hnode_mgr;
1772 
1773 	if (!hnode_mgr)
1774 		status = -EFAULT;
1775 	else
1776 		*nldr_ovlyobj = node_mgr_obj->nldr_obj;
1777 
1778 	return status;
1779 }
1780 
1781 /*
1782  *  ======== node_get_strm_mgr ========
1783  *  Purpose:
1784  *      Returns the Stream manager.
1785  */
1786 int node_get_strm_mgr(struct node_object *hnode,
1787 			     struct strm_mgr **strm_man)
1788 {
1789 	int status = 0;
1790 
1791 	if (!hnode)
1792 		status = -EFAULT;
1793 	else
1794 		*strm_man = hnode->node_mgr->strm_mgr_obj;
1795 
1796 	return status;
1797 }
1798 
1799 /*
1800  *  ======== node_get_load_type ========
1801  */
1802 enum nldr_loadtype node_get_load_type(struct node_object *hnode)
1803 {
1804 	if (!hnode) {
1805 		dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
1806 		return -1;
1807 	} else {
1808 		return hnode->dcd_props.obj_data.node_obj.load_type;
1809 	}
1810 }
1811 
1812 /*
1813  *  ======== node_get_timeout ========
1814  *  Purpose:
1815  *      Returns the timeout value for this node.
1816  */
1817 u32 node_get_timeout(struct node_object *hnode)
1818 {
1819 	if (!hnode) {
1820 		dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
1821 		return 0;
1822 	} else {
1823 		return hnode->timeout;
1824 	}
1825 }
1826 
1827 /*
1828  *  ======== node_get_type ========
1829  *  Purpose:
1830  *      Returns the node type.
1831  */
1832 enum node_type node_get_type(struct node_object *hnode)
1833 {
1834 	enum node_type node_type;
1835 
1836 	if (hnode == (struct node_object *)DSP_HGPPNODE)
1837 		node_type = NODE_GPP;
1838 	else {
1839 		if (!hnode)
1840 			node_type = -1;
1841 		else
1842 			node_type = hnode->ntype;
1843 	}
1844 	return node_type;
1845 }
1846 
1847 /*
1848  *  ======== node_on_exit ========
1849  *  Purpose:
1850  *      Gets called when RMS_EXIT is received for a node.
1851  */
1852 void node_on_exit(struct node_object *hnode, s32 node_status)
1853 {
1854 	if (!hnode)
1855 		return;
1856 
1857 	/* Set node state to done */
1858 	NODE_SET_STATE(hnode, NODE_DONE);
1859 	hnode->exit_status = node_status;
1860 	if (hnode->loaded && hnode->phase_split) {
1861 		(void)hnode->node_mgr->nldr_fxns.unload(hnode->
1862 							     nldr_node_obj,
1863 							     NLDR_EXECUTE);
1864 		hnode->loaded = false;
1865 	}
1866 	/* Unblock call to node_terminate */
1867 	(void)sync_set_event(hnode->sync_done);
1868 	/* Notify clients */
1869 	proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
1870 	ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
1871 }
1872 
1873 /*
1874  *  ======== node_pause ========
1875  *  Purpose:
1876  *      Suspend execution of a node currently running on the DSP.
1877  */
1878 int node_pause(struct node_object *hnode)
1879 {
1880 	struct node_object *pnode = (struct node_object *)hnode;
1881 	enum node_type node_type;
1882 	enum node_state state;
1883 	struct node_mgr *hnode_mgr;
1884 	int status = 0;
1885 	u32 proc_id;
1886 	struct dsp_processorstate proc_state;
1887 	struct proc_object *hprocessor;
1888 
1889 	if (!hnode) {
1890 		status = -EFAULT;
1891 	} else {
1892 		node_type = node_get_type(hnode);
1893 		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
1894 			status = -EPERM;
1895 	}
1896 	if (status)
1897 		goto func_end;
1898 
1899 	status = proc_get_processor_id(pnode->processor, &proc_id);
1900 
1901 	if (proc_id == IVA_UNIT)
1902 		status = -ENOSYS;
1903 
1904 	if (!status) {
1905 		hnode_mgr = hnode->node_mgr;
1906 
1907 		/* Enter critical section */
1908 		mutex_lock(&hnode_mgr->node_mgr_lock);
1909 		state = node_get_state(hnode);
1910 		/* Check node state */
1911 		if (state != NODE_RUNNING)
1912 			status = -EBADR;
1913 
1914 		if (status)
1915 			goto func_cont;
1916 		hprocessor = hnode->processor;
1917 		status = proc_get_state(hprocessor, &proc_state,
1918 				sizeof(struct dsp_processorstate));
1919 		if (status)
1920 			goto func_cont;
1921 		/* If processor is in error state then don't attempt
1922 		   to send the message */
1923 		if (proc_state.proc_state == PROC_ERROR) {
1924 			status = -EPERM;
1925 			goto func_cont;
1926 		}
1927 
1928 		status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
1929 			hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY],
1930 			hnode->node_env, NODE_SUSPENDEDPRI);
1931 
1932 		/* Update state */
1933 		if (status >= 0)
1934 			NODE_SET_STATE(hnode, NODE_PAUSED);
1935 
1936 func_cont:
1937 		/* End of sync_enter_cs */
1938 		/* Leave critical section */
1939 		mutex_unlock(&hnode_mgr->node_mgr_lock);
1940 		if (status >= 0) {
1941 			proc_notify_clients(hnode->processor,
1942 					    DSP_NODESTATECHANGE);
1943 			ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
1944 		}
1945 	}
1946 func_end:
1947 	dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
1948 	return status;
1949 }
1950 
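/*
 *  Illustrative sketch (not in the original source): pausing a running task
 *  node and resuming it later. node_pause() drops the node to
 *  NODE_SUSPENDEDPRI on the DSP and marks it NODE_PAUSED; node_run()
 *  restores its saved priority and marks it NODE_RUNNING again.
 *
 *	if (!node_pause(hnode)) {
 *		status = node_run(hnode);
 *	}
 */
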
1951 /*
1952  *  ======== node_put_message ========
1953  *  Purpose:
1954  *      Send a message to a message node, task node, or XDAIS socket node. This
1955  *      function will block until the message stream can accommodate the
1956  *      message, or a timeout occurs.
1957  */
1958 int node_put_message(struct node_object *hnode,
1959 			    const struct dsp_msg *pmsg, u32 utimeout)
1960 {
1961 	struct node_mgr *hnode_mgr = NULL;
1962 	enum node_type node_type;
1963 	struct bridge_drv_interface *intf_fxns;
1964 	enum node_state state;
1965 	int status = 0;
1966 	void *tmp_buf;
1967 	struct dsp_msg new_msg;
1968 	struct dsp_processorstate proc_state;
1969 	struct proc_object *hprocessor;
1970 
1971 	if (!hnode) {
1972 		status = -EFAULT;
1973 		goto func_end;
1974 	}
1975 	hprocessor = hnode->processor;
1976 	status = proc_get_state(hprocessor, &proc_state,
1977 				sizeof(struct dsp_processorstate));
1978 	if (status)
1979 		goto func_end;
1980 	/* If processor is in bad state then don't attempt sending the
1981 	   message */
1982 	if (proc_state.proc_state == PROC_ERROR) {
1983 		status = -EPERM;
1984 		goto func_end;
1985 	}
1986 	hnode_mgr = hnode->node_mgr;
1987 	node_type = node_get_type(hnode);
1988 	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
1989 	    node_type != NODE_DAISSOCKET)
1990 		status = -EPERM;
1991 
1992 	if (!status) {
1993 		/*  Check node state. Can't send messages to a node after
1994 		 *  we've sent the RMS_EXIT command. There is still the
1995 		 *  possibility that node_terminate can be called after we've
1996 		 *  checked the state. Could add another SYNC object to
1997 		 *  prevent this (can't use node_mgr_lock, since we don't
1998 		 *  want to block other NODE functions). However, the node may
1999 		 *  still exit on its own, before this message is sent. */
2000 		mutex_lock(&hnode_mgr->node_mgr_lock);
2001 		state = node_get_state(hnode);
2002 		if (state == NODE_TERMINATING || state == NODE_DONE)
2003 			status = -EBADR;
2004 
2005 		/* end of sync_enter_cs */
2006 		mutex_unlock(&hnode_mgr->node_mgr_lock);
2007 	}
2008 	if (status)
2009 		goto func_end;
2010 
2011 	/* assign pmsg values to new msg */
2012 	new_msg = *pmsg;
2013 	/* Now, check if message contains a SM buffer descriptor */
2014 	if (pmsg->cmd & DSP_RMSBUFDESC) {
2015 		/* Translate GPP Va to DSP physical buf Ptr. */
2016 		tmp_buf = cmm_xlator_translate(hnode->xlator,
2017 					       (void *)new_msg.arg1,
2018 					       CMM_VA2DSPPA);
2019 		if (tmp_buf != NULL) {
2020 			/* got translation, convert to MAUs in msg */
2021 			if (hnode->node_mgr->dsp_word_size != 0) {
2022 				new_msg.arg1 =
2023 				    (u32) tmp_buf /
2024 				    hnode->node_mgr->dsp_word_size;
2025 				/* MAUs */
2026 				new_msg.arg2 /= hnode->node_mgr->
2027 				    dsp_word_size;
2028 			} else {
2029 				pr_err("%s: dsp_word_size is zero!\n",
2030 				       __func__);
2031 				status = -EPERM;	/* bad DSPWordSize */
2032 			}
2033 		} else {	/* failed to translate buffer address */
2034 			status = -ESRCH;
2035 		}
2036 	}
2037 	if (!status) {
2038 		intf_fxns = hnode_mgr->intf_fxns;
2039 		status = (*intf_fxns->msg_put) (hnode->msg_queue_obj,
2040 						    &new_msg, utimeout);
2041 	}
2042 func_end:
2043 	dev_dbg(bridge, "%s: hnode: %p pmsg: %p utimeout: 0x%x, "
2044 		"status 0x%x\n", __func__, hnode, pmsg, utimeout, status);
2045 	return status;
2046 }
2047 
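/*
 *  Illustrative sketch (not in the original source): sending a plain
 *  message to a node. The cmd/arg1/arg2 values here are arbitrary,
 *  application-defined examples; only messages whose cmd has the
 *  DSP_RMSBUFDESC bit set go through the shared-memory address translation
 *  above (GPP VA to DSP physical address, then bytes to MAUs).
 *
 *	struct dsp_msg msg = { .cmd = 0x1, .arg1 = 42, .arg2 = 0 };
 *
 *	status = node_put_message(hnode, &msg, hnode->timeout);
 */
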
2048 /*
2049  *  ======== node_register_notify ========
2050  *  Purpose:
2051  *      Register to be notified on specific events for this node.
2052  */
2053 int node_register_notify(struct node_object *hnode, u32 event_mask,
2054 				u32 notify_type,
2055 				struct dsp_notification *hnotification)
2056 {
2057 	struct bridge_drv_interface *intf_fxns;
2058 	int status = 0;
2059 
2060 	if (!hnode) {
2061 		status = -EFAULT;
2062 	} else {
2063 		/* Check if event mask is a valid node related event */
2064 		if (event_mask & ~(DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
2065 			status = -EINVAL;
2066 
2067 		/* Check if notify type is valid */
2068 		if (notify_type != DSP_SIGNALEVENT)
2069 			status = -EINVAL;
2070 
2071 		/* Only one Notification can be registered at a
2072 		 * time - Limitation */
2073 		if (event_mask == (DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
2074 			status = -EINVAL;
2075 	}
2076 	if (!status) {
2077 		if (event_mask == DSP_NODESTATECHANGE) {
2078 			status = ntfy_register(hnode->ntfy_obj, hnotification,
2079 					       event_mask & DSP_NODESTATECHANGE,
2080 					       notify_type);
2081 		} else {
2082 			/* Send Message part of event mask to msg_ctrl */
2083 			intf_fxns = hnode->node_mgr->intf_fxns;
2084 			status = (*intf_fxns->msg_register_notify)
2085 			    (hnode->msg_queue_obj,
2086 			     event_mask & DSP_NODEMESSAGEREADY, notify_type,
2087 			     hnotification);
2088 		}
2089 
2090 	}
2091 	dev_dbg(bridge, "%s: hnode: %p event_mask: 0x%x notify_type: 0x%x "
2092 		"hnotification: %p status 0x%x\n", __func__, hnode,
2093 		event_mask, notify_type, hnotification, status);
2094 	return status;
2095 }
2096 
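/*
 *  Illustrative sketch (not in the original source): registering for the
 *  message-ready event. Note the limitation enforced above: only one of
 *  DSP_NODESTATECHANGE or DSP_NODEMESSAGEREADY may be registered per call,
 *  and only DSP_SIGNALEVENT notifications are supported.
 *
 *	struct dsp_notification notification = { 0 };
 *
 *	status = node_register_notify(hnode, DSP_NODEMESSAGEREADY,
 *				      DSP_SIGNALEVENT, &notification);
 */
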
2097 /*
2098  *  ======== node_run ========
2099  *  Purpose:
2100  *      Start execution of a node's execute phase, or resume execution of a node
2101  *      that has been suspended (via node_pause()) on the DSP. Load the
2102  *      node's execute function if necessary.
2103  */
2104 int node_run(struct node_object *hnode)
2105 {
2106 	struct node_object *pnode = (struct node_object *)hnode;
2107 	struct node_mgr *hnode_mgr;
2108 	enum node_type node_type;
2109 	enum node_state state;
2110 	u32 ul_execute_fxn;
2111 	u32 ul_fxn_addr;
2112 	int status = 0;
2113 	u32 proc_id;
2114 	struct bridge_drv_interface *intf_fxns;
2115 	struct dsp_processorstate proc_state;
2116 	struct proc_object *hprocessor;
2117 
2118 	if (!hnode) {
2119 		status = -EFAULT;
2120 		goto func_end;
2121 	}
2122 	hprocessor = hnode->processor;
2123 	status = proc_get_state(hprocessor, &proc_state,
2124 				sizeof(struct dsp_processorstate));
2125 	if (status)
2126 		goto func_end;
2127 	/* If processor is in error state then don't attempt to run the node */
2128 	if (proc_state.proc_state == PROC_ERROR) {
2129 		status = -EPERM;
2130 		goto func_end;
2131 	}
2132 	node_type = node_get_type(hnode);
2133 	if (node_type == NODE_DEVICE)
2134 		status = -EPERM;
2135 	if (status)
2136 		goto func_end;
2137 
2138 	hnode_mgr = hnode->node_mgr;
2139 	if (!hnode_mgr) {
2140 		status = -EFAULT;
2141 		goto func_end;
2142 	}
2143 	intf_fxns = hnode_mgr->intf_fxns;
2144 	/* Enter critical section */
2145 	mutex_lock(&hnode_mgr->node_mgr_lock);
2146 
2147 	state = node_get_state(hnode);
2148 	if (state != NODE_CREATED && state != NODE_PAUSED)
2149 		status = -EBADR;
2150 
2151 	if (!status)
2152 		status = proc_get_processor_id(pnode->processor, &proc_id);
2153 
2154 	if (status)
2155 		goto func_cont1;
2156 
2157 	if ((proc_id != DSP_UNIT) && (proc_id != IVA_UNIT))
2158 		goto func_cont1;
2159 
2160 	if (state == NODE_CREATED) {
2161 		/* If node's execute function is not loaded, load it */
2162 		if (!(hnode->loaded) && hnode->phase_split) {
2163 			status =
2164 			    hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
2165 							  NLDR_EXECUTE);
2166 			if (!status) {
2167 				hnode->loaded = true;
2168 			} else {
2169 				pr_err("%s: fail - load execute code: 0x%x\n",
2170 				       __func__, status);
2171 			}
2172 		}
2173 		if (!status) {
2174 			/* Get address of node's execute function */
2175 			if (proc_id == IVA_UNIT)
2176 				ul_execute_fxn = (u32) hnode->node_env;
2177 			else {
2178 				status = get_fxn_address(hnode, &ul_execute_fxn,
2179 							 EXECUTEPHASE);
2180 			}
2181 		}
2182 		if (!status) {
2183 			ul_fxn_addr = hnode_mgr->fxn_addrs[RMSEXECUTENODE];
2184 			status =
2185 			    disp_node_run(hnode_mgr->disp_obj, hnode,
2186 					  ul_fxn_addr, ul_execute_fxn,
2187 					  hnode->node_env);
2188 		}
2189 	} else if (state == NODE_PAUSED) {
2190 		ul_fxn_addr = hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY];
2191 		status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
2192 						   ul_fxn_addr, hnode->node_env,
2193 						   NODE_GET_PRIORITY(hnode));
2194 	} else {
2195 		/* We should never get here */
2196 	}
2197 func_cont1:
2198 	/* Update node state. */
2199 	if (status >= 0)
2200 		NODE_SET_STATE(hnode, NODE_RUNNING);
2201 	else			/* Set state back to previous value */
2202 		NODE_SET_STATE(hnode, state);
2203 	/*End of sync_enter_cs */
2204 	/* Exit critical section */
2205 	mutex_unlock(&hnode_mgr->node_mgr_lock);
2206 	if (status >= 0) {
2207 		proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
2208 		ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
2209 	}
2210 func_end:
2211 	dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
2212 	return status;
2213 }
2214 
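/*
 *  Descriptive note (not in the original source): node_run() dispatches one
 *  of two RMS calls depending on the current state. From NODE_CREATED it
 *  loads the execute phase if needed, resolves its entry point with
 *  get_fxn_address(..., EXECUTEPHASE) and issues fxn_addrs[RMSEXECUTENODE]
 *  via disp_node_run(); from NODE_PAUSED it restores the node's saved
 *  priority through fxn_addrs[RMSCHANGENODEPRIORITY]. In both cases success
 *  moves the node to NODE_RUNNING and clients are notified with
 *  DSP_NODESTATECHANGE.
 */
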
2215 /*
2216  *  ======== node_terminate ========
2217  *  Purpose:
2218  *      Signal a node running on the DSP that it should exit its execute phase
2219  *      function.
2220  */
2221 int node_terminate(struct node_object *hnode, int *pstatus)
2222 {
2223 	struct node_object *pnode = (struct node_object *)hnode;
2224 	struct node_mgr *hnode_mgr = NULL;
2225 	enum node_type node_type;
2226 	struct bridge_drv_interface *intf_fxns;
2227 	enum node_state state;
2228 	struct dsp_msg msg, killmsg;
2229 	int status = 0;
2230 	u32 proc_id, kill_time_out;
2231 	struct deh_mgr *hdeh_mgr;
2232 	struct dsp_processorstate proc_state;
2233 
2234 	if (!hnode || !hnode->node_mgr) {
2235 		status = -EFAULT;
2236 		goto func_end;
2237 	}
2238 	if (pnode->processor == NULL) {
2239 		status = -EFAULT;
2240 		goto func_end;
2241 	}
2242 	status = proc_get_processor_id(pnode->processor, &proc_id);
2243 
2244 	if (!status) {
2245 		hnode_mgr = hnode->node_mgr;
2246 		node_type = node_get_type(hnode);
2247 		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
2248 			status = -EPERM;
2249 	}
2250 	if (!status) {
2251 		/* Check node state */
2252 		mutex_lock(&hnode_mgr->node_mgr_lock);
2253 		state = node_get_state(hnode);
2254 		if (state != NODE_RUNNING) {
2255 			status = -EBADR;
2256 			/* Set the exit status if node terminated on
2257 			 * its own. */
2258 			if (state == NODE_DONE)
2259 				*pstatus = hnode->exit_status;
2260 
2261 		} else {
2262 			NODE_SET_STATE(hnode, NODE_TERMINATING);
2263 		}
2264 		/* end of sync_enter_cs */
2265 		mutex_unlock(&hnode_mgr->node_mgr_lock);
2266 	}
2267 	if (!status) {
2268 		/*
2269 		 *  Send exit message. Do not change state to NODE_DONE
2270 		 *  here. That will be done in callback.
2271 		 */
2272 		status = proc_get_state(pnode->processor, &proc_state,
2273 					sizeof(struct dsp_processorstate));
2274 		if (status)
2275 			goto func_cont;
2276 		/* If processor is in error state then don't attempt to send
2277 		 * a kill task command */
2278 		if (proc_state.proc_state == PROC_ERROR) {
2279 			status = -EPERM;
2280 			goto func_cont;
2281 		}
2282 
2283 		msg.cmd = RMS_EXIT;
2284 		msg.arg1 = hnode->node_env;
2285 		killmsg.cmd = RMS_KILLTASK;
2286 		killmsg.arg1 = hnode->node_env;
2287 		intf_fxns = hnode_mgr->intf_fxns;
2288 
2289 		if (hnode->timeout > MAXTIMEOUT)
2290 			kill_time_out = MAXTIMEOUT;
2291 		else
2292 			kill_time_out = (hnode->timeout) * 2;
2293 
2294 		status = (*intf_fxns->msg_put) (hnode->msg_queue_obj, &msg,
2295 						    hnode->timeout);
2296 		if (status)
2297 			goto func_cont;
2298 
2299 		/*
2300 		 * Wait on synchronization object that will be
2301 		 * posted in the callback on receiving RMS_EXIT
2302 		 * message, or by node_delete. Check for valid hnode,
2303 		 * in case posted by node_delete().
2304 		 */
2305 		status = sync_wait_on_event(hnode->sync_done,
2306 					    kill_time_out / 2);
2307 		if (status != ETIME)
2308 			goto func_cont;
2309 
2310 		status = (*intf_fxns->msg_put)(hnode->msg_queue_obj,
2311 						&killmsg, hnode->timeout);
2312 		if (status)
2313 			goto func_cont;
2314 		status = sync_wait_on_event(hnode->sync_done,
2315 					     kill_time_out / 2);
2316 		if (status) {
2317 			/*
2318 			 * The node did not acknowledge RMS_EXIT or RMS_KILLTASK
2319 			 * in time, so simulate a DSP exception to force recovery.
2320 			 */
2321 			dev_get_deh_mgr(hnode_mgr->dev_obj, &hdeh_mgr);
2322 			if (!hdeh_mgr)
2323 				goto func_cont;
2324 
2325 			bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, DSP_EXCEPTIONABORT);
2326 		}
2327 	}
2328 func_cont:
2329 	if (!status) {
2330 		/* Enter CS before getting exit status, in case node was
2331 		 * deleted. */
2332 		mutex_lock(&hnode_mgr->node_mgr_lock);
2333 		/* Make sure node wasn't deleted while we blocked */
2334 		if (!hnode) {
2335 			status = -EPERM;
2336 		} else {
2337 			*pstatus = hnode->exit_status;
2338 			dev_dbg(bridge, "%s: hnode: %p env 0x%x status 0x%x\n",
2339 				__func__, hnode, hnode->node_env, status);
2340 		}
2341 		mutex_unlock(&hnode_mgr->node_mgr_lock);
2342 	}			/*End of sync_enter_cs */
2343 func_end:
2344 	return status;
2345 }
2346 
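/*
 *  Descriptive note (not in the original source): the termination budget is
 *  derived from the node's own timeout,
 *
 *	kill_time_out = (hnode->timeout > MAXTIMEOUT) ?
 *			MAXTIMEOUT : 2 * hnode->timeout;
 *
 *  Half of that budget is spent waiting for the node to honour RMS_EXIT; if
 *  that wait times out (ETIME), RMS_KILLTASK is sent and the other half is
 *  waited. Only if the node still fails to signal sync_done is a DSP system
 *  error (DSP_SYSERROR / DSP_EXCEPTIONABORT) reported to the DEH manager.
 */
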
2347 /*
2348  *  ======== delete_node ========
2349  *  Purpose:
2350  *      Free GPP resources allocated in node_allocate() or node_connect().
2351  */
2352 static void delete_node(struct node_object *hnode,
2353 			struct process_context *pr_ctxt)
2354 {
2355 	struct node_mgr *hnode_mgr;
2356 	struct bridge_drv_interface *intf_fxns;
2357 	u32 i;
2358 	enum node_type node_type;
2359 	struct stream_chnl stream;
2360 	struct node_msgargs node_msg_args;
2361 	struct node_taskargs task_arg_obj;
2362 #ifdef DSP_DMM_DEBUG
2363 	struct dmm_object *dmm_mgr;
2364 	struct proc_object *p_proc_object =
2365 	    (struct proc_object *)hnode->processor;
2366 #endif
2367 	int status;
2368 	if (!hnode)
2369 		goto func_end;
2370 	hnode_mgr = hnode->node_mgr;
2371 	if (!hnode_mgr)
2372 		goto func_end;
2373 
2374 	node_type = node_get_type(hnode);
2375 	if (node_type != NODE_DEVICE) {
2376 		node_msg_args = hnode->create_args.asa.node_msg_args;
2377 		kfree(node_msg_args.pdata);
2378 
2379 		/* Free msg_ctrl queue */
2380 		if (hnode->msg_queue_obj) {
2381 			intf_fxns = hnode_mgr->intf_fxns;
2382 			(*intf_fxns->msg_delete_queue) (hnode->
2383 							    msg_queue_obj);
2384 			hnode->msg_queue_obj = NULL;
2385 		}
2386 
2387 		kfree(hnode->sync_done);
2388 
2389 		/* Free all stream info */
2390 		if (hnode->inputs) {
2391 			for (i = 0; i < MAX_INPUTS(hnode); i++) {
2392 				stream = hnode->inputs[i];
2393 				free_stream(hnode_mgr, stream);
2394 			}
2395 			kfree(hnode->inputs);
2396 			hnode->inputs = NULL;
2397 		}
2398 		if (hnode->outputs) {
2399 			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
2400 				stream = hnode->outputs[i];
2401 				free_stream(hnode_mgr, stream);
2402 			}
2403 			kfree(hnode->outputs);
2404 			hnode->outputs = NULL;
2405 		}
2406 		task_arg_obj = hnode->create_args.asa.task_arg_obj;
2407 		if (task_arg_obj.strm_in_def) {
2408 			for (i = 0; i < MAX_INPUTS(hnode); i++) {
2409 				kfree(task_arg_obj.strm_in_def[i].sz_device);
2410 				task_arg_obj.strm_in_def[i].sz_device = NULL;
2411 			}
2412 			kfree(task_arg_obj.strm_in_def);
2413 			task_arg_obj.strm_in_def = NULL;
2414 		}
2415 		if (task_arg_obj.strm_out_def) {
2416 			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
2417 				kfree(task_arg_obj.strm_out_def[i].sz_device);
2418 				task_arg_obj.strm_out_def[i].sz_device = NULL;
2419 			}
2420 			kfree(task_arg_obj.strm_out_def);
2421 			task_arg_obj.strm_out_def = NULL;
2422 		}
2423 		if (task_arg_obj.dsp_heap_res_addr) {
2424 			status = proc_un_map(hnode->processor, (void *)
2425 					     task_arg_obj.dsp_heap_addr,
2426 					     pr_ctxt);
2427 
2428 			status = proc_un_reserve_memory(hnode->processor,
2429 							(void *)
2430 							task_arg_obj.
2431 							dsp_heap_res_addr,
2432 							pr_ctxt);
2433 #ifdef DSP_DMM_DEBUG
2434 			status = dmm_get_handle(p_proc_object, &dmm_mgr);
2435 			if (dmm_mgr)
2436 				dmm_mem_map_dump(dmm_mgr);
2437 			else
2438 				status = DSP_EHANDLE;
2439 #endif
2440 		}
2441 	}
2442 	if (node_type != NODE_MESSAGE) {
2443 		kfree(hnode->stream_connect);
2444 		hnode->stream_connect = NULL;
2445 	}
2446 	kfree(hnode->str_dev_name);
2447 	hnode->str_dev_name = NULL;
2448 
2449 	if (hnode->ntfy_obj) {
2450 		ntfy_delete(hnode->ntfy_obj);
2451 		kfree(hnode->ntfy_obj);
2452 		hnode->ntfy_obj = NULL;
2453 	}
2454 
2455 	/* These were allocated in dcd_get_object_def (via node_allocate) */
2456 	kfree(hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn);
2457 	hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn = NULL;
2458 
2459 	kfree(hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn);
2460 	hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn = NULL;
2461 
2462 	kfree(hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn);
2463 	hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn = NULL;
2464 
2465 	kfree(hnode->dcd_props.obj_data.node_obj.str_i_alg_name);
2466 	hnode->dcd_props.obj_data.node_obj.str_i_alg_name = NULL;
2467 
2468 	/* Free all SM address translator resources */
2469 	kfree(hnode->xlator);
2470 	kfree(hnode->nldr_node_obj);
2471 	hnode->nldr_node_obj = NULL;
2472 	hnode->node_mgr = NULL;
2473 	kfree(hnode);
2474 	hnode = NULL;
2475 func_end:
2476 	return;
2477 }
2478 
2479 /*
2480  *  ======== delete_node_mgr ========
2481  *  Purpose:
2482  *      Frees the node manager.
2483  */
2484 static void delete_node_mgr(struct node_mgr *hnode_mgr)
2485 {
2486 	struct node_object *hnode, *tmp;
2487 
2488 	if (hnode_mgr) {
2489 		/* Free resources */
2490 		if (hnode_mgr->dcd_mgr)
2491 			dcd_destroy_manager(hnode_mgr->dcd_mgr);
2492 
2493 		/* Remove any elements remaining in lists */
2494 		list_for_each_entry_safe(hnode, tmp, &hnode_mgr->node_list,
2495 				list_elem) {
2496 			list_del(&hnode->list_elem);
2497 			delete_node(hnode, NULL);
2498 		}
2499 		mutex_destroy(&hnode_mgr->node_mgr_lock);
2500 		if (hnode_mgr->ntfy_obj) {
2501 			ntfy_delete(hnode_mgr->ntfy_obj);
2502 			kfree(hnode_mgr->ntfy_obj);
2503 		}
2504 
2505 		if (hnode_mgr->disp_obj)
2506 			disp_delete(hnode_mgr->disp_obj);
2507 
2508 		if (hnode_mgr->strm_mgr_obj)
2509 			strm_delete(hnode_mgr->strm_mgr_obj);
2510 
2511 		/* Delete the loader */
2512 		if (hnode_mgr->nldr_obj)
2513 			hnode_mgr->nldr_fxns.delete(hnode_mgr->nldr_obj);
2514 
2515 		kfree(hnode_mgr);
2516 	}
2517 }
2518 
2519 /*
2520  *  ======== fill_stream_connect ========
2521  *  Purpose:
2522  *      Fills stream information.
2523  */
2524 static void fill_stream_connect(struct node_object *node1,
2525 				struct node_object *node2,
2526 				u32 stream1, u32 stream2)
2527 {
2528 	u32 strm_index;
2529 	struct dsp_streamconnect *strm1 = NULL;
2530 	struct dsp_streamconnect *strm2 = NULL;
2531 	enum node_type node1_type = NODE_TASK;
2532 	enum node_type node2_type = NODE_TASK;
2533 
2534 	node1_type = node_get_type(node1);
2535 	node2_type = node_get_type(node2);
2536 	if (node1 != (struct node_object *)DSP_HGPPNODE) {
2537 
2538 		if (node1_type != NODE_DEVICE) {
2539 			strm_index = node1->num_inputs +
2540 			    node1->num_outputs - 1;
2541 			strm1 = &(node1->stream_connect[strm_index]);
2542 			strm1->cb_struct = sizeof(struct dsp_streamconnect);
2543 			strm1->this_node_stream_index = stream1;
2544 		}
2545 
2546 		if (node2 != (struct node_object *)DSP_HGPPNODE) {
2547 			/* NODE ==> NODE */
2548 			if (node1_type != NODE_DEVICE) {
2549 				strm1->connected_node = node2;
2550 				strm1->ui_connected_node_id = node2->node_uuid;
2551 				strm1->connected_node_stream_index = stream2;
2552 				strm1->connect_type = CONNECTTYPE_NODEOUTPUT;
2553 			}
2554 			if (node2_type != NODE_DEVICE) {
2555 				strm_index = node2->num_inputs +
2556 				    node2->num_outputs - 1;
2557 				strm2 = &(node2->stream_connect[strm_index]);
2558 				strm2->cb_struct =
2559 				    sizeof(struct dsp_streamconnect);
2560 				strm2->this_node_stream_index = stream2;
2561 				strm2->connected_node = node1;
2562 				strm2->ui_connected_node_id = node1->node_uuid;
2563 				strm2->connected_node_stream_index = stream1;
2564 				strm2->connect_type = CONNECTTYPE_NODEINPUT;
2565 			}
2566 		} else if (node1_type != NODE_DEVICE)
2567 			strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
2568 	} else {
2569 		/* GPP ==> NODE */
2570 		strm_index = node2->num_inputs + node2->num_outputs - 1;
2571 		strm2 = &(node2->stream_connect[strm_index]);
2572 		strm2->cb_struct = sizeof(struct dsp_streamconnect);
2573 		strm2->this_node_stream_index = stream2;
2574 		strm2->connect_type = CONNECTTYPE_GPPINPUT;
2575 	}
2576 }
2577 
2578 /*
2579  *  ======== fill_stream_def ========
2580  *  Purpose:
2581  *      Fills Stream attributes.
2582  */
2583 static void fill_stream_def(struct node_object *hnode,
2584 			    struct node_strmdef *pstrm_def,
2585 			    struct dsp_strmattr *pattrs)
2586 {
2587 	struct node_mgr *hnode_mgr = hnode->node_mgr;
2588 
2589 	if (pattrs != NULL) {
2590 		pstrm_def->num_bufs = pattrs->num_bufs;
2591 		pstrm_def->buf_size =
2592 		    pattrs->buf_size / hnode_mgr->dsp_data_mau_size;
2593 		pstrm_def->seg_id = pattrs->seg_id;
2594 		pstrm_def->buf_alignment = pattrs->buf_alignment;
2595 		pstrm_def->timeout = pattrs->timeout;
2596 	} else {
2597 		pstrm_def->num_bufs = DEFAULTNBUFS;
2598 		pstrm_def->buf_size =
2599 		    DEFAULTBUFSIZE / hnode_mgr->dsp_data_mau_size;
2600 		pstrm_def->seg_id = DEFAULTSEGID;
2601 		pstrm_def->buf_alignment = DEFAULTALIGNMENT;
2602 		pstrm_def->timeout = DEFAULTTIMEOUT;
2603 	}
2604 }
2605 
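/*
 *  Descriptive note (not in the original source): stream buffer sizes are
 *  given by callers in bytes but handed to the DSP side in data MAUs, hence
 *  the division by dsp_data_mau_size. As a hypothetical example, a
 *  dsp_data_mau_size of 2 would turn DEFAULTBUFSIZE (32 bytes) into a
 *  buf_size of 16 MAUs; get_proc_props() below initialises
 *  dsp_data_mau_size to DSPWORDSIZE.
 */
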
2606 /*
2607  *  ======== free_stream ========
2608  *  Purpose:
2609  *      Updates the channel mask and frees the pipe id.
2610  */
2611 static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
2612 {
2613 	/* Free the pipe id only once both connected nodes have been deleted. */
2614 	if (stream.type == NODECONNECT) {
2615 		if (test_bit(stream.dev_id, hnode_mgr->pipe_done_map)) {
2616 			/* The other node has already been deleted */
2617 			clear_bit(stream.dev_id, hnode_mgr->pipe_done_map);
2618 			clear_bit(stream.dev_id, hnode_mgr->pipe_map);
2619 		} else {
2620 			/* The other node has not been deleted yet */
2621 			set_bit(stream.dev_id, hnode_mgr->pipe_done_map);
2622 		}
2623 	} else if (stream.type == HOSTCONNECT) {
2624 		if (stream.dev_id < hnode_mgr->num_chnls) {
2625 			clear_bit(stream.dev_id, hnode_mgr->chnl_map);
2626 		} else if (stream.dev_id < (2 * hnode_mgr->num_chnls)) {
2627 			/* dsp-dma */
2628 			clear_bit(stream.dev_id - (1 * hnode_mgr->num_chnls),
2629 					hnode_mgr->dma_chnl_map);
2630 		} else if (stream.dev_id < (3 * hnode_mgr->num_chnls)) {
2631 			/* zero-copy */
2632 			clear_bit(stream.dev_id - (2 * hnode_mgr->num_chnls),
2633 					hnode_mgr->zc_chnl_map);
2634 		}
2635 	}
2636 }
2637 
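/*
 *  Descriptive note (not in the original source): for HOSTCONNECT streams
 *  the dev_id encodes both the channel number and the transport type by
 *  partitioning the id space in blocks of num_chnls:
 *
 *	[0, num_chnls)               regular channels  -> chnl_map
 *	[num_chnls, 2 * num_chnls)   DSP-DMA channels  -> dma_chnl_map
 *	[2 * num_chnls, 3 * num_chnls) zero-copy channels -> zc_chnl_map
 *
 *  free_stream() subtracts the block base before clearing the bit. For
 *  NODECONNECT streams the pipe id is returned to pipe_map only after both
 *  endpoints have been freed, which is tracked via pipe_done_map.
 */
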
2638 /*
2639  *  ======== get_fxn_address ========
2640  *  Purpose:
2641  *      Retrieves the address for create, execute or delete phase for a node.
2642  */
2643 static int get_fxn_address(struct node_object *hnode, u32 *fxn_addr,
2644 				  u32 phase)
2645 {
2646 	char *pstr_fxn_name = NULL;
2647 	struct node_mgr *hnode_mgr = hnode->node_mgr;
2648 	int status = 0;
2649 
2650 	switch (phase) {
2651 	case CREATEPHASE:
2652 		pstr_fxn_name =
2653 		    hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn;
2654 		break;
2655 	case EXECUTEPHASE:
2656 		pstr_fxn_name =
2657 		    hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn;
2658 		break;
2659 	case DELETEPHASE:
2660 		pstr_fxn_name =
2661 		    hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn;
2662 		break;
2663 	default:
2664 		/* Should never get here */
2665 		break;
2666 	}
2667 
2668 	status =
2669 	    hnode_mgr->nldr_fxns.get_fxn_addr(hnode->nldr_node_obj,
2670 						  pstr_fxn_name, fxn_addr);
2671 
2672 	return status;
2673 }
2674 
2675 /*
2676  *  ======== get_node_info ========
2677  *  Purpose:
2678  *      Retrieves the node information.
2679  */
2680 void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
2681 {
2682 	u32 i;
2683 
2684 	node_info->cb_struct = sizeof(struct dsp_nodeinfo);
2685 	node_info->nb_node_database_props =
2686 	    hnode->dcd_props.obj_data.node_obj.ndb_props;
2687 	node_info->execution_priority = hnode->prio;
2688 	node_info->device_owner = hnode->device_owner;
2689 	node_info->number_streams = hnode->num_inputs + hnode->num_outputs;
2690 	node_info->node_env = hnode->node_env;
2691 
2692 	node_info->ns_execution_state = node_get_state(hnode);
2693 
2694 	/* Copy stream connect data */
2695 	for (i = 0; i < hnode->num_inputs + hnode->num_outputs; i++)
2696 		node_info->sc_stream_connection[i] = hnode->stream_connect[i];
2697 
2698 }
2699 
2700 /*
2701  *  ======== get_node_props ========
2702  *  Purpose:
2703  *      Retrieve node properties.
2704  */
2705 static int get_node_props(struct dcd_manager *hdcd_mgr,
2706 				 struct node_object *hnode,
2707 				 const struct dsp_uuid *node_uuid,
2708 				 struct dcd_genericobj *dcd_prop)
2709 {
2710 	u32 len;
2711 	struct node_msgargs *pmsg_args;
2712 	struct node_taskargs *task_arg_obj;
2713 	enum node_type node_type = NODE_TASK;
2714 	struct dsp_ndbprops *pndb_props =
2715 	    &(dcd_prop->obj_data.node_obj.ndb_props);
2716 	int status = 0;
2717 	char sz_uuid[MAXUUIDLEN];
2718 
2719 	status = dcd_get_object_def(hdcd_mgr, (struct dsp_uuid *)node_uuid,
2720 				    DSP_DCDNODETYPE, dcd_prop);
2721 
2722 	if (!status) {
2723 		hnode->ntype = node_type = pndb_props->ntype;
2724 
2725 		/* Create UUID value to set in registry. */
2726 		snprintf(sz_uuid, MAXUUIDLEN, "%pUL", node_uuid);
2727 		dev_dbg(bridge, "(node) UUID: %s\n", sz_uuid);
2728 
2729 		/* Fill in message args that come from NDB */
2730 		if (node_type != NODE_DEVICE) {
2731 			pmsg_args = &(hnode->create_args.asa.node_msg_args);
2732 			pmsg_args->seg_id =
2733 			    dcd_prop->obj_data.node_obj.msg_segid;
2734 			pmsg_args->notify_type =
2735 			    dcd_prop->obj_data.node_obj.msg_notify_type;
2736 			pmsg_args->max_msgs = pndb_props->message_depth;
2737 			dev_dbg(bridge, "(node) Max Number of Messages: 0x%x\n",
2738 				pmsg_args->max_msgs);
2739 		} else {
2740 			/* Copy device name */
2741 			len = strlen(pndb_props->ac_name);
2742 			hnode->str_dev_name = kzalloc(len + 1, GFP_KERNEL);
2743 			if (hnode->str_dev_name == NULL) {
2744 				status = -ENOMEM;
2745 			} else {
2746 				strncpy(hnode->str_dev_name,
2747 					pndb_props->ac_name, len);
2748 			}
2749 		}
2750 	}
2751 	if (!status) {
2752 		/* Fill in create args that come from NDB */
2753 		if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
2754 			task_arg_obj = &(hnode->create_args.asa.task_arg_obj);
2755 			task_arg_obj->prio = pndb_props->prio;
2756 			task_arg_obj->stack_size = pndb_props->stack_size;
2757 			task_arg_obj->sys_stack_size =
2758 			    pndb_props->sys_stack_size;
2759 			task_arg_obj->stack_seg = pndb_props->stack_seg;
2760 			dev_dbg(bridge, "(node) Priority: 0x%x Stack Size: "
2761 				"0x%x words System Stack Size: 0x%x words "
2762 				"Stack Segment: 0x%x profile count : 0x%x\n",
2763 				task_arg_obj->prio, task_arg_obj->stack_size,
2764 				task_arg_obj->sys_stack_size,
2765 				task_arg_obj->stack_seg,
2766 				pndb_props->count_profiles);
2767 		}
2768 	}
2769 
2770 	return status;
2771 }
2772 
2773 /*
2774  *  ======== get_proc_props ========
2775  *  Purpose:
2776  *      Retrieve the processor properties.
2777  */
2778 static int get_proc_props(struct node_mgr *hnode_mgr,
2779 				 struct dev_object *hdev_obj)
2780 {
2781 	struct cfg_hostres *host_res;
2782 	struct bridge_dev_context *pbridge_context;
2783 	int status = 0;
2784 
2785 	status = dev_get_bridge_context(hdev_obj, &pbridge_context);
2786 	if (!pbridge_context)
2787 		status = -EFAULT;
2788 
2789 	if (!status) {
2790 		host_res = pbridge_context->resources;
2791 		if (!host_res)
2792 			return -EPERM;
2793 		hnode_mgr->chnl_offset = host_res->chnl_offset;
2794 		hnode_mgr->chnl_buf_size = host_res->chnl_buf_size;
2795 		hnode_mgr->num_chnls = host_res->num_chnls;
2796 
2797 		/*
2798 		 *  PROC will add an API to get dsp_processorinfo.
2799 		 *  Fill in default values for now.
2800 		 */
2801 		/* TODO -- Instead of hard coding, take from registry */
2802 		hnode_mgr->proc_family = 6000;
2803 		hnode_mgr->proc_type = 6410;
2804 		hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY;
2805 		hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY;
2806 		hnode_mgr->dsp_word_size = DSPWORDSIZE;
2807 		hnode_mgr->dsp_data_mau_size = DSPWORDSIZE;
2808 		hnode_mgr->dsp_mau_size = 1;
2809 
2810 	}
2811 	return status;
2812 }
2813 
2814 /*
2815  *  ======== node_get_uuid_props ========
2816  *  Purpose:
2817  *      Fetch Node UUID properties from DCD/DOF file.
2818  */
2819 int node_get_uuid_props(void *hprocessor,
2820 			       const struct dsp_uuid *node_uuid,
2821 			       struct dsp_ndbprops *node_props)
2822 {
2823 	struct node_mgr *hnode_mgr = NULL;
2824 	struct dev_object *hdev_obj;
2825 	int status = 0;
2826 	struct dcd_nodeprops dcd_node_props;
2827 	struct dsp_processorstate proc_state;
2828 
2829 	if (hprocessor == NULL || node_uuid == NULL) {
2830 		status = -EFAULT;
2831 		goto func_end;
2832 	}
2833 	status = proc_get_state(hprocessor, &proc_state,
2834 				sizeof(struct dsp_processorstate));
2835 	if (status)
2836 		goto func_end;
2837 	/* If processor is in error state then don't attempt
2838 	   to send the message */
2839 	if (proc_state.proc_state == PROC_ERROR) {
2840 		status = -EPERM;
2841 		goto func_end;
2842 	}
2843 
2844 	status = proc_get_dev_object(hprocessor, &hdev_obj);
2845 	if (hdev_obj) {
2846 		status = dev_get_node_manager(hdev_obj, &hnode_mgr);
2847 		if (hnode_mgr == NULL) {
2848 			status = -EFAULT;
2849 			goto func_end;
2850 		}
2851 	}
2852 
2853 	/*
2854 	 * Enter the critical section. This is needed because
2855 	 * dcd_get_object_def will ultimately end up calling dbll_open/close,
2856 	 * which needs to be protected in order to not corrupt the zlib manager
2857 	 * (COD).
2858 	 */
2859 	mutex_lock(&hnode_mgr->node_mgr_lock);
2860 
2861 	dcd_node_props.str_create_phase_fxn = NULL;
2862 	dcd_node_props.str_execute_phase_fxn = NULL;
2863 	dcd_node_props.str_delete_phase_fxn = NULL;
2864 	dcd_node_props.str_i_alg_name = NULL;
2865 
2866 	status = dcd_get_object_def(hnode_mgr->dcd_mgr,
2867 		(struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE,
2868 		(struct dcd_genericobj *)&dcd_node_props);
2869 
2870 	if (!status) {
2871 		*node_props = dcd_node_props.ndb_props;
2872 		kfree(dcd_node_props.str_create_phase_fxn);
2873 
2874 		kfree(dcd_node_props.str_execute_phase_fxn);
2875 
2876 		kfree(dcd_node_props.str_delete_phase_fxn);
2877 
2878 		kfree(dcd_node_props.str_i_alg_name);
2879 	}
2880 	/*  Leave the critical section, we're done. */
2881 	mutex_unlock(&hnode_mgr->node_mgr_lock);
2882 func_end:
2883 	return status;
2884 }
2885 
2886 /*
2887  *  ======== get_rms_fxns ========
2888  *  Purpose:
2889  *      Retrieve the RMS functions.
2890  */
2891 static int get_rms_fxns(struct node_mgr *hnode_mgr)
2892 {
2893 	s32 i;
2894 	struct dev_object *dev_obj = hnode_mgr->dev_obj;
2895 	int status = 0;
2896 
2897 	static char *psz_fxns[NUMRMSFXNS] = {
2898 		"RMS_queryServer",	/* RMSQUERYSERVER */
2899 		"RMS_configureServer",	/* RMSCONFIGURESERVER */
2900 		"RMS_createNode",	/* RMSCREATENODE */
2901 		"RMS_executeNode",	/* RMSEXECUTENODE */
2902 		"RMS_deleteNode",	/* RMSDELETENODE */
2903 		"RMS_changeNodePriority",	/* RMSCHANGENODEPRIORITY */
2904 		"RMS_readMemory",	/* RMSREADMEMORY */
2905 		"RMS_writeMemory",	/* RMSWRITEMEMORY */
2906 		"RMS_copy",	/* RMSCOPY */
2907 	};
2908 
2909 	for (i = 0; i < NUMRMSFXNS; i++) {
2910 		status = dev_get_symbol(dev_obj, psz_fxns[i],
2911 					&(hnode_mgr->fxn_addrs[i]));
2912 		if (status) {
2913 			if (status == -ESPIPE) {
2914 				/*
2915 				 *  May be loaded dynamically (in the future),
2916 				 *  but return an error for now.
2917 				 */
2918 				dev_dbg(bridge, "%s: RMS function: %s currently"
2919 					" not loaded\n", __func__, psz_fxns[i]);
2920 			} else {
2921 				dev_dbg(bridge, "%s: Symbol not found: %s "
2922 					"status = 0x%x\n", __func__,
2923 					psz_fxns[i], status);
2924 				break;
2925 			}
2926 		}
2927 	}
2928 
2929 	return status;
2930 }
2931 
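/*
 *  Descriptive note (not in the original source): psz_fxns[] is indexed by
 *  the RMS command codes, so after get_rms_fxns() completes,
 *  hnode_mgr->fxn_addrs[RMSEXECUTENODE] holds the DSP address of
 *  "RMS_executeNode", fxn_addrs[RMSCHANGENODEPRIORITY] that of
 *  "RMS_changeNodePriority", and so on. node_run() and node_pause() pass
 *  these addresses to the dispatcher when issuing commands to the resource
 *  manager server on the DSP.
 */
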
2932 /*
2933  *  ======== ovly ========
2934  *  Purpose:
2935  *      Called during overlay. Sends a command to RMS to copy a block of data.
2936  */
2937 static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
2938 		u32 ul_num_bytes, u32 mem_space)
2939 {
2940 	struct node_object *hnode = (struct node_object *)priv_ref;
2941 	struct node_mgr *hnode_mgr;
2942 	u32 ul_bytes = 0;
2943 	u32 ul_size;
2944 	u32 ul_timeout;
2945 	int status = 0;
2946 	struct bridge_dev_context *hbridge_context;
2947 	/* Function interface to Bridge driver*/
2948 	struct bridge_drv_interface *intf_fxns;
2949 
2950 	hnode_mgr = hnode->node_mgr;
2951 
2952 	ul_size = ul_num_bytes / hnode_mgr->dsp_word_size;
2953 	ul_timeout = hnode->timeout;
2954 
2955 	/* Call new MemCopy function */
2956 	intf_fxns = hnode_mgr->intf_fxns;
2957 	status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
2958 	if (!status) {
2959 		status =
2960 		    (*intf_fxns->brd_mem_copy) (hbridge_context,
2961 						dsp_run_addr, dsp_load_addr,
2962 						ul_num_bytes, (u32) mem_space);
2963 		if (!status)
2964 			ul_bytes = ul_num_bytes;
2965 		else
2966 			pr_debug("%s: failed to copy brd memory, status 0x%x\n",
2967 				 __func__, status);
2968 	} else {
2969 		pr_debug("%s: failed to get Bridge context, status 0x%x\n",
2970 			 __func__, status);
2971 	}
2972 
2973 	return ul_bytes;
2974 }
2975 
2976 /*
2977  *  ======== mem_write ========
2978  */
2979 static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
2980 		     u32 ul_num_bytes, u32 mem_space)
2981 {
2982 	struct node_object *hnode = (struct node_object *)priv_ref;
2983 	struct node_mgr *hnode_mgr;
2984 	u16 mem_sect_type;
2985 	u32 ul_timeout;
2986 	int status = 0;
2987 	struct bridge_dev_context *hbridge_context;
2988 	/* Function interface to Bridge driver */
2989 	struct bridge_drv_interface *intf_fxns;
2990 
2991 	hnode_mgr = hnode->node_mgr;
2992 
2993 	ul_timeout = hnode->timeout;
2994 	mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA;
2995 
2996 	/* Call new MemWrite function */
2997 	intf_fxns = hnode_mgr->intf_fxns;
2998 	status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
2999 	status = (*intf_fxns->brd_mem_write) (hbridge_context, pbuf,
3000 					dsp_add, ul_num_bytes, mem_sect_type);
3001 
3002 	return ul_num_bytes;
3003 }
3004 
3005 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
3006 /*
3007  *  ======== node_find_addr ========
3008  */
3009 int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
3010 		u32 offset_range, void *sym_addr_output, char *sym_name)
3011 {
3012 	struct node_object *node_obj;
3013 	int status = -ENOENT;
3014 
3015 	list_for_each_entry(node_obj, &node_mgr->node_list, list_elem) {
3016 		status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr,
3017 			offset_range, sym_addr_output, sym_name);
3018 		if (!status) {
3019 			pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__,
3020 				 (unsigned int) node_mgr,
3021 				 sym_addr, offset_range,
3022 				 (unsigned int) sym_addr_output, sym_name);
3023 			break;
3024 		}
3025 	}
3026 
3027 	return status;
3028 }
3029 #endif
3030