// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor (SCP)
 * and the Application Processors (AP). The Message Handling Unit (MHU)
 * provides a mechanism for inter-processor communication between the SCP's
 * Cortex M3 and the AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * DVFS of various power domains including the core/cluster, configuration
 * of certain system clocks, thermal sensors and many others.
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/processor.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include "common.h"
#include "notify.h"

#include "raw_mode.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

static DEFINE_IDA(scmi_id);

static DEFINE_IDR(scmi_protocols);
static DEFINE_SPINLOCK(protocol_lock);

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id of transfers for debug & profiling purposes */
static atomic_t transfer_last_id;

static struct dentry *scmi_top_dentry;

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 * @max_msg: Maximum number of messages that can be pending
 * @free_xfers: A free list of xfers available for use. It is initialized with
 *		a number of xfers equal to the maximum allowed in-flight
 *		messages.
 * @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
 *		   currently in-flight messages.
 */
struct scmi_xfers_info {
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
	int max_msg;
	struct hlist_head free_xfers;
	DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
};
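
/*
 * Illustrative sketch (not part of the driver): how a sequence number maps
 * into @pending_xfers via the <linux/hashtable.h> API. The helper name below
 * is hypothetical; the real lookup is performed by
 * scmi_xfer_lookup_unlocked() further down using the XFER_FIND() macro.
 *
 *	static struct scmi_xfer *example_seq_lookup(struct scmi_xfers_info *minfo,
 *						    u16 seq)
 *	{
 *		struct scmi_xfer *xfer;
 *
 *		// Walks only the bucket that hashing 'seq' selects
 *		hash_for_each_possible(minfo->pending_xfers, xfer, node, seq)
 *			if (xfer->hdr.seq == seq)
 *				return xfer;
 *
 *		return NULL;
 *	}
 */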

/**
 * struct scmi_protocol_instance  - Describe an initialized protocol instance.
 * @handle: Reference to the SCMI handle associated to this protocol instance.
 * @proto: A reference to the protocol descriptor.
 * @gid: A reference for per-protocol devres management.
 * @users: A refcount to track effective users of this protocol.
 * @priv: Reference for optional protocol private data.
 * @version: Protocol version supported by the platform as detected at runtime.
 * @ph: An embedded protocol handle that will be passed down to protocol
 *	initialization code to identify this instance.
 *
 * Each protocol is initialized independently once for each SCMI platform in
 * which it is defined by the DT and implemented by the SCMI server fw.
 */
struct scmi_protocol_instance {
	const struct scmi_handle	*handle;
	const struct scmi_protocol	*proto;
	void				*gid;
	refcount_t			users;
	void				*priv;
	unsigned int			version;
	struct scmi_protocol_handle	ph;
};

#define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)

/**
 * struct scmi_debug_info  - Debug common info
 * @top_dentry: A reference to the top debugfs dentry
 * @name: Name of this SCMI instance
 * @type: Type of this SCMI instance
 * @is_atomic: Flag to state if the transport of this instance is atomic
 */
struct scmi_debug_info {
	struct dentry *top_dentry;
	const char *name;
	const char *type;
	bool is_atomic;
};

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @id: A sequence number starting from zero identifying this instance
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols: IDR for protocols' instance descriptors initialized for
 *	       this SCMI instance: populated on protocol's first attempted
 *	       usage.
 * @protocols_mtx: A mutex to protect protocol instances initialization.
 * @protocols_imp: List of protocols implemented, currently maximum of
 *		   scmi_revision_info.num_protocols elements allocated by the
 *		   base protocol
 * @active_protocols: IDR storing device_nodes for protocols actually defined
 *		      in the DT and confirmed as implemented by fw.
 * @atomic_threshold: Optional system-wide DT-configured threshold, expressed
 *		      in microseconds, for atomic operations.
 *		      Only SCMI synchronous commands reported by the platform
 *		      to have an execution latency less than or equal to the
 *		      threshold should be considered for atomic mode operation:
 *		      such a decision is finally left up to the SCMI drivers.
 * @notify_priv: Pointer to private data structure specific to notifications.
 * @node: List head
 * @users: Number of users of this instance
 * @bus_nb: A notifier to listen for device bind/unbind on the scmi bus
 * @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
 *		bus
 * @devreq_mtx: A mutex to serialize device creation for this SCMI instance
 * @dbg: A pointer to debugfs related data (if any)
 * @raw: An opaque reference handle used by SCMI Raw mode.
 */
struct scmi_info {
	int id;
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct scmi_xfers_info rx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	struct idr protocols;
	/* Ensure mutual exclusive access to protocols instance array */
	struct mutex protocols_mtx;
	u8 *protocols_imp;
	struct idr active_protocols;
	unsigned int atomic_threshold;
	void *notify_priv;
	struct list_head node;
	int users;
	struct notifier_block bus_nb;
	struct notifier_block dev_req_nb;
	/* Serialize device creation process for this instance */
	struct mutex devreq_mtx;
	struct scmi_debug_info *dbg;
	void *raw;
};

#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
#define bus_nb_to_scmi_info(nb)	container_of(nb, struct scmi_info, bus_nb)
#define req_nb_to_scmi_info(nb)	container_of(nb, struct scmi_info, dev_req_nb)

static const struct scmi_protocol *scmi_protocol_get(int protocol_id)
{
	const struct scmi_protocol *proto;

	proto = idr_find(&scmi_protocols, protocol_id);
	if (!proto || !try_module_get(proto->owner)) {
		pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
		return NULL;
	}

	pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);

	return proto;
}

static void scmi_protocol_put(int protocol_id)
{
	const struct scmi_protocol *proto;

	proto = idr_find(&scmi_protocols, protocol_id);
	if (proto)
		module_put(proto->owner);
}

int scmi_protocol_register(const struct scmi_protocol *proto)
{
	int ret;

	if (!proto) {
		pr_err("invalid protocol\n");
		return -EINVAL;
	}

	if (!proto->instance_init) {
		pr_err("missing init for protocol 0x%x\n", proto->id);
		return -EINVAL;
	}

	spin_lock(&protocol_lock);
	ret = idr_alloc(&scmi_protocols, (void *)proto,
			proto->id, proto->id + 1, GFP_ATOMIC);
	spin_unlock(&protocol_lock);
	if (ret != proto->id) {
		pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
		       proto->id, ret);
		return ret;
	}

	pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);

	return 0;
}
EXPORT_SYMBOL_GPL(scmi_protocol_register);
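
/*
 * Illustrative sketch (not part of the driver): a minimal protocol
 * implementation registering itself with the core. The protocol ID and the
 * init hook below are hypothetical placeholders; in-tree protocols normally
 * wrap this register/unregister pair via the
 * DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER() helper instead of open-coding it.
 *
 *	static int example_proto_init(const struct scmi_protocol_handle *ph)
 *	{
 *		u32 version;
 *
 *		return ph->xops->version_get(ph, &version);
 *	}
 *
 *	static const struct scmi_protocol example_proto = {
 *		.id = 0x99,		// hypothetical protocol number
 *		.owner = THIS_MODULE,
 *		.instance_init = &example_proto_init,
 *	};
 *
 *	// module init:	scmi_protocol_register(&example_proto);
 *	// module exit:	scmi_protocol_unregister(&example_proto);
 */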

void scmi_protocol_unregister(const struct scmi_protocol *proto)
{
	spin_lock(&protocol_lock);
	idr_remove(&scmi_protocols, proto->id);
	spin_unlock(&protocol_lock);

	pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
}
EXPORT_SYMBOL_GPL(scmi_protocol_unregister);

/**
 * scmi_create_protocol_devices  - Create devices for all pending requests for
 * this SCMI instance.
 *
 * @np: The device node describing the protocol
 * @info: The SCMI instance descriptor
 * @prot_id: The protocol ID
 * @name: The optional name of the device to be created: if not provided this
 *	  call will lead to the creation of all the devices currently requested
 *	  for the specified protocol.
 */
static void scmi_create_protocol_devices(struct device_node *np,
					 struct scmi_info *info,
					 int prot_id, const char *name)
{
	struct scmi_device *sdev;

	mutex_lock(&info->devreq_mtx);
	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (name && !sdev)
		dev_err(info->dev,
			"failed to create device for protocol 0x%X (%s)\n",
			prot_id, name);
	mutex_unlock(&info->devreq_mtx);
}

static void scmi_destroy_protocol_devices(struct scmi_info *info,
					  int prot_id, const char *name)
{
	mutex_lock(&info->devreq_mtx);
	scmi_device_destroy(info->dev, prot_id, name);
	mutex_unlock(&info->devreq_mtx);
}

void scmi_notification_instance_data_set(const struct scmi_handle *handle,
					 void *priv)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->notify_priv = priv;
	/* Ensure updated protocol private data is visible */
	smp_wmb();
}

void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Ensure protocols_private_data has been updated */
	smp_rmb();
	return info->notify_priv;
}

/**
 * scmi_xfer_token_set  - Reserve and set new token for the xfer at hand
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 *
 * Pick the next unused monotonically increasing token and set it into
 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
 * of incorrect association of a late and expired xfer with a live in-flight
 * transaction, both happening to re-use the same token identifier.
 *
 * Since the platform is NOT required to answer our request in-order we should
 * account for a few rare but possible scenarios:
 *
 *  - exactly 'next_token' may NOT be available so pick xfer_id >= next_token
 *    using find_next_zero_bit() starting from candidate next_token bit
 *
 *  - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but
 *    there are plenty of free tokens at the start, so try a second pass using
 *    find_next_zero_bit() and starting from 0.
 *
 *  X = used in-flight
 *
 * Normal
 * ------
 *
 *		|- xfer_id picked
 *   -----------+----------------------------------------------------------
 *   | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
 *   ----------------------------------------------------------------------
 *		^
 *		|- next_token
 *
 * Out-of-order pending at start
 * -----------------------------
 *
 *	  |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
 *   ----------------------------------------------------------------------
 *    ^
 *    |- next_token
 *
 *
 * Out-of-order pending at end
 * ---------------------------
 *
 *	  |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
 *   ----------------------------------------------------------------------
 *								^
 *								|- next_token
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: 0 on Success or error
 */
static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
			       struct scmi_xfer *xfer)
{
	unsigned long xfer_id, next_token;

	/*
	 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
	 * using the pre-allocated transfer_id as a base.
	 * Note that the global transfer_id is shared across all message types
	 * so there could be holes in the allocated set of monotonic sequence
	 * numbers, but that is going to limit the effectiveness of the
	 * mitigation only in very rare limit conditions.
	 */
	next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));

	/* Pick the next available xfer_id >= next_token */
	xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
				     MSG_TOKEN_MAX, next_token);
	if (xfer_id == MSG_TOKEN_MAX) {
		/*
		 * After heavily out-of-order responses, there are no free
		 * tokens ahead, but only at start of xfer_alloc_table so
		 * try again from the beginning.
		 */
		xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
					     MSG_TOKEN_MAX, 0);
		/*
		 * Something is wrong if we got here since there can be a
		 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
		 * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
		 */
		if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
			return -ENOMEM;
	}

	/* Update +/- last_token accordingly if we skipped some hole */
	if (xfer_id != next_token)
		atomic_add((int)(xfer_id - next_token), &transfer_last_id);

	xfer->hdr.seq = (u16)xfer_id;

	return 0;
}
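
/*
 * Worked example (illustrative only): suppose MSG_TOKEN_MAX == 8, the
 * allocation bitmap holds 0b11100111 (bits 0-2 and 5-7 set) and the candidate
 * next_token is 5. The first find_next_zero_bit() pass finds no free bit in
 * [5, 7] and returns 8 (== MSG_TOKEN_MAX), so the second pass restarts from 0
 * and picks bit 3. transfer_last_id is then adjusted by (int)(3 - 5) == -2,
 * so that subsequent candidate tokens stay monotonic with respect to the
 * token actually consumed.
 */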

/**
 * scmi_xfer_token_clear  - Release the token
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 */
static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
					 struct scmi_xfer *xfer)
{
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
}

/**
 * scmi_xfer_inflight_register_unlocked  - Register the xfer as in-flight
 *
 * @xfer: The xfer to register
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Note that this helper assumes that the xfer to be registered as in-flight
 * had been built using an xfer sequence number which still corresponds to a
 * free slot in the xfer_alloc_table.
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 */
static inline void
scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
				     struct scmi_xfers_info *minfo)
{
	/* Set in-flight */
	set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
	xfer->pending = true;
}

/**
 * scmi_xfer_inflight_register  - Try to register an xfer as in-flight
 *
 * @xfer: The xfer to register
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Note that this helper does NOT assume anything about the sequence number
 * that was baked into the provided xfer, so it checks at first if it can
 * be mapped to a free slot and fails with an error if another xfer with the
 * same sequence number is currently still registered as in-flight.
 *
 * Return: 0 on Success or -EBUSY if the sequence number embedded in the xfer
 *	   could not be mapped to a free slot in the xfer_alloc_table.
 */
static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
				       struct scmi_xfers_info *minfo)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
		scmi_xfer_inflight_register_unlocked(xfer, minfo);
	else
		ret = -EBUSY;
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return ret;
}

/**
 * scmi_xfer_raw_inflight_register  - A helper to register the given xfer as
 * in-flight on the TX channel, if possible.
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: The xfer to register
 *
 * Return: 0 on Success, error otherwise
 */
int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
				    struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
}

/**
 * scmi_xfer_pending_set  - Pick a proper sequence number and mark the xfer
 * as pending in-flight
 *
 * @xfer: The xfer to act upon
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Return: 0 on Success or error otherwise
 */
static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
					struct scmi_xfers_info *minfo)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	/* Set a new monotonic token as the xfer sequence number */
	ret = scmi_xfer_token_set(minfo, xfer);
	if (!ret)
		scmi_xfer_inflight_register_unlocked(xfer, minfo);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return ret;
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * Picks an xfer from the free list @free_xfers (if any available) and performs
 * a basic initialization.
 *
 * Note that, at this point, still no sequence number is assigned to the
 * allocated xfer, nor is it registered as a pending transaction.
 *
 * The successfully initialized xfer is refcounted.
 *
 * Context: Holds @xfer_lock while manipulating @free_xfers.
 *
 * Return: An initialized xfer if all went fine, else an error pointer.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	unsigned long flags;
	struct scmi_xfer *xfer;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (hlist_empty(&minfo->free_xfers)) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}

	/* grab an xfer from the free_list */
	xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
	hlist_del_init(&xfer->node);

	/*
	 * Allocate transfer_id early so that it can be used also as base for
	 * monotonic sequence number generation if needed.
	 */
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	refcount_set(&xfer->users, 1);
	atomic_set(&xfer->busy, SCMI_XFER_FREE);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return xfer;
}

/**
 * scmi_xfer_raw_get  - Helper to get a bare free xfer from the TX channel
 *
 * @handle: Pointer to SCMI entity handle
 *
 * Note that the xfer is taken from the TX channel structures.
 *
 * Return: A valid xfer on Success, or an error-pointer otherwise
 */
struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle)
{
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);

	xfer = scmi_xfer_get(handle, &info->tx_minfo);
	if (!IS_ERR(xfer))
		xfer->flags |= SCMI_XFER_FLAG_IS_RAW;

	return xfer;
}

/**
 * scmi_xfer_raw_channel_get  - Helper to get a reference to the proper channel
 * to use for a specific protocol_id Raw transaction.
 *
 * @handle: Pointer to SCMI entity handle
 * @protocol_id: Identifier of the protocol
 *
 * Note that in a regular SCMI stack, usually, a protocol has to be defined in
 * the DT to have an associated channel and be usable; but in Raw mode any
 * protocol in range is allowed, re-using the Base channel, so as to enable
 * fuzzing on any protocol without the need of a fully compiled DT.
 *
 * Return: A reference to the channel to use, or an ERR_PTR
 */
struct scmi_chan_info *
scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_chan_info *cinfo;
	struct scmi_info *info = handle_to_scmi_info(handle);

	cinfo = idr_find(&info->tx_idr, protocol_id);
	if (!cinfo) {
		if (protocol_id == SCMI_PROTOCOL_BASE)
			return ERR_PTR(-EINVAL);
		/* Use the Base channel for protocols not defined in the DT */
		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
		if (!cinfo)
			return ERR_PTR(-EINVAL);
		dev_warn_once(handle->dev,
			      "Using Base channel for protocol 0x%X\n",
			      protocol_id);
	}

	return cinfo;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * After refcount check, possibly release an xfer, clearing the token slot,
 * removing xfer from @pending_xfers and putting it back into free_xfers.
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (refcount_dec_and_test(&xfer->users)) {
		if (xfer->pending) {
			scmi_xfer_token_clear(minfo, xfer);
			hash_del(&xfer->node);
			xfer->pending = false;
		}
		hlist_add_head(&xfer->node, &minfo->free_xfers);
	}
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

/**
 * scmi_xfer_raw_put  - Release an xfer that was taken by @scmi_xfer_raw_get
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: A reference to the xfer to put
 *
 * Note that as with other xfer_put() handlers the xfer is really effectively
 * released only if there are no more users on the system.
 */
void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
	xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
	return __scmi_xfer_put(&info->tx_minfo, xfer);
}

/**
 * scmi_xfer_lookup_unlocked  -  Helper to lookup an xfer_id
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer_id: Token ID to lookup in @pending_xfers
 *
 * Refcounting is untouched.
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: A valid xfer on Success or error otherwise
 */
static struct scmi_xfer *
scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
{
	struct scmi_xfer *xfer = NULL;

	if (test_bit(xfer_id, minfo->xfer_alloc_table))
		xfer = XFER_FIND(minfo->pending_xfers, xfer_id);

	return xfer ?: ERR_PTR(-EINVAL);
}

/**
 * scmi_msg_response_validate  - Validate message type against state of related
 * xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_type: Message type to check
 * @xfer: A reference to the xfer to validate against @msg_type
 *
 * This function checks if @msg_type is congruent with the current state of
 * a pending @xfer; if an asynchronous delayed response is received before the
 * related synchronous response (Out-of-Order Delayed Response) the missing
 * synchronous response is assumed to be OK and completed, carrying on with the
 * Delayed Response: this is done to address the case in which the underlying
 * SCMI transport can deliver such out-of-order responses.
 *
 * Context: Assumes to be called with xfer->lock already acquired.
 *
 * Return: 0 on Success, error otherwise
 */
static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
					     u8 msg_type,
					     struct scmi_xfer *xfer)
{
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
		dev_err(cinfo->dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer->hdr.seq);
		return -EINVAL;
	}

	switch (xfer->state) {
	case SCMI_XFER_SENT_OK:
		if (msg_type == MSG_TYPE_DELAYED_RESP) {
			/*
			 * Delayed Response expected but delivered earlier.
			 * Assume message RESPONSE was OK and skip state.
			 */
			xfer->hdr.status = SCMI_SUCCESS;
			xfer->state = SCMI_XFER_RESP_OK;
			complete(&xfer->done);
			dev_warn(cinfo->dev,
				 "Received valid OoO Delayed Response for %d\n",
				 xfer->hdr.seq);
		}
		break;
	case SCMI_XFER_RESP_OK:
		if (msg_type != MSG_TYPE_DELAYED_RESP)
			return -EINVAL;
		break;
	case SCMI_XFER_DRESP_OK:
		/* No further message expected once in SCMI_XFER_DRESP_OK */
		return -EINVAL;
	}

	return 0;
}
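
/*
 * Illustrative recap (not part of the driver) of the (xfer->state, msg_type)
 * combinations accepted above:
 *
 *	state \ msg_type	COMMAND (resp)	DELAYED_RESP
 *	SENT_OK			0		0 (OoO: resp assumed OK)
 *	RESP_OK			-EINVAL		0
 *	DRESP_OK		-EINVAL		-EINVAL
 *
 * Additionally, any DELAYED_RESP is rejected upfront when the xfer was not
 * built as an asynchronous command (!xfer->async_done).
 */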

/**
 * scmi_xfer_state_update  - Update xfer state
 *
 * @xfer: A reference to the xfer to update
 * @msg_type: Type of message being processed.
 *
 * Note that this message is assumed to have been already successfully validated
 * by @scmi_msg_response_validate(), so here we just update the state.
 *
 * Context: Assumes to be called on an xfer exclusively acquired using the
 *	    busy flag.
 */
static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
{
	xfer->hdr.type = msg_type;

	/* Unknown command types were already discarded earlier */
	if (xfer->hdr.type == MSG_TYPE_COMMAND)
		xfer->state = SCMI_XFER_RESP_OK;
	else
		xfer->state = SCMI_XFER_DRESP_OK;
}

static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
{
	int ret;

	ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);

	return ret == SCMI_XFER_FREE;
}

/**
 * scmi_xfer_command_acquire  -  Helper to lookup and acquire a command xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_hdr: A message header to use as lookup key
 *
 * When a valid xfer is found for the sequence number embedded in the provided
 * msg_hdr, reference counting is properly updated and exclusive access to this
 * xfer is granted till released with @scmi_xfer_command_release.
 *
 * Return: A valid @xfer on Success or error otherwise.
 */
static inline struct scmi_xfer *
scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	int ret;
	unsigned long flags;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);

	/* Are we even expecting this? */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
	if (IS_ERR(xfer)) {
		dev_err(cinfo->dev,
			"Message for %d type %d is not expected!\n",
			xfer_id, msg_type);
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return xfer;
	}
	refcount_inc(&xfer->users);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	spin_lock_irqsave(&xfer->lock, flags);
	ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
	/*
	 * If a pending xfer was found which was also in a congruent state with
	 * the received message, acquire exclusive access to it setting the busy
	 * flag.
	 * Spins only on the rare limit condition of concurrent reception of
	 * RESP and DRESP for the same xfer.
	 */
	if (!ret) {
		spin_until_cond(scmi_xfer_acquired(xfer));
		scmi_xfer_state_update(xfer, msg_type);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);

	if (ret) {
		dev_err(cinfo->dev,
			"Invalid message type:%d for %d - HDR:0x%X  state:%d\n",
			msg_type, xfer_id, msg_hdr, xfer->state);
		/* On error the refcount incremented above has to be dropped */
		__scmi_xfer_put(minfo, xfer);
		xfer = ERR_PTR(-EINVAL);
	}

	return xfer;
}

static inline void scmi_xfer_command_release(struct scmi_info *info,
					     struct scmi_xfer *xfer)
{
	atomic_set(&xfer->busy, SCMI_XFER_FREE);
	__scmi_xfer_put(&info->tx_minfo, xfer);
}

static inline void scmi_clear_channel(struct scmi_info *info,
				      struct scmi_chan_info *cinfo)
{
	if (info->desc->ops->clear_channel)
		info->desc->ops->clear_channel(cinfo);
}

static void scmi_handle_notification(struct scmi_chan_info *cinfo,
				     u32 msg_hdr, void *priv)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts;

	ts = ktime_get_boottime();
	xfer = scmi_xfer_get(cinfo->handle, minfo);
	if (IS_ERR(xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(xfer));
		scmi_clear_channel(info, cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	if (priv)
		/* Ensure order between xfer->priv store and following ops */
		smp_store_mb(xfer->priv, priv);
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);

	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id, "NOTI", xfer->hdr.seq,
			    xfer->hdr.status, xfer->rx.buf, xfer->rx.len);

	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
		xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
		scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
					cinfo->id);
	}

	__scmi_xfer_put(minfo, xfer);

	scmi_clear_channel(info, cinfo);
}

static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u32 msg_hdr, void *priv)
{
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
	if (IS_ERR(xfer)) {
		if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
			scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);

		if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
			scmi_clear_channel(info, cinfo);
		return;
	}

	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
		xfer->rx.len = info->desc->max_msg_size;

	if (priv)
		/* Ensure order between xfer->priv store and following ops */
		smp_store_mb(xfer->priv, priv);
	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id,
			    xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
			    (!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
			    (!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
			    xfer->hdr.seq, xfer->hdr.status,
			    xfer->rx.buf, xfer->rx.len);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   xfer->hdr.type);

	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
		scmi_clear_channel(info, cinfo);
		complete(xfer->async_done);
	} else {
		complete(&xfer->done);
	}

	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
		/*
		 * When in polling mode avoid to queue the Raw xfer on the IRQ
		 * RX path since it will be already queued at the end of the TX
		 * poll loop.
		 */
		if (!xfer->hdr.poll_completion)
			scmi_raw_message_report(info->raw, xfer,
						SCMI_RAW_REPLY_QUEUE,
						cinfo->id);
	}

	scmi_xfer_command_release(info, xfer);
}

/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 * @priv: Transport specific private data.
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
{
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

	switch (msg_type) {
	case MSG_TYPE_NOTIFICATION:
		scmi_handle_notification(cinfo, msg_hdr, priv);
		break;
	case MSG_TYPE_COMMAND:
	case MSG_TYPE_DELAYED_RESP:
		scmi_handle_response(cinfo, msg_hdr, priv);
		break;
	default:
		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
		break;
	}
}
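
/*
 * Illustrative sketch (not part of the driver): a transport's RX interrupt
 * handler is expected to read the message header off the shared medium and
 * hand it to the core as below. The mailbox-based transport does essentially
 * this; read_header() here is a hypothetical helper.
 *
 *	static void example_rx_irq_handler(struct scmi_chan_info *cinfo)
 *	{
 *		u32 msg_hdr = read_header(cinfo);	// hypothetical
 *
 *		scmi_rx_callback(cinfo, msg_hdr, NULL);
 *	}
 */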

/**
 * xfer_put() - Release a transmit message
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: message that was reserved by xfer_get_init
 */
static void xfer_put(const struct scmi_protocol_handle *ph,
		     struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}

static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	/*
	 * Poll also on xfer->done so that polling can be forcibly terminated
	 * in case of out-of-order receptions of delayed responses
	 */
	return info->desc->ops->poll_done(cinfo, xfer) ||
	       try_wait_for_completion(&xfer->done) ||
	       ktime_after(ktime_get(), stop);
}

static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
			       struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer, unsigned int timeout_ms)
{
	int ret = 0;

	if (xfer->hdr.poll_completion) {
		/*
		 * Real polling is needed only if transport has NOT declared
		 * itself to support synchronous commands replies.
		 */
		if (!desc->sync_cmds_completed_on_ret) {
			/*
			 * Poll on xfer using transport provided .poll_done();
			 * assumes no completion interrupt was available.
			 */
			ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);

			spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
								  xfer, stop));
			if (ktime_after(ktime_get(), stop)) {
				dev_err(dev,
					"timed out in resp(caller: %pS) - polling\n",
					(void *)_RET_IP_);
				ret = -ETIMEDOUT;
			}
		}

		if (!ret) {
			unsigned long flags;
			struct scmi_info *info =
				handle_to_scmi_info(cinfo->handle);

			/*
			 * Do not fetch_response if an out-of-order delayed
			 * response is being processed.
			 */
			spin_lock_irqsave(&xfer->lock, flags);
			if (xfer->state == SCMI_XFER_SENT_OK) {
				desc->ops->fetch_response(cinfo, xfer);
				xfer->state = SCMI_XFER_RESP_OK;
			}
			spin_unlock_irqrestore(&xfer->lock, flags);

			/* Trace polled replies. */
			trace_scmi_msg_dump(info->id, cinfo->id,
					    xfer->hdr.protocol_id, xfer->hdr.id,
					    !SCMI_XFER_IS_RAW(xfer) ?
					    "RESP" : "resp",
					    xfer->hdr.seq, xfer->hdr.status,
					    xfer->rx.buf, xfer->rx.len);

			if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
				scmi_raw_message_report(info->raw, xfer,
							SCMI_RAW_REPLY_QUEUE,
							cinfo->id);
		}
	} else {
		/* And we wait for the response. */
		if (!wait_for_completion_timeout(&xfer->done,
						 msecs_to_jiffies(timeout_ms))) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	return ret;
}
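
/*
 * Illustrative sketch (not part of the driver): the transport descriptor
 * fields this wait path keys off. A transport whose send_message() returns
 * only once the reply is ready declares .sync_cmds_completed_on_ret and so
 * skips the .poll_done() busy-wait entirely. The values below are
 * hypothetical.
 *
 *	static const struct scmi_desc example_desc = {
 *		.ops = &example_transport_ops,	// hypothetical ops table
 *		.max_rx_timeout_ms = 30,
 *		.max_msg = 20,
 *		.max_msg_size = 128,
 *		.sync_cmds_completed_on_ret = true,
 *	};
 */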

/**
 * scmi_wait_for_message_response  - A helper to group all the possible ways of
 * waiting for a synchronous message response.
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being waited for.
 *
 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
 * configuration flags like xfer->hdr.poll_completion.
 *
 * Return: 0 on Success, error otherwise.
 */
static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
					  struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct device *dev = info->dev;

	trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
				      xfer->hdr.protocol_id, xfer->hdr.seq,
				      info->desc->max_rx_timeout_ms,
				      xfer->hdr.poll_completion);

	return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
				   info->desc->max_rx_timeout_ms);
}

/**
 * scmi_xfer_raw_wait_for_message_response  - A helper to wait for a message
 * reply to an xfer raw request on a specific channel for the required timeout.
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being waited for.
 * @timeout_ms: The maximum timeout in milliseconds
 *
 * Return: 0 on Success, error otherwise.
 */
int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
					    struct scmi_xfer *xfer,
					    unsigned int timeout_ms)
{
	int ret;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct device *dev = info->dev;

	ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
	if (ret)
		dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
			pack_scmi_header(&xfer->hdr));

	return ret;
}

/**
 * do_xfer() - Do one transfer
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	return corresponding error, else if all goes well,
 *	return 0.
 */
static int do_xfer(const struct scmi_protocol_handle *ph,
		   struct scmi_xfer *xfer)
{
	int ret;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	/* Check for polling request on custom command xfers at first */
	if (xfer->hdr.poll_completion &&
	    !is_transport_polling_capable(info->desc)) {
		dev_warn_once(dev,
			      "Polling mode is not supported by transport.\n");
		return -EINVAL;
	}

	cinfo = idr_find(&info->tx_idr, pi->proto->id);
	if (unlikely(!cinfo))
		return -EINVAL;

	/* True ONLY if also supported by transport. */
	if (is_polling_enabled(cinfo, info->desc))
		xfer->hdr.poll_completion = true;

	/*
	 * Initialise protocol id now from protocol handle to avoid it being
	 * overridden by mistake (or malice) by the protocol code mangling with
	 * the scmi_xfer structure prior to this.
	 */
	xfer->hdr.protocol_id = pi->proto->id;
	reinit_completion(&xfer->done);

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	/* Clear any stale status */
	xfer->hdr.status = SCMI_SUCCESS;
	xfer->state = SCMI_XFER_SENT_OK;
	/*
	 * Even though spinlocking is not needed here since no race is possible
	 * on xfer->state due to the monotonically increasing tokens allocation,
	 * we must anyway ensure xfer->state initialization is not re-ordered
	 * after the .send_message() to be sure that on the RX path an early
	 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
	 */
	smp_mb();

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		return ret;
	}

	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id, "CMND", xfer->hdr.seq,
			    xfer->hdr.status, xfer->tx.buf, xfer->tx.len);

	ret = scmi_wait_for_message_response(cinfo, xfer);
	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret, xfer);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

	return ret;
}

static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
			      struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	xfer->rx.len = info->desc->max_msg_size;
}

/**
 * do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Using asynchronous commands in atomic/polling mode should be avoided since
 * it could cause long busy-waiting here, so ignore polling for the delayed
 * response and WARN if it was requested for this command transaction since
 * upper layers should refrain from issuing such kinds of requests.
 *
 * The only other option would have been to refrain from using any asynchronous
 * command even if made available, when an atomic transport is detected, and
 * instead forcibly use the synchronous version (thing that can be easily
 * attained at the protocol layer), but this would also have led to longer
 * stalls of the channel for synchronous commands and possibly timeouts.
 * (in other words there is usually a good reason if a platform provides an
 *  asynchronous version of a command and we should prefer to use it...just not
 *  when using atomic/polling mode)
 *
 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
 *	return corresponding error, else if all goes well, return 0.
 */
static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
				 struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	/*
	 * Delayed responses should not be polled, so an async command should
	 * not have been used when requiring an atomic/poll context; WARN and
	 * perform instead a sleeping wait.
	 * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
	 */
	WARN_ON_ONCE(xfer->hdr.poll_completion);

	ret = do_xfer(ph, xfer);
	if (!ret) {
		if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
			dev_err(ph->dev,
				"timed out in delayed resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		} else if (xfer->hdr.status) {
			ret = scmi_to_linux_errno(xfer->hdr.status);
		}
	}

	xfer->async_done = NULL;
	return ret;
}
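
/*
 * Illustrative sketch (not part of the driver): how a protocol would issue
 * an asynchronous command via the xfer ops exported below. The command ID
 * and payload are hypothetical placeholders.
 *
 *	int ret;
 *	struct scmi_xfer *t;
 *
 *	ret = ph->xops->xfer_get_init(ph, EXAMPLE_ASYNC_CMD,
 *				      sizeof(__le32), 0, &t);
 *	if (ret)
 *		return ret;
 *
 *	put_unaligned_le32(domain_id, t->tx.buf);
 *	// Sends the command, then sleeps until the delayed response lands
 *	ret = ph->xops->do_xfer_with_response(ph, t);
 *
 *	ph->xops->xfer_put(ph, t);
 */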

/**
 * xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @ph: Pointer to SCMI protocol handle
 * @msg_id: Message identifier
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
static int xfer_get_init(const struct scmi_protocol_handle *ph,
			 u8 msg_id, size_t tx_size, size_t rx_size,
			 struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(pi->handle, minfo);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	/* Pick a sequence number and register this xfer as in-flight */
	ret = scmi_xfer_pending_set(xfer, minfo);
	if (ret) {
		dev_err(pi->handle->dev,
			"Failed to get monotonic token %d\n", ret);
		__scmi_xfer_put(minfo, xfer);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.type = MSG_TYPE_COMMAND;
	xfer->hdr.id = msg_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}

/**
 * version_get() - command to get the revision of the SCMI entity
 *
 * @ph: Pointer to SCMI protocol handle
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
	if (ret)
		return ret;

	ret = do_xfer(ph, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	xfer_put(ph, t);
	return ret;
}

/**
 * scmi_set_protocol_priv  - Set protocol specific data at init time
 *
 * @ph: A reference to the protocol handle.
 * @priv: The private data to set.
 * @version: The detected protocol version for the core to register.
 *
 * Return: 0 on Success
 */
static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
				  void *priv, u32 version)
{
	struct scmi_protocol_instance *pi = ph_to_pi(ph);

	pi->priv = priv;
	pi->version = version;

	return 0;
}

/**
 * scmi_get_protocol_priv  - Get protocol specific data set at init time
 *
 * @ph: A reference to the protocol handle.
 *
 * Return: Protocol private data if any was set.
 */
static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->priv;
}

static const struct scmi_xfer_ops xfer_ops = {
	.version_get = version_get,
	.xfer_get_init = xfer_get_init,
	.reset_rx_to_maxsz = reset_rx_to_maxsz,
	.do_xfer = do_xfer,
	.do_xfer_with_response = do_xfer_with_response,
	.xfer_put = xfer_put,
};

struct scmi_msg_resp_domain_name_get {
	__le32 flags;
	u8 name[SCMI_MAX_STR_SIZE];
};

/**
 * scmi_common_extended_name_get  - Common helper to get extended resource names
 * @ph: A protocol handle reference.
 * @cmd_id: The specific command ID to use.
 * @res_id: The specific resource ID to use.
 * @flags: A pointer to specific flags to use, if any.
 * @name: A pointer to the preallocated area where the retrieved name will be
 *	  stored as a NULL terminated string.
 * @len: The len in bytes of the @name char array.
 *
 * Return: 0 on Success
 */
static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
					 u8 cmd_id, u32 res_id, u32 *flags,
					 char *name, size_t len)
{
	int ret;
	size_t txlen;
	struct scmi_xfer *t;
	struct scmi_msg_resp_domain_name_get *resp;

	txlen = !flags ? sizeof(res_id) : sizeof(res_id) + sizeof(*flags);
	ret = ph->xops->xfer_get_init(ph, cmd_id, txlen, sizeof(*resp), &t);
	if (ret)
		goto out;

	put_unaligned_le32(res_id, t->tx.buf);
	if (flags)
		put_unaligned_le32(*flags, t->tx.buf + sizeof(res_id));
	resp = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		strscpy(name, resp->name, len);

	ph->xops->xfer_put(ph, t);
out:
	if (ret)
		dev_warn(ph->dev,
			 "Failed to get extended name - id:%u (ret:%d). Using %s\n",
			 res_id, ret, name);
	return ret;
}

/**
 * scmi_common_get_max_msg_size  - Get maximum message size
 * @ph: A protocol handle reference.
 *
 * Return: Maximum message size for the current protocol.
 */
static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	return info->desc->max_msg_size;
}

/**
 * struct scmi_iterator  - Iterator descriptor
 * @msg: A reference to the message TX buffer; filled by @prepare_message with
 *	 a proper custom command payload for each multi-part command request.
 * @resp: A reference to the response RX buffer; used by @update_state and
 *	  @process_response to parse the multi-part replies.
 * @t: A reference to the underlying xfer initialized and used transparently by
 *     the iterator internal routines.
 * @ph: A reference to the associated protocol handle to be used.
 * @ops: A reference to the custom provided iterator operations.
 * @state: The current iterator state; used and updated in turn by the
 *	   iterator's internal routines and by the caller-provided
 *	   @scmi_iterator_ops.
 * @priv: A reference to optional private data as provided by the caller and
 *	  passed back to the @scmi_iterator_ops.
 */
struct scmi_iterator {
	void *msg;
	void *resp;
	struct scmi_xfer *t;
	const struct scmi_protocol_handle *ph;
	struct scmi_iterator_ops *ops;
	struct scmi_iterator_state state;
	void *priv;
};

static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
				struct scmi_iterator_ops *ops,
				unsigned int max_resources, u8 msg_id,
				size_t tx_size, void *priv)
{
	int ret;
	struct scmi_iterator *i;

	i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
	if (!i)
		return ERR_PTR(-ENOMEM);

	i->ph = ph;
	i->ops = ops;
	i->priv = priv;

	ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
	if (ret) {
		devm_kfree(ph->dev, i);
		return ERR_PTR(ret);
	}

	i->state.max_resources = max_resources;
	i->msg = i->t->tx.buf;
	i->resp = i->t->rx.buf;

	return i;
}

1552 static int scmi_iterator_run(void *iter)
1553 {
1554 	int ret = -EINVAL;
1555 	struct scmi_iterator_ops *iops;
1556 	const struct scmi_protocol_handle *ph;
1557 	struct scmi_iterator_state *st;
1558 	struct scmi_iterator *i = iter;
1559 
1560 	if (!i || !i->ops || !i->ph)
1561 		return ret;
1562 
1563 	iops = i->ops;
1564 	ph = i->ph;
1565 	st = &i->state;
1566 
1567 	do {
1568 		iops->prepare_message(i->msg, st->desc_index, i->priv);
1569 		ret = ph->xops->do_xfer(ph, i->t);
1570 		if (ret)
1571 			break;
1572 
1573 		st->rx_len = i->t->rx.len;
1574 		ret = iops->update_state(st, i->resp, i->priv);
1575 		if (ret)
1576 			break;
1577 
1578 		if (st->num_returned > st->max_resources - st->desc_index) {
1579 			dev_err(ph->dev,
1580 				"No. of resources can't exceed %d\n",
1581 				st->max_resources);
1582 			ret = -EINVAL;
1583 			break;
1584 		}
1585 
1586 		for (st->loop_idx = 0; st->loop_idx < st->num_returned;
1587 		     st->loop_idx++) {
1588 			ret = iops->process_response(ph, i->resp, st, i->priv);
1589 			if (ret)
1590 				goto out;
1591 		}
1592 
1593 		st->desc_index += st->num_returned;
1594 		ph->xops->reset_rx_to_maxsz(ph, i->t);
1595 		/*
1596 		 * check for both returned and remaining to avoid infinite
1597 		 * loop due to buggy firmware
1598 		 */
1599 	} while (st->num_returned && st->num_remaining);
1600 
1601 out:
1602 	/* Finalize and destroy iterator */
1603 	ph->xops->xfer_put(ph, i->t);
1604 	devm_kfree(ph->dev, i);
1605 
1606 	return ret;
1607 }
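
/*
 * Illustrative sketch of the iterator contract, not part of this driver: a
 * protocol supplies the three callbacks and lets the core loop over the
 * multi-part reply. The message layouts, the num-returned/num-remaining bit
 * split and MSG_EXAMPLE_LIST are hypothetical placeholders; callback
 * signatures follow the calls made by scmi_iterator_run() above.
 */
#if 0
struct example_msg { __le32 index; };
struct example_resp { __le32 num; __le32 entry[]; };

static void example_prepare(void *message, unsigned int desc_index,
			    const void *priv)
{
	struct example_msg *msg = message;

	msg->index = cpu_to_le32(desc_index);
}

static int example_update(struct scmi_iterator_state *st,
			  const void *response, void *priv)
{
	const struct example_resp *r = response;

	/* Hypothetical layout: returned in low 16 bits, remaining in high */
	st->num_returned = le32_to_cpu(r->num) & 0xffff;
	st->num_remaining = le32_to_cpu(r->num) >> 16;
	return 0;
}

static int example_process(const struct scmi_protocol_handle *ph,
			   const void *response,
			   struct scmi_iterator_state *st, void *priv)
{
	const struct example_resp *r = response;
	u32 *out = priv;

	out[st->desc_index + st->loop_idx] =
		le32_to_cpu(r->entry[st->loop_idx]);
	return 0;
}

static int example_list_all(const struct scmi_protocol_handle *ph,
			    unsigned int max, u32 *out)
{
	struct scmi_iterator_ops ops = {
		.prepare_message = example_prepare,
		.update_state = example_update,
		.process_response = example_process,
	};
	void *iter;

	iter = ph->hops->iter_response_init(ph, &ops, max, MSG_EXAMPLE_LIST,
					    sizeof(struct example_msg), out);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	return ph->hops->iter_response_run(iter);
}
#endif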
1608 
1609 struct scmi_msg_get_fc_info {
1610 	__le32 domain;
1611 	__le32 message_id;
1612 };
1613 
1614 struct scmi_msg_resp_desc_fc {
1615 	__le32 attr;
1616 #define SUPPORTS_DOORBELL(x)		((x) & BIT(0))
1617 #define DOORBELL_REG_WIDTH(x)		FIELD_GET(GENMASK(2, 1), (x))
1618 	__le32 rate_limit;
1619 	__le32 chan_addr_low;
1620 	__le32 chan_addr_high;
1621 	__le32 chan_size;
1622 	__le32 db_addr_low;
1623 	__le32 db_addr_high;
1624 	__le32 db_set_lmask;
1625 	__le32 db_set_hmask;
1626 	__le32 db_preserve_lmask;
1627 	__le32 db_preserve_hmask;
1628 };
1629 
1630 static void
1631 scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
1632 			     u8 describe_id, u32 message_id, u32 valid_size,
1633 			     u32 domain, void __iomem **p_addr,
1634 			     struct scmi_fc_db_info **p_db)
1635 {
1636 	int ret;
1637 	u32 flags;
1638 	u64 phys_addr;
1639 	u8 size;
1640 	void __iomem *addr;
1641 	struct scmi_xfer *t;
1642 	struct scmi_fc_db_info *db = NULL;
1643 	struct scmi_msg_get_fc_info *info;
1644 	struct scmi_msg_resp_desc_fc *resp;
1645 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1646 
1647 	if (!p_addr) {
1648 		ret = -EINVAL;
1649 		goto err_out;
1650 	}
1651 
1652 	ret = ph->xops->xfer_get_init(ph, describe_id,
1653 				      sizeof(*info), sizeof(*resp), &t);
1654 	if (ret)
1655 		goto err_out;
1656 
1657 	info = t->tx.buf;
1658 	info->domain = cpu_to_le32(domain);
1659 	info->message_id = cpu_to_le32(message_id);
1660 
1661 	/*
1662 	 * Bail out on error leaving fc_info addresses zeroed; this includes
1663 	 * the case in which the requested domain/message_id does NOT support
1664 	 * fastchannels at all.
1665 	 */
1666 	ret = ph->xops->do_xfer(ph, t);
1667 	if (ret)
1668 		goto err_xfer;
1669 
1670 	resp = t->rx.buf;
1671 	flags = le32_to_cpu(resp->attr);
1672 	size = le32_to_cpu(resp->chan_size);
1673 	if (size != valid_size) {
1674 		ret = -EINVAL;
1675 		goto err_xfer;
1676 	}
1677 
1678 	phys_addr = le32_to_cpu(resp->chan_addr_low);
1679 	phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
1680 	addr = devm_ioremap(ph->dev, phys_addr, size);
1681 	if (!addr) {
1682 		ret = -EADDRNOTAVAIL;
1683 		goto err_xfer;
1684 	}
1685 
1686 	*p_addr = addr;
1687 
1688 	if (p_db && SUPPORTS_DOORBELL(flags)) {
1689 		db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
1690 		if (!db) {
1691 			ret = -ENOMEM;
1692 			goto err_db;
1693 		}
1694 
1695 		size = 1 << DOORBELL_REG_WIDTH(flags);
1696 		phys_addr = le32_to_cpu(resp->db_addr_low);
1697 		phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
1698 		addr = devm_ioremap(ph->dev, phys_addr, size);
1699 		if (!addr) {
1700 			ret = -EADDRNOTAVAIL;
1701 			goto err_db_mem;
1702 		}
1703 
1704 		db->addr = addr;
1705 		db->width = size;
1706 		db->set = le32_to_cpu(resp->db_set_lmask);
1707 		db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
1708 		db->mask = le32_to_cpu(resp->db_preserve_lmask);
1709 		db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
1710 
1711 		*p_db = db;
1712 	}
1713 
1714 	ph->xops->xfer_put(ph, t);
1715 
1716 	dev_dbg(ph->dev,
1717 		"Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
1718 		pi->proto->id, message_id, domain);
1719 
1720 	return;
1721 
1722 err_db_mem:
1723 	devm_kfree(ph->dev, db);
1724 
1725 err_db:
1726 	*p_addr = NULL;
1727 
1728 err_xfer:
1729 	ph->xops->xfer_put(ph, t);
1730 
1731 err_out:
1732 	dev_warn(ph->dev,
1733 		 "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
1734 		 pi->proto->id, message_id, domain, ret);
1735 }
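
/*
 * Illustrative usage sketch, not part of this driver: a protocol asks for a
 * fastchannel and falls back to regular messaging when none is available.
 * EXAMPLE_DESCRIBE_FC and EXAMPLE_LEVEL_SET are hypothetical message IDs.
 */
#if 0
static void example_level_set(const struct scmi_protocol_handle *ph,
			      u32 domain, u32 level)
{
	void __iomem *addr = NULL;
	struct scmi_fc_db_info *db = NULL;

	ph->hops->fastchannel_init(ph, EXAMPLE_DESCRIBE_FC, EXAMPLE_LEVEL_SET,
				   sizeof(u32), domain, &addr, &db);
	if (addr) {
		/* Write the payload, then ring the doorbell if one exists */
		iowrite32(level, addr);
		ph->hops->fastchannel_db_ring(db);
	}
	/* addr still NULL: no FC for this message, use regular transfers */
}
#endif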
1736 
1737 #define SCMI_PROTO_FC_RING_DB(w)			\
1738 do {							\
1739 	u##w val = 0;					\
1740 							\
1741 	if (db->mask)					\
1742 		val = ioread##w(db->addr) & db->mask;	\
1743 	iowrite##w((u##w)db->set | val, db->addr);	\
1744 } while (0)
1745 
1746 static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
1747 {
1748 	if (!db || !db->addr)
1749 		return;
1750 
1751 	if (db->width == 1)
1752 		SCMI_PROTO_FC_RING_DB(8);
1753 	else if (db->width == 2)
1754 		SCMI_PROTO_FC_RING_DB(16);
1755 	else if (db->width == 4)
1756 		SCMI_PROTO_FC_RING_DB(32);
1757 	else /* db->width == 8 */
1758 #ifdef CONFIG_64BIT
1759 		SCMI_PROTO_FC_RING_DB(64);
1760 #else
1761 	{
1762 		u64 val = 0;
1763 
1764 		if (db->mask)
1765 			val = ioread64_hi_lo(db->addr) & db->mask;
1766 		iowrite64_hi_lo(db->set | val, db->addr);
1767 	}
1768 #endif
1769 }
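
/*
 * Worked example of the ring sequence above for a hypothetical 32-bit
 * doorbell with set = 0x1 and preserve mask = 0xffffff00: a readback of
 * 0x12345678 gives val = 0x12345600 after masking, and the final write is
 * 0x12345601, so preserved register bits survive while the set bit rings.
 */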
1770 
1771 static const struct scmi_proto_helpers_ops helpers_ops = {
1772 	.extended_name_get = scmi_common_extended_name_get,
1773 	.get_max_msg_size = scmi_common_get_max_msg_size,
1774 	.iter_response_init = scmi_iterator_init,
1775 	.iter_response_run = scmi_iterator_run,
1776 	.fastchannel_init = scmi_common_fastchannel_init,
1777 	.fastchannel_db_ring = scmi_common_fastchannel_db_ring,
1778 };
1779 
1780 /**
1781  * scmi_revision_area_get  - Retrieve version memory area.
1782  *
1783  * @ph: A reference to the protocol handle.
1784  *
1785  * A helper to grab the version memory area reference during SCMI Base protocol
1786  * initialization.
1787  *
1788  * Return: A reference to the version memory area associated to the SCMI
1789  *	   instance underlying this protocol handle.
1790  */
1791 struct scmi_revision_info *
1792 scmi_revision_area_get(const struct scmi_protocol_handle *ph)
1793 {
1794 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1795 
1796 	return pi->handle->version;
1797 }
1798 
1799 /**
1800  * scmi_alloc_init_protocol_instance  - Allocate and initialize a protocol
1801  * instance descriptor.
1802  * @info: The reference to the related SCMI instance.
1803  * @proto: The protocol descriptor.
1804  *
1805  * Allocate a new protocol instance descriptor, using the provided @proto
1806  * description, against the specified SCMI instance @info, and initialize it;
1807  * all resources management is handled via a dedicated per-protocol devres
1808  * group.
1809  *
1810  * Context: Assumed to be called with @protocols_mtx already acquired.
1811  * Return: A reference to a freshly allocated and initialized protocol instance
1812  *	   or ERR_PTR on failure. On failure the @proto reference is at first
1813  *	   put using @scmi_protocol_put() before releasing all the devres group.
1814  */
1815 static struct scmi_protocol_instance *
1816 scmi_alloc_init_protocol_instance(struct scmi_info *info,
1817 				  const struct scmi_protocol *proto)
1818 {
1819 	int ret = -ENOMEM;
1820 	void *gid;
1821 	struct scmi_protocol_instance *pi;
1822 	const struct scmi_handle *handle = &info->handle;
1823 
1824 	/* Protocol specific devres group */
1825 	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
1826 	if (!gid) {
1827 		scmi_protocol_put(proto->id);
1828 		goto out;
1829 	}
1830 
1831 	pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
1832 	if (!pi)
1833 		goto clean;
1834 
1835 	pi->gid = gid;
1836 	pi->proto = proto;
1837 	pi->handle = handle;
1838 	pi->ph.dev = handle->dev;
1839 	pi->ph.xops = &xfer_ops;
1840 	pi->ph.hops = &helpers_ops;
1841 	pi->ph.set_priv = scmi_set_protocol_priv;
1842 	pi->ph.get_priv = scmi_get_protocol_priv;
1843 	refcount_set(&pi->users, 1);
1844 	/* proto->init is assured NON NULL by scmi_protocol_register */
1845 	ret = pi->proto->instance_init(&pi->ph);
1846 	if (ret)
1847 		goto clean;
1848 
1849 	ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
1850 			GFP_KERNEL);
1851 	if (ret != proto->id)
1852 		goto clean;
1853 
1854 	/*
1855 	 * Warn but ignore events registration errors since we do not want
1856 	 * to skip whole protocols if their notifications are messed up.
1857 	 */
1858 	if (pi->proto->events) {
1859 		ret = scmi_register_protocol_events(handle, pi->proto->id,
1860 						    &pi->ph,
1861 						    pi->proto->events);
1862 		if (ret)
1863 			dev_warn(handle->dev,
1864 				 "Protocol:%X - Events Registration Failed - err:%d\n",
1865 				 pi->proto->id, ret);
1866 	}
1867 
1868 	devres_close_group(handle->dev, pi->gid);
1869 	dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
1870 
1871 	if (pi->version > proto->supported_version)
1872 		dev_warn(handle->dev,
1873 			 "Detected UNSUPPORTED higher version 0x%X for protocol 0x%X. "
1874 			 "Backward compatibility is NOT assured.\n",
1875 			 pi->version, pi->proto->id);
1876 
1877 	return pi;
1878 
1879 clean:
1880 	/* Take care to put the protocol module's owner before releasing all */
1881 	scmi_protocol_put(proto->id);
1882 	devres_release_group(handle->dev, gid);
1883 out:
1884 	return ERR_PTR(ret);
1885 }
1886 
1887 /**
1888  * scmi_get_protocol_instance  - Protocol initialization helper.
1889  * @handle: A reference to the SCMI platform instance.
1890  * @protocol_id: The protocol being requested.
1891  *
1892  * In case the required protocol has never been requested before for this
1893  * instance, allocate and initialize all the needed structures while handling
1894  * resource allocation with a dedicated per-protocol devres subgroup.
1895  *
1896  * Return: A reference to an initialized protocol instance or error on failure:
1897  *	   in particular returns -EPROBE_DEFER when the desired protocol could
1898  *	   NOT be found.
1899  */
1900 static struct scmi_protocol_instance * __must_check
1901 scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
1902 {
1903 	struct scmi_protocol_instance *pi;
1904 	struct scmi_info *info = handle_to_scmi_info(handle);
1905 
1906 	mutex_lock(&info->protocols_mtx);
1907 	pi = idr_find(&info->protocols, protocol_id);
1908 
1909 	if (pi) {
1910 		refcount_inc(&pi->users);
1911 	} else {
1912 		const struct scmi_protocol *proto;
1913 
1914 		/* Fails if protocol not registered on bus */
1915 		proto = scmi_protocol_get(protocol_id);
1916 		if (proto)
1917 			pi = scmi_alloc_init_protocol_instance(info, proto);
1918 		else
1919 			pi = ERR_PTR(-EPROBE_DEFER);
1920 	}
1921 	mutex_unlock(&info->protocols_mtx);
1922 
1923 	return pi;
1924 }
1925 
1926 /**
1927  * scmi_protocol_acquire  - Protocol acquire
1928  * @handle: A reference to the SCMI platform instance.
1929  * @protocol_id: The protocol being requested.
1930  *
1931  * Register a new user for the requested protocol on the specified SCMI
1932  * platform instance, possibly triggering its initialization on first user.
1933  *
1934  * Return: 0 if protocol was acquired successfully.
1935  */
1936 int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
1937 {
1938 	return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
1939 }
1940 
1941 /**
1942  * scmi_protocol_release  - Protocol de-initialization helper.
1943  * @handle: A reference to the SCMI platform instance.
1944  * @protocol_id: The protocol being requested.
1945  *
1946  * Remove one user of the specified protocol and trigger de-initialization
1947  * and resource de-allocation once the last user has gone.
1948  */
1949 void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
1950 {
1951 	struct scmi_info *info = handle_to_scmi_info(handle);
1952 	struct scmi_protocol_instance *pi;
1953 
1954 	mutex_lock(&info->protocols_mtx);
1955 	pi = idr_find(&info->protocols, protocol_id);
1956 	if (WARN_ON(!pi))
1957 		goto out;
1958 
1959 	if (refcount_dec_and_test(&pi->users)) {
1960 		void *gid = pi->gid;
1961 
1962 		if (pi->proto->events)
1963 			scmi_deregister_protocol_events(handle, protocol_id);
1964 
1965 		if (pi->proto->instance_deinit)
1966 			pi->proto->instance_deinit(&pi->ph);
1967 
1968 		idr_remove(&info->protocols, protocol_id);
1969 
1970 		scmi_protocol_put(protocol_id);
1971 
1972 		devres_release_group(handle->dev, gid);
1973 		dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
1974 			protocol_id);
1975 	}
1976 
1977 out:
1978 	mutex_unlock(&info->protocols_mtx);
1979 }
1980 
1981 void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
1982 				     u8 *prot_imp)
1983 {
1984 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1985 	struct scmi_info *info = handle_to_scmi_info(pi->handle);
1986 
1987 	info->protocols_imp = prot_imp;
1988 }
1989 
1990 static bool
1991 scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
1992 {
1993 	int i;
1994 	struct scmi_info *info = handle_to_scmi_info(handle);
1995 	struct scmi_revision_info *rev = handle->version;
1996 
1997 	if (!info->protocols_imp)
1998 		return false;
1999 
2000 	for (i = 0; i < rev->num_protocols; i++)
2001 		if (info->protocols_imp[i] == prot_id)
2002 			return true;
2003 	return false;
2004 }
2005 
2006 struct scmi_protocol_devres {
2007 	const struct scmi_handle *handle;
2008 	u8 protocol_id;
2009 };
2010 
2011 static void scmi_devm_release_protocol(struct device *dev, void *res)
2012 {
2013 	struct scmi_protocol_devres *dres = res;
2014 
2015 	scmi_protocol_release(dres->handle, dres->protocol_id);
2016 }
2017 
2018 static struct scmi_protocol_instance __must_check *
2019 scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
2020 {
2021 	struct scmi_protocol_instance *pi;
2022 	struct scmi_protocol_devres *dres;
2023 
2024 	dres = devres_alloc(scmi_devm_release_protocol,
2025 			    sizeof(*dres), GFP_KERNEL);
2026 	if (!dres)
2027 		return ERR_PTR(-ENOMEM);
2028 
2029 	pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
2030 	if (IS_ERR(pi)) {
2031 		devres_free(dres);
2032 		return pi;
2033 	}
2034 
2035 	dres->handle = sdev->handle;
2036 	dres->protocol_id = protocol_id;
2037 	devres_add(&sdev->dev, dres);
2038 
2039 	return pi;
2040 }
2041 
2042 /**
2043  * scmi_devm_protocol_get  - Devres managed get protocol operations and handle
2044  * @sdev: A reference to an scmi_device whose embedded struct device is to
2045  *	  be used for devres accounting.
2046  * @protocol_id: The protocol being requested.
2047  * @ph: A pointer reference used to pass back the associated protocol handle.
2048  *
2049  * Get hold of a protocol accounting for its usage, possibly triggering its
2050  * initialization, and returning the protocol specific operations and related
2051  * protocol handle which will be used as first argument in most of the
2052  * protocols operations methods.
2053  * Being a devres based managed method, protocol hold will be automatically
2054  * released, and possibly de-initialized on last user, once the SCMI driver
2055  * owning the scmi_device is unbound from it.
2056  *
2057  * Return: A reference to the requested protocol operations or error.
2058  *	   Must be checked for errors by caller.
2059  */
2060 static const void __must_check *
2061 scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
2062 		       struct scmi_protocol_handle **ph)
2063 {
2064 	struct scmi_protocol_instance *pi;
2065 
2066 	if (!ph)
2067 		return ERR_PTR(-EINVAL);
2068 
2069 	pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2070 	if (IS_ERR(pi))
2071 		return pi;
2072 
2073 	*ph = &pi->ph;
2074 
2075 	return pi->proto->ops;
2076 }
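
/*
 * Illustrative usage sketch, not part of this driver: how an SCMI client
 * driver's probe would typically use the devres-managed getter above. The
 * choice of the Clock protocol here is just an example.
 */
#if 0
static int example_scmi_driver_probe(struct scmi_device *sdev)
{
	const struct scmi_clk_proto_ops *clk_ops;
	struct scmi_protocol_handle *ph;

	clk_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK,
						  &ph);
	if (IS_ERR(clk_ops))
		return PTR_ERR(clk_ops);

	/* ph is then passed as first argument to every clk_ops method */
	return 0;
}
#endif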
2077 
2078 /**
2079  * scmi_devm_protocol_acquire  - Devres managed helper to get hold of a protocol
2080  * @sdev: A reference to an scmi_device whose embedded struct device is to
2081  *	  be used for devres accounting.
2082  * @protocol_id: The protocol being requested.
2083  *
2084  * Get hold of a protocol accounting for its usage, possibly triggering its
2085  * initialization but without getting access to its protocol specific operations
2086  * and handle.
2087  *
2088  * Being a devres based managed method, protocol hold will be automatically
2089  * released, and possibly de-initialized on last user, once the SCMI driver
2090  * owning the scmi_device is unbound from it.
2091  *
2092  * Return: 0 on SUCCESS
2093  */
2094 static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
2095 						   u8 protocol_id)
2096 {
2097 	struct scmi_protocol_instance *pi;
2098 
2099 	pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2100 	if (IS_ERR(pi))
2101 		return PTR_ERR(pi);
2102 
2103 	return 0;
2104 }
2105 
2106 static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
2107 {
2108 	struct scmi_protocol_devres *dres = res;
2109 
2110 	if (WARN_ON(!dres || !data))
2111 		return 0;
2112 
2113 	return dres->protocol_id == *((u8 *)data);
2114 }
2115 
2116 /**
2117  * scmi_devm_protocol_put  - Devres managed put protocol operations and handle
2118  * @sdev: A reference to an scmi_device whose embedded struct device is to
2119  *	  be used for devres accounting.
2120  * @protocol_id: The protocol being requested.
2121  *
2122  * Explicitly release a protocol hold previously obtained by calling the
2123  * above @scmi_devm_protocol_get.
2124  */
2125 static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
2126 {
2127 	int ret;
2128 
2129 	ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
2130 			     scmi_devm_protocol_match, &protocol_id);
2131 	WARN_ON(ret);
2132 }
2133 
2134 /**
2135  * scmi_is_transport_atomic  - Method to check if underlying transport for an
2136  * SCMI instance is configured as atomic.
2137  *
2138  * @handle: A reference to the SCMI platform instance.
2139  * @atomic_threshold: An optional return value for the system wide currently
2140  *		      configured threshold for atomic operations.
2141  *
2142  * Return: True if transport is configured as atomic
2143  */
2144 static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
2145 				     unsigned int *atomic_threshold)
2146 {
2147 	bool ret;
2148 	struct scmi_info *info = handle_to_scmi_info(handle);
2149 
2150 	ret = info->desc->atomic_enabled &&
2151 		is_transport_polling_capable(info->desc);
2152 	if (ret && atomic_threshold)
2153 		*atomic_threshold = info->atomic_threshold;
2154 
2155 	return ret;
2156 }
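
/*
 * Illustrative usage sketch, not part of this driver: an SCMI client can
 * query atomicity to decide whether its ops may be called in atomic context.
 */
#if 0
static void example_check_atomic(const struct scmi_handle *handle)
{
	unsigned int threshold_us = 0;

	if (handle->is_transport_atomic(handle, &threshold_us))
		pr_info("atomic-capable transport, threshold %u us\n",
			threshold_us);
}
#endif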
2157 
2158 /**
2159  * scmi_handle_get() - Get the SCMI handle for a device
2160  *
2161  * @dev: pointer to device for which we want SCMI handle
2162  *
2163  * NOTE: The function does not track individual clients of the framework
2164  * and is expected to be maintained by caller of SCMI protocol library.
2165  * scmi_handle_put must be balanced with successful scmi_handle_get
2166  *
2167  * Return: pointer to handle if successful, NULL on error
2168  */
2169 static struct scmi_handle *scmi_handle_get(struct device *dev)
2170 {
2171 	struct list_head *p;
2172 	struct scmi_info *info;
2173 	struct scmi_handle *handle = NULL;
2174 
2175 	mutex_lock(&scmi_list_mutex);
2176 	list_for_each(p, &scmi_list) {
2177 		info = list_entry(p, struct scmi_info, node);
2178 		if (dev->parent == info->dev) {
2179 			info->users++;
2180 			handle = &info->handle;
2181 			break;
2182 		}
2183 	}
2184 	mutex_unlock(&scmi_list_mutex);
2185 
2186 	return handle;
2187 }
2188 
2189 /**
2190  * scmi_handle_put() - Release the handle acquired by scmi_handle_get
2191  *
2192  * @handle: handle acquired by scmi_handle_get
2193  *
2194  * NOTE: The function does not track individual clients of the framework
2195  * and is expected to be maintained by caller of SCMI protocol library.
2196  * scmi_handle_put must be balanced with successful scmi_handle_get
2197  *
2198  * Return: 0 if successfully released,
2199  *	   -EINVAL if a NULL handle was passed.
2200  */
2201 static int scmi_handle_put(const struct scmi_handle *handle)
2202 {
2203 	struct scmi_info *info;
2204 
2205 	if (!handle)
2206 		return -EINVAL;
2207 
2208 	info = handle_to_scmi_info(handle);
2209 	mutex_lock(&scmi_list_mutex);
2210 	if (!WARN_ON(!info->users))
2211 		info->users--;
2212 	mutex_unlock(&scmi_list_mutex);
2213 
2214 	return 0;
2215 }
2216 
2217 static void scmi_device_link_add(struct device *consumer,
2218 				 struct device *supplier)
2219 {
2220 	struct device_link *link;
2221 
2222 	link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
2223 
2224 	WARN_ON(!link);
2225 }
2226 
2227 static void scmi_set_handle(struct scmi_device *scmi_dev)
2228 {
2229 	scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
2230 	if (scmi_dev->handle)
2231 		scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
2232 }
2233 
2234 static int __scmi_xfer_info_init(struct scmi_info *sinfo,
2235 				 struct scmi_xfers_info *info)
2236 {
2237 	int i;
2238 	struct scmi_xfer *xfer;
2239 	struct device *dev = sinfo->dev;
2240 	const struct scmi_desc *desc = sinfo->desc;
2241 
2242 	/* Pre-allocated messages, no more than what hdr.seq can support */
2243 	if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
2244 		dev_err(dev,
2245 			"Invalid maximum messages %d, not in range [1 - %lu]\n",
2246 			info->max_msg, MSG_TOKEN_MAX);
2247 		return -EINVAL;
2248 	}
2249 
2250 	hash_init(info->pending_xfers);
2251 
2252 	/* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
2253 	info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
2254 						    GFP_KERNEL);
2255 	if (!info->xfer_alloc_table)
2256 		return -ENOMEM;
2257 
2258 	/*
2259 	 * Preallocate a number of xfers equal to max inflight messages,
2260 	 * pre-initialize the buffer pointer to pre-allocated buffers and
2261 	 * attach all of them to the free list
2262 	 */
2263 	INIT_HLIST_HEAD(&info->free_xfers);
2264 	for (i = 0; i < info->max_msg; i++) {
2265 		xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
2266 		if (!xfer)
2267 			return -ENOMEM;
2268 
2269 		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
2270 					    GFP_KERNEL);
2271 		if (!xfer->rx.buf)
2272 			return -ENOMEM;
2273 
2274 		xfer->tx.buf = xfer->rx.buf;
2275 		init_completion(&xfer->done);
2276 		spin_lock_init(&xfer->lock);
2277 
2278 		/* Add initialized xfer to the free list */
2279 		hlist_add_head(&xfer->node, &info->free_xfers);
2280 	}
2281 
2282 	spin_lock_init(&info->xfer_lock);
2283 
2284 	return 0;
2285 }
2286 
2287 static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
2288 {
2289 	const struct scmi_desc *desc = sinfo->desc;
2290 
2291 	if (!desc->ops->get_max_msg) {
2292 		sinfo->tx_minfo.max_msg = desc->max_msg;
2293 		sinfo->rx_minfo.max_msg = desc->max_msg;
2294 	} else {
2295 		struct scmi_chan_info *base_cinfo;
2296 
2297 		base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
2298 		if (!base_cinfo)
2299 			return -EINVAL;
2300 		sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
2301 
2302 		/* RX channel is optional so can be skipped */
2303 		base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
2304 		if (base_cinfo)
2305 			sinfo->rx_minfo.max_msg =
2306 				desc->ops->get_max_msg(base_cinfo);
2307 	}
2308 
2309 	return 0;
2310 }
2311 
2312 static int scmi_xfer_info_init(struct scmi_info *sinfo)
2313 {
2314 	int ret;
2315 
2316 	ret = scmi_channels_max_msg_configure(sinfo);
2317 	if (ret)
2318 		return ret;
2319 
2320 	ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
2321 	if (!ret && !idr_is_empty(&sinfo->rx_idr))
2322 		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
2323 
2324 	return ret;
2325 }
2326 
2327 static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
2328 			   int prot_id, bool tx)
2329 {
2330 	int ret, idx;
2331 	char name[32];
2332 	struct scmi_chan_info *cinfo;
2333 	struct idr *idr;
2334 	struct scmi_device *tdev = NULL;
2335 
2336 	/* Transmit channel is first entry i.e. index 0 */
2337 	idx = tx ? 0 : 1;
2338 	idr = tx ? &info->tx_idr : &info->rx_idr;
2339 
2340 	if (!info->desc->ops->chan_available(of_node, idx)) {
2341 		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
2342 		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
2343 			return -EINVAL;
2344 		goto idr_alloc;
2345 	}
2346 
2347 	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
2348 	if (!cinfo)
2349 		return -ENOMEM;
2350 
2351 	cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
2352 
2353 	/* Create a unique name for this transport device */
2354 	snprintf(name, 32, "__scmi_transport_device_%s_%02X",
2355 		 idx ? "rx" : "tx", prot_id);
2356 	/* Create a uniquely named, dedicated transport device for this chan */
2357 	tdev = scmi_device_create(of_node, info->dev, prot_id, name);
2358 	if (!tdev) {
2359 		dev_err(info->dev,
2360 			"failed to create transport device (%s)\n", name);
2361 		devm_kfree(info->dev, cinfo);
2362 		return -EINVAL;
2363 	}
2364 	of_node_get(of_node);
2365 
2366 	cinfo->id = prot_id;
2367 	cinfo->dev = &tdev->dev;
2368 	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
2369 	if (ret) {
2370 		of_node_put(of_node);
2371 		scmi_device_destroy(info->dev, prot_id, name);
2372 		devm_kfree(info->dev, cinfo);
2373 		return ret;
2374 	}
2375 
2376 	if (tx && is_polling_required(cinfo, info->desc)) {
2377 		if (is_transport_polling_capable(info->desc))
2378 			dev_info(&tdev->dev,
2379 				 "Enabled polling mode TX channel - prot_id:%d\n",
2380 				 prot_id);
2381 		else
2382 			dev_warn(&tdev->dev,
2383 				 "Polling mode NOT supported by transport.\n");
2384 	}
2385 
2386 idr_alloc:
2387 	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
2388 	if (ret != prot_id) {
2389 		dev_err(info->dev,
2390 			"unable to allocate SCMI idr slot err %d\n", ret);
2391 		/* Destroy channel and device only if created by this call. */
2392 		if (tdev) {
2393 			of_node_put(of_node);
2394 			scmi_device_destroy(info->dev, prot_id, name);
2395 			devm_kfree(info->dev, cinfo);
2396 		}
2397 		return ret;
2398 	}
2399 
2400 	cinfo->handle = &info->handle;
2401 	return 0;
2402 }
2403 
2404 static inline int
2405 scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
2406 		int prot_id)
2407 {
2408 	int ret = scmi_chan_setup(info, of_node, prot_id, true);
2409 
2410 	if (!ret) {
2411 		/* Rx is optional, report only memory errors */
2412 		ret = scmi_chan_setup(info, of_node, prot_id, false);
2413 		if (ret && ret != -ENOMEM)
2414 			ret = 0;
2415 	}
2416 
2417 	return ret;
2418 }
2419 
2420 /**
2421  * scmi_channels_setup  - Helper to initialize all required channels
2422  *
2423  * @info: The SCMI instance descriptor.
2424  *
2425  * Initialize all the channels described in the DT against the underlying
2426  * configured transport, using dedicated, purpose-built devices instead of
2427  * borrowing devices from the SCMI drivers; this way channels are initialized
2428  * upfront during core SCMI stack probing and are no longer coupled with the
2429  * SCMI devices used by SCMI drivers.
2430  *
2431  * Note that, even though a pair of TX/RX channels is associated to each
2432  * protocol defined in the DT, a distinct freshly initialized channel is
2433  * created only if the DT node for the protocol at hand describes a dedicated
2434  * channel: in all the other cases the common BASE protocol channel is reused.
2435  *
2436  * Return: 0 on Success
2437  */
2438 static int scmi_channels_setup(struct scmi_info *info)
2439 {
2440 	int ret;
2441 	struct device_node *child, *top_np = info->dev->of_node;
2442 
2443 	/* Initialize a common generic channel at first */
2444 	ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
2445 	if (ret)
2446 		return ret;
2447 
2448 	for_each_available_child_of_node(top_np, child) {
2449 		u32 prot_id;
2450 
2451 		if (of_property_read_u32(child, "reg", &prot_id))
2452 			continue;
2453 
2454 		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
2455 			dev_err(info->dev,
2456 				"Out of range protocol %d\n", prot_id);
2457 
2458 		ret = scmi_txrx_setup(info, child, prot_id);
2459 		if (ret) {
2460 			of_node_put(child);
2461 			return ret;
2462 		}
2463 	}
2464 
2465 	return 0;
2466 }
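
/*
 * Hypothetical DT fragment matching the logic above (property names follow
 * the mailbox transport; other transports use their own): protocol@13
 * carries its own channel description and so gets a dedicated channel,
 * while protocol@14 silently reuses the common base-protocol one.
 *
 *	scmi {
 *		compatible = "arm,scmi";
 *		mboxes = <&mhu 0 0>;
 *
 *		protocol@13 {
 *			reg = <0x13>;
 *			mboxes = <&mhu 1 0>;
 *		};
 *
 *		protocol@14 {
 *			reg = <0x14>;
 *		};
 *	};
 */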
2467 
2468 static int scmi_chan_destroy(int id, void *p, void *idr)
2469 {
2470 	struct scmi_chan_info *cinfo = p;
2471 
2472 	if (cinfo->dev) {
2473 		struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
2474 		struct scmi_device *sdev = to_scmi_dev(cinfo->dev);
2475 
2476 		of_node_put(cinfo->dev->of_node);
2477 		scmi_device_destroy(info->dev, id, sdev->name);
2478 		cinfo->dev = NULL;
2479 	}
2480 
2481 	idr_remove(idr, id);
2482 
2483 	return 0;
2484 }
2485 
2486 static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr)
2487 {
2488 	/* At first free all channels at the transport layer ... */
2489 	idr_for_each(idr, info->desc->ops->chan_free, idr);
2490 
2491 	/* ...then destroy all underlying devices */
2492 	idr_for_each(idr, scmi_chan_destroy, idr);
2493 
2494 	idr_destroy(idr);
2495 }
2496 
2497 static void scmi_cleanup_txrx_channels(struct scmi_info *info)
2498 {
2499 	scmi_cleanup_channels(info, &info->tx_idr);
2500 
2501 	scmi_cleanup_channels(info, &info->rx_idr);
2502 }
2503 
2504 static int scmi_bus_notifier(struct notifier_block *nb,
2505 			     unsigned long action, void *data)
2506 {
2507 	struct scmi_info *info = bus_nb_to_scmi_info(nb);
2508 	struct scmi_device *sdev = to_scmi_dev(data);
2509 
2510 	/* Skip transport devices and devices of different SCMI instances */
2511 	if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
2512 	    sdev->dev.parent != info->dev)
2513 		return NOTIFY_DONE;
2514 
2515 	switch (action) {
2516 	case BUS_NOTIFY_BIND_DRIVER:
2517 		/* setup handle now as the transport is ready */
2518 		scmi_set_handle(sdev);
2519 		break;
2520 	case BUS_NOTIFY_UNBOUND_DRIVER:
2521 		scmi_handle_put(sdev->handle);
2522 		sdev->handle = NULL;
2523 		break;
2524 	default:
2525 		return NOTIFY_DONE;
2526 	}
2527 
2528 	dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
2529 		sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
2530 		"about to be BOUND." : "UNBOUND.");
2531 
2532 	return NOTIFY_OK;
2533 }
2534 
2535 static int scmi_device_request_notifier(struct notifier_block *nb,
2536 					unsigned long action, void *data)
2537 {
2538 	struct device_node *np;
2539 	struct scmi_device_id *id_table = data;
2540 	struct scmi_info *info = req_nb_to_scmi_info(nb);
2541 
2542 	np = idr_find(&info->active_protocols, id_table->protocol_id);
2543 	if (!np)
2544 		return NOTIFY_DONE;
2545 
2546 	dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
2547 		action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
2548 		id_table->name, id_table->protocol_id);
2549 
2550 	switch (action) {
2551 	case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
2552 		scmi_create_protocol_devices(np, info, id_table->protocol_id,
2553 					     id_table->name);
2554 		break;
2555 	case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
2556 		scmi_destroy_protocol_devices(info, id_table->protocol_id,
2557 					      id_table->name);
2558 		break;
2559 	default:
2560 		return NOTIFY_DONE;
2561 	}
2562 
2563 	return NOTIFY_OK;
2564 }
2565 
2566 static void scmi_debugfs_common_cleanup(void *d)
2567 {
2568 	struct scmi_debug_info *dbg = d;
2569 
2570 	if (!dbg)
2571 		return;
2572 
2573 	debugfs_remove_recursive(dbg->top_dentry);
2574 	kfree(dbg->name);
2575 	kfree(dbg->type);
2576 }
2577 
2578 static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
2579 {
2580 	char top_dir[16];
2581 	struct dentry *trans, *top_dentry;
2582 	struct scmi_debug_info *dbg;
2583 	const char *c_ptr = NULL;
2584 
2585 	dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
2586 	if (!dbg)
2587 		return NULL;
2588 
2589 	dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
2590 	if (!dbg->name) {
2591 		devm_kfree(info->dev, dbg);
2592 		return NULL;
2593 	}
2594 
2595 	of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
2596 	dbg->type = kstrdup(c_ptr, GFP_KERNEL);
2597 	if (!dbg->type) {
2598 		kfree(dbg->name);
2599 		devm_kfree(info->dev, dbg);
2600 		return NULL;
2601 	}
2602 
2603 	snprintf(top_dir, 16, "%d", info->id);
2604 	top_dentry = debugfs_create_dir(top_dir, scmi_top_dentry);
2605 	trans = debugfs_create_dir("transport", top_dentry);
2606 
2607 	dbg->is_atomic = info->desc->atomic_enabled &&
2608 				is_transport_polling_capable(info->desc);
2609 
2610 	debugfs_create_str("instance_name", 0400, top_dentry,
2611 			   (char **)&dbg->name);
2612 
2613 	debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
2614 			   &info->atomic_threshold);
2615 
2616 	debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
2617 
2618 	debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);
2619 
2620 	debugfs_create_u32("max_rx_timeout_ms", 0400, trans,
2621 			   (u32 *)&info->desc->max_rx_timeout_ms);
2622 
2623 	debugfs_create_u32("max_msg_size", 0400, trans,
2624 			   (u32 *)&info->desc->max_msg_size);
2625 
2626 	debugfs_create_u32("tx_max_msg", 0400, trans,
2627 			   (u32 *)&info->tx_minfo.max_msg);
2628 
2629 	debugfs_create_u32("rx_max_msg", 0400, trans,
2630 			   (u32 *)&info->rx_minfo.max_msg);
2631 
2632 	dbg->top_dentry = top_dentry;
2633 
2634 	if (devm_add_action_or_reset(info->dev,
2635 				     scmi_debugfs_common_cleanup, dbg)) {
2636 		scmi_debugfs_common_cleanup(dbg);
2637 		return NULL;
2638 	}
2639 
2640 	return dbg;
2641 }
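
/*
 * Resulting debugfs layout for SCMI instance N, assuming the top-level
 * "scmi" dentry was created at module init:
 *
 *	/sys/kernel/debug/scmi/N/instance_name
 *	/sys/kernel/debug/scmi/N/atomic_threshold_us
 *	/sys/kernel/debug/scmi/N/transport/type
 *	/sys/kernel/debug/scmi/N/transport/is_atomic
 *	/sys/kernel/debug/scmi/N/transport/max_rx_timeout_ms
 *	/sys/kernel/debug/scmi/N/transport/max_msg_size
 *	/sys/kernel/debug/scmi/N/transport/tx_max_msg
 *	/sys/kernel/debug/scmi/N/transport/rx_max_msg
 */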
2642 
2643 static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
2644 {
2645 	int id, num_chans = 0, ret = 0;
2646 	struct scmi_chan_info *cinfo;
2647 	u8 channels[SCMI_MAX_CHANNELS] = {};
2648 	DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
2649 
2650 	if (!info->dbg)
2651 		return -EINVAL;
2652 
2653 	/* Enumerate all channels to collect their ids */
2654 	idr_for_each_entry(&info->tx_idr, cinfo, id) {
2655 		/*
2656 		 * Cannot happen, but be defensive.
2657 		 * Zero as num_chans is ok, warn and carry on.
2658 		 */
2659 		if (num_chans >= SCMI_MAX_CHANNELS || !cinfo) {
2660 			dev_warn(info->dev,
2661 				 "SCMI RAW - Error enumerating channels\n");
2662 			break;
2663 		}
2664 
2665 		if (!test_bit(cinfo->id, protos)) {
2666 			channels[num_chans++] = cinfo->id;
2667 			set_bit(cinfo->id, protos);
2668 		}
2669 	}
2670 
2671 	info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
2672 				       info->id, channels, num_chans,
2673 				       info->desc, info->tx_minfo.max_msg);
2674 	if (IS_ERR(info->raw)) {
2675 		dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n");
2676 		ret = PTR_ERR(info->raw);
2677 		info->raw = NULL;
2678 	}
2679 
2680 	return ret;
2681 }
2682 
2683 static int scmi_probe(struct platform_device *pdev)
2684 {
2685 	int ret;
2686 	struct scmi_handle *handle;
2687 	const struct scmi_desc *desc;
2688 	struct scmi_info *info;
2689 	bool coex = IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
2690 	struct device *dev = &pdev->dev;
2691 	struct device_node *child, *np = dev->of_node;
2692 
2693 	desc = of_device_get_match_data(dev);
2694 	if (!desc)
2695 		return -EINVAL;
2696 
2697 	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
2698 	if (!info)
2699 		return -ENOMEM;
2700 
2701 	info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
2702 	if (info->id < 0)
2703 		return info->id;
2704 
2705 	info->dev = dev;
2706 	info->desc = desc;
2707 	info->bus_nb.notifier_call = scmi_bus_notifier;
2708 	info->dev_req_nb.notifier_call = scmi_device_request_notifier;
2709 	INIT_LIST_HEAD(&info->node);
2710 	idr_init(&info->protocols);
2711 	mutex_init(&info->protocols_mtx);
2712 	idr_init(&info->active_protocols);
2713 	mutex_init(&info->devreq_mtx);
2714 
2715 	platform_set_drvdata(pdev, info);
2716 	idr_init(&info->tx_idr);
2717 	idr_init(&info->rx_idr);
2718 
2719 	handle = &info->handle;
2720 	handle->dev = info->dev;
2721 	handle->version = &info->version;
2722 	handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
2723 	handle->devm_protocol_get = scmi_devm_protocol_get;
2724 	handle->devm_protocol_put = scmi_devm_protocol_put;
2725 
2726 	/* System-wide atomic threshold for atomic ops, if any */
2727 	if (!of_property_read_u32(np, "atomic-threshold-us",
2728 				  &info->atomic_threshold))
2729 		dev_info(dev,
2730 			 "SCMI System wide atomic threshold set to %d us\n",
2731 			 info->atomic_threshold);
2732 	handle->is_transport_atomic = scmi_is_transport_atomic;
2733 
2734 	if (desc->ops->link_supplier) {
2735 		ret = desc->ops->link_supplier(dev);
2736 		if (ret)
2737 			goto clear_ida;
2738 	}
2739 
2740 	/* Setup all channels described in the DT at first */
2741 	ret = scmi_channels_setup(info);
2742 	if (ret)
2743 		goto clear_ida;
2744 
2745 	ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
2746 	if (ret)
2747 		goto clear_txrx_setup;
2748 
2749 	ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
2750 					       &info->dev_req_nb);
2751 	if (ret)
2752 		goto clear_bus_notifier;
2753 
2754 	ret = scmi_xfer_info_init(info);
2755 	if (ret)
2756 		goto clear_dev_req_notifier;
2757 
2758 	if (scmi_top_dentry) {
2759 		info->dbg = scmi_debugfs_common_setup(info);
2760 		if (!info->dbg)
2761 			dev_warn(dev, "Failed to setup SCMI debugfs.\n");
2762 
2763 		if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
2764 			ret = scmi_debugfs_raw_mode_setup(info);
2765 			if (!coex) {
2766 				if (ret)
2767 					goto clear_dev_req_notifier;
2768 
2769 				/* Bail out anyway when coex disabled. */
2770 				return 0;
2771 			}
2772 
2773 			/* Coex enabled, carry on in any case. */
2774 			dev_info(dev, "SCMI RAW Mode COEX enabled !\n");
2775 		}
2776 	}
2777 
2778 	if (scmi_notification_init(handle))
2779 		dev_err(dev, "SCMI Notifications NOT available.\n");
2780 
2781 	if (info->desc->atomic_enabled &&
2782 	    !is_transport_polling_capable(info->desc))
2783 		dev_err(dev,
2784 			"Transport is not polling capable. Atomic mode not supported.\n");
2785 
2786 	/*
2787 	 * Trigger SCMI Base protocol initialization.
2788 	 * It's mandatory and won't be ever released/deinit until the
2789 	 * SCMI stack is shutdown/unloaded as a whole.
2790 	 */
2791 	ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
2792 	if (ret) {
2793 		dev_err(dev, "unable to communicate with SCMI\n");
2794 		if (coex)
2795 			return 0;
2796 		goto notification_exit;
2797 	}
2798 
2799 	mutex_lock(&scmi_list_mutex);
2800 	list_add_tail(&info->node, &scmi_list);
2801 	mutex_unlock(&scmi_list_mutex);
2802 
2803 	for_each_available_child_of_node(np, child) {
2804 		u32 prot_id;
2805 
2806 		if (of_property_read_u32(child, "reg", &prot_id))
2807 			continue;
2808 
2809 		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
2810 			dev_err(dev, "Out of range protocol %d\n", prot_id);
2811 
2812 		if (!scmi_is_protocol_implemented(handle, prot_id)) {
2813 			dev_err(dev, "SCMI protocol %d not implemented\n",
2814 				prot_id);
2815 			continue;
2816 		}
2817 
2818 		/*
2819 		 * Save this valid DT protocol descriptor amongst
2820 		 * @active_protocols for this SCMI instance.
2821 		 */
2822 		ret = idr_alloc(&info->active_protocols, child,
2823 				prot_id, prot_id + 1, GFP_KERNEL);
2824 		if (ret != prot_id) {
2825 			dev_err(dev, "SCMI protocol %d already activated. Skip\n",
2826 				prot_id);
2827 			continue;
2828 		}
2829 
2830 		of_node_get(child);
2831 		scmi_create_protocol_devices(child, info, prot_id, NULL);
2832 	}
2833 
2834 	return 0;
2835 
2836 notification_exit:
2837 	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
2838 		scmi_raw_mode_cleanup(info->raw);
2839 	scmi_notification_exit(&info->handle);
2840 clear_dev_req_notifier:
2841 	blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
2842 					   &info->dev_req_nb);
2843 clear_bus_notifier:
2844 	bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
2845 clear_txrx_setup:
2846 	scmi_cleanup_txrx_channels(info);
2847 clear_ida:
2848 	ida_free(&scmi_id, info->id);
2849 	return ret;
2850 }
2851 
2852 static int scmi_remove(struct platform_device *pdev)
2853 {
2854 	int id;
2855 	struct scmi_info *info = platform_get_drvdata(pdev);
2856 	struct device_node *child;
2857 
2858 	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
2859 		scmi_raw_mode_cleanup(info->raw);
2860 
2861 	mutex_lock(&scmi_list_mutex);
2862 	if (info->users)
2863 		dev_warn(&pdev->dev,
2864 			 "Still active SCMI users will be forcibly unbound.\n");
2865 	list_del(&info->node);
2866 	mutex_unlock(&scmi_list_mutex);
2867 
2868 	scmi_notification_exit(&info->handle);
2869 
2870 	mutex_lock(&info->protocols_mtx);
2871 	idr_destroy(&info->protocols);
2872 	mutex_unlock(&info->protocols_mtx);
2873 
2874 	idr_for_each_entry(&info->active_protocols, child, id)
2875 		of_node_put(child);
2876 	idr_destroy(&info->active_protocols);
2877 
2878 	blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
2879 					   &info->dev_req_nb);
2880 	bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
2881 
2882 	/* Safe to free channels since no more users */
2883 	scmi_cleanup_txrx_channels(info);
2884 
2885 	ida_free(&scmi_id, info->id);
2886 
2887 	return 0;
2888 }
2889 
2890 static ssize_t protocol_version_show(struct device *dev,
2891 				     struct device_attribute *attr, char *buf)
2892 {
2893 	struct scmi_info *info = dev_get_drvdata(dev);
2894 
2895 	return sprintf(buf, "%u.%u\n", info->version.major_ver,
2896 		       info->version.minor_ver);
2897 }
2898 static DEVICE_ATTR_RO(protocol_version);
2899 
2900 static ssize_t firmware_version_show(struct device *dev,
2901 				     struct device_attribute *attr, char *buf)
2902 {
2903 	struct scmi_info *info = dev_get_drvdata(dev);
2904 
2905 	return sprintf(buf, "0x%x\n", info->version.impl_ver);
2906 }
2907 static DEVICE_ATTR_RO(firmware_version);
2908 
2909 static ssize_t vendor_id_show(struct device *dev,
2910 			      struct device_attribute *attr, char *buf)
2911 {
2912 	struct scmi_info *info = dev_get_drvdata(dev);
2913 
2914 	return sprintf(buf, "%s\n", info->version.vendor_id);
2915 }
2916 static DEVICE_ATTR_RO(vendor_id);
2917 
2918 static ssize_t sub_vendor_id_show(struct device *dev,
2919 				  struct device_attribute *attr, char *buf)
2920 {
2921 	struct scmi_info *info = dev_get_drvdata(dev);
2922 
2923 	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
2924 }
2925 static DEVICE_ATTR_RO(sub_vendor_id);
2926 
2927 static struct attribute *versions_attrs[] = {
2928 	&dev_attr_firmware_version.attr,
2929 	&dev_attr_protocol_version.attr,
2930 	&dev_attr_vendor_id.attr,
2931 	&dev_attr_sub_vendor_id.attr,
2932 	NULL,
2933 };
2934 ATTRIBUTE_GROUPS(versions);
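
/*
 * The four attributes above surface under the arm-scmi platform device in
 * sysfs; the exact node name depends on the DT, e.g. (hypothetical path):
 *
 *	/sys/devices/platform/<scmi-node>/protocol_version
 *	/sys/devices/platform/<scmi-node>/firmware_version
 *	/sys/devices/platform/<scmi-node>/vendor_id
 *	/sys/devices/platform/<scmi-node>/sub_vendor_id
 */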
2935 
2936 /* Each compatible listed below must have a descriptor associated with it */
2937 static const struct of_device_id scmi_of_match[] = {
2938 #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
2939 	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
2940 #endif
2941 #ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
2942 	{ .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
2943 #endif
2944 #ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
2945 	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
2946 	{ .compatible = "arm,scmi-smc-param", .data = &scmi_smc_desc},
2947 #endif
2948 #ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
2949 	{ .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
2950 #endif
2951 	{ /* Sentinel */ },
2952 };
2953 
2954 MODULE_DEVICE_TABLE(of, scmi_of_match);
2955 
2956 static struct platform_driver scmi_driver = {
2957 	.driver = {
2958 		   .name = "arm-scmi",
2959 		   .suppress_bind_attrs = true,
2960 		   .of_match_table = scmi_of_match,
2961 		   .dev_groups = versions_groups,
2962 		   },
2963 	.probe = scmi_probe,
2964 	.remove = scmi_remove,
2965 };
2966 
2967 /**
2968  * __scmi_transports_setup  - Common helper to call transport-specific
2969  * .init/.exit code if provided.
2970  *
2971  * @init: A flag to distinguish between init and exit.
2972  *
2973  * Note that, if provided, we invoke .init/.exit functions for all the
2974  * transports currently compiled in.
2975  *
2976  * Return: 0 on Success.
2977  */
2978 static inline int __scmi_transports_setup(bool init)
2979 {
2980 	int ret = 0;
2981 	const struct of_device_id *trans;
2982 
2983 	for (trans = scmi_of_match; trans->data; trans++) {
2984 		const struct scmi_desc *tdesc = trans->data;
2985 
2986 		if ((init && !tdesc->transport_init) ||
2987 		    (!init && !tdesc->transport_exit))
2988 			continue;
2989 
2990 		if (init)
2991 			ret = tdesc->transport_init();
2992 		else
2993 			tdesc->transport_exit();
2994 
2995 		if (ret) {
2996 			pr_err("SCMI transport %s FAILED initialization!\n",
2997 			       trans->compatible);
2998 			break;
2999 		}
3000 	}
3001 
3002 	return ret;
3003 }
3004 
3005 static int __init scmi_transports_init(void)
3006 {
3007 	return __scmi_transports_setup(true);
3008 }
3009 
3010 static void __exit scmi_transports_exit(void)
3011 {
3012 	__scmi_transports_setup(false);
3013 }
3014 
3015 static struct dentry *scmi_debugfs_init(void)
3016 {
3017 	struct dentry *d;
3018 
3019 	d = debugfs_create_dir("scmi", NULL);
3020 	if (IS_ERR(d)) {
3021 		pr_err("Could NOT create SCMI top dentry.\n");
3022 		return NULL;
3023 	}
3024 
3025 	return d;
3026 }
3027 
3028 static int __init scmi_driver_init(void)
3029 {
3030 	int ret;
3031 
3032 	/* Bail out if no SCMI transport was configured */
3033 	if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
3034 		return -EINVAL;
3035 
3036 	/* Initialize any compiled-in transport which provided an init/exit */
3037 	ret = scmi_transports_init();
3038 	if (ret)
3039 		return ret;
3040 
3041 	if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS))
3042 		scmi_top_dentry = scmi_debugfs_init();
3043 
3044 	scmi_base_register();
3045 
3046 	scmi_clock_register();
3047 	scmi_perf_register();
3048 	scmi_power_register();
3049 	scmi_reset_register();
3050 	scmi_sensors_register();
3051 	scmi_voltage_register();
3052 	scmi_system_register();
3053 	scmi_powercap_register();
3054 	scmi_pinctrl_register();
3055 
3056 	return platform_driver_register(&scmi_driver);
3057 }
3058 module_init(scmi_driver_init);
3059 
3060 static void __exit scmi_driver_exit(void)
3061 {
3062 	scmi_base_unregister();
3063 
3064 	scmi_clock_unregister();
3065 	scmi_perf_unregister();
3066 	scmi_power_unregister();
3067 	scmi_reset_unregister();
3068 	scmi_sensors_unregister();
3069 	scmi_voltage_unregister();
3070 	scmi_system_unregister();
3071 	scmi_powercap_unregister();
3072 	scmi_pinctrl_unregister();
3073 
3074 	scmi_transports_exit();
3075 
3076 	platform_driver_unregister(&scmi_driver);
3077 
3078 	debugfs_remove_recursive(scmi_top_dentry);
3079 }
3080 module_exit(scmi_driver_exit);
3081 
3082 MODULE_ALIAS("platform:arm-scmi");
3083 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
3084 MODULE_DESCRIPTION("ARM SCMI protocol driver");
3085 MODULE_LICENSE("GPL v2");
3086