// SPDX-License-Identifier: GPL-2.0-only
/*
 * Arm Firmware Framework for ARMv8-A (FFA) interface driver
 *
 * The Arm FFA specification[1] describes a software architecture to
 * leverage the virtualization extension to isolate software images
 * provided by an ecosystem of vendors from each other and describes
 * interfaces that standardize communication between the various software
 * images, including communication between images in the Secure world and
 * the Normal world. Any hypervisor could use the FFA interfaces to enable
 * communication between the VMs it manages.
 *
 * The Hypervisor, a.k.a. the Partition Manager in FFA terminology, can
 * assign system resources (memory regions, devices, CPU cycles) to the
 * partitions and manage isolation amongst them.
 *
 * [1] https://developer.arm.com/docs/den0077/latest
 *
 * Copyright (C) 2021 ARM Ltd.
 */
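
/*
 * Client drivers bind to the partitions enumerated by this driver via the
 * FF-A bus (see ffa_setup_partitions() below) and reach its functionality
 * through the ffa_ops installed on each ffa_device. A minimal sketch of
 * such a client, with a made-up UUID and name purely for illustration,
 * could look roughly like this:
 *
 *	static const struct ffa_device_id example_id_table[] = {
 *		{ UUID_INIT(0x12345678, 0x1234, 0x1234, 0x12, 0x34,
 *			    0x12, 0x34, 0x12, 0x34, 0x12, 0x34) },
 *		{}
 *	};
 *
 *	static int example_probe(struct ffa_device *ffa_dev)
 *	{
 *		struct ffa_send_direct_data data = { .data0 = 0x1 };
 *
 *		return ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
 *	}
 *
 *	static struct ffa_driver example_driver = {
 *		.name		= "example-ffa",
 *		.probe		= example_probe,
 *		.id_table	= example_id_table,
 *	};
 *	module_ffa_driver(example_driver);
 */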
21
22 #define DRIVER_NAME "ARM FF-A"
23 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
24
25 #include <linux/acpi.h>
26 #include <linux/arm_ffa.h>
27 #include <linux/bitfield.h>
28 #include <linux/cpuhotplug.h>
29 #include <linux/delay.h>
30 #include <linux/device.h>
31 #include <linux/hashtable.h>
32 #include <linux/interrupt.h>
33 #include <linux/io.h>
34 #include <linux/kernel.h>
35 #include <linux/module.h>
36 #include <linux/mm.h>
37 #include <linux/mutex.h>
38 #include <linux/of_irq.h>
39 #include <linux/scatterlist.h>
40 #include <linux/slab.h>
41 #include <linux/smp.h>
42 #include <linux/uuid.h>
43 #include <linux/xarray.h>
44
45 #include "common.h"
46
47 #define FFA_DRIVER_VERSION FFA_VERSION_1_2
48 #define FFA_MIN_VERSION FFA_VERSION_1_0
49
50 #define SENDER_ID_MASK GENMASK(31, 16)
51 #define RECEIVER_ID_MASK GENMASK(15, 0)
52 #define SENDER_ID(x) ((u16)(FIELD_GET(SENDER_ID_MASK, (x))))
53 #define RECEIVER_ID(x) ((u16)(FIELD_GET(RECEIVER_ID_MASK, (x))))
54 #define PACK_TARGET_INFO(s, r) \
55 (FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))
56
57 #define RXTX_MAP_MIN_BUFSZ_MASK GENMASK(1, 0)
58 #define RXTX_MAP_MIN_BUFSZ(x) ((x) & RXTX_MAP_MIN_BUFSZ_MASK)
59
60 #define FFA_MAX_NOTIFICATIONS 64
61
62 static ffa_fn *invoke_ffa_fn;
63
64 struct ffa_pcpu_irq {
65 struct ffa_drv_info *info;
66 };
67
68 struct ffa_drv_info {
69 u32 version;
70 u16 vm_id;
71 struct mutex rx_lock; /* lock to protect Rx buffer */
72 struct mutex tx_lock; /* lock to protect Tx buffer */
73 void *rx_buffer;
74 void *tx_buffer;
75 size_t rxtx_bufsz;
76 bool mem_ops_native;
77 bool msg_direct_req2_supp;
78 bool bitmap_created;
79 bool notif_enabled;
80 unsigned int sched_recv_irq;
81 unsigned int notif_pend_irq;
82 unsigned int cpuhp_state;
83 struct ffa_pcpu_irq __percpu *irq_pcpu;
84 struct workqueue_struct *notif_pcpu_wq;
85 struct work_struct notif_pcpu_work;
86 struct work_struct sched_recv_irq_work;
87 struct xarray partition_info;
88 DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
89 rwlock_t notify_lock; /* lock to protect notifier hashtable */
90 };
91
92 static struct ffa_drv_info *drv_info;
93 static void ffa_partitions_cleanup(void);
94
/*
 * The driver must be able to support all the versions from the earliest
 * supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION.
 * The specification states that if firmware supports an FFA implementation
 * that is incompatible with, and at a greater version number than, the one
 * specified by the caller (FFA_DRIVER_VERSION passed as the parameter to
 * FFA_VERSION), it must return the NOT_SUPPORTED error code.
 */
static u32 ffa_compatible_version_find(u32 version)
104 {
105 u16 major = FFA_MAJOR_VERSION(version), minor = FFA_MINOR_VERSION(version);
106 u16 drv_major = FFA_MAJOR_VERSION(FFA_DRIVER_VERSION);
107 u16 drv_minor = FFA_MINOR_VERSION(FFA_DRIVER_VERSION);
108
109 if ((major < drv_major) || (major == drv_major && minor <= drv_minor))
110 return version;
111
112 pr_info("Firmware version higher than driver version, downgrading\n");
113 return FFA_DRIVER_VERSION;
114 }
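
/*
 * For example, with FFA_DRIVER_VERSION set to v1.2: firmware reporting
 * v1.1 is used as-is, firmware reporting v1.3 is downgraded to v1.2, and
 * a firmware major version greater than the driver's has already been
 * rejected by ffa_version_check() before this helper is called.
 */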
115
static int ffa_version_check(u32 *version)
117 {
118 ffa_value_t ver;
119
120 invoke_ffa_fn((ffa_value_t){
121 .a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
122 }, &ver);
123
124 if ((s32)ver.a0 == FFA_RET_NOT_SUPPORTED) {
125 pr_info("FFA_VERSION returned not supported\n");
126 return -EOPNOTSUPP;
127 }
128
129 if (FFA_MAJOR_VERSION(ver.a0) > FFA_MAJOR_VERSION(FFA_DRIVER_VERSION)) {
130 pr_err("Incompatible v%d.%d! Latest supported v%d.%d\n",
131 FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
132 FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
133 FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
134 return -EINVAL;
135 }
136
137 if (ver.a0 < FFA_MIN_VERSION) {
138 pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
139 FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
140 FFA_MAJOR_VERSION(FFA_MIN_VERSION),
141 FFA_MINOR_VERSION(FFA_MIN_VERSION));
142 return -EINVAL;
143 }
144
145 pr_info("Driver version %d.%d\n", FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
146 FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
147 pr_info("Firmware version %d.%d found\n", FFA_MAJOR_VERSION(ver.a0),
148 FFA_MINOR_VERSION(ver.a0));
149 *version = ffa_compatible_version_find(ver.a0);
150
151 return 0;
152 }
153
static int ffa_rx_release(void)
155 {
156 ffa_value_t ret;
157
158 invoke_ffa_fn((ffa_value_t){
159 .a0 = FFA_RX_RELEASE,
160 }, &ret);
161
162 if (ret.a0 == FFA_ERROR)
163 return ffa_to_linux_errno((int)ret.a2);
164
165 /* check for ret.a0 == FFA_RX_RELEASE ? */
166
167 return 0;
168 }
169
static int ffa_rxtx_map(phys_addr_t tx_buf, phys_addr_t rx_buf, u32 pg_cnt)
171 {
172 ffa_value_t ret;
173
174 invoke_ffa_fn((ffa_value_t){
175 .a0 = FFA_FN_NATIVE(RXTX_MAP),
176 .a1 = tx_buf, .a2 = rx_buf, .a3 = pg_cnt,
177 }, &ret);
178
179 if (ret.a0 == FFA_ERROR)
180 return ffa_to_linux_errno((int)ret.a2);
181
182 return 0;
183 }
184
static int ffa_rxtx_unmap(u16 vm_id)
186 {
187 ffa_value_t ret;
188
189 invoke_ffa_fn((ffa_value_t){
190 .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0),
191 }, &ret);
192
193 if (ret.a0 == FFA_ERROR)
194 return ffa_to_linux_errno((int)ret.a2);
195
196 return 0;
197 }
198
static int ffa_features(u32 func_feat_id, u32 input_props,
200 u32 *if_props_1, u32 *if_props_2)
201 {
202 ffa_value_t id;
203
204 if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
205 pr_err("%s: Invalid Parameters: %x, %x", __func__,
206 func_feat_id, input_props);
207 return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
208 }
209
210 invoke_ffa_fn((ffa_value_t){
211 .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
212 }, &id);
213
214 if (id.a0 == FFA_ERROR)
215 return ffa_to_linux_errno((int)id.a2);
216
217 if (if_props_1)
218 *if_props_1 = id.a2;
219 if (if_props_2)
220 *if_props_2 = id.a3;
221
222 return 0;
223 }
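
/*
 * FFA_FEATURES is used throughout this driver as a presence test: a zero
 * return for a given function ID means the firmware implements it. For
 * example, ffa_drvinfo_flags_init() probes FFA_FN_NATIVE(MEM_LEND) and
 * FFA_FN_NATIVE(MEM_SHARE) to choose between the native (64-bit) and
 * 32-bit memory ABIs, and ffa_notifications_setup() probes
 * FFA_NOTIFICATION_BITMAP_CREATE before creating notification bitmaps.
 */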
224
/* buffer must be sizeof(struct ffa_partition_info) * num_partitions bytes */
226 static int
__ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
228 struct ffa_partition_info *buffer, int num_partitions)
229 {
230 int idx, count, flags = 0, sz, buf_sz;
231 ffa_value_t partition_info;
232
233 if (drv_info->version > FFA_VERSION_1_0 &&
234 (!buffer || !num_partitions)) /* Just get the count for now */
235 flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;
236
237 mutex_lock(&drv_info->rx_lock);
238 invoke_ffa_fn((ffa_value_t){
239 .a0 = FFA_PARTITION_INFO_GET,
240 .a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3,
241 .a5 = flags,
242 }, &partition_info);
243
244 if (partition_info.a0 == FFA_ERROR) {
245 mutex_unlock(&drv_info->rx_lock);
246 return ffa_to_linux_errno((int)partition_info.a2);
247 }
248
249 count = partition_info.a2;
250
251 if (drv_info->version > FFA_VERSION_1_0) {
252 buf_sz = sz = partition_info.a3;
253 if (sz > sizeof(*buffer))
254 buf_sz = sizeof(*buffer);
255 } else {
256 /* FFA_VERSION_1_0 lacks size in the response */
257 buf_sz = sz = 8;
258 }
259
260 if (buffer && count <= num_partitions)
261 for (idx = 0; idx < count; idx++)
262 memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
263 buf_sz);
264
265 if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY))
266 ffa_rx_release();
267
268 mutex_unlock(&drv_info->rx_lock);
269
270 return count;
271 }
272
273 #define LAST_INDEX_MASK GENMASK(15, 0)
274 #define CURRENT_INDEX_MASK GENMASK(31, 16)
275 #define UUID_INFO_TAG_MASK GENMASK(47, 32)
276 #define PARTITION_INFO_SZ_MASK GENMASK(63, 48)
277 #define PARTITION_COUNT(x) ((u16)(FIELD_GET(LAST_INDEX_MASK, (x))) + 1)
278 #define CURRENT_INDEX(x) ((u16)(FIELD_GET(CURRENT_INDEX_MASK, (x))))
279 #define UUID_INFO_TAG(x) ((u16)(FIELD_GET(UUID_INFO_TAG_MASK, (x))))
280 #define PARTITION_INFO_SZ(x) ((u16)(FIELD_GET(PARTITION_INFO_SZ_MASK, (x))))
281 static int
__ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
283 struct ffa_partition_info *buffer, int num_parts)
284 {
285 u16 buf_sz, start_idx, cur_idx, count = 0, prev_idx = 0, tag = 0;
286 ffa_value_t partition_info;
287
288 do {
289 start_idx = prev_idx ? prev_idx + 1 : 0;
290
291 invoke_ffa_fn((ffa_value_t){
292 .a0 = FFA_PARTITION_INFO_GET_REGS,
293 .a1 = (u64)uuid1 << 32 | uuid0,
294 .a2 = (u64)uuid3 << 32 | uuid2,
295 .a3 = start_idx | tag << 16,
296 }, &partition_info);
297
298 if (partition_info.a0 == FFA_ERROR)
299 return ffa_to_linux_errno((int)partition_info.a2);
300
301 if (!count)
302 count = PARTITION_COUNT(partition_info.a2);
303 if (!buffer || !num_parts) /* count only */
304 return count;
305
306 cur_idx = CURRENT_INDEX(partition_info.a2);
307 tag = UUID_INFO_TAG(partition_info.a2);
308 buf_sz = PARTITION_INFO_SZ(partition_info.a2);
309 if (buf_sz > sizeof(*buffer))
310 buf_sz = sizeof(*buffer);
311
312 memcpy(buffer + prev_idx * buf_sz, &partition_info.a3,
313 (cur_idx - start_idx + 1) * buf_sz);
314 prev_idx = cur_idx;
315
316 } while (cur_idx < (count - 1));
317
318 return count;
319 }
320
/* buffer is allocated here; the caller must free it if the returned count > 0 */
322 static int
ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
324 {
325 int count;
326 u32 uuid0_4[4];
327 bool reg_mode = false;
328 struct ffa_partition_info *pbuf;
329
330 if (!ffa_features(FFA_PARTITION_INFO_GET_REGS, 0, NULL, NULL))
331 reg_mode = true;
332
333 export_uuid((u8 *)uuid0_4, uuid);
334 if (reg_mode)
335 count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1],
336 uuid0_4[2], uuid0_4[3],
337 NULL, 0);
338 else
339 count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1],
340 uuid0_4[2], uuid0_4[3],
341 NULL, 0);
342 if (count <= 0)
343 return count;
344
345 pbuf = kcalloc(count, sizeof(*pbuf), GFP_KERNEL);
346 if (!pbuf)
347 return -ENOMEM;
348
349 if (reg_mode)
350 count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1],
351 uuid0_4[2], uuid0_4[3],
352 pbuf, count);
353 else
354 count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1],
355 uuid0_4[2], uuid0_4[3],
356 pbuf, count);
357 if (count <= 0)
358 kfree(pbuf);
359 else
360 *buffer = pbuf;
361
362 return count;
363 }
364
365 #define VM_ID_MASK GENMASK(15, 0)
static int ffa_id_get(u16 *vm_id)
367 {
368 ffa_value_t id;
369
370 invoke_ffa_fn((ffa_value_t){
371 .a0 = FFA_ID_GET,
372 }, &id);
373
374 if (id.a0 == FFA_ERROR)
375 return ffa_to_linux_errno((int)id.a2);
376
377 *vm_id = FIELD_GET(VM_ID_MASK, (id.a2));
378
379 return 0;
380 }
381
static inline void ffa_msg_send_wait_for_completion(ffa_value_t *ret)
383 {
384 while (ret->a0 == FFA_INTERRUPT || ret->a0 == FFA_YIELD) {
385 if (ret->a0 == FFA_YIELD)
386 fsleep(1000);
387
388 invoke_ffa_fn((ffa_value_t){
389 .a0 = FFA_RUN, .a1 = ret->a1,
390 }, ret);
391 }
392 }
393
static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
395 struct ffa_send_direct_data *data)
396 {
397 u32 req_id, resp_id, src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
398 ffa_value_t ret;
399
400 if (mode_32bit) {
401 req_id = FFA_MSG_SEND_DIRECT_REQ;
402 resp_id = FFA_MSG_SEND_DIRECT_RESP;
403 } else {
404 req_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_REQ);
405 resp_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_RESP);
406 }
407
408 invoke_ffa_fn((ffa_value_t){
409 .a0 = req_id, .a1 = src_dst_ids, .a2 = 0,
410 .a3 = data->data0, .a4 = data->data1, .a5 = data->data2,
411 .a6 = data->data3, .a7 = data->data4,
412 }, &ret);
413
414 ffa_msg_send_wait_for_completion(&ret);
415
416 if (ret.a0 == FFA_ERROR)
417 return ffa_to_linux_errno((int)ret.a2);
418
419 if (ret.a0 == resp_id) {
420 data->data0 = ret.a3;
421 data->data1 = ret.a4;
422 data->data2 = ret.a5;
423 data->data3 = ret.a6;
424 data->data4 = ret.a7;
425 return 0;
426 }
427
428 return -EINVAL;
429 }
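
/*
 * The five payload words data0..data4 are passed in w3-w7 (x3-x7 for the
 * native 64-bit ABI) of the direct request, and the partition's reply is
 * read back from the same registers of the direct response. Intermediate
 * FFA_INTERRUPT/FFA_YIELD returns are absorbed by
 * ffa_msg_send_wait_for_completion(), which keeps re-entering the
 * partition with FFA_RUN until a response or an error is produced.
 */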
430
static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
432 {
433 u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
434 struct ffa_indirect_msg_hdr *msg;
435 ffa_value_t ret;
436 int retval = 0;
437
438 if (sz > (drv_info->rxtx_bufsz - sizeof(*msg)))
439 return -ERANGE;
440
441 mutex_lock(&drv_info->tx_lock);
442
443 msg = drv_info->tx_buffer;
444 msg->flags = 0;
445 msg->res0 = 0;
446 msg->offset = sizeof(*msg);
447 msg->send_recv_id = src_dst_ids;
448 msg->size = sz;
449 memcpy((u8 *)msg + msg->offset, buf, sz);
450
451 /* flags = 0, sender VMID = 0 works for both physical/virtual NS */
452 invoke_ffa_fn((ffa_value_t){
453 .a0 = FFA_MSG_SEND2, .a1 = 0, .a2 = 0
454 }, &ret);
455
456 if (ret.a0 == FFA_ERROR)
457 retval = ffa_to_linux_errno((int)ret.a2);
458
459 mutex_unlock(&drv_info->tx_lock);
460 return retval;
461 }
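
/*
 * Tx buffer layout for FFA_MSG_SEND2 as constructed above: the
 * struct ffa_indirect_msg_hdr sits at the start of the buffer (hence
 * offset = sizeof(*msg)), immediately followed by the 'sz' bytes of
 * payload, which is why the payload is bounded by rxtx_bufsz minus the
 * header size.
 */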
462
static int ffa_msg_send_direct_req2(u16 src_id, u16 dst_id, const uuid_t *uuid,
464 struct ffa_send_direct_data2 *data)
465 {
466 u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
467 union {
468 uuid_t uuid;
469 __le64 regs[2];
470 } uuid_regs = { .uuid = *uuid };
471 ffa_value_t ret, args = {
472 .a0 = FFA_MSG_SEND_DIRECT_REQ2,
473 .a1 = src_dst_ids,
474 .a2 = le64_to_cpu(uuid_regs.regs[0]),
475 .a3 = le64_to_cpu(uuid_regs.regs[1]),
476 };
477 memcpy((void *)&args + offsetof(ffa_value_t, a4), data, sizeof(*data));
478
479 invoke_ffa_fn(args, &ret);
480
481 ffa_msg_send_wait_for_completion(&ret);
482
483 if (ret.a0 == FFA_ERROR)
484 return ffa_to_linux_errno((int)ret.a2);
485
486 if (ret.a0 == FFA_MSG_SEND_DIRECT_RESP2) {
487 memcpy(data, (void *)&ret + offsetof(ffa_value_t, a4), sizeof(*data));
488 return 0;
489 }
490
491 return -EINVAL;
492 }
493
static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
495 u32 frag_len, u32 len, u64 *handle)
496 {
497 ffa_value_t ret;
498
499 invoke_ffa_fn((ffa_value_t){
500 .a0 = func_id, .a1 = len, .a2 = frag_len,
501 .a3 = buf, .a4 = buf_sz,
502 }, &ret);
503
504 while (ret.a0 == FFA_MEM_OP_PAUSE)
505 invoke_ffa_fn((ffa_value_t){
506 .a0 = FFA_MEM_OP_RESUME,
507 .a1 = ret.a1, .a2 = ret.a2,
508 }, &ret);
509
510 if (ret.a0 == FFA_ERROR)
511 return ffa_to_linux_errno((int)ret.a2);
512
513 if (ret.a0 == FFA_SUCCESS) {
514 if (handle)
515 *handle = PACK_HANDLE(ret.a2, ret.a3);
516 } else if (ret.a0 == FFA_MEM_FRAG_RX) {
517 if (handle)
518 *handle = PACK_HANDLE(ret.a1, ret.a2);
519 } else {
520 return -EOPNOTSUPP;
521 }
522
523 return frag_len;
524 }
525
static int ffa_mem_next_frag(u64 handle, u32 frag_len)
527 {
528 ffa_value_t ret;
529
530 invoke_ffa_fn((ffa_value_t){
531 .a0 = FFA_MEM_FRAG_TX,
532 .a1 = HANDLE_LOW(handle), .a2 = HANDLE_HIGH(handle),
533 .a3 = frag_len,
534 }, &ret);
535
536 while (ret.a0 == FFA_MEM_OP_PAUSE)
537 invoke_ffa_fn((ffa_value_t){
538 .a0 = FFA_MEM_OP_RESUME,
539 .a1 = ret.a1, .a2 = ret.a2,
540 }, &ret);
541
542 if (ret.a0 == FFA_ERROR)
543 return ffa_to_linux_errno((int)ret.a2);
544
545 if (ret.a0 == FFA_MEM_FRAG_RX)
546 return ret.a3;
547 else if (ret.a0 == FFA_SUCCESS)
548 return 0;
549
550 return -EOPNOTSUPP;
551 }
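
/*
 * Together the two helpers above implement fragmented memory transactions:
 * ffa_mem_first_frag() sends the initial descriptor of 'frag_len' bytes
 * out of the 'len' byte total and, for as long as the firmware answers
 * with FFA_MEM_FRAG_RX, ffa_mem_next_frag() pushes the remaining pieces
 * via FFA_MEM_FRAG_TX using the handle of the transaction. FFA_SUCCESS
 * terminates the sequence.
 */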
552
553 static int
ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len,
555 u32 len, u64 *handle, bool first)
556 {
557 if (!first)
558 return ffa_mem_next_frag(*handle, frag_len);
559
560 return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len, len, handle);
561 }
562
static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
564 {
565 u32 num_pages = 0;
566
567 do {
568 num_pages += sg->length / FFA_PAGE_SIZE;
569 } while ((sg = sg_next(sg)));
570
571 return num_pages;
572 }
573
static u16 ffa_memory_attributes_get(u32 func_id)
575 {
576 /*
577 * For the memory lend or donate operation, if the receiver is a PE or
578 * a proxy endpoint, the owner/sender must not specify the attributes
579 */
580 if (func_id == FFA_FN_NATIVE(MEM_LEND) ||
581 func_id == FFA_MEM_LEND)
582 return 0;
583
584 return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
585 }
586
587 static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
589 struct ffa_mem_ops_args *args)
590 {
591 int rc = 0;
592 bool first = true;
593 u32 composite_offset;
594 phys_addr_t addr = 0;
595 struct ffa_mem_region *mem_region = buffer;
596 struct ffa_composite_mem_region *composite;
597 struct ffa_mem_region_addr_range *constituents;
598 struct ffa_mem_region_attributes *ep_mem_access;
599 u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg);
600
601 mem_region->tag = args->tag;
602 mem_region->flags = args->flags;
603 mem_region->sender_id = drv_info->vm_id;
604 mem_region->attributes = ffa_memory_attributes_get(func_id);
605 ep_mem_access = buffer +
606 ffa_mem_desc_offset(buffer, 0, drv_info->version);
607 composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
608 drv_info->version);
609
610 for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
611 ep_mem_access->receiver = args->attrs[idx].receiver;
612 ep_mem_access->attrs = args->attrs[idx].attrs;
613 ep_mem_access->composite_off = composite_offset;
614 ep_mem_access->flag = 0;
615 ep_mem_access->reserved = 0;
616 }
617 mem_region->handle = 0;
618 mem_region->ep_count = args->nattrs;
619 if (drv_info->version <= FFA_VERSION_1_0) {
620 mem_region->ep_mem_size = 0;
621 } else {
622 mem_region->ep_mem_size = sizeof(*ep_mem_access);
623 mem_region->ep_mem_offset = sizeof(*mem_region);
624 memset(mem_region->reserved, 0, 12);
625 }
626
627 composite = buffer + composite_offset;
628 composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
629 composite->addr_range_cnt = num_entries;
630 composite->reserved = 0;
631
632 length = composite_offset + CONSTITUENTS_OFFSET(num_entries);
633 frag_len = composite_offset + CONSTITUENTS_OFFSET(0);
634 if (frag_len > max_fragsize)
635 return -ENXIO;
636
637 if (!args->use_txbuf) {
638 addr = virt_to_phys(buffer);
639 buf_sz = max_fragsize / FFA_PAGE_SIZE;
640 }
641
642 constituents = buffer + frag_len;
643 idx = 0;
644 do {
645 if (frag_len == max_fragsize) {
646 rc = ffa_transmit_fragment(func_id, addr, buf_sz,
647 frag_len, length,
648 &args->g_handle, first);
649 if (rc < 0)
650 return -ENXIO;
651
652 first = false;
653 idx = 0;
654 frag_len = 0;
655 constituents = buffer;
656 }
657
658 if ((void *)constituents - buffer > max_fragsize) {
659 pr_err("Memory Region Fragment > Tx Buffer size\n");
660 return -EFAULT;
661 }
662
663 constituents->address = sg_phys(args->sg);
664 constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
665 constituents->reserved = 0;
666 constituents++;
667 frag_len += sizeof(struct ffa_mem_region_addr_range);
668 } while ((args->sg = sg_next(args->sg)));
669
670 return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len,
671 length, &args->g_handle, first);
672 }
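
/*
 * The buffer populated by ffa_setup_and_transmit() follows the FF-A
 * memory transaction descriptor layout:
 *
 *	struct ffa_mem_region                     (transaction header)
 *	struct ffa_mem_region_attributes[nattrs]  (one per receiver endpoint)
 *	struct ffa_composite_mem_region           (at composite_offset)
 *	struct ffa_mem_region_addr_range[]        (constituents built from the
 *	                                           scatterlist, spilling into
 *	                                           further fragments if needed)
 */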
673
static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
675 {
676 int ret;
677 void *buffer;
678 size_t rxtx_bufsz = drv_info->rxtx_bufsz;
679
680 if (!args->use_txbuf) {
681 buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
682 if (!buffer)
683 return -ENOMEM;
684 } else {
685 buffer = drv_info->tx_buffer;
686 mutex_lock(&drv_info->tx_lock);
687 }
688
689 ret = ffa_setup_and_transmit(func_id, buffer, rxtx_bufsz, args);
690
691 if (args->use_txbuf)
692 mutex_unlock(&drv_info->tx_lock);
693 else
694 free_pages_exact(buffer, rxtx_bufsz);
695
696 return ret < 0 ? ret : 0;
697 }
698
static int ffa_memory_reclaim(u64 g_handle, u32 flags)
700 {
701 ffa_value_t ret;
702
703 invoke_ffa_fn((ffa_value_t){
704 .a0 = FFA_MEM_RECLAIM,
705 .a1 = HANDLE_LOW(g_handle), .a2 = HANDLE_HIGH(g_handle),
706 .a3 = flags,
707 }, &ret);
708
709 if (ret.a0 == FFA_ERROR)
710 return ffa_to_linux_errno((int)ret.a2);
711
712 return 0;
713 }
714
static int ffa_notification_bitmap_create(void)
716 {
717 ffa_value_t ret;
718 u16 vcpu_count = nr_cpu_ids;
719
720 invoke_ffa_fn((ffa_value_t){
721 .a0 = FFA_NOTIFICATION_BITMAP_CREATE,
722 .a1 = drv_info->vm_id, .a2 = vcpu_count,
723 }, &ret);
724
725 if (ret.a0 == FFA_ERROR)
726 return ffa_to_linux_errno((int)ret.a2);
727
728 return 0;
729 }
730
static int ffa_notification_bitmap_destroy(void)
732 {
733 ffa_value_t ret;
734
735 invoke_ffa_fn((ffa_value_t){
736 .a0 = FFA_NOTIFICATION_BITMAP_DESTROY,
737 .a1 = drv_info->vm_id,
738 }, &ret);
739
740 if (ret.a0 == FFA_ERROR)
741 return ffa_to_linux_errno((int)ret.a2);
742
743 return 0;
744 }
745
746 #define NOTIFICATION_LOW_MASK GENMASK(31, 0)
747 #define NOTIFICATION_HIGH_MASK GENMASK(63, 32)
748 #define NOTIFICATION_BITMAP_HIGH(x) \
749 ((u32)(FIELD_GET(NOTIFICATION_HIGH_MASK, (x))))
750 #define NOTIFICATION_BITMAP_LOW(x) \
751 ((u32)(FIELD_GET(NOTIFICATION_LOW_MASK, (x))))
752 #define PACK_NOTIFICATION_BITMAP(low, high) \
753 (FIELD_PREP(NOTIFICATION_LOW_MASK, (low)) | \
754 FIELD_PREP(NOTIFICATION_HIGH_MASK, (high)))
755
756 #define RECEIVER_VCPU_MASK GENMASK(31, 16)
757 #define PACK_NOTIFICATION_GET_RECEIVER_INFO(vcpu_r, r) \
758 (FIELD_PREP(RECEIVER_VCPU_MASK, (vcpu_r)) | \
759 FIELD_PREP(RECEIVER_ID_MASK, (r)))
760
761 #define NOTIFICATION_INFO_GET_MORE_PEND_MASK BIT(0)
762 #define NOTIFICATION_INFO_GET_ID_COUNT GENMASK(11, 7)
763 #define ID_LIST_MASK_64 GENMASK(51, 12)
764 #define ID_LIST_MASK_32 GENMASK(31, 12)
765 #define MAX_IDS_64 20
766 #define MAX_IDS_32 10
767
768 #define PER_VCPU_NOTIFICATION_FLAG BIT(0)
769 #define SECURE_PARTITION_BITMAP BIT(0)
770 #define NON_SECURE_VM_BITMAP BIT(1)
771 #define SPM_FRAMEWORK_BITMAP BIT(2)
772 #define NS_HYP_FRAMEWORK_BITMAP BIT(3)
773
static int ffa_notification_bind_common(u16 dst_id, u64 bitmap,
775 u32 flags, bool is_bind)
776 {
777 ffa_value_t ret;
778 u32 func, src_dst_ids = PACK_TARGET_INFO(dst_id, drv_info->vm_id);
779
780 func = is_bind ? FFA_NOTIFICATION_BIND : FFA_NOTIFICATION_UNBIND;
781
782 invoke_ffa_fn((ffa_value_t){
783 .a0 = func, .a1 = src_dst_ids, .a2 = flags,
784 .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
785 .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
786 }, &ret);
787
788 if (ret.a0 == FFA_ERROR)
789 return ffa_to_linux_errno((int)ret.a2);
790 else if (ret.a0 != FFA_SUCCESS)
791 return -EINVAL;
792
793 return 0;
794 }
795
796 static
int ffa_notification_set(u16 src_id, u16 dst_id, u32 flags, u64 bitmap)
798 {
799 ffa_value_t ret;
800 u32 src_dst_ids = PACK_TARGET_INFO(dst_id, src_id);
801
802 invoke_ffa_fn((ffa_value_t) {
803 .a0 = FFA_NOTIFICATION_SET, .a1 = src_dst_ids, .a2 = flags,
804 .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
805 .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
806 }, &ret);
807
808 if (ret.a0 == FFA_ERROR)
809 return ffa_to_linux_errno((int)ret.a2);
810 else if (ret.a0 != FFA_SUCCESS)
811 return -EINVAL;
812
813 return 0;
814 }
815
816 struct ffa_notify_bitmaps {
817 u64 sp_map;
818 u64 vm_map;
819 u64 arch_map;
820 };
821
static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify)
823 {
824 ffa_value_t ret;
825 u16 src_id = drv_info->vm_id;
826 u16 cpu_id = smp_processor_id();
827 u32 rec_vcpu_ids = PACK_NOTIFICATION_GET_RECEIVER_INFO(cpu_id, src_id);
828
829 invoke_ffa_fn((ffa_value_t){
830 .a0 = FFA_NOTIFICATION_GET, .a1 = rec_vcpu_ids, .a2 = flags,
831 }, &ret);
832
833 if (ret.a0 == FFA_ERROR)
834 return ffa_to_linux_errno((int)ret.a2);
835 else if (ret.a0 != FFA_SUCCESS)
836 return -EINVAL; /* Something else went wrong. */
837
838 notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
839 notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
840 notify->arch_map = PACK_NOTIFICATION_BITMAP(ret.a6, ret.a7);
841
842 return 0;
843 }
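
/*
 * The three bitmaps returned above partition the pending notifications by
 * origin: sp_map (from w2/w3) for secure partitions, vm_map (w4/w5) for
 * VMs and arch_map (w6/w7) for framework notifications. The flags passed
 * in select which of them the caller is interested in, see
 * notif_get_and_handle().
 */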
844
845 struct ffa_dev_part_info {
846 ffa_sched_recv_cb callback;
847 void *cb_data;
848 rwlock_t rw_lock;
849 };
850
static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
852 {
853 struct ffa_dev_part_info *partition;
854 ffa_sched_recv_cb callback;
855 void *cb_data;
856
857 partition = xa_load(&drv_info->partition_info, part_id);
858 if (!partition) {
859 pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
860 return;
861 }
862
863 read_lock(&partition->rw_lock);
864 callback = partition->callback;
865 cb_data = partition->cb_data;
866 read_unlock(&partition->rw_lock);
867
868 if (callback)
869 callback(vcpu, is_per_vcpu, cb_data);
870 }
871
static void ffa_notification_info_get(void)
873 {
874 int idx, list, max_ids, lists_cnt, ids_processed, ids_count[MAX_IDS_64];
875 bool is_64b_resp;
876 ffa_value_t ret;
877 u64 id_list;
878
879 do {
880 invoke_ffa_fn((ffa_value_t){
881 .a0 = FFA_FN_NATIVE(NOTIFICATION_INFO_GET),
882 }, &ret);
883
884 if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
885 if ((s32)ret.a2 != FFA_RET_NO_DATA)
886 pr_err("Notification Info fetch failed: 0x%lx (0x%lx)",
887 ret.a0, ret.a2);
888 return;
889 }
890
891 is_64b_resp = (ret.a0 == FFA_FN64_SUCCESS);
892
893 ids_processed = 0;
894 lists_cnt = FIELD_GET(NOTIFICATION_INFO_GET_ID_COUNT, ret.a2);
895 if (is_64b_resp) {
896 max_ids = MAX_IDS_64;
897 id_list = FIELD_GET(ID_LIST_MASK_64, ret.a2);
898 } else {
899 max_ids = MAX_IDS_32;
900 id_list = FIELD_GET(ID_LIST_MASK_32, ret.a2);
901 }
902
903 for (idx = 0; idx < lists_cnt; idx++, id_list >>= 2)
904 ids_count[idx] = (id_list & 0x3) + 1;
905
906 /* Process IDs */
907 for (list = 0; list < lists_cnt; list++) {
908 u16 vcpu_id, part_id, *packed_id_list = (u16 *)&ret.a3;
909
910 if (ids_processed >= max_ids - 1)
911 break;
912
913 part_id = packed_id_list[ids_processed++];
914
915 if (ids_count[list] == 1) { /* Global Notification */
916 __do_sched_recv_cb(part_id, 0, false);
917 continue;
918 }
919
920 /* Per vCPU Notification */
921 for (idx = 1; idx < ids_count[list]; idx++) {
922 if (ids_processed >= max_ids - 1)
923 break;
924
925 vcpu_id = packed_id_list[ids_processed++];
926
927 __do_sched_recv_cb(part_id, vcpu_id, true);
928 }
929 }
930 } while (ret.a2 & NOTIFICATION_INFO_GET_MORE_PEND_MASK);
931 }
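
/*
 * FFA_NOTIFICATION_INFO_GET packs its result as lists of 16-bit IDs: w2
 * carries the number of lists (bits [11:7]) and, two bits per list, the
 * number of IDs in each list minus one, while the IDs themselves are
 * packed back to back starting at w3/x3. A single-entry list names a
 * partition with a pending global notification; longer lists name a
 * partition followed by the vCPU IDs with pending per-vCPU notifications.
 * That is exactly how the loops above walk the response.
 */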
932
static int ffa_run(struct ffa_device *dev, u16 vcpu)
934 {
935 ffa_value_t ret;
936 u32 target = dev->vm_id << 16 | vcpu;
937
938 invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = target, }, &ret);
939
940 while (ret.a0 == FFA_INTERRUPT)
941 invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = ret.a1, },
942 &ret);
943
944 if (ret.a0 == FFA_ERROR)
945 return ffa_to_linux_errno((int)ret.a2);
946
947 return 0;
948 }
949
static void ffa_drvinfo_flags_init(void)
951 {
952 if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
953 !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
954 drv_info->mem_ops_native = true;
955
956 if (!ffa_features(FFA_MSG_SEND_DIRECT_REQ2, 0, NULL, NULL) ||
957 !ffa_features(FFA_MSG_SEND_DIRECT_RESP2, 0, NULL, NULL))
958 drv_info->msg_direct_req2_supp = true;
959 }
960
static u32 ffa_api_version_get(void)
962 {
963 return drv_info->version;
964 }
965
static int ffa_partition_info_get(const char *uuid_str,
967 struct ffa_partition_info *buffer)
968 {
969 int count;
970 uuid_t uuid;
971 struct ffa_partition_info *pbuf;
972
973 if (uuid_parse(uuid_str, &uuid)) {
974 pr_err("invalid uuid (%s)\n", uuid_str);
975 return -ENODEV;
976 }
977
978 count = ffa_partition_probe(&uuid, &pbuf);
979 if (count <= 0)
980 return -ENOENT;
981
982 memcpy(buffer, pbuf, sizeof(*pbuf) * count);
983 kfree(pbuf);
984 return 0;
985 }
986
static void ffa_mode_32bit_set(struct ffa_device *dev)
988 {
989 dev->mode_32bit = true;
990 }
991
static int ffa_sync_send_receive(struct ffa_device *dev,
993 struct ffa_send_direct_data *data)
994 {
995 return ffa_msg_send_direct_req(drv_info->vm_id, dev->vm_id,
996 dev->mode_32bit, data);
997 }
998
static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz)
1000 {
1001 return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz);
1002 }
1003
static int ffa_sync_send_receive2(struct ffa_device *dev, const uuid_t *uuid,
1005 struct ffa_send_direct_data2 *data)
1006 {
1007 if (!drv_info->msg_direct_req2_supp)
1008 return -EOPNOTSUPP;
1009
1010 return ffa_msg_send_direct_req2(drv_info->vm_id, dev->vm_id,
1011 uuid, data);
1012 }
1013
static int ffa_memory_share(struct ffa_mem_ops_args *args)
1015 {
1016 if (drv_info->mem_ops_native)
1017 return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
1018
1019 return ffa_memory_ops(FFA_MEM_SHARE, args);
1020 }
1021
static int ffa_memory_lend(struct ffa_mem_ops_args *args)
1023 {
/*
 * Note that upon a successful MEM_LEND request the caller
 * must ensure that the memory region specified is not accessed
 * until a successful MEM_RECLAIM call has been made.
 * On systems with a hypervisor present this will be enforced,
 * however on systems without a hypervisor the responsibility
 * falls to the calling kernel driver to prevent access.
 */
1031 if (drv_info->mem_ops_native)
1032 return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);
1033
1034 return ffa_memory_ops(FFA_MEM_LEND, args);
1035 }
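
/*
 * A client typically shares or lends memory through the mem_ops installed
 * on its ffa_device. A rough sketch (error handling and scatterlist setup
 * omitted, 'ffa_dev' and 'sg' assumed to be provided by the caller):
 *
 *	struct ffa_mem_region_attributes mem_attr = {
 *		.receiver	= ffa_dev->vm_id,
 *		.attrs		= FFA_MEM_RW,
 *	};
 *	struct ffa_mem_ops_args args = {
 *		.use_txbuf	= true,
 *		.attrs		= &mem_attr,
 *		.nattrs		= 1,
 *		.sg		= sg,
 *	};
 *
 *	rc = ffa_dev->ops->mem_ops->memory_share(&args);
 *
 * On success, args.g_handle identifies the transaction and can later be
 * passed to memory_reclaim().
 */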
1036
1037 #define FFA_SECURE_PARTITION_ID_FLAG BIT(15)
1038
1039 #define ffa_notifications_disabled() (!drv_info->notif_enabled)
1040
1041 enum notify_type {
1042 NON_SECURE_VM,
1043 SECURE_PARTITION,
1044 FRAMEWORK,
1045 };
1046
1047 struct notifier_cb_info {
1048 struct hlist_node hnode;
1049 ffa_notifier_cb cb;
1050 void *cb_data;
1051 enum notify_type type;
1052 };
1053
static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
1055 void *cb_data, bool is_registration)
1056 {
1057 struct ffa_dev_part_info *partition;
1058 bool cb_valid;
1059
1060 if (ffa_notifications_disabled())
1061 return -EOPNOTSUPP;
1062
1063 partition = xa_load(&drv_info->partition_info, part_id);
1064 if (!partition) {
1065 pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
1066 return -EINVAL;
1067 }
1068
1069 write_lock(&partition->rw_lock);
1070
1071 cb_valid = !!partition->callback;
1072 if (!(is_registration ^ cb_valid)) {
1073 write_unlock(&partition->rw_lock);
1074 return -EINVAL;
1075 }
1076
1077 partition->callback = callback;
1078 partition->cb_data = cb_data;
1079
1080 write_unlock(&partition->rw_lock);
1081 return 0;
1082 }
1083
static int ffa_sched_recv_cb_register(struct ffa_device *dev,
1085 ffa_sched_recv_cb cb, void *cb_data)
1086 {
1087 return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true);
1088 }
1089
static int ffa_sched_recv_cb_unregister(struct ffa_device *dev)
1091 {
1092 return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false);
1093 }
1094
static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags)
1096 {
1097 return ffa_notification_bind_common(dst_id, bitmap, flags, true);
1098 }
1099
static int ffa_notification_unbind(u16 dst_id, u64 bitmap)
1101 {
1102 return ffa_notification_bind_common(dst_id, bitmap, 0, false);
1103 }
1104
1105 /* Should be called while the notify_lock is taken */
1106 static struct notifier_cb_info *
notifier_hash_node_get(u16 notify_id, enum notify_type type)
1108 {
1109 struct notifier_cb_info *node;
1110
1111 hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
1112 if (type == node->type)
1113 return node;
1114
1115 return NULL;
1116 }
1117
static int update_notifier_cb(int notify_id, enum notify_type type,
1119 struct notifier_cb_info *cb)
1120 {
1121 struct notifier_cb_info *cb_info = NULL;
1122 bool cb_found, is_registration = !!cb;
1123
1124 cb_info = notifier_hash_node_get(notify_id, type);
1125 cb_found = !!cb_info;
1126
1127 if (!(is_registration ^ cb_found))
1128 return -EINVAL;
1129
1130 if (is_registration) {
1131 hash_add(drv_info->notifier_hash, &cb->hnode, notify_id);
1132 } else {
1133 hash_del(&cb_info->hnode);
1134 kfree(cb_info);
1135 }
1136
1137 return 0;
1138 }
1139
static enum notify_type ffa_notify_type_get(u16 vm_id)
1141 {
1142 if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
1143 return SECURE_PARTITION;
1144 else
1145 return NON_SECURE_VM;
1146 }
1147
static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
1149 {
1150 int rc;
1151 enum notify_type type = ffa_notify_type_get(dev->vm_id);
1152
1153 if (ffa_notifications_disabled())
1154 return -EOPNOTSUPP;
1155
1156 if (notify_id >= FFA_MAX_NOTIFICATIONS)
1157 return -EINVAL;
1158
1159 write_lock(&drv_info->notify_lock);
1160
1161 rc = update_notifier_cb(notify_id, type, NULL);
1162 if (rc) {
1163 pr_err("Could not unregister notification callback\n");
1164 write_unlock(&drv_info->notify_lock);
1165 return rc;
1166 }
1167
1168 rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
1169
1170 write_unlock(&drv_info->notify_lock);
1171
1172 return rc;
1173 }
1174
static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
1176 ffa_notifier_cb cb, void *cb_data, int notify_id)
1177 {
1178 int rc;
1179 u32 flags = 0;
1180 struct notifier_cb_info *cb_info = NULL;
1181 enum notify_type type = ffa_notify_type_get(dev->vm_id);
1182
1183 if (ffa_notifications_disabled())
1184 return -EOPNOTSUPP;
1185
1186 if (notify_id >= FFA_MAX_NOTIFICATIONS)
1187 return -EINVAL;
1188
1189 cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
1190 if (!cb_info)
1191 return -ENOMEM;
1192
1193 cb_info->type = type;
1194 cb_info->cb_data = cb_data;
1195 cb_info->cb = cb;
1196
1197 write_lock(&drv_info->notify_lock);
1198
1199 if (is_per_vcpu)
1200 flags = PER_VCPU_NOTIFICATION_FLAG;
1201
1202 rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
1203 if (rc)
1204 goto out_unlock_free;
1205
1206 rc = update_notifier_cb(notify_id, type, cb_info);
1207 if (rc) {
1208 pr_err("Failed to register callback for %d - %d\n",
1209 notify_id, rc);
1210 ffa_notification_unbind(dev->vm_id, BIT(notify_id));
1211 }
1212
1213 out_unlock_free:
1214 write_unlock(&drv_info->notify_lock);
1215 if (rc)
1216 kfree(cb_info);
1217
1218 return rc;
1219 }
1220
static int ffa_notify_send(struct ffa_device *dev, int notify_id,
1222 bool is_per_vcpu, u16 vcpu)
1223 {
1224 u32 flags = 0;
1225
1226 if (ffa_notifications_disabled())
1227 return -EOPNOTSUPP;
1228
1229 if (is_per_vcpu)
1230 flags |= (PER_VCPU_NOTIFICATION_FLAG | vcpu << 16);
1231
1232 return ffa_notification_set(dev->vm_id, drv_info->vm_id, flags,
1233 BIT(notify_id));
1234 }
1235
static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
1237 {
1238 int notify_id;
1239 struct notifier_cb_info *cb_info = NULL;
1240
1241 for (notify_id = 0; notify_id <= FFA_MAX_NOTIFICATIONS && bitmap;
1242 notify_id++, bitmap >>= 1) {
1243 if (!(bitmap & 1))
1244 continue;
1245
1246 read_lock(&drv_info->notify_lock);
1247 cb_info = notifier_hash_node_get(notify_id, type);
1248 read_unlock(&drv_info->notify_lock);
1249
1250 if (cb_info && cb_info->cb)
1251 cb_info->cb(notify_id, cb_info->cb_data);
1252 }
1253 }
1254
static void notif_get_and_handle(void *unused)
1256 {
1257 int rc;
1258 struct ffa_notify_bitmaps bitmaps;
1259
1260 rc = ffa_notification_get(SECURE_PARTITION_BITMAP |
1261 SPM_FRAMEWORK_BITMAP, &bitmaps);
1262 if (rc) {
1263 pr_err("Failed to retrieve notifications with %d!\n", rc);
1264 return;
1265 }
1266
1267 handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM);
1268 handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION);
1269 handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK);
1270 }
1271
1272 static void
ffa_self_notif_handle(u16 vcpu, bool is_per_vcpu, void *cb_data)
1274 {
1275 struct ffa_drv_info *info = cb_data;
1276
1277 if (!is_per_vcpu)
1278 notif_get_and_handle(info);
1279 else
1280 smp_call_function_single(vcpu, notif_get_and_handle, info, 0);
1281 }
1282
static void notif_pcpu_irq_work_fn(struct work_struct *work)
1284 {
1285 struct ffa_drv_info *info = container_of(work, struct ffa_drv_info,
1286 notif_pcpu_work);
1287
1288 ffa_self_notif_handle(smp_processor_id(), true, info);
1289 }
1290
1291 static const struct ffa_info_ops ffa_drv_info_ops = {
1292 .api_version_get = ffa_api_version_get,
1293 .partition_info_get = ffa_partition_info_get,
1294 };
1295
1296 static const struct ffa_msg_ops ffa_drv_msg_ops = {
1297 .mode_32bit_set = ffa_mode_32bit_set,
1298 .sync_send_receive = ffa_sync_send_receive,
1299 .indirect_send = ffa_indirect_msg_send,
1300 .sync_send_receive2 = ffa_sync_send_receive2,
1301 };
1302
1303 static const struct ffa_mem_ops ffa_drv_mem_ops = {
1304 .memory_reclaim = ffa_memory_reclaim,
1305 .memory_share = ffa_memory_share,
1306 .memory_lend = ffa_memory_lend,
1307 };
1308
1309 static const struct ffa_cpu_ops ffa_drv_cpu_ops = {
1310 .run = ffa_run,
1311 };
1312
1313 static const struct ffa_notifier_ops ffa_drv_notifier_ops = {
1314 .sched_recv_cb_register = ffa_sched_recv_cb_register,
1315 .sched_recv_cb_unregister = ffa_sched_recv_cb_unregister,
1316 .notify_request = ffa_notify_request,
1317 .notify_relinquish = ffa_notify_relinquish,
1318 .notify_send = ffa_notify_send,
1319 };
1320
1321 static const struct ffa_ops ffa_drv_ops = {
1322 .info_ops = &ffa_drv_info_ops,
1323 .msg_ops = &ffa_drv_msg_ops,
1324 .mem_ops = &ffa_drv_mem_ops,
1325 .cpu_ops = &ffa_drv_cpu_ops,
1326 .notifier_ops = &ffa_drv_notifier_ops,
1327 };
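
/*
 * These ops are handed to every ffa_device registered in
 * ffa_setup_partitions(), so client drivers reach this driver purely
 * through their device, e.g. ffa_dev->ops->info_ops->api_version_get() or
 * ffa_dev->ops->notifier_ops->notify_request(ffa_dev, ...).
 */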
1328
void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
1330 {
1331 int count, idx;
1332 struct ffa_partition_info *pbuf, *tpbuf;
1333
1334 count = ffa_partition_probe(uuid, &pbuf);
1335 if (count <= 0)
1336 return;
1337
1338 for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++)
1339 if (tpbuf->id == ffa_dev->vm_id)
1340 uuid_copy(&ffa_dev->uuid, uuid);
1341 kfree(pbuf);
1342 }
1343
1344 static int
ffa_bus_notifier(struct notifier_block *nb, unsigned long action, void *data)
1346 {
1347 struct device *dev = data;
1348 struct ffa_device *fdev = to_ffa_dev(dev);
1349
1350 if (action == BUS_NOTIFY_BIND_DRIVER) {
1351 struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
1352 const struct ffa_device_id *id_table = ffa_drv->id_table;
1353
/*
 * FF-A v1.1 provides the UUID for each partition as part of the
 * discovery API; the discovered UUID will already be populated in
 * the device's UUID, so there is no need to work around that by
 * copying it from the driver's id_table.
 */
1360 if (uuid_is_null(&fdev->uuid))
1361 ffa_device_match_uuid(fdev, &id_table->uuid);
1362
1363 return NOTIFY_OK;
1364 }
1365
1366 return NOTIFY_DONE;
1367 }
1368
1369 static struct notifier_block ffa_bus_nb = {
1370 .notifier_call = ffa_bus_notifier,
1371 };
1372
static int ffa_setup_partitions(void)
1374 {
1375 int count, idx, ret;
1376 struct ffa_device *ffa_dev;
1377 struct ffa_dev_part_info *info;
1378 struct ffa_partition_info *pbuf, *tpbuf;
1379
1380 if (drv_info->version == FFA_VERSION_1_0) {
1381 ret = bus_register_notifier(&ffa_bus_type, &ffa_bus_nb);
1382 if (ret)
1383 pr_err("Failed to register FF-A bus notifiers\n");
1384 }
1385
1386 count = ffa_partition_probe(&uuid_null, &pbuf);
1387 if (count <= 0) {
1388 pr_info("%s: No partitions found, error %d\n", __func__, count);
1389 return -EINVAL;
1390 }
1391
1392 xa_init(&drv_info->partition_info);
1393 for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
/*
 * Note that if the UUID is uuid_null, ffa_bus_notifier() will
 * have to find the UUID of this partition ID with the help of
 * ffa_device_match_uuid(). FF-A v1.1 and above provides the
 * UUID here for each partition as part of the discovery API,
 * and that is what gets passed on.
 */
1400 ffa_dev = ffa_device_register(tpbuf, &ffa_drv_ops);
1401 if (!ffa_dev) {
1402 pr_err("%s: failed to register partition ID 0x%x\n",
1403 __func__, tpbuf->id);
1404 continue;
1405 }
1406
1407 if (drv_info->version > FFA_VERSION_1_0 &&
1408 !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
1409 ffa_mode_32bit_set(ffa_dev);
1410
1411 info = kzalloc(sizeof(*info), GFP_KERNEL);
1412 if (!info) {
1413 ffa_device_unregister(ffa_dev);
1414 continue;
1415 }
1416 rwlock_init(&info->rw_lock);
1417 ret = xa_insert(&drv_info->partition_info, tpbuf->id,
1418 info, GFP_KERNEL);
1419 if (ret) {
1420 pr_err("%s: failed to save partition ID 0x%x - ret:%d\n",
1421 __func__, tpbuf->id, ret);
1422 ffa_device_unregister(ffa_dev);
1423 kfree(info);
1424 }
1425 }
1426
1427 kfree(pbuf);
1428
1429 /* Check if the host is already added as part of partition info */
1430 if (xa_load(&drv_info->partition_info, drv_info->vm_id))
1431 return 0;
1432
1433 /* Allocate for the host */
1434 info = kzalloc(sizeof(*info), GFP_KERNEL);
1435 if (!info) {
1436 /* Already registered devices are freed on bus_exit */
1437 ffa_partitions_cleanup();
1438 return -ENOMEM;
1439 }
1440
1441 rwlock_init(&info->rw_lock);
1442 ret = xa_insert(&drv_info->partition_info, drv_info->vm_id,
1443 info, GFP_KERNEL);
1444 if (ret) {
1445 pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n",
1446 __func__, drv_info->vm_id, ret);
1447 kfree(info);
1448 /* Already registered devices are freed on bus_exit */
1449 ffa_partitions_cleanup();
1450 }
1451
1452 return ret;
1453 }
1454
static void ffa_partitions_cleanup(void)
1456 {
1457 struct ffa_dev_part_info *info;
1458 unsigned long idx;
1459
1460 xa_for_each(&drv_info->partition_info, idx, info) {
1461 xa_erase(&drv_info->partition_info, idx);
1462 kfree(info);
1463 }
1464
1465 xa_destroy(&drv_info->partition_info);
1466 }
1467
1468 /* FFA FEATURE IDs */
1469 #define FFA_FEAT_NOTIFICATION_PENDING_INT (1)
1470 #define FFA_FEAT_SCHEDULE_RECEIVER_INT (2)
1471 #define FFA_FEAT_MANAGED_EXIT_INT (3)
1472
static irqreturn_t ffa_sched_recv_irq_handler(int irq, void *irq_data)
1474 {
1475 struct ffa_pcpu_irq *pcpu = irq_data;
1476 struct ffa_drv_info *info = pcpu->info;
1477
1478 queue_work(info->notif_pcpu_wq, &info->sched_recv_irq_work);
1479
1480 return IRQ_HANDLED;
1481 }
1482
static irqreturn_t notif_pend_irq_handler(int irq, void *irq_data)
1484 {
1485 struct ffa_pcpu_irq *pcpu = irq_data;
1486 struct ffa_drv_info *info = pcpu->info;
1487
1488 queue_work_on(smp_processor_id(), info->notif_pcpu_wq,
1489 &info->notif_pcpu_work);
1490
1491 return IRQ_HANDLED;
1492 }
1493
static void ffa_sched_recv_irq_work_fn(struct work_struct *work)
1495 {
1496 ffa_notification_info_get();
1497 }
1498
static int ffa_irq_map(u32 id)
1500 {
1501 char *err_str;
1502 int ret, irq, intid;
1503
1504 if (id == FFA_FEAT_NOTIFICATION_PENDING_INT)
1505 err_str = "Notification Pending Interrupt";
1506 else if (id == FFA_FEAT_SCHEDULE_RECEIVER_INT)
1507 err_str = "Schedule Receiver Interrupt";
1508 else
1509 err_str = "Unknown ID";
1510
/* The returned intid is assumed to be an SGI donated to the NS world */
1512 ret = ffa_features(id, 0, &intid, NULL);
1513 if (ret < 0) {
1514 if (ret != -EOPNOTSUPP)
1515 pr_err("Failed to retrieve FF-A %s %u\n", err_str, id);
1516 return ret;
1517 }
1518
1519 if (acpi_disabled) {
1520 struct of_phandle_args oirq = {};
1521 struct device_node *gic;
1522
1523 /* Only GICv3 supported currently with the device tree */
1524 gic = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
1525 if (!gic)
1526 return -ENXIO;
1527
1528 oirq.np = gic;
1529 oirq.args_count = 1;
1530 oirq.args[0] = intid;
1531 irq = irq_create_of_mapping(&oirq);
1532 of_node_put(gic);
1533 #ifdef CONFIG_ACPI
1534 } else {
1535 irq = acpi_register_gsi(NULL, intid, ACPI_EDGE_SENSITIVE,
1536 ACPI_ACTIVE_HIGH);
1537 #endif
1538 }
1539
1540 if (irq <= 0) {
1541 pr_err("Failed to create IRQ mapping!\n");
1542 return -ENODATA;
1543 }
1544
1545 return irq;
1546 }
1547
static void ffa_irq_unmap(unsigned int irq)
1549 {
1550 if (!irq)
1551 return;
1552 irq_dispose_mapping(irq);
1553 }
1554
static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
1556 {
1557 if (drv_info->sched_recv_irq)
1558 enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
1559 if (drv_info->notif_pend_irq)
1560 enable_percpu_irq(drv_info->notif_pend_irq, IRQ_TYPE_NONE);
1561 return 0;
1562 }
1563
static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
1565 {
1566 if (drv_info->sched_recv_irq)
1567 disable_percpu_irq(drv_info->sched_recv_irq);
1568 if (drv_info->notif_pend_irq)
1569 disable_percpu_irq(drv_info->notif_pend_irq);
1570 return 0;
1571 }
1572
static void ffa_uninit_pcpu_irq(void)
1574 {
1575 if (drv_info->cpuhp_state) {
1576 cpuhp_remove_state(drv_info->cpuhp_state);
1577 drv_info->cpuhp_state = 0;
1578 }
1579
1580 if (drv_info->notif_pcpu_wq) {
1581 destroy_workqueue(drv_info->notif_pcpu_wq);
1582 drv_info->notif_pcpu_wq = NULL;
1583 }
1584
1585 if (drv_info->sched_recv_irq)
1586 free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu);
1587
1588 if (drv_info->notif_pend_irq)
1589 free_percpu_irq(drv_info->notif_pend_irq, drv_info->irq_pcpu);
1590
1591 if (drv_info->irq_pcpu) {
1592 free_percpu(drv_info->irq_pcpu);
1593 drv_info->irq_pcpu = NULL;
1594 }
1595 }
1596
static int ffa_init_pcpu_irq(void)
1598 {
1599 struct ffa_pcpu_irq __percpu *irq_pcpu;
1600 int ret, cpu;
1601
1602 irq_pcpu = alloc_percpu(struct ffa_pcpu_irq);
1603 if (!irq_pcpu)
1604 return -ENOMEM;
1605
1606 for_each_present_cpu(cpu)
1607 per_cpu_ptr(irq_pcpu, cpu)->info = drv_info;
1608
1609 drv_info->irq_pcpu = irq_pcpu;
1610
1611 if (drv_info->sched_recv_irq) {
1612 ret = request_percpu_irq(drv_info->sched_recv_irq,
1613 ffa_sched_recv_irq_handler,
1614 "ARM-FFA-SRI", irq_pcpu);
1615 if (ret) {
1616 pr_err("Error registering percpu SRI nIRQ %d : %d\n",
1617 drv_info->sched_recv_irq, ret);
1618 drv_info->sched_recv_irq = 0;
1619 return ret;
1620 }
1621 }
1622
1623 if (drv_info->notif_pend_irq) {
1624 ret = request_percpu_irq(drv_info->notif_pend_irq,
1625 notif_pend_irq_handler,
1626 "ARM-FFA-NPI", irq_pcpu);
1627 if (ret) {
1628 pr_err("Error registering percpu NPI nIRQ %d : %d\n",
1629 drv_info->notif_pend_irq, ret);
1630 drv_info->notif_pend_irq = 0;
1631 return ret;
1632 }
1633 }
1634
1635 INIT_WORK(&drv_info->sched_recv_irq_work, ffa_sched_recv_irq_work_fn);
1636 INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn);
1637 drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification");
1638 if (!drv_info->notif_pcpu_wq)
1639 return -EINVAL;
1640
1641 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ffa/pcpu-irq:starting",
1642 ffa_cpuhp_pcpu_irq_enable,
1643 ffa_cpuhp_pcpu_irq_disable);
1644
1645 if (ret < 0)
1646 return ret;
1647
1648 drv_info->cpuhp_state = ret;
1649 return 0;
1650 }
1651
static void ffa_notifications_cleanup(void)
1653 {
1654 ffa_uninit_pcpu_irq();
1655 ffa_irq_unmap(drv_info->sched_recv_irq);
1656 drv_info->sched_recv_irq = 0;
1657 ffa_irq_unmap(drv_info->notif_pend_irq);
1658 drv_info->notif_pend_irq = 0;
1659
1660 if (drv_info->bitmap_created) {
1661 ffa_notification_bitmap_destroy();
1662 drv_info->bitmap_created = false;
1663 }
1664 drv_info->notif_enabled = false;
1665 }
1666
static void ffa_notifications_setup(void)
1668 {
1669 int ret;
1670
1671 ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL);
1672 if (!ret) {
1673 ret = ffa_notification_bitmap_create();
1674 if (ret) {
1675 pr_err("Notification bitmap create error %d\n", ret);
1676 return;
1677 }
1678
1679 drv_info->bitmap_created = true;
1680 }
1681
1682 ret = ffa_irq_map(FFA_FEAT_SCHEDULE_RECEIVER_INT);
1683 if (ret > 0)
1684 drv_info->sched_recv_irq = ret;
1685
1686 ret = ffa_irq_map(FFA_FEAT_NOTIFICATION_PENDING_INT);
1687 if (ret > 0)
1688 drv_info->notif_pend_irq = ret;
1689
1690 if (!drv_info->sched_recv_irq && !drv_info->notif_pend_irq)
1691 goto cleanup;
1692
1693 ret = ffa_init_pcpu_irq();
1694 if (ret)
1695 goto cleanup;
1696
1697 hash_init(drv_info->notifier_hash);
1698 rwlock_init(&drv_info->notify_lock);
1699
1700 drv_info->notif_enabled = true;
1701 return;
1702 cleanup:
1703 pr_info("Notification setup failed %d, not enabled\n", ret);
1704 ffa_notifications_cleanup();
1705 }
1706
static int __init ffa_init(void)
1708 {
1709 int ret;
1710 u32 buf_sz;
1711 size_t rxtx_bufsz = SZ_4K;
1712
1713 ret = ffa_transport_init(&invoke_ffa_fn);
1714 if (ret)
1715 return ret;
1716
1717 drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
1718 if (!drv_info)
1719 return -ENOMEM;
1720
1721 ret = ffa_version_check(&drv_info->version);
1722 if (ret)
1723 goto free_drv_info;
1724
1725 if (ffa_id_get(&drv_info->vm_id)) {
1726 pr_err("failed to obtain VM id for self\n");
1727 ret = -ENODEV;
1728 goto free_drv_info;
1729 }
1730
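/*
 * FFA_FEATURES on RXTX_MAP reports the minimum buffer size the firmware
 * expects in bits [1:0] of the returned properties: 1 means 64K, 2 means
 * 16K, and any other value (including 0) means the default 4K pages, as
 * decoded below. If the query itself fails, the 4K default is kept.
 */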
1731 ret = ffa_features(FFA_FN_NATIVE(RXTX_MAP), 0, &buf_sz, NULL);
1732 if (!ret) {
1733 if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 1)
1734 rxtx_bufsz = SZ_64K;
1735 else if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 2)
1736 rxtx_bufsz = SZ_16K;
1737 else
1738 rxtx_bufsz = SZ_4K;
1739 }
1740
1741 drv_info->rxtx_bufsz = rxtx_bufsz;
1742 drv_info->rx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
1743 if (!drv_info->rx_buffer) {
1744 ret = -ENOMEM;
1745 goto free_pages;
1746 }
1747
1748 drv_info->tx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
1749 if (!drv_info->tx_buffer) {
1750 ret = -ENOMEM;
1751 goto free_pages;
1752 }
1753
1754 ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer),
1755 virt_to_phys(drv_info->rx_buffer),
1756 rxtx_bufsz / FFA_PAGE_SIZE);
1757 if (ret) {
1758 pr_err("failed to register FFA RxTx buffers\n");
1759 goto free_pages;
1760 }
1761
1762 mutex_init(&drv_info->rx_lock);
1763 mutex_init(&drv_info->tx_lock);
1764
1765 ffa_drvinfo_flags_init();
1766
1767 ffa_notifications_setup();
1768
1769 ret = ffa_setup_partitions();
1770 if (ret) {
1771 pr_err("failed to setup partitions\n");
1772 goto cleanup_notifs;
1773 }
1774
1775 ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
1776 drv_info, true);
1777 if (ret)
1778 pr_info("Failed to register driver sched callback %d\n", ret);
1779
1780 return 0;
1781
1782 cleanup_notifs:
1783 ffa_notifications_cleanup();
1784 free_pages:
1785 if (drv_info->tx_buffer)
1786 free_pages_exact(drv_info->tx_buffer, rxtx_bufsz);
1787 free_pages_exact(drv_info->rx_buffer, rxtx_bufsz);
1788 free_drv_info:
1789 kfree(drv_info);
1790 return ret;
1791 }
1792 rootfs_initcall(ffa_init);
1793
static void __exit ffa_exit(void)
1795 {
1796 ffa_notifications_cleanup();
1797 ffa_partitions_cleanup();
1798 ffa_rxtx_unmap(drv_info->vm_id);
1799 free_pages_exact(drv_info->tx_buffer, drv_info->rxtx_bufsz);
1800 free_pages_exact(drv_info->rx_buffer, drv_info->rxtx_bufsz);
1801 kfree(drv_info);
1802 }
1803 module_exit(ffa_exit);
1804
1805 MODULE_ALIAS("arm-ffa");
1806 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
1807 MODULE_DESCRIPTION("Arm FF-A interface driver");
1808 MODULE_LICENSE("GPL v2");
1809