• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Driver for the Intel SCU IPC mechanism
4  *
5  * (C) Copyright 2008-2010,2015 Intel Corporation
6  * Author: Sreedhara DS (sreedhara.ds@intel.com)
7  *
 * SCU running in ARC processor communicates with other entity running in IA
 * core through IPC mechanism which in turn messaging between IA core and SCU.
 * SCU has two IPC mechanism IPC-1 and IPC-2. IPC-1 is used between IA32 and
 * SCU where IPC-2 is used between P-Unit and SCU. This driver deals with
 * IPC-1. It provides an API for power control unit registers (e.g. MSIC)
 * along with other APIs.
14  */
15 
16 #include <linux/delay.h>
17 #include <linux/device.h>
18 #include <linux/errno.h>
19 #include <linux/init.h>
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/iopoll.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 
26 #include <asm/intel_scu_ipc.h>
27 
28 /* IPC defines the following message types */
29 #define IPCMSG_PCNTRL         0xff /* Power controller unit read/write */
30 
31 /* Command id associated with message IPCMSG_PCNTRL */
32 #define IPC_CMD_PCNTRL_W      0 /* Register write */
33 #define IPC_CMD_PCNTRL_R      1 /* Register read */
34 #define IPC_CMD_PCNTRL_M      2 /* Register read-modify-write */
35 
36 /*
37  * IPC register summary
38  *
39  * IPC register blocks are memory mapped at fixed address of PCI BAR 0.
40  * To read or write information to the SCU, driver writes to IPC-1 memory
41  * mapped registers. The following is the IPC mechanism
42  *
43  * 1. IA core cDMI interface claims this transaction and converts it to a
44  *    Transaction Layer Packet (TLP) message which is sent across the cDMI.
45  *
46  * 2. South Complex cDMI block receives this message and writes it to
47  *    the IPC-1 register block, causing an interrupt to the SCU
48  *
49  * 3. SCU firmware decodes this interrupt and IPC message and the appropriate
50  *    message handler is called within firmware.
51  */
52 
53 #define IPC_WWBUF_SIZE    20		/* IPC Write buffer Size */
54 #define IPC_RWBUF_SIZE    20		/* IPC Read buffer Size */
55 #define IPC_IOC	          0x100		/* IPC command register IOC bit */
56 
/* A single SCU IPC (IPC-1) controller instance; only one exists per system */
struct intel_scu_ipc_dev {
	struct device dev;		/* Class device; lifetime managed by refcount */
	struct resource mem;		/* Memory region claimed for the register block */
	struct module *owner;		/* Providing module, pinned while users hold us */
	int irq;			/* IRQ number; <= 0 means commands are polled */
	void __iomem *ipc_base;		/* Mapped base of the IPC-1 registers */
	struct completion cmd_complete;	/* Completed by the IRQ handler per command */
};
65 
66 #define IPC_STATUS		0x04
67 #define IPC_STATUS_IRQ		BIT(2)
68 #define IPC_STATUS_ERR		BIT(1)
69 #define IPC_STATUS_BUSY		BIT(0)
70 
71 /*
72  * IPC Write/Read Buffers:
73  * 16 byte buffer for sending and receiving data to and from SCU.
74  */
75 #define IPC_WRITE_BUFFER	0x80
76 #define IPC_READ_BUFFER		0x90
77 
78 /* Timeout in jiffies */
79 #define IPC_TIMEOUT		(3 * HZ)
80 
81 static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
82 static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
83 
/* Class under which the single SCU IPC device is registered */
static struct class intel_scu_ipc_class = {
	.name = "intel_scu_ipc",
	.owner = THIS_MODULE,
};
88 
89 /**
90  * intel_scu_ipc_dev_get() - Get SCU IPC instance
91  *
92  * The recommended new API takes SCU IPC instance as parameter and this
93  * function can be called by driver to get the instance. This also makes
94  * sure the driver providing the IPC functionality cannot be unloaded
95  * while the caller has the instance.
96  *
97  * Call intel_scu_ipc_dev_put() to release the instance.
98  *
99  * Returns %NULL if SCU IPC is not currently available.
100  */
intel_scu_ipc_dev_get(void)101 struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
102 {
103 	struct intel_scu_ipc_dev *scu = NULL;
104 
105 	mutex_lock(&ipclock);
106 	if (ipcdev) {
107 		get_device(&ipcdev->dev);
108 		/*
109 		 * Prevent the IPC provider from being unloaded while it
110 		 * is being used.
111 		 */
112 		if (!try_module_get(ipcdev->owner))
113 			put_device(&ipcdev->dev);
114 		else
115 			scu = ipcdev;
116 	}
117 
118 	mutex_unlock(&ipclock);
119 	return scu;
120 }
121 EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_get);
122 
123 /**
124  * intel_scu_ipc_dev_put() - Put SCU IPC instance
125  * @scu: SCU IPC instance
126  *
127  * This function releases the SCU IPC instance retrieved from
128  * intel_scu_ipc_dev_get() and allows the driver providing IPC to be
129  * unloaded.
130  */
intel_scu_ipc_dev_put(struct intel_scu_ipc_dev * scu)131 void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu)
132 {
133 	if (scu) {
134 		module_put(scu->owner);
135 		put_device(&scu->dev);
136 	}
137 }
138 EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_put);
139 
/* Devres payload tracking a borrowed SCU IPC instance */
struct intel_scu_ipc_devres {
	struct intel_scu_ipc_dev *scu;
};
143 
devm_intel_scu_ipc_dev_release(struct device * dev,void * res)144 static void devm_intel_scu_ipc_dev_release(struct device *dev, void *res)
145 {
146 	struct intel_scu_ipc_devres *dr = res;
147 	struct intel_scu_ipc_dev *scu = dr->scu;
148 
149 	intel_scu_ipc_dev_put(scu);
150 }
151 
152 /**
153  * devm_intel_scu_ipc_dev_get() - Allocate managed SCU IPC device
154  * @dev: Device requesting the SCU IPC device
155  *
156  * The recommended new API takes SCU IPC instance as parameter and this
157  * function can be called by driver to get the instance. This also makes
158  * sure the driver providing the IPC functionality cannot be unloaded
159  * while the caller has the instance.
160  *
161  * Returns %NULL if SCU IPC is not currently available.
162  */
devm_intel_scu_ipc_dev_get(struct device * dev)163 struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev)
164 {
165 	struct intel_scu_ipc_devres *dr;
166 	struct intel_scu_ipc_dev *scu;
167 
168 	dr = devres_alloc(devm_intel_scu_ipc_dev_release, sizeof(*dr), GFP_KERNEL);
169 	if (!dr)
170 		return NULL;
171 
172 	scu = intel_scu_ipc_dev_get();
173 	if (!scu) {
174 		devres_free(dr);
175 		return NULL;
176 	}
177 
178 	dr->scu = scu;
179 	devres_add(dev, dr);
180 
181 	return scu;
182 }
183 EXPORT_SYMBOL_GPL(devm_intel_scu_ipc_dev_get);
184 
/*
 * Send ipc command
 * Command Register (Write Only):
 * A write to this register results in an interrupt to the SCU core processor
 * Format:
 * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
 */
static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
{
	/*
	 * Re-arm the completion before triggering the command so that a
	 * stale completion from an earlier command cannot satisfy the wait
	 * for this one. IPC_IOC requests an interrupt on completion.
	 */
	reinit_completion(&scu->cmd_complete);
	writel(cmd | IPC_IOC, scu->ipc_base);
}
197 
/*
 * Write ipc data
 * IPC Write Buffer (Write Only):
 * 16-byte buffer for sending data associated with IPC command to
 * SCU. Size of the data is specified in the IPC_COMMAND_REG register
 */
static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
{
	/* @offset is a byte offset into the write buffer window */
	writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
}
208 
/*
 * Status Register (Read Only):
 * Driver will read this register to get the ready/busy status of the IPC
 * block and error status of the IPC command that was just processed by SCU
 * Format:
 * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
 */
static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
{
	/*
	 * Only the low byte (busy/error/IRQ bits) is of interest here;
	 * the 32-bit read is deliberately truncated to u8 on return.
	 */
	return __raw_readl(scu->ipc_base + IPC_STATUS);
}
220 
/* Read one byte at @offset from the IPC read buffer */
static inline u8 ipc_data_readb(struct intel_scu_ipc_dev *scu, u32 offset)
{
	return readb(scu->ipc_base + IPC_READ_BUFFER + offset);
}
226 
/* Read one u32 at byte @offset from the IPC read buffer */
static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
{
	return readl(scu->ipc_base + IPC_READ_BUFFER + offset);
}
232 
/* Poll until the SCU clears the busy bit, then report the command result */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
{
	u8 status;
	int err;

	/* Poll the status register every 100 us until not busy or IPC_TIMEOUT */
	err = readx_poll_timeout(ipc_read_status, scu, status, !(status & IPC_STATUS_BUSY),
				 100, jiffies_to_usecs(IPC_TIMEOUT));
	if (err)
		return err;

	/* Command finished; report whether the SCU flagged an error */
	return (status & IPC_STATUS_ERR) ? -EIO : 0;
}
246 
/* Wait until the IPC IOC interrupt arrives or IPC_TIMEOUT (3 s) elapses */
static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
	int status;

	/*
	 * The wait result is intentionally ignored; a timeout is detected
	 * below by the busy bit still being set in the status register.
	 */
	wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT);

	status = ipc_read_status(scu);
	if (status & IPC_STATUS_BUSY)
		return -ETIMEDOUT;

	if (status & IPC_STATUS_ERR)
		return -EIO;

	return 0;
}
263 
intel_scu_ipc_check_status(struct intel_scu_ipc_dev * scu)264 static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
265 {
266 	return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
267 }
268 
intel_scu_ipc_get(struct intel_scu_ipc_dev * scu)269 static struct intel_scu_ipc_dev *intel_scu_ipc_get(struct intel_scu_ipc_dev *scu)
270 {
271 	u8 status;
272 
273 	if (!scu)
274 		scu = ipcdev;
275 	if (!scu)
276 		return ERR_PTR(-ENODEV);
277 
278 	status = ipc_read_status(scu);
279 	if (status & IPC_STATUS_BUSY) {
280 		dev_dbg(&scu->dev, "device is busy\n");
281 		return ERR_PTR(-EBUSY);
282 	}
283 
284 	return scu;
285 }
286 
287 /* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
pwr_reg_rdwr(struct intel_scu_ipc_dev * scu,u16 * addr,u8 * data,u32 count,u32 op,u32 id)288 static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
289 			u32 count, u32 op, u32 id)
290 {
291 	int nc;
292 	u32 offset = 0;
293 	int err;
294 	u8 cbuf[IPC_WWBUF_SIZE];
295 	u32 *wbuf = (u32 *)&cbuf;
296 
297 	memset(cbuf, 0, sizeof(cbuf));
298 
299 	mutex_lock(&ipclock);
300 	scu = intel_scu_ipc_get(scu);
301 	if (IS_ERR(scu)) {
302 		mutex_unlock(&ipclock);
303 		return PTR_ERR(scu);
304 	}
305 
306 	for (nc = 0; nc < count; nc++, offset += 2) {
307 		cbuf[offset] = addr[nc];
308 		cbuf[offset + 1] = addr[nc] >> 8;
309 	}
310 
311 	if (id == IPC_CMD_PCNTRL_R) {
312 		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
313 			ipc_data_writel(scu, wbuf[nc], offset);
314 		ipc_command(scu, (count * 2) << 16 | id << 12 | 0 << 8 | op);
315 	} else if (id == IPC_CMD_PCNTRL_W) {
316 		for (nc = 0; nc < count; nc++, offset += 1)
317 			cbuf[offset] = data[nc];
318 		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
319 			ipc_data_writel(scu, wbuf[nc], offset);
320 		ipc_command(scu, (count * 3) << 16 | id << 12 | 0 << 8 | op);
321 	} else if (id == IPC_CMD_PCNTRL_M) {
322 		cbuf[offset] = data[0];
323 		cbuf[offset + 1] = data[1];
324 		ipc_data_writel(scu, wbuf[0], 0); /* Write wbuff */
325 		ipc_command(scu, 4 << 16 | id << 12 | 0 << 8 | op);
326 	}
327 
328 	err = intel_scu_ipc_check_status(scu);
329 	if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
330 		/* Workaround: values are read as 0 without memcpy_fromio */
331 		memcpy_fromio(cbuf, scu->ipc_base + 0x90, 16);
332 		for (nc = 0; nc < count; nc++)
333 			data[nc] = ipc_data_readb(scu, nc);
334 	}
335 	mutex_unlock(&ipclock);
336 	return err;
337 }
338 
/**
 * intel_scu_ipc_dev_ioread8() - Read a byte via the SCU
 * @scu: Optional SCU IPC instance, %NULL selects the default one
 * @addr: Register on SCU
 * @data: Return pointer for read byte
 *
 * Read a single register. Returns %0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr, u8 *data)
{
	return pwr_reg_rdwr(scu, &addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_ioread8);
355 
/**
 * intel_scu_ipc_dev_iowrite8() - Write a byte via the SCU
 * @scu: Optional SCU IPC instance, %NULL selects the default one
 * @addr: Register on SCU
 * @data: Byte to write
 *
 * Write a single register. Returns %0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr, u8 data)
{
	return pwr_reg_rdwr(scu, &addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_iowrite8);
372 
/**
 * intel_scu_ipc_dev_readv() - Read a set of registers
 * @scu: Optional SCU IPC instance, %NULL selects the default one
 * @addr: Register list
 * @data: Bytes to return
 * @len: Length of array
 *
 * Read registers. Returns %0 on success or an error code. All locking
 * between SCU accesses is handled for the caller.
 *
 * The largest array length permitted by the hardware is 5 items.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
			    size_t len)
{
	return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_readv);
393 
/**
 * intel_scu_ipc_dev_writev() - Write a set of registers
 * @scu: Optional SCU IPC instance, %NULL selects the default one
 * @addr: Register list
 * @data: Bytes to write
 * @len: Length of array
 *
 * Write registers. Returns %0 on success or an error code. All locking
 * between SCU accesses is handled for the caller.
 *
 * The largest array length permitted by the hardware is 5 items.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
			     size_t len)
{
	return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_writev);
414 
415 /**
416  * intel_scu_ipc_dev_update() - Update a register
417  * @scu: Optional SCU IPC instance
418  * @addr: Register address
419  * @data: Bits to update
420  * @mask: Mask of bits to update
421  *
422  * Read-modify-write power control unit register. The first data argument
423  * must be register value and second is mask value mask is a bitmap that
424  * indicates which bits to update. %0 = masked. Don't modify this bit, %1 =
425  * modify this bit. returns %0 on success or an error code.
426  *
427  * This function may sleep. Locking between SCU accesses is handled
428  * for the caller.
429  */
intel_scu_ipc_dev_update(struct intel_scu_ipc_dev * scu,u16 addr,u8 data,u8 mask)430 int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr, u8 data,
431 			     u8 mask)
432 {
433 	u8 tmp[2] = { data, mask };
434 	return pwr_reg_rdwr(scu, &addr, tmp, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
435 }
436 EXPORT_SYMBOL(intel_scu_ipc_dev_update);
437 
438 /**
439  * intel_scu_ipc_dev_simple_command() - Send a simple command
440  * @scu: Optional SCU IPC instance
441  * @cmd: Command
442  * @sub: Sub type
443  *
444  * Issue a simple command to the SCU. Do not use this interface if you must
445  * then access data as any data values may be overwritten by another SCU
446  * access by the time this function returns.
447  *
448  * This function may sleep. Locking for SCU accesses is handled for the
449  * caller.
450  */
intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev * scu,int cmd,int sub)451 int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
452 				     int sub)
453 {
454 	u32 cmdval;
455 	int err;
456 
457 	mutex_lock(&ipclock);
458 	scu = intel_scu_ipc_get(scu);
459 	if (IS_ERR(scu)) {
460 		mutex_unlock(&ipclock);
461 		return PTR_ERR(scu);
462 	}
463 
464 	cmdval = sub << 12 | cmd;
465 	ipc_command(scu, cmdval);
466 	err = intel_scu_ipc_check_status(scu);
467 	mutex_unlock(&ipclock);
468 	if (err)
469 		dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
470 	return err;
471 }
472 EXPORT_SYMBOL(intel_scu_ipc_dev_simple_command);
473 
474 /**
475  * intel_scu_ipc_command_with_size() - Command with data
476  * @scu: Optional SCU IPC instance
477  * @cmd: Command
478  * @sub: Sub type
479  * @in: Input data
480  * @inlen: Input length in bytes
481  * @size: Input size written to the IPC command register in whatever
482  *	  units (dword, byte) the particular firmware requires. Normally
483  *	  should be the same as @inlen.
484  * @out: Output data
485  * @outlen: Output length in bytes
486  *
487  * Issue a command to the SCU which involves data transfers. Do the
488  * data copies under the lock but leave it for the caller to interpret.
489  */
intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev * scu,int cmd,int sub,const void * in,size_t inlen,size_t size,void * out,size_t outlen)490 int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
491 					int sub, const void *in, size_t inlen,
492 					size_t size, void *out, size_t outlen)
493 {
494 	size_t outbuflen = DIV_ROUND_UP(outlen, sizeof(u32));
495 	size_t inbuflen = DIV_ROUND_UP(inlen, sizeof(u32));
496 	u32 cmdval, inbuf[4] = {};
497 	int i, err;
498 
499 	if (inbuflen > 4 || outbuflen > 4)
500 		return -EINVAL;
501 
502 	mutex_lock(&ipclock);
503 	scu = intel_scu_ipc_get(scu);
504 	if (IS_ERR(scu)) {
505 		mutex_unlock(&ipclock);
506 		return PTR_ERR(scu);
507 	}
508 
509 	memcpy(inbuf, in, inlen);
510 	for (i = 0; i < inbuflen; i++)
511 		ipc_data_writel(scu, inbuf[i], 4 * i);
512 
513 	cmdval = (size << 16) | (sub << 12) | cmd;
514 	ipc_command(scu, cmdval);
515 	err = intel_scu_ipc_check_status(scu);
516 
517 	if (!err) {
518 		u32 outbuf[4] = {};
519 
520 		for (i = 0; i < outbuflen; i++)
521 			outbuf[i] = ipc_data_readl(scu, 4 * i);
522 
523 		memcpy(out, outbuf, outlen);
524 	}
525 
526 	mutex_unlock(&ipclock);
527 	if (err)
528 		dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
529 	return err;
530 }
531 EXPORT_SYMBOL(intel_scu_ipc_dev_command_with_size);
532 
/*
 * Interrupt handler gets called when ioc bit of IPC_COMMAND_REG set to 1
 * When ioc bit is set to 1, caller api must wait for interrupt handler called
 * which in turn unlocks the caller api. Currently this is not used
 *
 * This is edge triggered so we need take no action to clear anything
 */
static irqreturn_t ioc(int irq, void *dev_id)
{
	struct intel_scu_ipc_dev *scu = dev_id;
	int status = ipc_read_status(scu);

	/* Ack the IRQ bit in the status register, then wake the waiter */
	writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
	complete(&scu->cmd_complete);

	return IRQ_HANDLED;
}
550 
/*
 * Device release callback, invoked when the last reference to the class
 * device is dropped. Undoes __intel_scu_ipc_register() in reverse order.
 */
static void intel_scu_ipc_release(struct device *dev)
{
	struct intel_scu_ipc_dev *scu;

	scu = container_of(dev, struct intel_scu_ipc_dev, dev);
	/* irq <= 0 means no IRQ was ever requested */
	if (scu->irq > 0)
		free_irq(scu->irq, scu);
	iounmap(scu->ipc_base);
	release_mem_region(scu->mem.start, resource_size(&scu->mem));
	kfree(scu);
}
562 
/**
 * __intel_scu_ipc_register() - Register SCU IPC device
 * @parent: Parent device
 * @scu_data: Data used to configure SCU IPC
 * @owner: Module registering the SCU IPC device
 *
 * Call this function to register SCU IPC mechanism under @parent.
 * Returns pointer to the new SCU IPC device or ERR_PTR() in case of
 * failure. The caller may use the returned instance if it needs to do
 * SCU IPC calls itself.
 */
struct intel_scu_ipc_dev *
__intel_scu_ipc_register(struct device *parent,
			 const struct intel_scu_ipc_data *scu_data,
			 struct module *owner)
{
	int err;
	struct intel_scu_ipc_dev *scu;
	void __iomem *ipc_base;

	mutex_lock(&ipclock);
	/* We support only one IPC */
	if (ipcdev) {
		err = -EBUSY;
		goto err_unlock;
	}

	scu = kzalloc(sizeof(*scu), GFP_KERNEL);
	if (!scu) {
		err = -ENOMEM;
		goto err_unlock;
	}

	scu->owner = owner;
	scu->dev.parent = parent;
	scu->dev.class = &intel_scu_ipc_class;
	scu->dev.release = intel_scu_ipc_release;

	if (!request_mem_region(scu_data->mem.start, resource_size(&scu_data->mem),
				"intel_scu_ipc")) {
		err = -EBUSY;
		goto err_free;
	}

	ipc_base = ioremap(scu_data->mem.start, resource_size(&scu_data->mem));
	if (!ipc_base) {
		err = -ENOMEM;
		goto err_release;
	}

	scu->ipc_base = ipc_base;
	scu->mem = scu_data->mem;
	scu->irq = scu_data->irq;
	init_completion(&scu->cmd_complete);

	/* irq <= 0 selects polled operation (busy_loop) instead of IRQs */
	if (scu->irq > 0) {
		err = request_irq(scu->irq, ioc, 0, "intel_scu_ipc", scu);
		if (err)
			goto err_unmap;
	}

	/*
	 * After this point intel_scu_ipc_release() takes care of
	 * releasing the SCU IPC resources once refcount drops to zero.
	 */
	dev_set_name(&scu->dev, "intel_scu_ipc");
	err = device_register(&scu->dev);
	if (err) {
		/* Drop the initial reference; release() frees everything */
		put_device(&scu->dev);
		goto err_unlock;
	}

	/* Assign device at last */
	ipcdev = scu;
	mutex_unlock(&ipclock);

	return scu;

err_unmap:
	iounmap(ipc_base);
err_release:
	release_mem_region(scu_data->mem.start, resource_size(&scu_data->mem));
err_free:
	kfree(scu);
err_unlock:
	mutex_unlock(&ipclock);

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);
653 
654 /**
655  * intel_scu_ipc_unregister() - Unregister SCU IPC
656  * @scu: SCU IPC handle
657  *
658  * This unregisters the SCU IPC device and releases the acquired
659  * resources once the refcount goes to zero.
660  */
intel_scu_ipc_unregister(struct intel_scu_ipc_dev * scu)661 void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu)
662 {
663 	mutex_lock(&ipclock);
664 	if (!WARN_ON(!ipcdev)) {
665 		ipcdev = NULL;
666 		device_unregister(&scu->dev);
667 	}
668 	mutex_unlock(&ipclock);
669 }
670 EXPORT_SYMBOL_GPL(intel_scu_ipc_unregister);
671 
devm_intel_scu_ipc_unregister(struct device * dev,void * res)672 static void devm_intel_scu_ipc_unregister(struct device *dev, void *res)
673 {
674 	struct intel_scu_ipc_devres *dr = res;
675 	struct intel_scu_ipc_dev *scu = dr->scu;
676 
677 	intel_scu_ipc_unregister(scu);
678 }
679 
680 /**
681  * __devm_intel_scu_ipc_register() - Register managed SCU IPC device
682  * @parent: Parent device
683  * @scu_data: Data used to configure SCU IPC
684  * @owner: Module registering the SCU IPC device
685  *
686  * Call this function to register managed SCU IPC mechanism under
687  * @parent. Returns pointer to the new SCU IPC device or ERR_PTR() in
688  * case of failure. The caller may use the returned instance if it needs
689  * to do SCU IPC calls itself.
690  */
691 struct intel_scu_ipc_dev *
__devm_intel_scu_ipc_register(struct device * parent,const struct intel_scu_ipc_data * scu_data,struct module * owner)692 __devm_intel_scu_ipc_register(struct device *parent,
693 			      const struct intel_scu_ipc_data *scu_data,
694 			      struct module *owner)
695 {
696 	struct intel_scu_ipc_devres *dr;
697 	struct intel_scu_ipc_dev *scu;
698 
699 	dr = devres_alloc(devm_intel_scu_ipc_unregister, sizeof(*dr), GFP_KERNEL);
700 	if (!dr)
701 		return NULL;
702 
703 	scu = __intel_scu_ipc_register(parent, scu_data, owner);
704 	if (IS_ERR(scu)) {
705 		devres_free(dr);
706 		return scu;
707 	}
708 
709 	dr->scu = scu;
710 	devres_add(parent, dr);
711 
712 	return scu;
713 }
714 EXPORT_SYMBOL_GPL(__devm_intel_scu_ipc_register);
715 
/* Register the class early (subsys_initcall) so providers can attach */
static int __init intel_scu_ipc_init(void)
{
	return class_register(&intel_scu_ipc_class);
}
subsys_initcall(intel_scu_ipc_init);
721 
/* Tear down the class on module unload */
static void __exit intel_scu_ipc_exit(void)
{
	class_unregister(&intel_scu_ipc_class);
}
module_exit(intel_scu_ipc_exit);
727