• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * VFIO based Physical Subchannel device driver
4  *
5  * Copyright IBM Corp. 2017
6  *
7  * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
8  *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
9  */
10 
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/device.h>
14 #include <linux/slab.h>
15 #include <linux/uuid.h>
16 #include <linux/mdev.h>
17 
18 #include <asm/isc.h>
19 
20 #include "ioasm.h"
21 #include "css.h"
22 #include "vfio_ccw_private.h"
23 
/* Workqueue for deferred interrupt handling; created in vfio_ccw_sch_init(). */
struct workqueue_struct *vfio_ccw_work_q;
/* Slab cache (usercopy-whitelisted) backing the per-device I/O region. */
struct kmem_cache *vfio_ccw_io_region;
26 
27 /*
28  * Helpers
29  */
/*
 * vfio_ccw_sch_quiesce - stop all I/O on a subchannel and disable it
 * @sch: the subchannel to quiesce
 *
 * First tries to disable the subchannel directly.  If that fails with
 * -EBUSY (I/O still in flight), repeatedly issues cancel/halt/clear and
 * waits (bounded by a timeout) for the resulting interrupt before
 * retrying the disable.  On exit the private state is always set to
 * NOT_OPER.
 *
 * Returns 0 on success or a negative error code from
 * cio_disable_subchannel()/cio_cancel_halt_clear().
 */
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int iretry, ret = 0;

	spin_lock_irq(sch->lock);
	/* Nothing to do if the subchannel is not enabled. */
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	/* Anything other than -EBUSY (I/O outstanding) is a final answer. */
	if (ret != -EBUSY)
		goto out_unlock;

	/* cio_cancel_halt_clear() consumes iretry across its attempts. */
	iretry = 255;
	do {

		ret = cio_cancel_halt_clear(sch, &iretry);

		if (ret == -EIO) {
			pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
			       sch->schid.ssid, sch->schid.sch_no);
			break;
		}

		/*
		 * Flush all I/O and wait for
		 * cancel/halt/clear completion.
		 */
		private->completion = &completion;
		spin_unlock_irq(sch->lock);

		if (ret == -EBUSY)
			wait_for_completion_timeout(&completion, 3*HZ);

		/*
		 * NOTE(review): private->completion is cleared without
		 * holding sch->lock — presumably safe because only the
		 * interrupt path completes it; confirm against the FSM
		 * interrupt handler.
		 */
		private->completion = NULL;
		flush_workqueue(vfio_ccw_work_q);
		spin_lock_irq(sch->lock);
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);
out_unlock:
	/* The device is no longer operational from this driver's viewpoint. */
	private->state = VFIO_CCW_STATE_NOT_OPER;
	spin_unlock_irq(sch->lock);
	return ret;
}
74 
vfio_ccw_sch_io_todo(struct work_struct * work)75 static void vfio_ccw_sch_io_todo(struct work_struct *work)
76 {
77 	struct vfio_ccw_private *private;
78 	struct irb *irb;
79 	bool is_final;
80 
81 	private = container_of(work, struct vfio_ccw_private, io_work);
82 	irb = &private->irb;
83 
84 	is_final = !(scsw_actl(&irb->scsw) &
85 		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
86 	if (scsw_is_solicited(&irb->scsw)) {
87 		cp_update_scsw(&private->cp, &irb->scsw);
88 		if (is_final)
89 			cp_free(&private->cp);
90 	}
91 	memcpy(private->io_region->irb_area, irb, sizeof(*irb));
92 
93 	if (private->io_trigger)
94 		eventfd_signal(private->io_trigger, 1);
95 
96 	if (private->mdev && is_final)
97 		private->state = VFIO_CCW_STATE_IDLE;
98 }
99 
100 /*
101  * Css driver callbacks
102  */
vfio_ccw_sch_irq(struct subchannel * sch)103 static void vfio_ccw_sch_irq(struct subchannel *sch)
104 {
105 	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
106 
107 	inc_irq_stat(IRQIO_CIO);
108 	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
109 }
110 
vfio_ccw_sch_probe(struct subchannel * sch)111 static int vfio_ccw_sch_probe(struct subchannel *sch)
112 {
113 	struct pmcw *pmcw = &sch->schib.pmcw;
114 	struct vfio_ccw_private *private;
115 	int ret;
116 
117 	if (pmcw->qf) {
118 		dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
119 			 dev_name(&sch->dev));
120 		return -ENODEV;
121 	}
122 
123 	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
124 	if (!private)
125 		return -ENOMEM;
126 
127 	private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
128 					       GFP_KERNEL | GFP_DMA);
129 	if (!private->io_region) {
130 		kfree(private);
131 		return -ENOMEM;
132 	}
133 
134 	private->sch = sch;
135 	dev_set_drvdata(&sch->dev, private);
136 
137 	spin_lock_irq(sch->lock);
138 	private->state = VFIO_CCW_STATE_NOT_OPER;
139 	sch->isc = VFIO_CCW_ISC;
140 	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
141 	spin_unlock_irq(sch->lock);
142 	if (ret)
143 		goto out_free;
144 
145 	ret = vfio_ccw_mdev_reg(sch);
146 	if (ret)
147 		goto out_disable;
148 
149 	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
150 	atomic_set(&private->avail, 1);
151 	private->state = VFIO_CCW_STATE_STANDBY;
152 
153 	return 0;
154 
155 out_disable:
156 	cio_disable_subchannel(sch);
157 out_free:
158 	dev_set_drvdata(&sch->dev, NULL);
159 	kmem_cache_free(vfio_ccw_io_region, private->io_region);
160 	kfree(private);
161 	return ret;
162 }
163 
vfio_ccw_sch_remove(struct subchannel * sch)164 static int vfio_ccw_sch_remove(struct subchannel *sch)
165 {
166 	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
167 
168 	vfio_ccw_sch_quiesce(sch);
169 
170 	vfio_ccw_mdev_unreg(sch);
171 
172 	dev_set_drvdata(&sch->dev, NULL);
173 
174 	kmem_cache_free(vfio_ccw_io_region, private->io_region);
175 	kfree(private);
176 
177 	return 0;
178 }
179 
/* Stop all I/O and disable the subchannel on system shutdown. */
static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
	vfio_ccw_sch_quiesce(sch);
}
184 
185 /**
186  * vfio_ccw_sch_event - process subchannel event
187  * @sch: subchannel
188  * @process: non-zero if function is called in process context
189  *
190  * An unspecified event occurred for this subchannel. Adjust data according
191  * to the current operational state of the subchannel. Return zero when the
192  * event has been handled sufficiently or -EAGAIN when this function should
193  * be called again in process context.
194  */
vfio_ccw_sch_event(struct subchannel * sch,int process)195 static int vfio_ccw_sch_event(struct subchannel *sch, int process)
196 {
197 	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
198 	unsigned long flags;
199 	int rc = -EAGAIN;
200 
201 	spin_lock_irqsave(sch->lock, flags);
202 	if (!device_is_registered(&sch->dev))
203 		goto out_unlock;
204 
205 	if (work_pending(&sch->todo_work))
206 		goto out_unlock;
207 
208 	if (cio_update_schib(sch)) {
209 		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
210 		rc = 0;
211 		goto out_unlock;
212 	}
213 
214 	private = dev_get_drvdata(&sch->dev);
215 	if (private->state == VFIO_CCW_STATE_NOT_OPER) {
216 		private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
217 				 VFIO_CCW_STATE_STANDBY;
218 	}
219 	rc = 0;
220 
221 out_unlock:
222 	spin_unlock_irqrestore(sch->lock, flags);
223 
224 	return rc;
225 }
226 
/*
 * Subchannels this driver can bind to: I/O subchannels only.
 * (match_flags 0x1 presumably selects matching on .type — confirm
 * against the css bus match code.)
 */
static struct css_device_id vfio_ccw_sch_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);
232 
/* Css driver glue: wires the callbacks above into the css bus. */
static struct css_driver vfio_ccw_sch_driver = {
	.drv = {
		.name = "vfio_ccw",
		.owner = THIS_MODULE,
	},
	.subchannel_type = vfio_ccw_sch_ids,
	.irq = vfio_ccw_sch_irq,
	.probe = vfio_ccw_sch_probe,
	.remove = vfio_ccw_sch_remove,
	.shutdown = vfio_ccw_sch_shutdown,
	.sch_event = vfio_ccw_sch_event,
};
245 
vfio_ccw_sch_init(void)246 static int __init vfio_ccw_sch_init(void)
247 {
248 	int ret;
249 
250 	vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
251 	if (!vfio_ccw_work_q)
252 		return -ENOMEM;
253 
254 	vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
255 					sizeof(struct ccw_io_region), 0,
256 					SLAB_ACCOUNT, 0,
257 					sizeof(struct ccw_io_region), NULL);
258 	if (!vfio_ccw_io_region) {
259 		destroy_workqueue(vfio_ccw_work_q);
260 		return -ENOMEM;
261 	}
262 
263 	isc_register(VFIO_CCW_ISC);
264 	ret = css_driver_register(&vfio_ccw_sch_driver);
265 	if (ret) {
266 		isc_unregister(VFIO_CCW_ISC);
267 		kmem_cache_destroy(vfio_ccw_io_region);
268 		destroy_workqueue(vfio_ccw_work_q);
269 	}
270 
271 	return ret;
272 }
273 
/* Module teardown: release everything in reverse order of init. */
static void __exit vfio_ccw_sch_exit(void)
{
	css_driver_unregister(&vfio_ccw_sch_driver);
	isc_unregister(VFIO_CCW_ISC);
	kmem_cache_destroy(vfio_ccw_io_region);
	destroy_workqueue(vfio_ccw_work_q);
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

/* Matches the SPDX GPL-2.0 identifier at the top of the file. */
MODULE_LICENSE("GPL v2");
285