/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

struct workqueue_struct *vfio_ccw_work_q;

/*
 * Helpers
 */
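/*
 * Quiesce the subchannel: try to disable it and, while it reports busy,
 * issue cancel/halt/clear and wait for the resulting interrupts before
 * retrying the disable. The device state is left in NOT_OPER either way.
 */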
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int iretry, ret = 0;

	spin_lock_irq(sch->lock);
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;

	iretry = 255;
	do {
		ret = cio_cancel_halt_clear(sch, &iretry);

		if (ret == -EIO) {
			pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
			       sch->schid.ssid, sch->schid.sch_no);
			break;
		}

		/*
		 * Flush all I/O and wait for
		 * cancel/halt/clear completion.
		 */
		private->completion = &completion;
		spin_unlock_irq(sch->lock);

		if (ret == -EBUSY)
			wait_for_completion_timeout(&completion, 3*HZ);

		private->completion = NULL;
		flush_workqueue(vfio_ccw_work_q);
		spin_lock_irq(sch->lock);
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);
out_unlock:
	private->state = VFIO_CCW_STATE_NOT_OPER;
	spin_unlock_irq(sch->lock);
	return ret;
}

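/*
 * Deferred work to post the result of an I/O request to userspace: update
 * (and, once the I/O is final, free) the channel program, copy the IRB into
 * the I/O region, and signal the io_trigger eventfd so the mdev user can
 * collect it.
 */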
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
	struct vfio_ccw_private *private;
	struct irb *irb;
	bool is_final;

	private = container_of(work, struct vfio_ccw_private, io_work);
	irb = &private->irb;

	is_final = !(scsw_actl(&irb->scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
	if (scsw_is_solicited(&irb->scsw)) {
		cp_update_scsw(&private->cp, &irb->scsw);
		if (is_final)
			cp_free(&private->cp);
	}
	memcpy(private->io_region.irb_area, irb, sizeof(*irb));

	if (private->io_trigger)
		eventfd_signal(private->io_trigger, 1);

	if (private->mdev && is_final)
		private->state = VFIO_CCW_STATE_IDLE;
}

/*
 * Css driver callbacks
 */
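/* Interrupt handler: account the interrupt and forward it to the FSM. */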
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	inc_irq_stat(IRQIO_CIO);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

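/*
 * Probe: allocate the private structure, enable the subchannel with the
 * vfio-ccw ISC and register mediated device (mdev) support for it.
 * Subchannels with the QDIO facility are not supported.
 */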
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
	struct pmcw *pmcw = &sch->schib.pmcw;
	struct vfio_ccw_private *private;
	int ret;

	if (pmcw->qf) {
		dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
			 dev_name(&sch->dev));
		return -ENODEV;
	}

	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
	if (!private)
		return -ENOMEM;
	private->sch = sch;
	dev_set_drvdata(&sch->dev, private);

	spin_lock_irq(sch->lock);
	private->state = VFIO_CCW_STATE_NOT_OPER;
	sch->isc = VFIO_CCW_ISC;
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	spin_unlock_irq(sch->lock);
	if (ret)
		goto out_free;

	ret = vfio_ccw_mdev_reg(sch);
	if (ret)
		goto out_disable;

	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
	atomic_set(&private->avail, 1);
	private->state = VFIO_CCW_STATE_STANDBY;

	return 0;

out_disable:
	cio_disable_subchannel(sch);
out_free:
	dev_set_drvdata(&sch->dev, NULL);
	kfree(private);
	return ret;
}

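/*
 * Remove: quiesce the subchannel, tear down the mdev registration and
 * free the private structure.
 */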
static int vfio_ccw_sch_remove(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	vfio_ccw_sch_quiesce(sch);

	vfio_ccw_mdev_unreg(sch);

	dev_set_drvdata(&sch->dev, NULL);

	kfree(private);

	return 0;
}

static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
	vfio_ccw_sch_quiesce(sch);
}

/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	unsigned long flags;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	if (work_pending(&sch->todo_work))
		goto out_unlock;

	if (cio_update_schib(sch)) {
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
		rc = 0;
		goto out_unlock;
	}

	private = dev_get_drvdata(&sch->dev);
	if (private->state == VFIO_CCW_STATE_NOT_OPER) {
		private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
				 VFIO_CCW_STATE_STANDBY;
	}
	rc = 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return rc;
}

static struct css_device_id vfio_ccw_sch_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
	.drv = {
		.name = "vfio_ccw",
		.owner = THIS_MODULE,
	},
	.subchannel_type = vfio_ccw_sch_ids,
	.irq = vfio_ccw_sch_irq,
	.probe = vfio_ccw_sch_probe,
	.remove = vfio_ccw_sch_remove,
	.shutdown = vfio_ccw_sch_shutdown,
	.sch_event = vfio_ccw_sch_event,
};

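/*
 * Module init: create the work queue used for I/O completion processing,
 * then register the vfio-ccw interruption subclass and the css driver.
 */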
static int __init vfio_ccw_sch_init(void)
{
	int ret;

	vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
	if (!vfio_ccw_work_q)
		return -ENOMEM;

	isc_register(VFIO_CCW_ISC);
	ret = css_driver_register(&vfio_ccw_sch_driver);
	if (ret) {
		isc_unregister(VFIO_CCW_ISC);
		destroy_workqueue(vfio_ccw_work_q);
	}

	return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
	css_driver_unregister(&vfio_ccw_sch_driver);
	isc_unregister(VFIO_CCW_ISC);
	destroy_workqueue(vfio_ccw_work_q);
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");