/* IBM POWER Barrier Synchronization Register Driver
 *
 * Copyright IBM Corporation 2008
 *
 * Author: Sonny Rao <sonnyrao@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/pgtable.h>
#include <asm/io.h>

/*
 This driver exposes a special register which can be used for fast
 synchronization across a large SMP machine.  The hardware is exposed
 as an array of bytes where each process writes to one of the bytes to
 indicate it has finished the current stage, and this update is broadcast to
 all processors without having to bounce a cacheline between them.  In
 POWER5 and POWER6 there is one of these registers per SMP machine, but it is
 presented in two forms: first, it is given as a whole, and then as a number
 of smaller registers which alias to parts of the single whole register.
 This can potentially allow multiple groups of processes to each have their
 own private synchronization device.

 Note that this hardware *must* be written to using *only* single byte writes.
 It may be read using 1, 2, 4, or 8 byte loads, which must be aligned since
 this region is treated as cache-inhibited.  Processes should also use a
 full sync before and after writing to the BSR to ensure all stores and
 the BSR update have made it to all chips in the system.
*/
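
/*
 * Illustrative only: a minimal sketch (not part of this driver) of how a
 * user-space process might use one of these devices.  The device path
 * /dev/bsr64_0, the 4k mapping length, and the my_slot index are assumptions
 * for the example; actual names and sizes depend on the BSRs present on the
 * machine and on udev configuration.
 *
 *	int fd = open("/dev/bsr64_0", O_RDWR);
 *	unsigned char *bsr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				  MAP_SHARED, fd, 0);
 *
 *	asm volatile("sync" ::: "memory");	// full sync before the store
 *	bsr[my_slot] = 1;			// single-byte store only
 *	asm volatile("sync" ::: "memory");	// full sync after the store
 *
 * Other processes can then read the byte (or the whole register with an
 * aligned 1, 2, 4, or 8 byte load) to see that this stage is complete.
 */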

/* This is an arbitrary number; up to POWER6 it has been 17 or fewer. */
#define BSR_MAX_DEVS (32)

struct bsr_dev {
	u64      bsr_addr;     /* Real address */
	u64      bsr_len;      /* length of mem region we can map */
	unsigned bsr_bytes;    /* size of the BSR reg itself */
	unsigned bsr_stride;   /* interval at which BSR repeats in the page */
	unsigned bsr_type;     /* maps to enum below */
	unsigned bsr_num;      /* bsr id number for its type */
	int      bsr_minor;

	struct list_head bsr_list;

	dev_t    bsr_dev;
	struct cdev bsr_cdev;
	struct device *bsr_device;
	char     bsr_name[32];

};

static unsigned total_bsr_devs;
static struct list_head bsr_devs = LIST_HEAD_INIT(bsr_devs);
static struct class *bsr_class;
static int bsr_major;

enum {
	BSR_8    = 0,
	BSR_16   = 1,
	BSR_64   = 2,
	BSR_128  = 3,
	BSR_4096 = 4,
	BSR_UNKNOWN = 5,
	BSR_MAX  = 6,
};

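/*
 * Per-size instance counters: bsr_types[type] holds the number to use for
 * the next device of that size, so names come out as bsr8_0, bsr64_1, etc.
 */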
static unsigned bsr_types[BSR_MAX];

static ssize_t
bsr_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", bsr_dev->bsr_bytes);
}
static DEVICE_ATTR_RO(bsr_size);

static ssize_t
bsr_stride_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", bsr_dev->bsr_stride);
}
static DEVICE_ATTR_RO(bsr_stride);

static ssize_t
bsr_length_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
	return sprintf(buf, "%llu\n", bsr_dev->bsr_len);
}
static DEVICE_ATTR_RO(bsr_length);

static struct attribute *bsr_dev_attrs[] = {
	&dev_attr_bsr_size.attr,
	&dev_attr_bsr_stride.attr,
	&dev_attr_bsr_length.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bsr_dev);

static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long size   = vma->vm_end - vma->vm_start;
	struct bsr_dev *dev = filp->private_data;
	int ret;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* check for the case of a small BSR device and map one 4k page for it */
	if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE)
		ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
				   vma->vm_page_prot);
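	/* otherwise map the requested size directly, as long as it fits in the region */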
	else if (size <= dev->bsr_len)
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 dev->bsr_addr >> PAGE_SHIFT,
					 size, vma->vm_page_prot);
	else
		return -EINVAL;

	if (ret)
		return -EAGAIN;

	return 0;
}

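/* Stash the bsr_dev on the file so bsr_mmap() can find it later. */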
static int bsr_open(struct inode *inode, struct file *filp)
{
	struct cdev *cdev = inode->i_cdev;
	struct bsr_dev *dev = container_of(cdev, struct bsr_dev, bsr_cdev);

	filp->private_data = dev;
	return 0;
}

static const struct file_operations bsr_fops = {
	.owner = THIS_MODULE,
	.mmap  = bsr_mmap,
	.open  = bsr_open,
	.llseek = noop_llseek,
};

static void bsr_cleanup_devs(void)
{
	struct bsr_dev *cur, *n;

	list_for_each_entry_safe(cur, n, &bsr_devs, bsr_list) {
		if (cur->bsr_device) {
			cdev_del(&cur->bsr_cdev);
			device_del(cur->bsr_device);
		}
		list_del(&cur->bsr_list);
		kfree(cur);
	}
}

static int bsr_add_node(struct device_node *bn)
{
	int bsr_stride_len, bsr_bytes_len, num_bsr_devs;
	const u32 *bsr_stride;
	const u32 *bsr_bytes;
	unsigned i;
	int ret = -ENODEV;

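	/*
	 * ibm,lock-stride and ibm,#lock-bytes are parallel arrays with one
	 * cell per BSR described by this node.
	 */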
	bsr_stride = of_get_property(bn, "ibm,lock-stride", &bsr_stride_len);
	bsr_bytes  = of_get_property(bn, "ibm,#lock-bytes", &bsr_bytes_len);

	if (!bsr_stride || !bsr_bytes ||
	    (bsr_stride_len != bsr_bytes_len)) {
		printk(KERN_ERR "bsr of-node has missing/incorrect property\n");
		return ret;
	}

	num_bsr_devs = bsr_bytes_len / sizeof(u32);

	for (i = 0 ; i < num_bsr_devs; i++) {
		struct bsr_dev *cur = kzalloc(sizeof(struct bsr_dev),
					      GFP_KERNEL);
		struct resource res;
		int result;

		if (!cur) {
			printk(KERN_ERR "Unable to alloc bsr dev\n");
			ret = -ENOMEM;
			goto out_err;
		}

		result = of_address_to_resource(bn, i, &res);
		if (result < 0) {
			printk(KERN_ERR "bsr of-node has invalid reg property, skipping\n");
			kfree(cur);
			continue;
		}

		cur->bsr_minor  = i + total_bsr_devs;
		cur->bsr_addr   = res.start;
		cur->bsr_len    = resource_size(&res);
		cur->bsr_bytes  = bsr_bytes[i];
		cur->bsr_stride = bsr_stride[i];
		cur->bsr_dev    = MKDEV(bsr_major, i + total_bsr_devs);

		/* if we have a bsr_len of > 4k and less than PAGE_SIZE (64k pages) */
		/* we can only map 4k of it, so only advertise the 4k in sysfs */
		if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
			cur->bsr_len = 4096;

		switch (cur->bsr_bytes) {
		case 8:
			cur->bsr_type = BSR_8;
			break;
		case 16:
			cur->bsr_type = BSR_16;
			break;
		case 64:
			cur->bsr_type = BSR_64;
			break;
		case 128:
			cur->bsr_type = BSR_128;
			break;
		case 4096:
			cur->bsr_type = BSR_4096;
			break;
		default:
			cur->bsr_type = BSR_UNKNOWN;
		}

		cur->bsr_num = bsr_types[cur->bsr_type];
		snprintf(cur->bsr_name, 32, "bsr%d_%d",
			 cur->bsr_bytes, cur->bsr_num);

		cdev_init(&cur->bsr_cdev, &bsr_fops);
		result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1);
		if (result) {
			kfree(cur);
			goto out_err;
		}

		cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev,
						cur, "%s", cur->bsr_name);
		if (IS_ERR(cur->bsr_device)) {
			printk(KERN_ERR "device_create failed for %s\n",
			       cur->bsr_name);
			cdev_del(&cur->bsr_cdev);
			kfree(cur);
			goto out_err;
		}

		bsr_types[cur->bsr_type] = cur->bsr_num + 1;
		list_add_tail(&cur->bsr_list, &bsr_devs);
	}

	total_bsr_devs += num_bsr_devs;

	return 0;

 out_err:

	bsr_cleanup_devs();
	return ret;
}

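/*
 * Walk every ibm,bsr node, starting with the one passed in.  The reference
 * on bn is always dropped here, either by of_find_compatible_node()
 * advancing past it or by of_node_put() on the error path.
 */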
static int bsr_create_devs(struct device_node *bn)
{
	int ret;

	while (bn) {
		ret = bsr_add_node(bn);
		if (ret) {
			of_node_put(bn);
			return ret;
		}
		bn = of_find_compatible_node(bn, NULL, "ibm,bsr");
	}
	return 0;
}

static int __init bsr_init(void)
{
	struct device_node *np;
	dev_t bsr_dev;
	int ret = -ENODEV;

	np = of_find_compatible_node(NULL, NULL, "ibm,bsr");
	if (!np)
		goto out_err;

	bsr_class = class_create(THIS_MODULE, "bsr");
	if (IS_ERR(bsr_class)) {
		printk(KERN_ERR "class_create() failed for bsr_class\n");
		ret = PTR_ERR(bsr_class);
		goto out_err_1;
	}
	bsr_class->dev_groups = bsr_dev_groups;

	ret = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr");
	bsr_major = MAJOR(bsr_dev);
	if (ret < 0) {
		printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n");
		goto out_err_2;
	}

	if ((ret = bsr_create_devs(np)) < 0) {
		np = NULL;
		goto out_err_3;
	}

	return 0;

 out_err_3:
	unregister_chrdev_region(bsr_dev, BSR_MAX_DEVS);

 out_err_2:
	class_destroy(bsr_class);

 out_err_1:
	of_node_put(np);

 out_err:

	return ret;
}


static void __exit bsr_exit(void)
{

	bsr_cleanup_devs();

	if (bsr_class)
		class_destroy(bsr_class);

	if (bsr_major)
		unregister_chrdev_region(MKDEV(bsr_major, 0), BSR_MAX_DEVS);
}

module_init(bsr_init);
module_exit(bsr_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sonny Rao <sonnyrao@us.ibm.com>");