// SPDX-License-Identifier: GPL-2.0-or-later
/* IBM POWER Barrier Synchronization Register Driver
 *
 * Copyright IBM Corporation 2008
 *
 * Author: Sonny Rao <sonnyrao@us.ibm.com>
 */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/kernel.h>
10*4882a593Smuzhiyun #include <linux/of.h>
11*4882a593Smuzhiyun #include <linux/of_address.h>
12*4882a593Smuzhiyun #include <linux/of_device.h>
13*4882a593Smuzhiyun #include <linux/of_platform.h>
14*4882a593Smuzhiyun #include <linux/fs.h>
15*4882a593Smuzhiyun #include <linux/module.h>
16*4882a593Smuzhiyun #include <linux/cdev.h>
17*4882a593Smuzhiyun #include <linux/list.h>
18*4882a593Smuzhiyun #include <linux/mm.h>
19*4882a593Smuzhiyun #include <linux/slab.h>
20*4882a593Smuzhiyun #include <asm/io.h>
21*4882a593Smuzhiyun
/*
 * This driver exposes a special register which can be used for fast
 * synchronization across a large SMP machine.  The hardware is exposed
 * as an array of bytes where each process will write to one of the bytes
 * to indicate it has finished the current stage, and this update is
 * broadcast to all processors without having to bounce a cacheline
 * between them.  In POWER5 and POWER6 there is one of these registers
 * per SMP, but it is presented in two forms: first, it is given as a
 * whole and then as a number of smaller registers which alias to parts
 * of the single whole register.  This can potentially allow multiple
 * groups of processes to each have their own private synchronization
 * device.
 *
 * Note that this hardware *must* be written to using *only* single-byte
 * writes.  It may be read using 1, 2, 4, or 8 byte loads, which must be
 * aligned since this region is treated as cache-inhibited.  Processes
 * should also use a full sync before and after writing to the BSR to
 * ensure all stores and the BSR update have made it to all chips in the
 * system.
 */
40*4882a593Smuzhiyun
/* This is arbitrary number, up to Power6 it's been 17 or fewer */
/* Size of the char-device minor range reserved at init time. */
#define BSR_MAX_DEVS (32)
43*4882a593Smuzhiyun
/*
 * Per-BSR-register bookkeeping.  One of these is allocated for every
 * address/size pair found in an "ibm,bsr" device-tree node and linked
 * onto the global bsr_devs list.
 */
struct bsr_dev {
	u64      bsr_addr;     /* Real address */
	u64      bsr_len;      /* length of mem region we can map */
	unsigned bsr_bytes;    /* size of the BSR reg itself */
	unsigned bsr_stride;   /* interval at which BSR repeats in the page */
	unsigned bsr_type;     /* maps to enum below */
	unsigned bsr_num;      /* bsr id number for its type */
	int      bsr_minor;    /* char-device minor (major is bsr_major) */

	struct list_head bsr_list;   /* link in global bsr_devs list */

	dev_t            bsr_dev;    /* full dev_t for this device */
	struct cdev      bsr_cdev;   /* embedded char device */
	struct device    *bsr_device;/* sysfs device, NULL until created */
	char             bsr_name[32];/* device name, "bsr<bytes>_<num>" */

};
61*4882a593Smuzhiyun
/* Number of BSR devices created so far; also the next free minor. */
static unsigned total_bsr_devs;
/* All bsr_dev instances, in creation order. */
static struct list_head bsr_devs = LIST_HEAD_INIT(bsr_devs);
static struct class *bsr_class;
static int bsr_major;

/* Classification of a BSR by its register size in bytes. */
enum {
	BSR_8    = 0,
	BSR_16   = 1,
	BSR_64   = 2,
	BSR_128  = 3,
	BSR_4096 = 4,
	BSR_UNKNOWN = 5,
	BSR_MAX  = 6,
};

/* Per-type counter used to number devices of the same size (bsrN_M). */
static unsigned bsr_types[BSR_MAX];
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun static ssize_t
bsr_size_show(struct device * dev,struct device_attribute * attr,char * buf)80*4882a593Smuzhiyun bsr_size_show(struct device *dev, struct device_attribute *attr, char *buf)
81*4882a593Smuzhiyun {
82*4882a593Smuzhiyun struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
83*4882a593Smuzhiyun return sprintf(buf, "%u\n", bsr_dev->bsr_bytes);
84*4882a593Smuzhiyun }
85*4882a593Smuzhiyun static DEVICE_ATTR_RO(bsr_size);
86*4882a593Smuzhiyun
87*4882a593Smuzhiyun static ssize_t
bsr_stride_show(struct device * dev,struct device_attribute * attr,char * buf)88*4882a593Smuzhiyun bsr_stride_show(struct device *dev, struct device_attribute *attr, char *buf)
89*4882a593Smuzhiyun {
90*4882a593Smuzhiyun struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
91*4882a593Smuzhiyun return sprintf(buf, "%u\n", bsr_dev->bsr_stride);
92*4882a593Smuzhiyun }
93*4882a593Smuzhiyun static DEVICE_ATTR_RO(bsr_stride);
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun static ssize_t
bsr_length_show(struct device * dev,struct device_attribute * attr,char * buf)96*4882a593Smuzhiyun bsr_length_show(struct device *dev, struct device_attribute *attr, char *buf)
97*4882a593Smuzhiyun {
98*4882a593Smuzhiyun struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
99*4882a593Smuzhiyun return sprintf(buf, "%llu\n", bsr_dev->bsr_len);
100*4882a593Smuzhiyun }
101*4882a593Smuzhiyun static DEVICE_ATTR_RO(bsr_length);
102*4882a593Smuzhiyun
/* Attributes instantiated for every device of bsr_class (see bsr_init). */
static struct attribute *bsr_dev_attrs[] = {
	&dev_attr_bsr_size.attr,
	&dev_attr_bsr_stride.attr,
	&dev_attr_bsr_length.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bsr_dev);
110*4882a593Smuzhiyun
bsr_mmap(struct file * filp,struct vm_area_struct * vma)111*4882a593Smuzhiyun static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
112*4882a593Smuzhiyun {
113*4882a593Smuzhiyun unsigned long size = vma->vm_end - vma->vm_start;
114*4882a593Smuzhiyun struct bsr_dev *dev = filp->private_data;
115*4882a593Smuzhiyun int ret;
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
118*4882a593Smuzhiyun
119*4882a593Smuzhiyun /* check for the case of a small BSR device and map one 4k page for it*/
120*4882a593Smuzhiyun if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE)
121*4882a593Smuzhiyun ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
122*4882a593Smuzhiyun vma->vm_page_prot);
123*4882a593Smuzhiyun else if (size <= dev->bsr_len)
124*4882a593Smuzhiyun ret = io_remap_pfn_range(vma, vma->vm_start,
125*4882a593Smuzhiyun dev->bsr_addr >> PAGE_SHIFT,
126*4882a593Smuzhiyun size, vma->vm_page_prot);
127*4882a593Smuzhiyun else
128*4882a593Smuzhiyun return -EINVAL;
129*4882a593Smuzhiyun
130*4882a593Smuzhiyun if (ret)
131*4882a593Smuzhiyun return -EAGAIN;
132*4882a593Smuzhiyun
133*4882a593Smuzhiyun return 0;
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun
bsr_open(struct inode * inode,struct file * filp)136*4882a593Smuzhiyun static int bsr_open(struct inode *inode, struct file *filp)
137*4882a593Smuzhiyun {
138*4882a593Smuzhiyun struct cdev *cdev = inode->i_cdev;
139*4882a593Smuzhiyun struct bsr_dev *dev = container_of(cdev, struct bsr_dev, bsr_cdev);
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun filp->private_data = dev;
142*4882a593Smuzhiyun return 0;
143*4882a593Smuzhiyun }
144*4882a593Smuzhiyun
/* File operations: the device is only ever opened and mmap'd. */
static const struct file_operations bsr_fops = {
	.owner = THIS_MODULE,
	.mmap  = bsr_mmap,
	.open  = bsr_open,
	.llseek = noop_llseek,
};
151*4882a593Smuzhiyun
bsr_cleanup_devs(void)152*4882a593Smuzhiyun static void bsr_cleanup_devs(void)
153*4882a593Smuzhiyun {
154*4882a593Smuzhiyun struct bsr_dev *cur, *n;
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun list_for_each_entry_safe(cur, n, &bsr_devs, bsr_list) {
157*4882a593Smuzhiyun if (cur->bsr_device) {
158*4882a593Smuzhiyun cdev_del(&cur->bsr_cdev);
159*4882a593Smuzhiyun device_del(cur->bsr_device);
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun list_del(&cur->bsr_list);
162*4882a593Smuzhiyun kfree(cur);
163*4882a593Smuzhiyun }
164*4882a593Smuzhiyun }
165*4882a593Smuzhiyun
bsr_add_node(struct device_node * bn)166*4882a593Smuzhiyun static int bsr_add_node(struct device_node *bn)
167*4882a593Smuzhiyun {
168*4882a593Smuzhiyun int bsr_stride_len, bsr_bytes_len, num_bsr_devs;
169*4882a593Smuzhiyun const u32 *bsr_stride;
170*4882a593Smuzhiyun const u32 *bsr_bytes;
171*4882a593Smuzhiyun unsigned i;
172*4882a593Smuzhiyun int ret = -ENODEV;
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun bsr_stride = of_get_property(bn, "ibm,lock-stride", &bsr_stride_len);
175*4882a593Smuzhiyun bsr_bytes = of_get_property(bn, "ibm,#lock-bytes", &bsr_bytes_len);
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun if (!bsr_stride || !bsr_bytes ||
178*4882a593Smuzhiyun (bsr_stride_len != bsr_bytes_len)) {
179*4882a593Smuzhiyun printk(KERN_ERR "bsr of-node has missing/incorrect property\n");
180*4882a593Smuzhiyun return ret;
181*4882a593Smuzhiyun }
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun num_bsr_devs = bsr_bytes_len / sizeof(u32);
184*4882a593Smuzhiyun
185*4882a593Smuzhiyun for (i = 0 ; i < num_bsr_devs; i++) {
186*4882a593Smuzhiyun struct bsr_dev *cur = kzalloc(sizeof(struct bsr_dev),
187*4882a593Smuzhiyun GFP_KERNEL);
188*4882a593Smuzhiyun struct resource res;
189*4882a593Smuzhiyun int result;
190*4882a593Smuzhiyun
191*4882a593Smuzhiyun if (!cur) {
192*4882a593Smuzhiyun printk(KERN_ERR "Unable to alloc bsr dev\n");
193*4882a593Smuzhiyun ret = -ENOMEM;
194*4882a593Smuzhiyun goto out_err;
195*4882a593Smuzhiyun }
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun result = of_address_to_resource(bn, i, &res);
198*4882a593Smuzhiyun if (result < 0) {
199*4882a593Smuzhiyun printk(KERN_ERR "bsr of-node has invalid reg property, skipping\n");
200*4882a593Smuzhiyun kfree(cur);
201*4882a593Smuzhiyun continue;
202*4882a593Smuzhiyun }
203*4882a593Smuzhiyun
204*4882a593Smuzhiyun cur->bsr_minor = i + total_bsr_devs;
205*4882a593Smuzhiyun cur->bsr_addr = res.start;
206*4882a593Smuzhiyun cur->bsr_len = resource_size(&res);
207*4882a593Smuzhiyun cur->bsr_bytes = bsr_bytes[i];
208*4882a593Smuzhiyun cur->bsr_stride = bsr_stride[i];
209*4882a593Smuzhiyun cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs);
210*4882a593Smuzhiyun
211*4882a593Smuzhiyun /* if we have a bsr_len of > 4k and less then PAGE_SIZE (64k pages) */
212*4882a593Smuzhiyun /* we can only map 4k of it, so only advertise the 4k in sysfs */
213*4882a593Smuzhiyun if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
214*4882a593Smuzhiyun cur->bsr_len = 4096;
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun switch(cur->bsr_bytes) {
217*4882a593Smuzhiyun case 8:
218*4882a593Smuzhiyun cur->bsr_type = BSR_8;
219*4882a593Smuzhiyun break;
220*4882a593Smuzhiyun case 16:
221*4882a593Smuzhiyun cur->bsr_type = BSR_16;
222*4882a593Smuzhiyun break;
223*4882a593Smuzhiyun case 64:
224*4882a593Smuzhiyun cur->bsr_type = BSR_64;
225*4882a593Smuzhiyun break;
226*4882a593Smuzhiyun case 128:
227*4882a593Smuzhiyun cur->bsr_type = BSR_128;
228*4882a593Smuzhiyun break;
229*4882a593Smuzhiyun case 4096:
230*4882a593Smuzhiyun cur->bsr_type = BSR_4096;
231*4882a593Smuzhiyun break;
232*4882a593Smuzhiyun default:
233*4882a593Smuzhiyun cur->bsr_type = BSR_UNKNOWN;
234*4882a593Smuzhiyun }
235*4882a593Smuzhiyun
236*4882a593Smuzhiyun cur->bsr_num = bsr_types[cur->bsr_type];
237*4882a593Smuzhiyun snprintf(cur->bsr_name, 32, "bsr%d_%d",
238*4882a593Smuzhiyun cur->bsr_bytes, cur->bsr_num);
239*4882a593Smuzhiyun
240*4882a593Smuzhiyun cdev_init(&cur->bsr_cdev, &bsr_fops);
241*4882a593Smuzhiyun result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1);
242*4882a593Smuzhiyun if (result) {
243*4882a593Smuzhiyun kfree(cur);
244*4882a593Smuzhiyun goto out_err;
245*4882a593Smuzhiyun }
246*4882a593Smuzhiyun
247*4882a593Smuzhiyun cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev,
248*4882a593Smuzhiyun cur, "%s", cur->bsr_name);
249*4882a593Smuzhiyun if (IS_ERR(cur->bsr_device)) {
250*4882a593Smuzhiyun printk(KERN_ERR "device_create failed for %s\n",
251*4882a593Smuzhiyun cur->bsr_name);
252*4882a593Smuzhiyun cdev_del(&cur->bsr_cdev);
253*4882a593Smuzhiyun kfree(cur);
254*4882a593Smuzhiyun goto out_err;
255*4882a593Smuzhiyun }
256*4882a593Smuzhiyun
257*4882a593Smuzhiyun bsr_types[cur->bsr_type] = cur->bsr_num + 1;
258*4882a593Smuzhiyun list_add_tail(&cur->bsr_list, &bsr_devs);
259*4882a593Smuzhiyun }
260*4882a593Smuzhiyun
261*4882a593Smuzhiyun total_bsr_devs += num_bsr_devs;
262*4882a593Smuzhiyun
263*4882a593Smuzhiyun return 0;
264*4882a593Smuzhiyun
265*4882a593Smuzhiyun out_err:
266*4882a593Smuzhiyun
267*4882a593Smuzhiyun bsr_cleanup_devs();
268*4882a593Smuzhiyun return ret;
269*4882a593Smuzhiyun }
270*4882a593Smuzhiyun
bsr_create_devs(struct device_node * bn)271*4882a593Smuzhiyun static int bsr_create_devs(struct device_node *bn)
272*4882a593Smuzhiyun {
273*4882a593Smuzhiyun int ret;
274*4882a593Smuzhiyun
275*4882a593Smuzhiyun while (bn) {
276*4882a593Smuzhiyun ret = bsr_add_node(bn);
277*4882a593Smuzhiyun if (ret) {
278*4882a593Smuzhiyun of_node_put(bn);
279*4882a593Smuzhiyun return ret;
280*4882a593Smuzhiyun }
281*4882a593Smuzhiyun bn = of_find_compatible_node(bn, NULL, "ibm,bsr");
282*4882a593Smuzhiyun }
283*4882a593Smuzhiyun return 0;
284*4882a593Smuzhiyun }
285*4882a593Smuzhiyun
bsr_init(void)286*4882a593Smuzhiyun static int __init bsr_init(void)
287*4882a593Smuzhiyun {
288*4882a593Smuzhiyun struct device_node *np;
289*4882a593Smuzhiyun dev_t bsr_dev;
290*4882a593Smuzhiyun int ret = -ENODEV;
291*4882a593Smuzhiyun
292*4882a593Smuzhiyun np = of_find_compatible_node(NULL, NULL, "ibm,bsr");
293*4882a593Smuzhiyun if (!np)
294*4882a593Smuzhiyun goto out_err;
295*4882a593Smuzhiyun
296*4882a593Smuzhiyun bsr_class = class_create(THIS_MODULE, "bsr");
297*4882a593Smuzhiyun if (IS_ERR(bsr_class)) {
298*4882a593Smuzhiyun printk(KERN_ERR "class_create() failed for bsr_class\n");
299*4882a593Smuzhiyun ret = PTR_ERR(bsr_class);
300*4882a593Smuzhiyun goto out_err_1;
301*4882a593Smuzhiyun }
302*4882a593Smuzhiyun bsr_class->dev_groups = bsr_dev_groups;
303*4882a593Smuzhiyun
304*4882a593Smuzhiyun ret = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr");
305*4882a593Smuzhiyun bsr_major = MAJOR(bsr_dev);
306*4882a593Smuzhiyun if (ret < 0) {
307*4882a593Smuzhiyun printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n");
308*4882a593Smuzhiyun goto out_err_2;
309*4882a593Smuzhiyun }
310*4882a593Smuzhiyun
311*4882a593Smuzhiyun ret = bsr_create_devs(np);
312*4882a593Smuzhiyun if (ret < 0) {
313*4882a593Smuzhiyun np = NULL;
314*4882a593Smuzhiyun goto out_err_3;
315*4882a593Smuzhiyun }
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun return 0;
318*4882a593Smuzhiyun
319*4882a593Smuzhiyun out_err_3:
320*4882a593Smuzhiyun unregister_chrdev_region(bsr_dev, BSR_MAX_DEVS);
321*4882a593Smuzhiyun
322*4882a593Smuzhiyun out_err_2:
323*4882a593Smuzhiyun class_destroy(bsr_class);
324*4882a593Smuzhiyun
325*4882a593Smuzhiyun out_err_1:
326*4882a593Smuzhiyun of_node_put(np);
327*4882a593Smuzhiyun
328*4882a593Smuzhiyun out_err:
329*4882a593Smuzhiyun
330*4882a593Smuzhiyun return ret;
331*4882a593Smuzhiyun }
332*4882a593Smuzhiyun
bsr_exit(void)333*4882a593Smuzhiyun static void __exit bsr_exit(void)
334*4882a593Smuzhiyun {
335*4882a593Smuzhiyun
336*4882a593Smuzhiyun bsr_cleanup_devs();
337*4882a593Smuzhiyun
338*4882a593Smuzhiyun if (bsr_class)
339*4882a593Smuzhiyun class_destroy(bsr_class);
340*4882a593Smuzhiyun
341*4882a593Smuzhiyun if (bsr_major)
342*4882a593Smuzhiyun unregister_chrdev_region(MKDEV(bsr_major, 0), BSR_MAX_DEVS);
343*4882a593Smuzhiyun }
344*4882a593Smuzhiyun
/* Module entry/exit points and metadata. */
module_init(bsr_init);
module_exit(bsr_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sonny Rao <sonnyrao@us.ibm.com>");
349