// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <cxlmem.h>
#include "core.h"

/*
 * An entire PCI topology full of devices should be enough for any
 * config
 */
#define CXL_MEM_MAX_DEVS 65536

static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);

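/*
 * Final put of the memdev: return the minor number to the ida and free
 * the allocation made in cxl_memdev_alloc().
 */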
static void cxl_memdev_release(struct device *dev)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

	ida_free(&cxl_memdev_ida, cxlmd->id);
	kfree(cxlmd);
}

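/* Group the memdev character device nodes under a /dev/cxl/ directory */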
static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
				kgid_t *gid)
{
	return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}

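/*
 * Read-only sysfs attributes published for each memdev, backed by fields
 * cached in the cxl_mem structure.
 */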
static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_mem *cxlm = cxlmd->cxlm;

	return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t payload_max_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_mem *cxlm = cxlmd->cxlm;

	return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
}
static DEVICE_ATTR_RO(payload_max);

static ssize_t label_storage_size_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_mem *cxlm = cxlmd->cxlm;

	return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
}
static DEVICE_ATTR_RO(label_storage_size);

static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_mem *cxlm = cxlmd->cxlm;
	unsigned long long len = range_len(&cxlm->ram_range);

	return sysfs_emit(buf, "%#llx\n", len);
}

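/*
 * Both the "ram" and "pmem" groups expose an attribute named "size", so
 * open-code __ATTR() with distinct variable names rather than using
 * DEVICE_ATTR_RO(size) twice.
 */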
static struct device_attribute dev_attr_ram_size =
	__ATTR(size, 0444, ram_size_show, NULL);

static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_mem *cxlm = cxlmd->cxlm;
	unsigned long long len = range_len(&cxlm->pmem_range);

	return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_pmem_size =
	__ATTR(size, 0444, pmem_size_show, NULL);

static struct attribute *cxl_memdev_attributes[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_payload_max.attr,
	&dev_attr_label_storage_size.attr,
	NULL,
};

static struct attribute *cxl_memdev_pmem_attributes[] = {
	&dev_attr_pmem_size.attr,
	NULL,
};

static struct attribute *cxl_memdev_ram_attributes[] = {
	&dev_attr_ram_size.attr,
	NULL,
};

static struct attribute_group cxl_memdev_attribute_group = {
	.attrs = cxl_memdev_attributes,
};

static struct attribute_group cxl_memdev_ram_attribute_group = {
	.name = "ram",
	.attrs = cxl_memdev_ram_attributes,
};

static struct attribute_group cxl_memdev_pmem_attribute_group = {
	.name = "pmem",
	.attrs = cxl_memdev_pmem_attributes,
};

static const struct attribute_group *cxl_memdev_attribute_groups[] = {
	&cxl_memdev_attribute_group,
	&cxl_memdev_ram_attribute_group,
	&cxl_memdev_pmem_attribute_group,
	NULL,
};

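/*
 * Every memdev shares this device_type: it wires up the release handler,
 * the /dev/cxl/ devnode naming, and the sysfs groups defined above. The
 * named "ram" and "pmem" groups land in like-named subdirectories.
 */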
static const struct device_type cxl_memdev_type = {
	.name = "cxl_memdev",
	.release = cxl_memdev_release,
	.devnode = cxl_memdev_devnode,
	.groups = cxl_memdev_attribute_groups,
};

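/*
 * devm teardown: delete the cdev/device pair so no new opens or ioctls can
 * start, invoke the registrant's ->shutdown() to fence in-flight users, then
 * drop the reference held since device_initialize().
 */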
static void cxl_memdev_unregister(void *_cxlmd)
{
	struct cxl_memdev *cxlmd = _cxlmd;
	struct device *dev = &cxlmd->dev;
	struct cdev *cdev = &cxlmd->cdev;
	const struct cdevm_file_operations *cdevm_fops;

	cdev_device_del(&cxlmd->cdev, dev);
	cdevm_fops = container_of(cdev->ops, typeof(*cdevm_fops), fops);
	cdevm_fops->shutdown(dev);
	put_device(dev);
}

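/*
 * Allocate a memdev and its minor number, and initialize (but do not add)
 * the device and cdev. The caller names the device and publishes both via
 * cdev_device_add().
 */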
static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
					   const struct file_operations *fops)
{
	struct pci_dev *pdev = cxlm->pdev;
	struct cxl_memdev *cxlmd;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
	if (!cxlmd)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
	if (rc < 0)
		goto err;
	cxlmd->id = rc;

	dev = &cxlmd->dev;
	device_initialize(dev);
	dev->parent = &pdev->dev;
	dev->bus = &cxl_bus_type;
	dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
	dev->type = &cxl_memdev_type;
	device_set_pm_not_required(dev);

	cdev = &cxlmd->cdev;
	cdev_init(cdev, fops);
	return cxlmd;

err:
	kfree(cxlmd);
	return ERR_PTR(rc);
}

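/*
 * devm_cxl_add_memdev() - register a cxl_memdev on the CXL bus
 * @host: devm context whose unwind tears the memdev back down
 * @cxlm: backing memory-device state for the new memdev
 * @cdevm_fops: char dev file operations plus a ->shutdown() hook
 *
 * Returns the new memdev on success, or an ERR_PTR() on failure.
 */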
struct cxl_memdev *
devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
		    const struct cdevm_file_operations *cdevm_fops)
{
	struct cxl_memdev *cxlmd;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	cxlmd = cxl_memdev_alloc(cxlm, &cdevm_fops->fops);
	if (IS_ERR(cxlmd))
		return cxlmd;

	dev = &cxlmd->dev;
	rc = dev_set_name(dev, "mem%d", cxlmd->id);
	if (rc)
		goto err;

	/*
	 * Activate ioctl operations, no cxl_memdev_rwsem manipulation
	 * needed as this is ordered with cdev_add() publishing the device.
	 */
	cxlmd->cxlm = cxlm;

	cdev = &cxlmd->cdev;
	rc = cdev_device_add(cdev, dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
	if (rc)
		return ERR_PTR(rc);
	return cxlmd;

err:
	/*
	 * The cdev was briefly live, shutdown any ioctl operations that
	 * saw that state.
	 */
	cdevm_fops->shutdown(dev);
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_memdev);

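/*
 * Reserve a dynamic char major with CXL_MEM_MAX_DEVS minors at init time;
 * the per-memdev cdevs are added later by devm_cxl_add_memdev().
 */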
__init int cxl_memdev_init(void)
{
	dev_t devt;
	int rc;

	rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
	if (rc)
		return rc;

	cxl_mem_major = MAJOR(devt);

	return 0;
}

void cxl_memdev_exit(void)
{
	unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
}