1
2
3#include <linux/device.h>
4#include <linux/slab.h>
5#include <linux/idr.h>
6#include <cxlmem.h>
7#include <cxl.h>
8#include "core.h"
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24static DEFINE_IDA(cxl_nvdimm_bridge_ida);
25
26static void cxl_nvdimm_bridge_release(struct device *dev)
27{
28 struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
29
30 ida_free(&cxl_nvdimm_bridge_ida, cxl_nvb->id);
31 kfree(cxl_nvb);
32}
33
/* Sysfs attribute groups for nvdimm-bridge devices (base CXL attrs only) */
static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};
38
/*
 * Device type for nvdimm-bridge devices; also serves as the type identity
 * checked by is_cxl_nvdimm_bridge() / to_cxl_nvdimm_bridge().
 */
const struct device_type cxl_nvdimm_bridge_type = {
	.name = "cxl_nvdimm_bridge",
	.release = cxl_nvdimm_bridge_release,
	.groups = cxl_nvdimm_bridge_attribute_groups,
};
44
45struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
46{
47 if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
48 "not a cxl_nvdimm_bridge device\n"))
49 return NULL;
50 return container_of(dev, struct cxl_nvdimm_bridge, dev);
51}
52EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, CXL);
53
54bool is_cxl_nvdimm_bridge(struct device *dev)
55{
56 return dev->type == &cxl_nvdimm_bridge_type;
57}
58EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm_bridge, CXL);
59
60__mock int match_nvdimm_bridge(struct device *dev, const void *data)
61{
62 return is_cxl_nvdimm_bridge(dev);
63}
64
65struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
66{
67 struct device *dev;
68
69 dev = bus_find_device(&cxl_bus_type, NULL, cxl_nvd, match_nvdimm_bridge);
70 if (!dev)
71 return NULL;
72 return to_cxl_nvdimm_bridge(dev);
73}
74EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, CXL);
75
76static struct cxl_nvdimm_bridge *
77cxl_nvdimm_bridge_alloc(struct cxl_port *port)
78{
79 struct cxl_nvdimm_bridge *cxl_nvb;
80 struct device *dev;
81 int rc;
82
83 cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
84 if (!cxl_nvb)
85 return ERR_PTR(-ENOMEM);
86
87 rc = ida_alloc(&cxl_nvdimm_bridge_ida, GFP_KERNEL);
88 if (rc < 0)
89 goto err;
90 cxl_nvb->id = rc;
91
92 dev = &cxl_nvb->dev;
93 cxl_nvb->port = port;
94 cxl_nvb->state = CXL_NVB_NEW;
95 device_initialize(dev);
96 device_set_pm_not_required(dev);
97 dev->parent = &port->dev;
98 dev->bus = &cxl_bus_type;
99 dev->type = &cxl_nvdimm_bridge_type;
100
101 return cxl_nvb;
102
103err:
104 kfree(cxl_nvb);
105 return ERR_PTR(rc);
106}
107
/*
 * devm action paired with devm_cxl_add_nvdimm_bridge(): shut down and
 * unregister the bridge when the host driver unbinds.
 */
static void unregister_nvb(void *_cxl_nvb)
{
	struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
	bool flush;

	/*
	 * Under the device lock, mark the bridge dead so that any state
	 * machine running elsewhere (presumably the cxl_pmem bridge driver
	 * that advances cxl_nvb->state — confirm against that driver) sees a
	 * terminal state. If the state ever advanced past CXL_NVB_NEW there
	 * may be a state_work instance in flight that must be flushed below.
	 */
	device_lock(&cxl_nvb->dev);
	flush = cxl_nvb->state != CXL_NVB_NEW;
	cxl_nvb->state = CXL_NVB_DEAD;
	device_unlock(&cxl_nvb->dev);

	/*
	 * Detach the driver first so no new state_work is queued, then flush
	 * any outstanding work before finally unregistering the device.
	 * NOTE(review): the order here (release driver -> flush -> unregister)
	 * looks deliberate — do not reorder without understanding the
	 * state_work lifecycle.
	 */
	device_release_driver(&cxl_nvb->dev);
	if (flush)
		flush_work(&cxl_nvb->state_work);
	device_unregister(&cxl_nvb->dev);
}
135
136
137
138
139
140
141
142
143struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
144 struct cxl_port *port)
145{
146 struct cxl_nvdimm_bridge *cxl_nvb;
147 struct device *dev;
148 int rc;
149
150 if (!IS_ENABLED(CONFIG_CXL_PMEM))
151 return ERR_PTR(-ENXIO);
152
153 cxl_nvb = cxl_nvdimm_bridge_alloc(port);
154 if (IS_ERR(cxl_nvb))
155 return cxl_nvb;
156
157 dev = &cxl_nvb->dev;
158 rc = dev_set_name(dev, "nvdimm-bridge%d", cxl_nvb->id);
159 if (rc)
160 goto err;
161
162 rc = device_add(dev);
163 if (rc)
164 goto err;
165
166 rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
167 if (rc)
168 return ERR_PTR(rc);
169
170 return cxl_nvb;
171
172err:
173 put_device(dev);
174 return ERR_PTR(rc);
175}
176EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, CXL);
177
/* Device-model release callback for cxl_nvdimm devices */
static void cxl_nvdimm_release(struct device *dev)
{
	kfree(to_cxl_nvdimm(dev));
}
184
/* Sysfs attribute groups for cxl_nvdimm devices (base CXL attrs only) */
static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};
189
/*
 * Device type for cxl_nvdimm devices; also the type identity checked by
 * is_cxl_nvdimm() / to_cxl_nvdimm().
 */
const struct device_type cxl_nvdimm_type = {
	.name = "cxl_nvdimm",
	.release = cxl_nvdimm_release,
	.groups = cxl_nvdimm_attribute_groups,
};
195
196bool is_cxl_nvdimm(struct device *dev)
197{
198 return dev->type == &cxl_nvdimm_type;
199}
200EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm, CXL);
201
202struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
203{
204 if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
205 "not a cxl_nvdimm device\n"))
206 return NULL;
207 return container_of(dev, struct cxl_nvdimm, dev);
208}
209EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL);
210
211static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
212{
213 struct cxl_nvdimm *cxl_nvd;
214 struct device *dev;
215
216 cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
217 if (!cxl_nvd)
218 return ERR_PTR(-ENOMEM);
219
220 dev = &cxl_nvd->dev;
221 cxl_nvd->cxlmd = cxlmd;
222 device_initialize(dev);
223 device_set_pm_not_required(dev);
224 dev->parent = &cxlmd->dev;
225 dev->bus = &cxl_bus_type;
226 dev->type = &cxl_nvdimm_type;
227
228 return cxl_nvd;
229}
230
/* devm action paired with devm_cxl_add_nvdimm(): drop the device */
static void cxl_nvd_unregister(void *dev)
{
	struct device *d = dev;

	device_unregister(d);
}
235
236
237
238
239
240
241
242
243int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
244{
245 struct cxl_nvdimm *cxl_nvd;
246 struct device *dev;
247 int rc;
248
249 cxl_nvd = cxl_nvdimm_alloc(cxlmd);
250 if (IS_ERR(cxl_nvd))
251 return PTR_ERR(cxl_nvd);
252
253 dev = &cxl_nvd->dev;
254 rc = dev_set_name(dev, "pmem%d", cxlmd->id);
255 if (rc)
256 goto err;
257
258 rc = device_add(dev);
259 if (rc)
260 goto err;
261
262 dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
263 dev_name(dev));
264
265 return devm_add_action_or_reset(host, cxl_nvd_unregister, dev);
266
267err:
268 put_device(dev);
269 return rc;
270}
271EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, CXL);
272