1
2
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/ntb.h>
#include <linux/overflow.h>
#include <linux/pci.h>
8
/*
 * Per-device NTB MSI state, allocated by ntb_msi_init() and hung off the
 * ntb_dev.  The flexible peer_mws[] array holds one ioremapped mapping of
 * each peer's outbound MSI memory window.
 */
struct ntb_msi {
	u64 base_addr;	/* first address covered by the local MSI window */
	u64 end_addr;	/* one past the last usable address in the window */

	/* optional client callback invoked when an MSI descriptor is rewritten */
	void (*desc_changed)(void *ctx);

	/* per-peer mapping of the peer's MSI memory window (NULL if unmapped) */
	u32 __iomem *peer_mws[];
};
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31int ntb_msi_init(struct ntb_dev *ntb,
32 void (*desc_changed)(void *ctx))
33{
34 phys_addr_t mw_phys_addr;
35 resource_size_t mw_size;
36 size_t struct_size;
37 int peer_widx;
38 int peers;
39 int ret;
40 int i;
41
42 peers = ntb_peer_port_count(ntb);
43 if (peers <= 0)
44 return -EINVAL;
45
46 struct_size = sizeof(*ntb->msi) + sizeof(*ntb->msi->peer_mws) * peers;
47
48 ntb->msi = devm_kzalloc(&ntb->dev, struct_size, GFP_KERNEL);
49 if (!ntb->msi)
50 return -ENOMEM;
51
52 ntb->msi->desc_changed = desc_changed;
53
54 for (i = 0; i < peers; i++) {
55 peer_widx = ntb_peer_mw_count(ntb) - 1 - i;
56
57 ret = ntb_peer_mw_get_addr(ntb, peer_widx, &mw_phys_addr,
58 &mw_size);
59 if (ret)
60 goto unroll;
61
62 ntb->msi->peer_mws[i] = devm_ioremap(&ntb->dev, mw_phys_addr,
63 mw_size);
64 if (!ntb->msi->peer_mws[i]) {
65 ret = -EFAULT;
66 goto unroll;
67 }
68 }
69
70 return 0;
71
72unroll:
73 for (i = 0; i < peers; i++)
74 if (ntb->msi->peer_mws[i])
75 devm_iounmap(&ntb->dev, ntb->msi->peer_mws[i]);
76
77 devm_kfree(&ntb->dev, ntb->msi);
78 ntb->msi = NULL;
79 return ret;
80}
81EXPORT_SYMBOL(ntb_msi_init);
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97int ntb_msi_setup_mws(struct ntb_dev *ntb)
98{
99 struct msi_desc *desc;
100 u64 addr;
101 int peer, peer_widx;
102 resource_size_t addr_align, size_align, size_max;
103 resource_size_t mw_size = SZ_32K;
104 resource_size_t mw_min_size = mw_size;
105 int i;
106 int ret;
107
108 if (!ntb->msi)
109 return -EINVAL;
110
111 desc = first_msi_entry(&ntb->pdev->dev);
112 addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
113
114 for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
115 peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
116 if (peer_widx < 0)
117 return peer_widx;
118
119 ret = ntb_mw_get_align(ntb, peer, peer_widx, &addr_align,
120 NULL, NULL);
121 if (ret)
122 return ret;
123
124 addr &= ~(addr_align - 1);
125 }
126
127 for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
128 peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
129 if (peer_widx < 0) {
130 ret = peer_widx;
131 goto error_out;
132 }
133
134 ret = ntb_mw_get_align(ntb, peer, peer_widx, NULL,
135 &size_align, &size_max);
136 if (ret)
137 goto error_out;
138
139 mw_size = round_up(mw_size, size_align);
140 mw_size = max(mw_size, size_max);
141 if (mw_size < mw_min_size)
142 mw_min_size = mw_size;
143
144 ret = ntb_mw_set_trans(ntb, peer, peer_widx,
145 addr, mw_size);
146 if (ret)
147 goto error_out;
148 }
149
150 ntb->msi->base_addr = addr;
151 ntb->msi->end_addr = addr + mw_min_size;
152
153 return 0;
154
155error_out:
156 for (i = 0; i < peer; i++) {
157 peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
158 if (peer_widx < 0)
159 continue;
160
161 ntb_mw_clear_trans(ntb, i, peer_widx);
162 }
163
164 return ret;
165}
166EXPORT_SYMBOL(ntb_msi_setup_mws);
167
168
169
170
171
172
173
/**
 * ntb_msi_clear_mws() - Clear all inbound MSI memory windows
 * @ntb:	NTB device context
 *
 * Tear down the memory-window translations set up by ntb_msi_setup_mws().
 * Should be called after a link-down event.
 */
void ntb_msi_clear_mws(struct ntb_dev *ntb)
{
	int widx;
	int i;

	for (i = 0; i < ntb_peer_port_count(ntb); i++) {
		widx = ntb_peer_highest_mw_idx(ntb, i);
		if (widx >= 0)
			ntb_mw_clear_trans(ntb, i, widx);
	}
}
EXPORT_SYMBOL(ntb_msi_clear_mws);
188
/*
 * Device-managed bookkeeping for one MSI entry's write_msi_msg callback,
 * released via ntbm_msi_callback_release().
 */
struct ntb_msi_devres {
	struct ntb_dev *ntb;		/* owning NTB device */
	struct msi_desc *entry;		/* MSI entry the callback is attached to */
	struct ntb_msi_desc *msi_desc;	/* client descriptor kept up to date */
};
194
195static int ntb_msi_set_desc(struct ntb_dev *ntb, struct msi_desc *entry,
196 struct ntb_msi_desc *msi_desc)
197{
198 u64 addr;
199
200 addr = entry->msg.address_lo +
201 ((uint64_t)entry->msg.address_hi << 32);
202
203 if (addr < ntb->msi->base_addr || addr >= ntb->msi->end_addr) {
204 dev_warn_once(&ntb->dev,
205 "IRQ %d: MSI Address not within the memory window (%llx, [%llx %llx])\n",
206 entry->irq, addr, ntb->msi->base_addr,
207 ntb->msi->end_addr);
208 return -EFAULT;
209 }
210
211 msi_desc->addr_offset = addr - ntb->msi->base_addr;
212 msi_desc->data = entry->msg.data;
213
214 return 0;
215}
216
217static void ntb_msi_write_msg(struct msi_desc *entry, void *data)
218{
219 struct ntb_msi_devres *dr = data;
220
221 WARN_ON(ntb_msi_set_desc(dr->ntb, entry, dr->msi_desc));
222
223 if (dr->ntb->msi->desc_changed)
224 dr->ntb->msi->desc_changed(dr->ntb->ctx);
225}
226
227static void ntbm_msi_callback_release(struct device *dev, void *res)
228{
229 struct ntb_msi_devres *dr = res;
230
231 dr->entry->write_msi_msg = NULL;
232 dr->entry->write_msi_msg_data = NULL;
233}
234
235static int ntbm_msi_setup_callback(struct ntb_dev *ntb, struct msi_desc *entry,
236 struct ntb_msi_desc *msi_desc)
237{
238 struct ntb_msi_devres *dr;
239
240 dr = devres_alloc(ntbm_msi_callback_release,
241 sizeof(struct ntb_msi_devres), GFP_KERNEL);
242 if (!dr)
243 return -ENOMEM;
244
245 dr->ntb = ntb;
246 dr->entry = entry;
247 dr->msi_desc = msi_desc;
248
249 devres_add(&ntb->dev, dr);
250
251 dr->entry->write_msi_msg = ntb_msi_write_msg;
252 dr->entry->write_msi_msg_data = dr;
253
254 return 0;
255}
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
280 irq_handler_t thread_fn,
281 const char *name, void *dev_id,
282 struct ntb_msi_desc *msi_desc)
283{
284 struct msi_desc *entry;
285 struct irq_desc *desc;
286 int ret;
287
288 if (!ntb->msi)
289 return -EINVAL;
290
291 for_each_pci_msi_entry(entry, ntb->pdev) {
292 desc = irq_to_desc(entry->irq);
293 if (desc->action)
294 continue;
295
296 ret = devm_request_threaded_irq(&ntb->dev, entry->irq, handler,
297 thread_fn, 0, name, dev_id);
298 if (ret)
299 continue;
300
301 if (ntb_msi_set_desc(ntb, entry, msi_desc)) {
302 devm_free_irq(&ntb->dev, entry->irq, dev_id);
303 continue;
304 }
305
306 ret = ntbm_msi_setup_callback(ntb, entry, msi_desc);
307 if (ret) {
308 devm_free_irq(&ntb->dev, entry->irq, dev_id);
309 return ret;
310 }
311
312
313 return entry->irq;
314 }
315
316 return -ENODEV;
317}
318EXPORT_SYMBOL(ntbm_msi_request_threaded_irq);
319
320static int ntbm_msi_callback_match(struct device *dev, void *res, void *data)
321{
322 struct ntb_dev *ntb = dev_ntb(dev);
323 struct ntb_msi_devres *dr = res;
324
325 return dr->ntb == ntb && dr->entry == data;
326}
327
328
329
330
331
332
333
334
335
336
337void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, void *dev_id)
338{
339 struct msi_desc *entry = irq_get_msi_desc(irq);
340
341 entry->write_msi_msg = NULL;
342 entry->write_msi_msg_data = NULL;
343
344 WARN_ON(devres_destroy(&ntb->dev, ntbm_msi_callback_release,
345 ntbm_msi_callback_match, entry));
346
347 devm_free_irq(&ntb->dev, irq, dev_id);
348}
349EXPORT_SYMBOL(ntbm_msi_free_irq);
350
351
352
353
354
355
356
357
358
359
360
361
362
363int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer,
364 struct ntb_msi_desc *desc)
365{
366 int idx;
367
368 if (!ntb->msi)
369 return -EINVAL;
370
371 idx = desc->addr_offset / sizeof(*ntb->msi->peer_mws[peer]);
372
373 iowrite32(desc->data, &ntb->msi->peer_mws[peer][idx]);
374
375 return 0;
376}
377EXPORT_SYMBOL(ntb_msi_peer_trigger);
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer,
394 struct ntb_msi_desc *desc,
395 phys_addr_t *msi_addr)
396{
397 int peer_widx = ntb_peer_mw_count(ntb) - 1 - peer;
398 phys_addr_t mw_phys_addr;
399 int ret;
400
401 ret = ntb_peer_mw_get_addr(ntb, peer_widx, &mw_phys_addr, NULL);
402 if (ret)
403 return ret;
404
405 if (msi_addr)
406 *msi_addr = mw_phys_addr + desc->addr_offset;
407
408 return 0;
409}
410EXPORT_SYMBOL(ntb_msi_peer_addr);
411