// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 */

/*
 * This driver supports an interface for DCA clients and providers to meet via
 * a unified programming interface.
 */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/dca.h>
#include <linux/slab.h>
#include <linux/module.h>

#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

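/*
 * dca_lock protects the dca_domains list and each domain's list of
 * providers; it is always taken with interrupts disabled via
 * raw_spin_lock_irqsave().
 */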
static DEFINE_RAW_SPINLOCK(dca_lock);

static LIST_HEAD(dca_domains);

static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

static int dca_providers_blocked;

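/* Walk up from @dev to the PCI bus at its root complex. */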
static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *bus = pdev->bus;

	while (bus->parent)
		bus = bus->parent;

	return bus;
}

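/*
 * A dca_domain groups the providers that share one PCI root complex;
 * requesters are only ever matched against providers in their own domain.
 */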
static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
	if (!domain)
		return NULL;

	INIT_LIST_HEAD(&domain->dca_providers);
	domain->pci_rc = rc;

	return domain;
}

static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}

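/* Match the PCI IDs of Intel IOAT ver 3.0 (TBG) DMA engine devices. */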
static int dca_provider_ioat_ver_3_0(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
		((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
}

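/*
 * Called once providers have been blocked: notify clients, detach every
 * provider from the single remaining domain under dca_lock, then remove
 * their sysfs entries after the lock is dropped.
 */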
static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);

	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
		list_move(&dca->node, &unregistered_providers);

	dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}

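/* Look up the domain for a root complex; callers must hold dca_lock. */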
static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	list_for_each_entry(domain, &dca_domains, node)
		if (domain->pci_rc == rc)
			return domain;

	return NULL;
}

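/*
 * Find the domain for @dev's root complex. If an IOAT ver 3.0 provider
 * appears under a root complex with no domain while other domains already
 * exist (i.e. a multiple-IOH platform, where this is not supported), flag
 * every provider to be blocked rather than create a second domain.
 */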
static struct dca_domain *dca_get_domain(struct device *dev)
{
	struct pci_bus *rc;
	struct dca_domain *domain;

	rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(rc);

	if (!domain) {
		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
			dca_providers_blocked = 1;
	}

	return domain;
}

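/*
 * Find the provider managing @dev. A NULL @dev (the legacy dca_get_tag()
 * path) returns the first provider of the first domain.
 */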
static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
	struct dca_provider *dca;
	struct pci_bus *rc;
	struct dca_domain *domain;

	if (dev) {
		rc = dca_pci_rc_from_dev(dev);
		domain = dca_find_domain(rc);
		if (!domain)
			return NULL;
	} else {
		if (!list_empty(&dca_domains))
			domain = list_first_entry(&dca_domains,
						  struct dca_domain,
						  node);
		else
			return NULL;
	}

	list_for_each_entry(dca, &domain->dca_providers, node)
		if ((!dev) || (dca->ops->dev_managed(dca, dev)))
			return dca;

	return NULL;
}

/**
 * dca_add_requester - add a dca client to the list
 * @dev - the device that wants dca service
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		raw_spin_lock_irqsave(&dca_lock, flags);
		/* only roll back if the provider is still registered */
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);

/**
 * dca_remove_requester - remove a dca client from the list
 * @dev - the device that wants dca service
 */
int dca_remove_requester(struct device *dev)
{
	struct dca_provider *dca;
	int slot;
	unsigned long flags;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);
	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	slot = dca->ops->remove_requester(dca, dev);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	dca_sysfs_remove_req(dca, slot);

	return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);

/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
static u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	raw_spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		/* note: the error value is truncated to u8 on return */
		return -ENODEV;
	}
	tag = dca->ops->get_tag(dca, dev, cpu);

	raw_spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}

/**
 * dca3_get_tag - return the dca tag to the requester device
 *	for the given cpu (new api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
	if (!dev)
		return -EFAULT;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);

/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_get_tag(int cpu)
{
	struct device *dev = NULL;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);

/**
 * alloc_dca_provider - get data struct for describing a dca provider
 * @ops - pointer to struct of dca operation function pointers
 * @priv_size - size of extra mem to be added for provider's needs
 */
struct dca_provider *alloc_dca_provider(const struct dca_ops *ops,
					int priv_size)
{
	struct dca_provider *dca;
	int alloc_size;

	alloc_size = (sizeof(*dca) + priv_size);
	dca = kzalloc(alloc_size, GFP_KERNEL);
	if (!dca)
		return NULL;
	dca->ops = ops;

	return dca;
}
EXPORT_SYMBOL_GPL(alloc_dca_provider);

/**
 * free_dca_provider - release the dca provider data struct
 * @dca - previously allocated dca_provider to be freed
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);
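
/*
 * Registering a provider may require allocating a new domain, which cannot
 * be done while holding dca_lock. The domain list is therefore rechecked
 * after the lock is retaken, and a losing racer frees its preallocated
 * domain (see the kfree(newdomain) at the end of register_dca_provider()).
 */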

/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain, *newdomain = NULL;

	raw_spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	raw_spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		struct pci_bus *rc;

		if (dca_providers_blocked) {
			raw_spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			unregister_dca_providers();
			return -ENODEV;
		}

		raw_spin_unlock_irqrestore(&dca_lock, flags);
		rc = dca_pci_rc_from_dev(dev);
		newdomain = dca_allocate_domain(rc);
		if (!newdomain)
			return -ENODEV;
		raw_spin_lock_irqsave(&dca_lock, flags);
		/* Recheck, we might have raced after dropping the lock */
		domain = dca_get_domain(dev);
		if (!domain) {
			domain = newdomain;
			newdomain = NULL;
			list_add(&domain->node, &dca_domains);
		}
	}
	list_add(&dca->node, &domain->dca_providers);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	kfree(newdomain);
	return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);

/**
 * unregister_dca_provider - remove a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	list_del(&dca->node);

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (list_empty(&domain->dca_providers))
		dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);

/**
 * dca_register_notify - register a client's notifier callback
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);

/**
 * dca_unregister_notify - remove a client's notifier callback
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);

static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);
	return dca_sysfs_init();
}

static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}

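/*
 * arch_initcall runs ahead of device and module initcalls, so the dca core
 * is ready before provider and requester drivers try to register.
 */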
arch_initcall(dca_init);
module_exit(dca_exit);