/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/perf_event.h>
#include <uapi/linux/idxd.h>
#include "registers.h"

#define IDXD_DRIVER_VERSION	"1.00"

extern struct kmem_cache *idxd_desc_pool;
extern bool tc_override;

struct idxd_wq;
struct idxd_dev;

enum idxd_dev_type {
	IDXD_DEV_NONE = -1,
	IDXD_DEV_DSA = 0,
	IDXD_DEV_IAX,
	IDXD_DEV_WQ,
	IDXD_DEV_GROUP,
	IDXD_DEV_ENGINE,
	IDXD_DEV_CDEV,
	IDXD_DEV_MAX_TYPE,
};

struct idxd_dev {
	struct device conf_dev;
	enum idxd_dev_type type;
};
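
/*
 * struct idxd_dev is embedded in every configurable object (device, wq,
 * group, engine, cdev) and ties it into the dsa bus via conf_dev.  The
 * confdev_to_*() helpers below recover the containing object; a minimal
 * round-trip sketch (assuming a valid wq pointer):
 *
 *	struct device *dev = wq_confdev(wq);
 *	struct idxd_wq *same_wq = confdev_to_wq(dev);
 */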

#define IDXD_REG_TIMEOUT	50
#define IDXD_DRAIN_TIMEOUT	5000

enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_IAX,
	IDXD_TYPE_MAX,
};

#define IDXD_NAME_SIZE		128
#define IDXD_PMU_EVENT_MAX	64

struct idxd_device_driver {
	const char *name;
	enum idxd_dev_type *type;
	int (*probe)(struct idxd_dev *idxd_dev);
	void (*remove)(struct idxd_dev *idxd_dev);
	struct device_driver drv;
};

extern struct idxd_device_driver dsa_drv;
extern struct idxd_device_driver idxd_drv;
extern struct idxd_device_driver idxd_dmaengine_drv;
extern struct idxd_device_driver idxd_user_drv;
struct idxd_irq_entry {
	struct idxd_device *idxd;
	int id;
	int vector;
	struct llist_head pending_llist;
	struct list_head work_list;
	/*
	 * Lock to protect access between irq thread processing descriptors
	 * and irq thread processing error descriptors.
	 */
	spinlock_t list_lock;
};

struct idxd_group {
	struct idxd_dev idxd_dev;
	struct idxd_device *idxd;
	struct grpcfg grpcfg;
	int id;
	int num_engines;
	int num_wqs;
	bool use_token_limit;
	u8 tokens_allowed;
	u8 tokens_reserved;
	int tc_a;
	int tc_b;
};
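
/*
 * Note: "tokens" here are the device's shared bandwidth tokens (read
 * buffers in later spec language) that can be reserved for or capped per
 * group; tc_a/tc_b select the traffic classes used by the group's engines.
 */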

struct idxd_pmu {
	struct idxd_device *idxd;

	struct perf_event *event_list[IDXD_PMU_EVENT_MAX];
	int n_events;

	DECLARE_BITMAP(used_mask, IDXD_PMU_EVENT_MAX);

	struct pmu pmu;
	char name[IDXD_NAME_SIZE];
	int cpu;

	int n_counters;
	int counter_width;
	int n_event_categories;

	bool per_counter_caps_supported;
	unsigned long supported_event_categories;

	unsigned long supported_filters;
	int n_filters;

	struct hlist_node cpuhp_node;
};

#define IDXD_MAX_PRIORITY	0xf

enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};

enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
	WQ_FLAG_BLOCK_ON_FAULT,
};

enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,
	IDXD_WQT_USER,
};

struct idxd_cdev {
	struct idxd_wq *wq;
	struct cdev cdev;
	struct idxd_dev idxd_dev;
	int minor;
};

#define IDXD_ALLOCATED_BATCH_SIZE	128U
#define WQ_NAME_SIZE	1024
#define WQ_TYPE_SIZE	10

enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};

enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
	IDXD_COMPLETE_DEV_FAIL,
};

struct idxd_dma_chan {
	struct dma_chan chan;
	struct idxd_wq *wq;
};
struct idxd_wq {
	void __iomem *portal;
	u32 portal_offset;
	struct percpu_ref wq_active;
	struct completion wq_dead;
	struct idxd_dev idxd_dev;
	struct idxd_cdev *idxd_cdev;
	struct wait_queue_head err_queue;
	struct idxd_device *idxd;
	int id;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;
	struct mutex wq_lock;	/* mutex for workqueue */
	u32 size;
	u32 threshold;
	u32 priority;
	enum idxd_wq_state state;
	unsigned long flags;
	union wqcfg *wqcfg;
	struct dsa_hw_desc **hw_descs;
	int num_descs;
	union {
		struct dsa_completion_record *compls;
		struct iax_completion_record *iax_compls;
	};
	void *compls_raw;
	dma_addr_t compls_addr;
	dma_addr_t compls_addr_raw;
	int compls_size;
	struct idxd_desc **descs;
	struct sbitmap_queue sbq;
	struct idxd_dma_chan *idxd_chan;
	char name[WQ_NAME_SIZE + 1];
	u64 max_xfer_bytes;
	u32 max_batch_size;
	bool ats_dis;
};
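
/*
 * Descriptor bookkeeping: hw_descs, compls, and descs are parallel arrays
 * of num_descs entries, and sbq hands out free indexes into them.  A
 * minimal allocation sketch (the real idxd_alloc_desc() also handles
 * blocking and device state):
 *
 *	unsigned int cpu;
 *	int idx = sbitmap_queue_get(&wq->sbq, &cpu);
 *
 *	if (idx >= 0)
 *		desc = wq->descs[idx];
 */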

struct idxd_engine {
	struct idxd_dev idxd_dev;
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};

/* shadow registers */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	struct opcap opcap;
	u32 cmd_cap;
};

enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_ENABLED,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
	IDXD_FLAG_PASID_ENABLED,
};

struct idxd_dma_dev {
	struct idxd_device *idxd;
	struct dma_device dma;
};

struct idxd_driver_data {
	const char *name_prefix;
	enum idxd_type type;
	struct device_type *dev_type;
	int compl_size;
	int align;
};

struct idxd_device {
	struct idxd_dev idxd_dev;
	struct idxd_driver_data *data;
	struct list_head list;
	struct idxd_hw hw;
	enum idxd_device_state state;
	unsigned long flags;
	int id;
	int major;
	u32 cmd_status;

	struct pci_dev *pdev;
	void __iomem *reg_base;

	spinlock_t dev_lock;	/* spinlock for device */
	spinlock_t cmd_lock;	/* spinlock for device commands */
	struct completion *cmd_done;
	struct idxd_group **groups;
	struct idxd_wq **wqs;
	struct idxd_engine **engines;

	struct iommu_sva *sva;
	unsigned int pasid;

	int num_groups;

	u32 msix_perm_offset;
	u32 wqcfg_offset;
	u32 grpcfg_offset;
	u32 perfmon_offset;

	u64 max_xfer_bytes;
	u32 max_batch_size;
	int max_groups;
	int max_engines;
	int max_tokens;
	int max_wqs;
	int max_wq_size;
	int token_limit;
	int nr_tokens;		/* non-reserved tokens */
	unsigned int wqcfg_size;

	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;
	int num_wq_irqs;
	struct idxd_irq_entry *irq_entries;

	struct idxd_dma_dev *idxd_dma;
	struct workqueue_struct *wq;
	struct work_struct work;

	int *int_handles;

	struct idxd_pmu *idxd_pmu;
};

/* IDXD software descriptor */
struct idxd_desc {
	union {
		struct dsa_hw_desc *hw;
		struct iax_hw_desc *iax_hw;
	};
	dma_addr_t desc_dma;
	union {
		struct dsa_completion_record *completion;
		struct iax_completion_record *iax_completion;
	};
	dma_addr_t compl_dma;
	struct dma_async_tx_descriptor txd;
	struct llist_node llnode;
	struct list_head list;
	int id;
	int cpu;
	struct idxd_wq *wq;
};
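
/*
 * Typical descriptor lifecycle (sketch; error handling omitted):
 *
 *	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *	...fill in desc->hw...
 *	idxd_submit_desc(wq, desc);
 *
 * On completion, complete_desc() notifies dmaengine of the result and
 * returns the descriptor to the wq via idxd_free_desc().
 */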

/*
 * Software-defined completion status, outside the range of hardware
 * reported status codes, used to flag descriptors aborted by the driver.
 */
enum idxd_completion_status {
	IDXD_COMP_DESC_ABORT = 0xff,
};

#define idxd_confdev(idxd) (&(idxd)->idxd_dev.conf_dev)
#define wq_confdev(wq) (&(wq)->idxd_dev.conf_dev)
#define engine_confdev(engine) (&(engine)->idxd_dev.conf_dev)
#define group_confdev(group) (&(group)->idxd_dev.conf_dev)
#define cdev_dev(cdev) (&(cdev)->idxd_dev.conf_dev)

#define confdev_to_idxd_dev(dev) container_of(dev, struct idxd_dev, conf_dev)
#define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev)
#define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev)

static inline struct idxd_device *confdev_to_idxd(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return idxd_dev_to_idxd(idxd_dev);
}

static inline struct idxd_wq *confdev_to_wq(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return idxd_dev_to_wq(idxd_dev);
}

static inline struct idxd_engine *confdev_to_engine(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_engine, idxd_dev);
}

static inline struct idxd_group *confdev_to_group(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_group, idxd_dev);
}

static inline struct idxd_cdev *dev_to_cdev(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_cdev, idxd_dev);
}
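
/*
 * These helpers are what sysfs attribute callbacks use to get back to the
 * driver object from the struct device they are handed.  A minimal sketch
 * (hypothetical attribute):
 *
 *	static ssize_t size_show(struct device *dev,
 *				 struct device_attribute *attr, char *buf)
 *	{
 *		struct idxd_wq *wq = confdev_to_wq(dev);
 *
 *		return sysfs_emit(buf, "%u\n", wq->size);
 *	}
 */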

static inline void idxd_dev_set_type(struct idxd_dev *idev, int type)
{
	if (type >= IDXD_DEV_MAX_TYPE) {
		idev->type = IDXD_DEV_NONE;
		return;
	}

	idev->type = type;
}

extern struct bus_type dsa_bus_type;

extern bool support_enqcmd;
extern struct ida idxd_ida;
extern struct device_type dsa_device_type;
extern struct device_type iax_device_type;
extern struct device_type idxd_wq_device_type;
extern struct device_type idxd_engine_device_type;
extern struct device_type idxd_group_device_type;

static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_DSA;
}

static inline bool is_iax_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_IAX;
}

static inline bool is_idxd_dev(struct idxd_dev *idxd_dev)
{
	return is_dsa_dev(idxd_dev) || is_iax_dev(idxd_dev);
}

static inline bool is_idxd_wq_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_WQ;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_KERNEL && !strcmp(wq->name, "dmaengine");
}

static inline bool is_idxd_wq_user(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

static inline bool is_idxd_wq_kernel(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_KERNEL;
}

static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool wq_shared(struct idxd_wq *wq)
{
	return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}

static inline bool device_swq_supported(struct idxd_device *idxd)
{
	return support_enqcmd && device_pasid_enabled(idxd);
}
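
/*
 * A dedicated wq is owned by a single client and can be submitted to with
 * plain 64-byte portal writes (MOVDIR64B); a shared wq multiplexes clients
 * and requires ENQCMD/ENQCMDS plus a PASID, hence the two checks above.
 */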

enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

enum idxd_interrupt_type {
	IDXD_IRQ_MSIX = 0,
	IDXD_IRQ_IMS,
};

static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
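
/*
 * Each wq owns four pages of portal space; the unlimited portal sits at
 * page 0 of that block and the limited portal at page 1.  For example,
 * with 4K pages, wq 2's limited portal lands at ((2 * 4) << 12) + 0x1000 =
 * 0x9000 from the portal BAR base.
 */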

#define IDXD_PORTAL_MASK	(PAGE_SIZE - 1)

/*
 * Even though this function can be called from multiple threads, it is
 * safe to use without locking.  At worst an address gets handed out more
 * than once before portal_offset is updated, and the occasional reuse of
 * a slot is tolerable compared to the cost of an atomic.  Each call
 * rotates through the portal page in 64-byte strides, i.e.
 * sizeof(struct dsa_raw_desc), matching the size of one descriptor write.
 */
static inline void __iomem *idxd_wq_portal_addr(struct idxd_wq *wq)
{
	int ofs = wq->portal_offset;

	wq->portal_offset = (ofs + sizeof(struct dsa_raw_desc)) & IDXD_PORTAL_MASK;
	return wq->portal + ofs;
}

static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}

int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
					struct module *module, const char *mod_name);
#define idxd_driver_register(driver) \
	__idxd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)

void idxd_driver_unregister(struct idxd_device_driver *idxd_drv);

#define module_idxd_driver(__idxd_driver) \
	module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister)
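
/*
 * A sub-driver binds by device type against the dsa bus.  Minimal
 * registration sketch (my_types, my_probe, and my_remove are
 * hypothetical; the type list is IDXD_DEV_NONE terminated):
 *
 *	static enum idxd_dev_type my_types[] = { IDXD_DEV_WQ, IDXD_DEV_NONE };
 *
 *	static struct idxd_device_driver my_drv = {
 *		.name = "my_drv",
 *		.type = my_types,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *	module_idxd_driver(my_drv);
 */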

int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
void idxd_wqs_quiesce(struct idxd_device *idxd);

/* device interrupt control */
void idxd_msix_perm_setup(struct idxd_device *idxd);
void idxd_msix_perm_clear(struct idxd_device *idxd);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
void idxd_mask_msix_vectors(struct idxd_device *idxd);
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);

/* device control */
int idxd_register_idxd_drv(void);
void idxd_unregister_idxd_drv(void);
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int drv_enable_wq(struct idxd_wq *wq);
int __drv_enable_wq(struct idxd_wq *wq);
void drv_disable_wq(struct idxd_wq *wq);
void __drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_clear_state(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
int idxd_device_load_config(struct idxd_device *idxd);
int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
				   enum idxd_interrupt_type irq_type);
int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
				   enum idxd_interrupt_type irq_type);

/* work queue control */
void idxd_wqs_unmap_portal(struct idxd_device *idxd);
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq, bool reset_config);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
void idxd_wq_quiesce(struct idxd_wq *wq);
int idxd_wq_init_percpu_ref(struct idxd_wq *wq);

/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);

/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type);

/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);

/* perfmon */
#if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)
int perfmon_pmu_init(struct idxd_device *idxd);
void perfmon_pmu_remove(struct idxd_device *idxd);
void perfmon_counter_overflow(struct idxd_device *idxd);
void perfmon_init(void);
void perfmon_exit(void);
#else
static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; }
static inline void perfmon_pmu_remove(struct idxd_device *idxd) {}
static inline void perfmon_counter_overflow(struct idxd_device *idxd) {}
static inline void perfmon_init(void) {}
static inline void perfmon_exit(void) {}
#endif

static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
{
	idxd_dma_complete_txd(desc, reason);
	idxd_free_desc(desc->wq, desc);
}

#endif