/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include "hw.h"
#include "registers.h"
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <net/tcp.h>

#define IOAT_DMA_VERSION  "4.00"

#define IOAT_LOW_COMPLETION_MASK	0xffffffc0
#define IOAT_DMA_DCA_ANY_CPU		~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd)
#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->device->pdev)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
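
/*
 * Note (an assumption from the layout implied above): each channel's
 * registers occupy an 0x80-byte window in the device's MMIO space, so
 * chan_num() converts the offset of a channel's register base back into
 * a small per-device channel number.
 */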

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

enum ioat_irq_mode {
	IOAT_NOIRQ = 0,
	IOAT_MSIX,
	IOAT_MSIX_SINGLE,
	IOAT_MSI,
	IOAT_INTX
};

/**
 * struct ioatdma_device - internal representation of a IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @dma_pool: for allocating DMA descriptors
 * @completion_pool: for allocating completion writeback areas
 * @sed_hw_pool: DMA super descriptor pools
 * @sed_pool: allocate SED descriptors
 * @common: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @irq_mode: interrupt mode (INTX, MSI, MSI-X)
 * @cap: read DMA capabilities register
 * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
 * @enumerate_channels: hw version specific channel enumeration
 * @reset_hw: hw version specific channel (re)initialization
 * @cleanup_fn: select between the v2 and v3 cleanup routines
 * @timer_fn: select between the v2 and v3 timer watchdog routines
 * @self_test: hardware version specific self test for each supported
 *	capability mode
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct pci_pool *dma_pool;
	struct pci_pool *completion_pool;
#define MAX_SED_POOLS	5
	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
	struct kmem_cache *sed_pool;
	struct dma_device common;
	u8 version;
	struct msix_entry msix_entries[4];
	struct ioat_chan_common *idx[4];
	struct dca_provider *dca;
	enum ioat_irq_mode irq_mode;
	u32 cap;
	void (*intr_quirk)(struct ioatdma_device *device);
	int (*enumerate_channels)(struct ioatdma_device *device);
	int (*reset_hw)(struct ioat_chan_common *chan);
	void (*cleanup_fn)(unsigned long data);
	void (*timer_fn)(unsigned long data);
	int (*self_test)(struct ioatdma_device *device);
};

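/*
 * struct ioat_chan_common - state shared by all hardware versions of a
 * channel: MMIO base, completion writeback area, state flags, and the
 * watchdog timer / cleanup tasklet plumbing.
 */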
struct ioat_chan_common {
	struct dma_chan common;
	void __iomem *reg_base;
	dma_addr_t last_completion;
	spinlock_t cleanup_lock;
	unsigned long state;
	#define IOAT_COMPLETION_PENDING 0
	#define IOAT_COMPLETION_ACK 1
	#define IOAT_RESET_PENDING 2
	#define IOAT_KOBJ_INIT_FAIL 3
	#define IOAT_RESHAPE_PENDING 4
	#define IOAT_RUN 5
	#define IOAT_CHAN_ACTIVE 6
	struct timer_list timer;
	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
	#define RESET_DELAY msecs_to_jiffies(100)
	struct ioatdma_device *device;
	dma_addr_t completion_dma;
	u64 *completion;
	struct tasklet_struct cleanup_task;
	struct kobject kobj;
};

struct ioat_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct dma_chan *, char *);
};

/**
 * struct ioat_dma_chan - internal representation of a DMA channel
 */
struct ioat_dma_chan {
	struct ioat_chan_common base;

	size_t xfercap;	/* XFERCAP register value expanded out */

	spinlock_t desc_lock;
	struct list_head free_desc;
	struct list_head used_desc;

	int pending;
	u16 desccount;
	u16 active;
};

/**
 * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 * @hw: hardware SED
 * @dma: dma address for the SED
 * @parent: point to the dma descriptor that's the parent
 * @hw_pool: descriptor pool index
 */
struct ioat_sed_ent {
	struct ioat_sed_raw_descriptor *hw;
	dma_addr_t dma;
	struct ioat_ring_ent *parent;
	unsigned int hw_pool;
};

static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c)
{
	return container_of(c, struct ioat_chan_common, common);
}

static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
{
	struct ioat_chan_common *chan = to_chan_common(c);

	return container_of(chan, struct ioat_dma_chan, base);
}

/**
 * struct ioat_desc_sw - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @node: this descriptor will either be on the free list,
 *	or attached to a transaction list (tx_list)
 * @len: total transaction length
 * @tx_list: list of descriptors chained into one transaction
 * @txd: the generic software descriptor for all engines
 * @id: identifier for debug
 */
struct ioat_desc_sw {
	struct ioat_dma_descriptor *hw;
	struct list_head node;
	size_t len;
	struct list_head tx_list;
	struct dma_async_tx_descriptor txd;
	#ifdef DEBUG
	int id;
	#endif
};

#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif

static inline void
__dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
		struct dma_async_tx_descriptor *tx, int id)
{
	struct device *dev = to_dev(chan);

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
		(unsigned long long) tx->phys,
		(unsigned long long) hw->next, tx->cookie, tx->flags,
		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}

#define dump_desc_dbg(c, d) \
	({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
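
/*
 * Usage sketch (illustrative only): dump_desc_dbg() takes the channel
 * wrapper and a software descriptor, and is a no-op when the descriptor
 * pointer is NULL:
 *
 *	dump_desc_dbg(ioat, desc);
 */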

static inline void ioat_set_tcp_copy_break(unsigned long copybreak)
{
	#ifdef CONFIG_NET_DMA
	sysctl_tcp_dma_copybreak = copybreak;
	#endif
}

static inline struct ioat_chan_common *
ioat_chan_by_index(struct ioatdma_device *device, int index)
{
	return device->idx[index];
}

static inline u64 ioat_chansts_32(struct ioat_chan_common *chan)
{
	u8 ver = chan->device->version;
	u64 status;
	u32 status_lo;

	/* We need to read the low address first as this causes the
	 * chipset to latch the upper bits for the subsequent read
	 */
	status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
	status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
	status <<= 32;
	status |= status_lo;

	return status;
}

#if BITS_PER_LONG == 64

static inline u64 ioat_chansts(struct ioat_chan_common *chan)
{
	u8 ver = chan->device->version;
	u64 status;

	/* With IOAT v3.3, the status register is 64bit. */
	if (ver >= IOAT_VER_3_3)
		status = readq(chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
	else
		status = ioat_chansts_32(chan);

	return status;
}

#else
#define ioat_chansts ioat_chansts_32
#endif

static inline void ioat_start(struct ioat_chan_common *chan)
{
	u8 ver = chan->device->version;

	writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}

static inline u32 ioat_chanerr(struct ioat_chan_common *chan)
{
	return readl(chan->reg_base + IOAT_CHANERR_OFFSET);
}

static inline void ioat_suspend(struct ioat_chan_common *chan)
{
	u8 ver = chan->device->version;

	writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline void ioat_reset(struct ioat_chan_common *chan)
{
	u8 ver = chan->device->version;

	writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
{
	u8 ver = chan->device->version;
	u8 cmd;

	cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}
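
/*
 * Example (a sketch, not code taken from this driver): forcing a channel
 * reset and waiting for the RESET command bit to self-clear might look
 * like:
 *
 *	ioat_suspend(chan);
 *	ioat_reset(chan);
 *	while (ioat_reset_pending(chan))
 *		cpu_relax();
 */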

static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
{
	struct ioat_chan_common *chan = &ioat->base;

	writel(addr & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
}

static inline bool is_ioat_active(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}

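/*
 * Example (sketch): the is_ioat_*() predicates decode a value previously
 * read with ioat_chansts(), e.g.:
 *
 *	u64 status = ioat_chansts(chan);
 *
 *	if (is_ioat_halted(status))
 *		dev_err(to_dev(chan), "chanerr: %#x\n", ioat_chanerr(chan));
 */
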
/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}

static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
			      int direction, enum dma_ctrl_flags flags, bool dst)
{
	if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
	    (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
		pci_unmap_single(pdev, addr, len, direction);
	else
		pci_unmap_page(pdev, addr, len, direction);
}
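
/*
 * Example (sketch; "hw" and "len" are hypothetical locals): unmapping the
 * destination of a completed memcpy, where the DMA_COMPL_*_UNMAP_SINGLE
 * flags choose between pci_unmap_single() and pci_unmap_page():
 *
 *	ioat_unmap(pdev, hw->dst_addr, len, PCI_DMA_FROMDEVICE, flags, 1);
 */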

int ioat_probe(struct ioatdma_device *device);
int ioat_register(struct ioatdma_device *device);
int ioat1_dma_probe(struct ioatdma_device *dev, int dca);
int ioat_dma_self_test(struct ioatdma_device *device);
void ioat_dma_remove(struct ioatdma_device *device);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
void ioat_init_channel(struct ioatdma_device *device,
		       struct ioat_chan_common *chan, int idx);
enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
				   struct dma_tx_state *txstate);
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   dma_addr_t *phys_complete);
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *device);
int ioat_dma_setup_interrupts(struct ioatdma_device *device);
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
#endif /* IOATDMA_H */