// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the Apex chip.
 */

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include "apex.h"

#include "gasket_core.h"
#include "gasket_interrupt.h"
#include "gasket_page_table.h"
#include "gasket_sysfs.h"

/* Constants */
#define APEX_DEVICE_NAME "Apex"
#define APEX_DRIVER_VERSION "1.0"

/* The device registers are in BAR 2. */
#define APEX_BAR_INDEX 2

#define APEX_PCI_VENDOR_ID 0x1ac1
#define APEX_PCI_DEVICE_ID 0x089a

/* BAR offsets. */
#define APEX_BAR_OFFSET 0
#define APEX_CM_OFFSET 0x1000000

/* The sizes of the mappable BAR and coherent memory regions. */
#define APEX_BAR_BYTES 0x100000
#define APEX_CH_MEM_BYTES (PAGE_SIZE * MAX_NUM_COHERENT_PAGES)

/* The number of user-mappable regions in BAR2. */
#define NUM_REGIONS 3

/* The number of page table nodes on this device. */
#define NUM_NODES 1

/* The total number of entries in the page table. */
#define APEX_PAGE_TABLE_TOTAL_ENTRIES 8192

/* Page table "extended address" bit position. */
#define APEX_EXTENDED_SHIFT 63

/* Poll reset/init status up to this many times... */
#define APEX_RESET_RETRY 120

/* ...waiting this many milliseconds between polls (12 s maximum). */
#define APEX_RESET_DELAY 100

/* Enumeration of the supported sysfs entries. */
enum sysfs_attribute_type {
	ATTR_KERNEL_HIB_PAGE_TABLE_SIZE,
	ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE,
	ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES,
};

/* Register offsets into BAR2 memory. */
enum apex_bar2_regs {
	APEX_BAR2_REG_SCU_BASE = 0x1A300,
	APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE_SIZE = 0x46000,
	APEX_BAR2_REG_KERNEL_HIB_EXTENDED_TABLE = 0x46008,
	APEX_BAR2_REG_KERNEL_HIB_TRANSLATION_ENABLE = 0x46010,
	APEX_BAR2_REG_KERNEL_HIB_INSTR_QUEUE_INTVECCTL = 0x46018,
	APEX_BAR2_REG_KERNEL_HIB_INPUT_ACTV_QUEUE_INTVECCTL = 0x46020,
	APEX_BAR2_REG_KERNEL_HIB_PARAM_QUEUE_INTVECCTL = 0x46028,
	APEX_BAR2_REG_KERNEL_HIB_OUTPUT_ACTV_QUEUE_INTVECCTL = 0x46030,
	APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL = 0x46038,
	APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL = 0x46040,
	APEX_BAR2_REG_KERNEL_HIB_FATAL_ERR_INTVECCTL = 0x46048,
	APEX_BAR2_REG_KERNEL_HIB_DMA_PAUSE = 0x46050,
	APEX_BAR2_REG_KERNEL_HIB_DMA_PAUSE_MASK = 0x46058,
	APEX_BAR2_REG_KERNEL_HIB_STATUS_BLOCK_DELAY = 0x46060,
	APEX_BAR2_REG_KERNEL_HIB_MSIX_PENDING_BIT_ARRAY0 = 0x46068,
	APEX_BAR2_REG_KERNEL_HIB_MSIX_PENDING_BIT_ARRAY1 = 0x46070,
	APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE_INIT = 0x46078,
	APEX_BAR2_REG_KERNEL_HIB_MSIX_TABLE_INIT = 0x46080,
	APEX_BAR2_REG_KERNEL_WIRE_INT_PENDING_BIT_ARRAY = 0x48778,
	APEX_BAR2_REG_KERNEL_WIRE_INT_MASK_ARRAY = 0x48780,
	APEX_BAR2_REG_USER_HIB_DMA_PAUSE = 0x486D8,
	APEX_BAR2_REG_USER_HIB_DMA_PAUSED = 0x486E0,
	APEX_BAR2_REG_IDLEGENERATOR_IDLEGEN_IDLEREGISTER = 0x4A000,
	APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE = 0x50000,

	/* Error status registers. */
	APEX_BAR2_REG_USER_HIB_ERROR_STATUS = 0x86f0,
	APEX_BAR2_REG_SCALAR_CORE_ERROR_STATUS = 0x41a0,
};

#define APEX_BAR2_REG_AXI_QUIESCE (APEX_BAR2_REG_SCU_BASE + 0x2C)
#define APEX_BAR2_REG_GCB_CLOCK_GATE (APEX_BAR2_REG_SCU_BASE + 0x14)
#define APEX_BAR2_REG_SCU_0 (APEX_BAR2_REG_SCU_BASE + 0xc)
#define APEX_BAR2_REG_SCU_1 (APEX_BAR2_REG_SCU_BASE + 0x10)
#define APEX_BAR2_REG_SCU_2 (APEX_BAR2_REG_SCU_BASE + 0x14)
#define APEX_BAR2_REG_SCU_3 (APEX_BAR2_REG_SCU_BASE + 0x18)
#define APEX_BAR2_REG_SCU_4 (APEX_BAR2_REG_SCU_BASE + 0x1c)
#define APEX_BAR2_REG_SCU_5 (APEX_BAR2_REG_SCU_BASE + 0x20)

#define SCU3_RG_PWR_STATE_OVR_BIT_OFFSET 26
#define SCU3_RG_PWR_STATE_OVR_MASK_WIDTH 2
#define SCU3_CUR_RST_GCB_BIT_MASK 0x10
#define SCU2_RG_RST_GCB_BIT_MASK 0xc

/* Configuration for the page table. */
static struct gasket_page_table_config apex_page_table_configs[NUM_NODES] = {
	{
		.id = 0,
		.mode = GASKET_PAGE_TABLE_MODE_NORMAL,
		.total_entries = APEX_PAGE_TABLE_TOTAL_ENTRIES,
		.base_reg = APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE,
		.extended_reg = APEX_BAR2_REG_KERNEL_HIB_EXTENDED_TABLE,
		.extended_bit = APEX_EXTENDED_SHIFT,
	},
};

/* The regions in the BAR2 space that can be mapped into user space. */
static const struct gasket_mappable_region mappable_regions[NUM_REGIONS] = {
	{ 0x40000, 0x1000 },
	{ 0x44000, 0x1000 },
	{ 0x48000, 0x1000 },
};

/* The coherent memory region that can be mapped into user space. */
static const struct gasket_mappable_region cm_mappable_regions[1] = {
	{ 0x0, APEX_CH_MEM_BYTES },
};

enum apex_interrupt {
	APEX_INTERRUPT_INSTR_QUEUE = 0,
	APEX_INTERRUPT_INPUT_ACTV_QUEUE = 1,
	APEX_INTERRUPT_PARAM_QUEUE = 2,
	APEX_INTERRUPT_OUTPUT_ACTV_QUEUE = 3,
	APEX_INTERRUPT_SC_HOST_0 = 4,
	APEX_INTERRUPT_SC_HOST_1 = 5,
	APEX_INTERRUPT_SC_HOST_2 = 6,
	APEX_INTERRUPT_SC_HOST_3 = 7,
	APEX_INTERRUPT_TOP_LEVEL_0 = 8,
	APEX_INTERRUPT_TOP_LEVEL_1 = 9,
	APEX_INTERRUPT_TOP_LEVEL_2 = 10,
	APEX_INTERRUPT_TOP_LEVEL_3 = 11,
	APEX_INTERRUPT_FATAL_ERR = 12,
	APEX_INTERRUPT_COUNT = 13,
};

static struct gasket_interrupt_desc apex_interrupts[] = {
	{
		APEX_INTERRUPT_INSTR_QUEUE,
		APEX_BAR2_REG_KERNEL_HIB_INSTR_QUEUE_INTVECCTL,
		UNPACKED,
	},
	{
		APEX_INTERRUPT_INPUT_ACTV_QUEUE,
		APEX_BAR2_REG_KERNEL_HIB_INPUT_ACTV_QUEUE_INTVECCTL,
		UNPACKED
	},
	{
		APEX_INTERRUPT_PARAM_QUEUE,
		APEX_BAR2_REG_KERNEL_HIB_PARAM_QUEUE_INTVECCTL,
		UNPACKED
	},
	{
		APEX_INTERRUPT_OUTPUT_ACTV_QUEUE,
		APEX_BAR2_REG_KERNEL_HIB_OUTPUT_ACTV_QUEUE_INTVECCTL,
		UNPACKED
	},
	{
		APEX_INTERRUPT_SC_HOST_0,
		APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL,
		PACK_0
	},
	{
		APEX_INTERRUPT_SC_HOST_1,
		APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL,
		PACK_1
	},
	{
		APEX_INTERRUPT_SC_HOST_2,
		APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL,
		PACK_2
	},
	{
		APEX_INTERRUPT_SC_HOST_3,
		APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL,
		PACK_3
	},
	{
		APEX_INTERRUPT_TOP_LEVEL_0,
		APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL,
		PACK_0
	},
	{
		APEX_INTERRUPT_TOP_LEVEL_1,
		APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL,
		PACK_1
	},
	{
		APEX_INTERRUPT_TOP_LEVEL_2,
		APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL,
		PACK_2
	},
	{
		APEX_INTERRUPT_TOP_LEVEL_3,
		APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL,
		PACK_3
	},
	{
		APEX_INTERRUPT_FATAL_ERR,
		APEX_BAR2_REG_KERNEL_HIB_FATAL_ERR_INTVECCTL,
		UNPACKED
	},
};

/* Allows the device to enter power save upon driver close(). */
static int allow_power_save = 1;

/* Allows software-based clock gating via the gate-clock ioctl. */
static int allow_sw_clock_gating;

/* Allows hardware-based clock gating. */
static int allow_hw_clock_gating = 1;

/* Act as if only the GCB is instantiated: skip reset and clock-gating sequences. */
static int bypass_top_level;

module_param(allow_power_save, int, 0644);
module_param(allow_sw_clock_gating, int, 0644);
module_param(allow_hw_clock_gating, int, 0644);
module_param(bypass_top_level, int, 0644);

/* Device status callback; currently always reports the device as alive. */
static int apex_get_status(struct gasket_dev *gasket_dev)
{
	return GASKET_STATUS_ALIVE;
}

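/*
 * Enter GCB reset state: force the GCB idle, pause DMA traffic, then assert
 * GCB reset, gate the GCB clock, and shut down GCB memory via the SCU
 * registers.
 */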
static int apex_enter_reset(struct gasket_dev *gasket_dev)
{
	if (bypass_top_level)
		return 0;

	/* Software force GCB idle. */
	gasket_read_modify_write_64(gasket_dev, APEX_BAR_INDEX,
				    APEX_BAR2_REG_IDLEGENERATOR_IDLEGEN_IDLEREGISTER,
				    0x0, 1, 32);

	/* Initiate DMA pause. */
	gasket_dev_write_64(gasket_dev, 1, APEX_BAR_INDEX,
			    APEX_BAR2_REG_USER_HIB_DMA_PAUSE);

	/* Wait for the DMA pause to complete. */
	if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
					APEX_BAR2_REG_USER_HIB_DMA_PAUSED, 1, 1,
					APEX_RESET_DELAY, APEX_RESET_RETRY)) {
		dev_err(gasket_dev->dev,
			"DMAs did not quiesce within timeout (%d ms)\n",
			APEX_RESET_RETRY * APEX_RESET_DELAY);
		return -ETIMEDOUT;
	}

	/* Assert GCB reset. */
	gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
				    APEX_BAR2_REG_SCU_2, 0x1, 2, 2);

	/* Enable the GCB clock gate. */
	gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
				    APEX_BAR2_REG_SCU_2, 0x1, 2, 18);

	/* Enable GCB memory shutdown. */
	gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
				    APEX_BAR2_REG_SCU_3, 0x3, 2, 14);

	/* Wait for RAM shutdown. */
	if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
					APEX_BAR2_REG_SCU_3, 1 << 6, 1 << 6,
					APEX_RESET_DELAY, APEX_RESET_RETRY)) {
		dev_err(gasket_dev->dev,
			"RAM did not shut down within timeout (%d ms)\n",
			APEX_RESET_RETRY * APEX_RESET_DELAY);
		return -ETIMEDOUT;
	}

	return 0;
}

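/*
 * Quit GCB reset state: re-enable GCB memory, ungate the GCB clock, release
 * GCB reset, then apply the hardware clock-gating policy selected by
 * allow_hw_clock_gating.
 */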
static int apex_quit_reset(struct gasket_dev *gasket_dev)
{
	u32 val0, val1;

	if (bypass_top_level)
		return 0;

	/* Disable GCB memory shutdown. */
	gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
				    APEX_BAR2_REG_SCU_3, 0x0, 2, 14);

	/* Disable the GCB clock gate. */
	gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
				    APEX_BAR2_REG_SCU_2, 0x0, 2, 18);

	/* Release GCB reset. */
	gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
				    APEX_BAR2_REG_SCU_2, 0x2, 2, 2);

	/* Wait for RAM to power back up. */
	if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
					APEX_BAR2_REG_SCU_3, 1 << 6, 0,
					APEX_RESET_DELAY, APEX_RESET_RETRY)) {
		dev_err(gasket_dev->dev,
			"RAM did not enable within timeout (%d ms)\n",
			APEX_RESET_RETRY * APEX_RESET_DELAY);
		return -ETIMEDOUT;
	}

	/* Wait for the GCB to leave reset. */
	if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
					APEX_BAR2_REG_SCU_3,
					SCU3_CUR_RST_GCB_BIT_MASK, 0,
					APEX_RESET_DELAY, APEX_RESET_RETRY)) {
		dev_err(gasket_dev->dev,
			"GCB did not leave reset within timeout (%d ms)\n",
			APEX_RESET_RETRY * APEX_RESET_DELAY);
		return -ETIMEDOUT;
	}

	if (!allow_hw_clock_gating) {
		val0 = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
					  APEX_BAR2_REG_SCU_3);
		gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
					    APEX_BAR2_REG_SCU_3, 0x3,
					    SCU3_RG_PWR_STATE_OVR_MASK_WIDTH,
					    SCU3_RG_PWR_STATE_OVR_BIT_OFFSET);
		val1 = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
					  APEX_BAR2_REG_SCU_3);
		dev_dbg(gasket_dev->dev,
			"Disallow HW clock gating 0x%x -> 0x%x\n", val0, val1);
	} else {
		val0 = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
					  APEX_BAR2_REG_SCU_3);
		gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
					    APEX_BAR2_REG_SCU_3, 2,
					    SCU3_RG_PWR_STATE_OVR_MASK_WIDTH,
					    SCU3_RG_PWR_STATE_OVR_BIT_OFFSET);
		val1 = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
					  APEX_BAR2_REG_SCU_3);
		dev_dbg(gasket_dev->dev, "Allow HW clock gating 0x%x -> 0x%x\n",
			val0, val1);
	}

	return 0;
}

/* Device close callback: log error status and enter power-saving reset if allowed. */
static int apex_device_cleanup(struct gasket_dev *gasket_dev)
{
	u64 scalar_error;
	u64 hib_error;
	int ret = 0;

	hib_error = gasket_dev_read_64(gasket_dev, APEX_BAR_INDEX,
				       APEX_BAR2_REG_USER_HIB_ERROR_STATUS);
	scalar_error = gasket_dev_read_64(gasket_dev, APEX_BAR_INDEX,
					  APEX_BAR2_REG_SCALAR_CORE_ERROR_STATUS);

	dev_dbg(gasket_dev->dev,
		"%s 0x%p hib_error 0x%llx scalar_error 0x%llx\n",
		__func__, gasket_dev, hib_error, scalar_error);

	if (allow_power_save)
		ret = apex_enter_reset(gasket_dev);

	return ret;
}

/* Determine whether the GCB is currently held in reset. */
static bool is_gcb_in_reset(struct gasket_dev *gasket_dev)
{
	u32 val = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
				     APEX_BAR2_REG_SCU_3);

	return (val & SCU3_CUR_RST_GCB_BIT_MASK);
}

/* Reset the Apex hardware: enter and then leave GCB reset. */
static int apex_reset(struct gasket_dev *gasket_dev)
{
	int ret;

	if (bypass_top_level)
		return 0;

	if (!is_gcb_in_reset(gasket_dev)) {
		/*
		 * We are not in reset - toggle the reset bit so as to force
		 * the device into a known state.
		 */
		dev_dbg(gasket_dev->dev, "%s: toggle reset\n", __func__);

		ret = apex_enter_reset(gasket_dev);
		if (ret)
			return ret;
	}
	ret = apex_quit_reset(gasket_dev);

	return ret;
}

/*
 * Check permissions for Apex ioctls.
 * Returns true if the current user may execute this ioctl, false otherwise.
 */
static bool apex_ioctl_check_permissions(struct file *filp, uint cmd)
{
	return !!(filp->f_mode & FMODE_WRITE);
}

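/*
 * Handle the gate-clock ioctl: when enabling, quiesce the AXI interface and
 * gate the GCB clock; when disabling, ungate the clock and release the AXI
 * quiesce. No-op when bypass_top_level is set or software clock gating is
 * not allowed.
 */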
static long apex_clock_gating(struct gasket_dev *gasket_dev,
			      struct apex_gate_clock_ioctl __user *argp)
{
	struct apex_gate_clock_ioctl ibuf;

	if (bypass_top_level || !allow_sw_clock_gating)
		return 0;

	if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
		return -EFAULT;

	dev_dbg(gasket_dev->dev, "%s %llu\n", __func__, ibuf.enable);

	if (ibuf.enable) {
		/* Quiesce AXI, gate the GCB clock. */
		gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
					    APEX_BAR2_REG_AXI_QUIESCE, 0x1, 1,
					    16);
		gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
					    APEX_BAR2_REG_GCB_CLOCK_GATE, 0x1,
					    2, 18);
	} else {
		/* Un-gate the GCB clock, un-quiesce AXI. */
		gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
					    APEX_BAR2_REG_GCB_CLOCK_GATE, 0x0,
					    2, 18);
		gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
					    APEX_BAR2_REG_AXI_QUIESCE, 0x0, 1,
					    16);
	}
	return 0;
}

/* Apex-specific ioctl handler. */
static long apex_ioctl(struct file *filp, uint cmd, void __user *argp)
{
	struct gasket_dev *gasket_dev = filp->private_data;

	if (!apex_ioctl_check_permissions(filp, cmd))
		return -EPERM;

	switch (cmd) {
	case APEX_IOCTL_GATE_CLOCK:
		return apex_clock_gating(gasket_dev, argp);
	default:
		return -ENOTTY;
	}
}

/* Display driver sysfs entries. */
static ssize_t sysfs_show(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	int ret;
	struct gasket_dev *gasket_dev;
	struct gasket_sysfs_attribute *gasket_attr;
	enum sysfs_attribute_type type;

	gasket_dev = gasket_sysfs_get_device_data(device);
	if (!gasket_dev) {
		dev_err(device, "No Apex device sysfs mapping found\n");
		return -ENODEV;
	}

	gasket_attr = gasket_sysfs_get_attr(device, attr);
	if (!gasket_attr) {
		dev_err(device, "No Apex device sysfs attr data found\n");
		gasket_sysfs_put_device_data(device, gasket_dev);
		return -ENODEV;
	}

	type = (enum sysfs_attribute_type)gasket_attr->data.attr_type;
	switch (type) {
	case ATTR_KERNEL_HIB_PAGE_TABLE_SIZE:
		ret = scnprintf(buf, PAGE_SIZE, "%u\n",
				gasket_page_table_num_entries(
					gasket_dev->page_table[0]));
		break;
	case ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE:
		ret = scnprintf(buf, PAGE_SIZE, "%u\n",
				gasket_page_table_num_simple_entries(
					gasket_dev->page_table[0]));
		break;
	case ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES:
		ret = scnprintf(buf, PAGE_SIZE, "%u\n",
				gasket_page_table_num_active_pages(
					gasket_dev->page_table[0]));
		break;
	default:
		dev_dbg(gasket_dev->dev, "Unknown attribute: %s\n",
			attr->attr.name);
		ret = 0;
		break;
	}

	gasket_sysfs_put_attr(device, gasket_attr);
	gasket_sysfs_put_device_data(device, gasket_dev);
	return ret;
}

static struct gasket_sysfs_attribute apex_sysfs_attrs[] = {
	GASKET_SYSFS_RO(node_0_page_table_entries, sysfs_show,
			ATTR_KERNEL_HIB_PAGE_TABLE_SIZE),
	GASKET_SYSFS_RO(node_0_simple_page_table_entries, sysfs_show,
			ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE),
	GASKET_SYSFS_RO(node_0_num_mapped_pages, sysfs_show,
			ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES),
	GASKET_END_OF_ATTR_ARRAY
};

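/* On device open, reset the device via gasket_reset_nolock(). */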
static int apex_device_open_cb(struct gasket_dev *gasket_dev)
{
	return gasket_reset_nolock(gasket_dev);
}

static const struct pci_device_id apex_pci_ids[] = {
	{ PCI_DEVICE(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID) }, { 0 }
};

static void apex_pci_fixup_class(struct pci_dev *pdev)
{
	pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
}
DECLARE_PCI_FIXUP_CLASS_HEADER(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID,
			       PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);

static int apex_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	int ret;
	ulong page_table_ready, msix_table_ready;
	int retries = 0;
	struct gasket_dev *gasket_dev;

	ret = pci_enable_device(pci_dev);
	if (ret) {
		dev_err(&pci_dev->dev, "error enabling PCI device\n");
		return ret;
	}

	pci_set_master(pci_dev);

	ret = gasket_pci_add_device(pci_dev, &gasket_dev);
	if (ret) {
		dev_err(&pci_dev->dev, "error adding gasket device\n");
		pci_disable_device(pci_dev);
		return ret;
	}

	pci_set_drvdata(pci_dev, gasket_dev);
	apex_reset(gasket_dev);

	/* Wait for the page table and MSI-X table to finish initializing. */
	while (retries < APEX_RESET_RETRY) {
		page_table_ready =
			gasket_dev_read_64(gasket_dev, APEX_BAR_INDEX,
					   APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE_INIT);
		msix_table_ready =
			gasket_dev_read_64(gasket_dev, APEX_BAR_INDEX,
					   APEX_BAR2_REG_KERNEL_HIB_MSIX_TABLE_INIT);
		if (page_table_ready && msix_table_ready)
			break;
		schedule_timeout(msecs_to_jiffies(APEX_RESET_DELAY));
		retries++;
	}

	if (retries == APEX_RESET_RETRY) {
		if (!page_table_ready)
			dev_err(gasket_dev->dev, "Page table init timed out\n");
		if (!msix_table_ready)
			dev_err(gasket_dev->dev, "MSI-X table init timed out\n");
		ret = -ETIMEDOUT;
		goto remove_device;
	}

	ret = gasket_sysfs_create_entries(gasket_dev->dev_info.device,
					  apex_sysfs_attrs);
	if (ret)
		dev_err(&pci_dev->dev, "error creating device sysfs entries\n");

	ret = gasket_enable_device(gasket_dev);
	if (ret) {
		dev_err(&pci_dev->dev, "error enabling gasket device\n");
		goto remove_device;
	}

	/* Place the device in low-power mode until first open. */
	if (allow_power_save)
		apex_enter_reset(gasket_dev);

	return 0;

remove_device:
	gasket_pci_remove_device(pci_dev);
	pci_disable_device(pci_dev);
	return ret;
}

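/* Tear down the gasket device and disable the PCI device on removal. */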
static void apex_pci_remove(struct pci_dev *pci_dev)
{
	struct gasket_dev *gasket_dev = pci_get_drvdata(pci_dev);

	gasket_disable_device(gasket_dev);
	gasket_pci_remove_device(pci_dev);
	pci_disable_device(pci_dev);
}

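/*
 * Gasket framework description of the Apex device: device numbering, BAR
 * layout, page table configuration, interrupt layout, and driver callbacks.
 */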
static struct gasket_driver_desc apex_desc = {
	.name = "apex",
	.driver_version = APEX_DRIVER_VERSION,
	.major = 120,
	.minor = 0,
	.module = THIS_MODULE,
	.pci_id_table = apex_pci_ids,

	.num_page_tables = NUM_NODES,
	.page_table_bar_index = APEX_BAR_INDEX,
	.page_table_configs = apex_page_table_configs,
	.page_table_extended_bit = APEX_EXTENDED_SHIFT,

	.bar_descriptions = {
		GASKET_UNUSED_BAR,
		GASKET_UNUSED_BAR,
		{ APEX_BAR_BYTES, (VM_WRITE | VM_READ), APEX_BAR_OFFSET,
			NUM_REGIONS, mappable_regions, PCI_BAR },
		GASKET_UNUSED_BAR,
		GASKET_UNUSED_BAR,
		GASKET_UNUSED_BAR,
	},
	.coherent_buffer_description = {
		APEX_CH_MEM_BYTES,
		(VM_WRITE | VM_READ),
		APEX_CM_OFFSET,
	},
	.interrupt_type = PCI_MSIX,
	.interrupt_bar_index = APEX_BAR_INDEX,
	.num_interrupts = APEX_INTERRUPT_COUNT,
	.interrupts = apex_interrupts,
	.interrupt_pack_width = 7,

	.device_open_cb = apex_device_open_cb,
	.device_close_cb = apex_device_cleanup,

	.ioctl_handler_cb = apex_ioctl,
	.device_status_cb = apex_get_status,
	.hardware_revision_cb = NULL,
	.device_reset_cb = apex_reset,
};

static struct pci_driver apex_pci_driver = {
	.name = "apex",
	.probe = apex_pci_probe,
	.remove = apex_pci_remove,
	.id_table = apex_pci_ids,
};

static int __init apex_init(void)
{
	int ret;

	ret = gasket_register_device(&apex_desc);
	if (ret)
		return ret;
	ret = pci_register_driver(&apex_pci_driver);
	if (ret)
		gasket_unregister_device(&apex_desc);
	return ret;
}

static void apex_exit(void)
{
	pci_unregister_driver(&apex_pci_driver);
	gasket_unregister_device(&apex_desc);
}
MODULE_DESCRIPTION("Google Apex driver");
MODULE_VERSION(APEX_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("John Joseph <jnjoseph@google.com>");
MODULE_DEVICE_TABLE(pci, apex_pci_ids);
module_init(apex_init);
module_exit(apex_exit);