1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/console.h>
26#include <linux/delay.h>
27#include <linux/module.h>
28#include <linux/pci.h>
29#include <linux/pm_runtime.h>
30#include <linux/vga_switcheroo.h>
31#include <linux/mmu_notifier.h>
32
33#include <drm/drm_crtc_helper.h>
34#include <drm/drm_ioctl.h>
35#include <drm/drm_vblank.h>
36
37#include <core/gpuobj.h>
38#include <core/option.h>
39#include <core/pci.h>
40#include <core/tegra.h>
41
42#include <nvif/driver.h>
43#include <nvif/fifo.h>
44#include <nvif/push006c.h>
45#include <nvif/user.h>
46
47#include <nvif/class.h>
48#include <nvif/cl0002.h>
49#include <nvif/cla06f.h>
50
51#include "nouveau_drv.h"
52#include "nouveau_dma.h"
53#include "nouveau_ttm.h"
54#include "nouveau_gem.h"
55#include "nouveau_vga.h"
56#include "nouveau_led.h"
57#include "nouveau_hwmon.h"
58#include "nouveau_acpi.h"
59#include "nouveau_bios.h"
60#include "nouveau_ioctl.h"
61#include "nouveau_abi16.h"
62#include "nouveau_fbcon.h"
63#include "nouveau_fence.h"
64#include "nouveau_debugfs.h"
65#include "nouveau_usif.h"
66#include "nouveau_connector.h"
67#include "nouveau_platform.h"
68#include "nouveau_svm.h"
69#include "nouveau_dmem.h"
70
/* Module parameters — all are readable by root only and fixed after load
 * (perm 0400); they are consumed at probe/init time.
 */

/* Free-form option string forwarded to the nvkm device core. */
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
module_param_named(config, nouveau_config, charp, 0400);

/* Debug-level string forwarded to the nvkm device core. */
MODULE_PARM_DESC(debug, "debug string to pass to driver core");
static char *nouveau_debug;
module_param_named(debug, nouveau_debug, charp, 0400);

/* Non-zero skips creation of the kernel/CE channels and fence setup. */
MODULE_PARM_DESC(noaccel, "disable kernel/abi16 acceleration");
static int nouveau_noaccel = 0;
module_param_named(noaccel, nouveau_noaccel, int, 0400);

/* -1 = auto-detect (default), 0 = disabled, 1 = enabled, 2 = headless. */
MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
	        "0 = disabled, 1 = enabled, 2 = headless)");
int nouveau_modeset = -1;
module_param_named(modeset, nouveau_modeset, int, 0400);

/* Non-zero sets DRIVER_ATOMIC on the PCI driver at probe time. */
MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
static int nouveau_atomic = 0;
module_param_named(atomic, nouveau_atomic, int, 0400);

/* Runtime PM policy, see nouveau_pmops_runtime(). */
MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
static int nouveau_runtime_pm = -1;
module_param_named(runpm, nouveau_runtime_pm, int, 0400);

/* driver_stub is copied into the PCI/platform variants in nouveau_drm_init(). */
static struct drm_driver driver_stub;
static struct drm_driver driver_pci;
static struct drm_driver driver_platform;
99
100static u64
101nouveau_pci_name(struct pci_dev *pdev)
102{
103 u64 name = (u64)pci_domain_nr(pdev->bus) << 32;
104 name |= pdev->bus->number << 16;
105 name |= PCI_SLOT(pdev->devfn) << 8;
106 return name | PCI_FUNC(pdev->devfn);
107}
108
109static u64
110nouveau_platform_name(struct platform_device *platformdev)
111{
112 return platformdev->id;
113}
114
115static u64
116nouveau_name(struct drm_device *dev)
117{
118 if (dev_is_pci(dev->dev))
119 return nouveau_pci_name(to_pci_dev(dev->dev));
120 else
121 return nouveau_platform_name(to_platform_device(dev->dev));
122}
123
124static inline bool
125nouveau_cli_work_ready(struct dma_fence *fence)
126{
127 if (!dma_fence_is_signaled(fence))
128 return false;
129 dma_fence_put(fence);
130 return true;
131}
132
133static void
134nouveau_cli_work(struct work_struct *w)
135{
136 struct nouveau_cli *cli = container_of(w, typeof(*cli), work);
137 struct nouveau_cli_work *work, *wtmp;
138 mutex_lock(&cli->lock);
139 list_for_each_entry_safe(work, wtmp, &cli->worker, head) {
140 if (!work->fence || nouveau_cli_work_ready(work->fence)) {
141 list_del(&work->head);
142 work->func(work);
143 }
144 }
145 mutex_unlock(&cli->lock);
146}
147
148static void
149nouveau_cli_work_fence(struct dma_fence *fence, struct dma_fence_cb *cb)
150{
151 struct nouveau_cli_work *work = container_of(cb, typeof(*work), cb);
152 schedule_work(&work->cli->work);
153}
154
/* Queue a work item to run once @fence signals.
 *
 * Takes a reference on @fence (dropped by nouveau_cli_work_ready() once the
 * fence has signalled).  The item is added to cli->worker *before* the fence
 * callback is installed: the callback may fire immediately, and the worker
 * must then find the item on the list.  If dma_fence_add_callback() reports
 * the fence already signalled, the callback is invoked by hand to schedule
 * the worker.
 */
void
nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
		       struct nouveau_cli_work *work)
{
	work->fence = dma_fence_get(fence);
	work->cli = cli;
	mutex_lock(&cli->lock);
	list_add_tail(&work->head, &cli->worker);
	if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
		nouveau_cli_work_fence(fence, &work->cb);
	mutex_unlock(&cli->lock);
}
167
/* Tear down a client created by nouveau_cli_init().
 *
 * Order matters: the deferred worker must drain (and its list be empty)
 * before the VMMs/MMU/device objects it may reference are destroyed, and
 * the nvif client itself goes last.  The client dtor is serialised against
 * other users of the master client via drm->master.lock.
 */
static void
nouveau_cli_fini(struct nouveau_cli *cli)
{
	/* Wait for any pending fence-triggered work and verify nothing was
	 * left queued.
	 */
	flush_work(&cli->work);
	WARN_ON(!list_empty(&cli->worker));

	usif_client_fini(cli);
	nouveau_vmm_fini(&cli->svm);
	nouveau_vmm_fini(&cli->vmm);
	nvif_mmu_dtor(&cli->mmu);
	nvif_device_dtor(&cli->device);
	mutex_lock(&cli->drm->master.lock);
	nvif_client_dtor(&cli->base);
	mutex_unlock(&cli->drm->master.lock);
}
188
/* Initialise a nouveau client: create the nvif client/device/MMU/VMM
 * objects and pick the newest supported MEM class.
 *
 * @drm:   owning driver instance
 * @sname: human-readable client name (copied into cli->name)
 * @cli:   zeroed client structure to fill in
 *
 * The master client is constructed directly against the driver core; every
 * other client is created as a child of the master (under master.lock).
 * Returns 0 on success or a negative error code; on failure the partially
 * constructed client is torn down via nouveau_cli_fini().
 */
static int
nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
		 struct nouveau_cli *cli)
{
	/* Class probe tables, newest first; nvif_mclass() returns the index
	 * of the first class the device supports.
	 */
	static const struct nvif_mclass
	mems[] = {
		{ NVIF_CLASS_MEM_GF100, -1 },
		{ NVIF_CLASS_MEM_NV50 , -1 },
		{ NVIF_CLASS_MEM_NV04 , -1 },
		{}
	};
	static const struct nvif_mclass
	mmus[] = {
		{ NVIF_CLASS_MMU_GF100, -1 },
		{ NVIF_CLASS_MMU_NV50 , -1 },
		{ NVIF_CLASS_MMU_NV04 , -1 },
		{}
	};
	static const struct nvif_mclass
	vmms[] = {
		{ NVIF_CLASS_VMM_GP100, -1 },
		{ NVIF_CLASS_VMM_GM200, -1 },
		{ NVIF_CLASS_VMM_GF100, -1 },
		{ NVIF_CLASS_VMM_NV50 , -1 },
		{ NVIF_CLASS_VMM_NV04 , -1 },
		{}
	};
	u64 device = nouveau_name(drm->dev);
	int ret;

	snprintf(cli->name, sizeof(cli->name), "%s", sname);
	cli->drm = drm;
	mutex_init(&cli->mutex);
	usif_client_init(cli);

	INIT_WORK(&cli->work, nouveau_cli_work);
	INIT_LIST_HEAD(&cli->worker);
	mutex_init(&cli->lock);

	/* The master client talks straight to the driver core; children are
	 * allocated from the master, serialised by master.lock.
	 */
	if (cli == &drm->master) {
		ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
				       cli->name, device, &cli->base);
	} else {
		mutex_lock(&drm->master.lock);
		ret = nvif_client_ctor(&drm->master.base, cli->name, device,
				       &cli->base);
		mutex_unlock(&drm->master.lock);
	}
	if (ret) {
		NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);
		goto done;
	}

	/* .device = ~0 selects the default (only) device of the client. */
	ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE,
			       &(struct nv_device_v0) {
					.device = ~0,
			       }, sizeof(struct nv_device_v0),
			       &cli->device);
	if (ret) {
		NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);
		goto done;
	}

	ret = nvif_mclass(&cli->device.object, mmus);
	if (ret < 0) {
		NV_PRINTK(err, cli, "No supported MMU class\n");
		goto done;
	}

	ret = nvif_mmu_ctor(&cli->device.object, "drmMmu", mmus[ret].oclass,
			    &cli->mmu);
	if (ret) {
		NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
		goto done;
	}

	ret = nvif_mclass(&cli->mmu.object, vmms);
	if (ret < 0) {
		NV_PRINTK(err, cli, "No supported VMM class\n");
		goto done;
	}

	ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
	if (ret) {
		NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret);
		goto done;
	}

	ret = nvif_mclass(&cli->mmu.object, mems);
	if (ret < 0) {
		NV_PRINTK(err, cli, "No supported MEM class\n");
		goto done;
	}

	/* Remember the selected memory class for later object creation. */
	cli->mem = &mems[ret];
	return 0;
done:
	if (ret)
		nouveau_cli_fini(cli);
	return ret;
}
290
/* Tear down the copy-engine channel: idle it first so in-flight work
 * completes, drop the TTM copy object, then delete the channel.
 */
static void
nouveau_accel_ce_fini(struct nouveau_drm *drm)
{
	nouveau_channel_idle(drm->cechan);
	nvif_object_dtor(&drm->ttm.copy);
	nouveau_channel_del(&drm->cechan);
}
298
/* Create a dedicated channel for buffer copies (used by TTM migration),
 * where the hardware supports one.
 *
 * Kepler and newer expose copy engines via runlists; on 0xa3..0xac Tesla
 * chips (except 0xaa/0xac) a second DMA channel is used instead.  Failure
 * is non-fatal: the driver continues without a copy channel.
 */
static void
nouveau_accel_ce_init(struct nouveau_drm *drm)
{
	struct nvif_device *device = &drm->client.device;
	int ret = 0;

	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		ret = nouveau_channel_new(drm, device,
					  nvif_fifo_runlist_ce(device), 0,
					  true, &drm->cechan);
	} else
	if (device->info.chipset >= 0xa3 &&
	    device->info.chipset != 0xaa &&
	    device->info.chipset != 0xac) {
		/* Pre-Kepler path: plain DMA channel with the standard
		 * FB/TT DMA objects.
		 */
		ret = nouveau_channel_new(drm, device, NvDmaFB, NvDmaTT, false,
					  &drm->cechan);
	}

	if (ret)
		NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
}
328
/* Tear down the kernel graphics channel and its notifier objects.  The
 * channel is idled first; all functions called here tolerate objects that
 * were never created.
 */
static void
nouveau_accel_gr_fini(struct nouveau_drm *drm)
{
	nouveau_channel_idle(drm->channel);
	nvif_object_dtor(&drm->ntfy);
	nvkm_gpuobj_del(&drm->notify);
	nouveau_channel_del(&drm->channel);
}
337
/* Create the kernel graphics channel, plus the software class and the
 * M2MF notifier object needed on older hardware.  Any failure logs an
 * error and unwinds via nouveau_accel_gr_fini(); acceleration is then
 * simply unavailable.
 */
static void
nouveau_accel_gr_init(struct nouveau_drm *drm)
{
	struct nvif_device *device = &drm->client.device;
	u32 arg0, arg1;
	int ret;

	/* Kepler+ selects the GR engine via a runlist mask; earlier chips
	 * take the FB/TT DMA object handles instead.
	 */
	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		arg0 = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
		arg1 = 1;
	} else {
		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	}

	ret = nouveau_channel_new(drm, device, arg0, arg1, false,
				  &drm->channel);
	if (ret) {
		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
		nouveau_accel_gr_fini(drm);
		return;
	}

	/* Pre-Tesla chips need a software class object bound to the channel
	 * (unless channel creation already attached a sw client).
	 */
	if (!drm->channel->nvsw.client && device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nvif_object_ctor(&drm->channel->user, "drmNvsw",
				       NVDRM_NVSW, nouveau_abi16_swclass(drm),
				       NULL, 0, &drm->channel->nvsw);
		if (ret == 0) {
			struct nvif_push *push = drm->channel->chan.push;
			/* Bind the sw object to subchannel NV_SW. */
			ret = PUSH_WAIT(push, 2);
			if (ret == 0)
				PUSH_NVSQ(push, NV_SW, 0x0000, drm->channel->nvsw.handle);
		}

		if (ret) {
			NV_ERROR(drm, "failed to allocate sw class, %d\n", ret);
			nouveau_accel_gr_fini(drm);
			return;
		}
	}

	/* Pre-Fermi chips need a 32-byte VRAM notifier block plus a DMA
	 * object covering it for M2MF transfers.
	 */
	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		ret = nvkm_gpuobj_new(nvxx_device(device), 32, 0, false, NULL,
				      &drm->notify);
		if (ret) {
			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
			nouveau_accel_gr_fini(drm);
			return;
		}

		ret = nvif_object_ctor(&drm->channel->user, "drmM2mfNtfy",
				       NvNotify0, NV_DMA_IN_MEMORY,
				       &(struct nv_dma_v0) {
						.target = NV_DMA_V0_TARGET_VRAM,
						.access = NV_DMA_V0_ACCESS_RDWR,
						.start = drm->notify->addr,
						.limit = drm->notify->addr + 31
				       }, sizeof(struct nv_dma_v0),
				       &drm->ntfy);
		if (ret) {
			nouveau_accel_gr_fini(drm);
			return;
		}
	}
}
412
/* Tear down all acceleration state: CE channel, GR channel, then the
 * fence implementation (if one was created).
 */
static void
nouveau_accel_fini(struct nouveau_drm *drm)
{
	nouveau_accel_ce_fini(drm);
	nouveau_accel_gr_fini(drm);
	if (drm->fence)
		nouveau_fence(drm)->dtor(drm);
}
421
/* Set up acceleration: channel bookkeeping, a per-generation fence
 * implementation, the usermode object (Volta+), the GR and CE channels,
 * and finally the TTM move hooks.  All failures are soft — the driver
 * degrades to unaccelerated operation.
 */
static void
nouveau_accel_init(struct nouveau_drm *drm)
{
	struct nvif_device *device = &drm->client.device;
	struct nvif_sclass *sclass;
	int ret, i, n;

	if (nouveau_noaccel)
		return;

	ret = nouveau_channels_init(drm);
	if (ret)
		return;

	/* Pick a fence implementation based on the newest channel class the
	 * device exposes; ret stays -ENOSYS if no class matched.
	 */
	ret = n = nvif_object_sclass_get(&device->object, &sclass);
	if (ret < 0)
		return;

	for (ret = -ENOSYS, i = 0; i < n; i++) {
		switch (sclass[i].oclass) {
		case NV03_CHANNEL_DMA:
			ret = nv04_fence_create(drm);
			break;
		case NV10_CHANNEL_DMA:
			ret = nv10_fence_create(drm);
			break;
		case NV17_CHANNEL_DMA:
		case NV40_CHANNEL_DMA:
			ret = nv17_fence_create(drm);
			break;
		case NV50_CHANNEL_GPFIFO:
			ret = nv50_fence_create(drm);
			break;
		case G82_CHANNEL_GPFIFO:
			ret = nv84_fence_create(drm);
			break;
		case FERMI_CHANNEL_GPFIFO:
		case KEPLER_CHANNEL_GPFIFO_A:
		case KEPLER_CHANNEL_GPFIFO_B:
		case MAXWELL_CHANNEL_GPFIFO_A:
		case PASCAL_CHANNEL_GPFIFO_A:
		case VOLTA_CHANNEL_GPFIFO_A:
		case TURING_CHANNEL_GPFIFO_A:
			ret = nvc0_fence_create(drm);
			break;
		default:
			break;
		}
	}

	nvif_object_sclass_put(&sclass);
	if (ret) {
		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	/* Volta+ exposes a usermode doorbell object. */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_VOLTA) {
		ret = nvif_user_ctor(device, "drmUsermode");
		if (ret)
			return;
	}

	nouveau_accel_gr_init(drm);
	nouveau_accel_ce_init(drm);

	nouveau_bo_move_init(drm);
}
497
498static void __printf(2, 3)
499nouveau_drm_errorf(struct nvif_object *object, const char *fmt, ...)
500{
501 struct nouveau_drm *drm = container_of(object->parent, typeof(*drm), parent);
502 struct va_format vaf;
503 va_list va;
504
505 va_start(va, fmt);
506 vaf.fmt = fmt;
507 vaf.va = &va;
508 NV_ERROR(drm, "%pV", &vaf);
509 va_end(va);
510}
511
512static void __printf(2, 3)
513nouveau_drm_debugf(struct nvif_object *object, const char *fmt, ...)
514{
515 struct nouveau_drm *drm = container_of(object->parent, typeof(*drm), parent);
516 struct va_format vaf;
517 va_list va;
518
519 va_start(va, fmt);
520 vaf.fmt = fmt;
521 vaf.va = &va;
522 NV_DEBUG(drm, "%pV", &vaf);
523 va_end(va);
524}
525
/* nvif parent hooks: route NVIF debug/error messages into DRM logging. */
static const struct nvif_parent_func
nouveau_parent = {
	.debugf = nouveau_drm_debugf,
	.errorf = nouveau_drm_errorf,
};
531
/* Allocate and initialise the per-device driver state for @dev.
 *
 * Construction order: master client, regular client, VGA, TTM, BIOS,
 * acceleration, display, then the auxiliary subsystems (debugfs, hwmon,
 * SVM, DMEM, fbcon, LED).  The failure ladder at the bottom unwinds in
 * exact reverse order.  On success, runtime PM is enabled with a 5 s
 * autosuspend delay when the platform supports it.
 *
 * Returns 0 on success or a negative error code.
 */
static int
nouveau_drm_device_init(struct drm_device *dev)
{
	struct nouveau_drm *drm;
	int ret;

	if (!(drm = kzalloc(sizeof(*drm), GFP_KERNEL)))
		return -ENOMEM;
	dev->dev_private = drm;
	drm->dev = dev;

	nvif_parent_ctor(&nouveau_parent, &drm->parent);
	drm->master.base.object.parent = &drm->parent;

	ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
	if (ret)
		goto fail_alloc;

	ret = nouveau_cli_init(drm, "DRM", &drm->client);
	if (ret)
		goto fail_master;

	dev->irq_enabled = true;

	nvxx_client(&drm->client.base)->debug =
		nvkm_dbgopt(nouveau_debug, "DRM");

	INIT_LIST_HEAD(&drm->clients);
	spin_lock_init(&drm->tile.lock);

	/* NVC1 only: clear bit 11 of register 0x88080 — presumably a
	 * chip-specific workaround; exact purpose not documented here.
	 */
	if (drm->client.device.info.chipset == 0xc1)
		nvif_mask(&drm->client.device.object, 0x00088080, 0x00000800, 0x00000000);

	nouveau_vga_init(drm);

	ret = nouveau_ttm_init(drm);
	if (ret)
		goto fail_ttm;

	ret = nouveau_bios_init(dev);
	if (ret)
		goto fail_bios;

	nouveau_accel_init(drm);

	ret = nouveau_display_create(dev);
	if (ret)
		goto fail_dispctor;

	if (dev->mode_config.num_crtc) {
		ret = nouveau_display_init(dev, false, false);
		if (ret)
			goto fail_dispinit;
	}

	nouveau_debugfs_init(drm);
	nouveau_hwmon_init(dev);
	nouveau_svm_init(drm);
	nouveau_dmem_init(drm);
	nouveau_fbcon_init(dev);
	nouveau_led_init(dev);

	if (nouveau_pmops_runtime()) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put(dev->dev);
	}

	return 0;

fail_dispinit:
	nouveau_display_destroy(dev);
fail_dispctor:
	nouveau_accel_fini(drm);
	nouveau_bios_takedown(dev);
fail_bios:
	nouveau_ttm_fini(drm);
fail_ttm:
	nouveau_vga_fini(drm);
	nouveau_cli_fini(&drm->client);
fail_master:
	nouveau_cli_fini(&drm->master);
fail_alloc:
	nvif_parent_dtor(&drm->parent);
	kfree(drm);
	return ret;
}
626
/* Tear down everything nouveau_drm_device_init() set up, in reverse
 * order.  Runtime PM is pinned and forbidden first so the device cannot
 * autosuspend mid-teardown.
 */
static void
nouveau_drm_device_fini(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (nouveau_pmops_runtime()) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	nouveau_led_fini(dev);
	nouveau_fbcon_fini(dev);
	nouveau_dmem_fini(drm);
	nouveau_svm_fini(drm);
	nouveau_hwmon_fini(dev);
	nouveau_debugfs_fini(drm);

	if (dev->mode_config.num_crtc)
		nouveau_display_fini(dev, false, false);
	nouveau_display_destroy(dev);

	nouveau_accel_fini(drm);
	nouveau_bios_takedown(dev);

	nouveau_ttm_fini(drm);
	nouveau_vga_fini(drm);

	nouveau_cli_fini(&drm->client);
	nouveau_cli_fini(&drm->master);
	nvif_parent_dtor(&drm->parent);
	kfree(drm);
}
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700static void quirk_broken_nv_runpm(struct pci_dev *pdev)
701{
702 struct drm_device *dev = pci_get_drvdata(pdev);
703 struct nouveau_drm *drm = nouveau_drm(dev);
704 struct pci_dev *bridge = pci_upstream_bridge(pdev);
705
706 if (!bridge || bridge->vendor != PCI_VENDOR_ID_INTEL)
707 return;
708
709 switch (bridge->device) {
710 case 0x1901:
711 drm->old_pm_cap = pdev->pm_cap;
712 pdev->pm_cap = 0;
713 NV_INFO(drm, "Disabling PCI power management to avoid bug\n");
714 break;
715 }
716}
717
/* PCI probe entry point.
 *
 * A throw-away nvkm device (no subdevs, debug level "error") is created
 * first purely to confirm the hardware is supported, then deleted before
 * any conflicting firmware framebuffers are removed and the real device
 * (all subdevs, ~0ULL mask) is built.  The unwind ladder at the bottom
 * reverses the setup steps.
 *
 * Returns 0 on success, -EPROBE_DEFER if vga_switcheroo asks us to wait,
 * or a negative error code.
 */
static int nouveau_drm_probe(struct pci_dev *pdev,
			     const struct pci_device_id *pent)
{
	struct nvkm_device *device;
	struct drm_device *drm_dev;
	int ret;

	if (vga_switcheroo_client_probe_defer(pdev))
		return -EPROBE_DEFER;

	/* Detect-only pass: no subdevs, minimal logging. */
	ret = nvkm_device_pci_new(pdev, nouveau_config, "error",
				  true, false, 0, &device);
	if (ret)
		return ret;

	nvkm_device_del(&device);

	/* Evict any firmware framebuffer claiming our apertures. */
	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "nouveaufb");
	if (ret)
		return ret;

	ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
				  true, true, ~0ULL, &device);
	if (ret)
		return ret;

	pci_set_master(pdev);

	if (nouveau_atomic)
		driver_pci.driver_features |= DRIVER_ATOMIC;

	drm_dev = drm_dev_alloc(&driver_pci, &pdev->dev);
	if (IS_ERR(drm_dev)) {
		ret = PTR_ERR(drm_dev);
		goto fail_nvkm;
	}

	ret = pci_enable_device(pdev);
	if (ret)
		goto fail_drm;

	pci_set_drvdata(pdev, drm_dev);

	ret = nouveau_drm_device_init(drm_dev);
	if (ret)
		goto fail_pci;

	ret = drm_dev_register(drm_dev, pent->driver_data);
	if (ret)
		goto fail_drm_dev_init;

	/* Requires drvdata, so applied after pci_set_drvdata(). */
	quirk_broken_nv_runpm(pdev);
	return 0;

fail_drm_dev_init:
	nouveau_drm_device_fini(drm_dev);
fail_pci:
	pci_disable_device(pdev);
fail_drm:
	drm_dev_put(drm_dev);
fail_nvkm:
	nvkm_device_del(&device);
	return ret;
}
786
/* Unregister and destroy a DRM device plus its backing nvkm device.
 *
 * The nvkm device handle is looked up *before* nouveau_drm_device_fini()
 * destroys the client it is found through, and deleted last, after the
 * final drm_dev_put().
 */
void
nouveau_drm_device_remove(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_client *client;
	struct nvkm_device *device;

	drm_dev_unregister(dev);

	dev->irq_enabled = false;
	client = nvxx_client(&drm->client.base);
	device = nvkm_device_find(client->device);

	nouveau_drm_device_fini(dev);
	drm_dev_put(dev);
	nvkm_device_del(&device);
}
804
/* PCI remove entry point: undo quirk_broken_nv_runpm()'s pm_cap masking
 * (if it was applied), then tear the device down and disable it.
 */
static void
nouveau_drm_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct nouveau_drm *drm = nouveau_drm(dev);

	/* old_pm_cap is non-zero only when the runpm quirk fired. */
	if (drm->old_pm_cap)
		pdev->pm_cap = drm->old_pm_cap;
	nouveau_drm_device_remove(dev);
	pci_disable_device(pdev);
}
817
/* Common suspend path shared by system sleep and runtime PM.
 *
 * @runtime: true when called from runtime PM (passed through to the
 *           display code).
 *
 * Order: SVM/DMEM/LED, console+display, evict VRAM buffers, idle the
 * kernel channels, suspend the fence state, and finally the nvif object
 * tree.  On failure, fence and display are resumed before returning.
 * Returns 0 on success or a negative error code.
 */
static int
nouveau_do_suspend(struct drm_device *dev, bool runtime)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct ttm_resource_manager *man;
	int ret;

	nouveau_svm_suspend(drm);
	nouveau_dmem_suspend(drm);
	nouveau_led_suspend(dev);

	if (dev->mode_config.num_crtc) {
		NV_DEBUG(drm, "suspending console...\n");
		nouveau_fbcon_set_suspend(dev, 1);
		NV_DEBUG(drm, "suspending display...\n");
		ret = nouveau_display_suspend(dev, runtime);
		if (ret)
			return ret;
	}

	NV_DEBUG(drm, "evicting buffers...\n");

	man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_resource_manager_evict_all(&drm->ttm.bdev, man);

	NV_DEBUG(drm, "waiting for kernel channels to go idle...\n");
	if (drm->cechan) {
		ret = nouveau_channel_idle(drm->cechan);
		if (ret)
			goto fail_display;
	}

	if (drm->channel) {
		ret = nouveau_channel_idle(drm->channel);
		if (ret)
			goto fail_display;
	}

	NV_DEBUG(drm, "suspending fence...\n");
	if (drm->fence && nouveau_fence(drm)->suspend) {
		if (!nouveau_fence(drm)->suspend(drm)) {
			ret = -ENOMEM;
			goto fail_display;
		}
	}

	NV_DEBUG(drm, "suspending object tree...\n");
	ret = nvif_client_suspend(&drm->master.base);
	if (ret)
		goto fail_client;

	return 0;

fail_client:
	if (drm->fence && nouveau_fence(drm)->resume)
		nouveau_fence(drm)->resume(drm);

fail_display:
	if (dev->mode_config.num_crtc) {
		NV_DEBUG(drm, "resuming display...\n");
		nouveau_display_resume(dev, runtime);
	}
	return ret;
}
882
/* Common resume path shared by system sleep and runtime PM; reverses
 * nouveau_do_suspend(): object tree, fence, VBIOS init, display/console,
 * then LED/DMEM/SVM.  Returns 0 on success, or the client-resume error.
 */
static int
nouveau_do_resume(struct drm_device *dev, bool runtime)
{
	int ret = 0;
	struct nouveau_drm *drm = nouveau_drm(dev);

	NV_DEBUG(drm, "resuming object tree...\n");
	ret = nvif_client_resume(&drm->master.base);
	if (ret) {
		NV_ERROR(drm, "Client resume failed with error: %d\n", ret);
		return ret;
	}

	NV_DEBUG(drm, "resuming fence...\n");
	if (drm->fence && nouveau_fence(drm)->resume)
		nouveau_fence(drm)->resume(drm);

	nouveau_run_vbios_init(dev);

	if (dev->mode_config.num_crtc) {
		NV_DEBUG(drm, "resuming display...\n");
		nouveau_display_resume(dev, runtime);
		NV_DEBUG(drm, "resuming console...\n");
		nouveau_fbcon_set_suspend(dev, 0);
	}

	nouveau_led_resume(dev);
	nouveau_dmem_resume(drm);
	nouveau_svm_resume(drm);
	return 0;
}
914
/* System-sleep suspend callback.  Skipped entirely when vga_switcheroo has
 * already powered the GPU off.  After the driver-level suspend, the PCI
 * device is saved, disabled and put into D3hot; the short delay follows
 * the state change before returning.
 */
int
nouveau_pmops_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int ret;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
	    drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
		return 0;

	ret = nouveau_do_suspend(drm_dev, false);
	if (ret)
		return ret;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	udelay(200);
	return 0;
}
936
/* System-sleep resume callback: bring the PCI device back to D0, restore
 * its config space, re-enable bus mastering, then run the driver-level
 * resume.  Hotplug detection is kicked afterwards regardless of the
 * resume result.  Skipped when vga_switcheroo has the GPU powered off.
 */
int
nouveau_pmops_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int ret;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
	    drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
		return 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	pci_set_master(pdev);

	ret = nouveau_do_resume(drm_dev, false);

	/* Re-check outputs; state may have changed while asleep. */
	nouveau_display_hpd_resume(drm_dev);

	return ret;
}
962
963static int
964nouveau_pmops_freeze(struct device *dev)
965{
966 struct pci_dev *pdev = to_pci_dev(dev);
967 struct drm_device *drm_dev = pci_get_drvdata(pdev);
968 return nouveau_do_suspend(drm_dev, false);
969}
970
971static int
972nouveau_pmops_thaw(struct device *dev)
973{
974 struct pci_dev *pdev = to_pci_dev(dev);
975 struct drm_device *drm_dev = pci_get_drvdata(pdev);
976 return nouveau_do_resume(drm_dev, false);
977}
978
979bool
980nouveau_pmops_runtime(void)
981{
982 if (nouveau_runtime_pm == -1)
983 return nouveau_is_optimus() || nouveau_is_v1_dsm();
984 return nouveau_runtime_pm == 1;
985}
986
/* Runtime-PM suspend callback.  When runtime PM is disallowed by policy,
 * forbid it and bail with -EBUSY.  Otherwise run the Optimus DSM handoff,
 * suspend the driver, then power the PCI device fully down to D3cold
 * (ignoring the hotplug events the power-down can generate).
 */
static int
nouveau_pmops_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int ret;

	if (!nouveau_pmops_runtime()) {
		pm_runtime_forbid(dev);
		return -EBUSY;
	}

	nouveau_switcheroo_optimus_dsm();
	ret = nouveau_do_suspend(drm_dev, true);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_ignore_hotplug(pdev);
	pci_set_power_state(pdev, PCI_D3cold);
	drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
	return ret;
}
1008
/* Runtime-PM resume callback: power the PCI device back to D0, restore
 * its state, resume the driver, then set bit 25 of register 0x088488 —
 * presumably re-arming an interrupt/wake source after D3cold; exact
 * semantics not documented here.  Finishes by marking the switcheroo
 * state ON and kicking hotplug detection.
 */
static int
nouveau_pmops_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct nouveau_drm *drm = nouveau_drm(drm_dev);
	struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
	int ret;

	if (!nouveau_pmops_runtime()) {
		pm_runtime_forbid(dev);
		return -EBUSY;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	pci_set_master(pdev);

	ret = nouveau_do_resume(drm_dev, true);
	if (ret) {
		NV_ERROR(drm, "resume failed with: %d\n", ret);
		return ret;
	}

	nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;

	/* Outputs may have changed while powered down. */
	nouveau_display_hpd_resume(drm_dev);

	return ret;
}
1045
1046static int
1047nouveau_pmops_runtime_idle(struct device *dev)
1048{
1049 if (!nouveau_pmops_runtime()) {
1050 pm_runtime_forbid(dev);
1051 return -EBUSY;
1052 }
1053
1054 pm_runtime_mark_last_busy(dev);
1055 pm_runtime_autosuspend(dev);
1056
1057 return 1;
1058}
1059
/* DRM open hook: create a nouveau client for the new file handle.
 *
 * The device is pinned awake for the duration (-EACCES from
 * pm_runtime_get_sync means runtime PM is disabled — treated as success).
 * The client is named "<comm>[<pid>]" after the opening task, marked
 * non-super, stored in fpriv->driver_priv, and linked onto drm->clients.
 * On failure the partially built client is destroyed.
 */
static int
nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli;
	char name[32], tmpname[TASK_COMM_LEN];
	int ret;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_autosuspend(dev->dev);
		return ret;
	}

	get_task_comm(tmpname, current);
	snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));

	if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) {
		ret = -ENOMEM;
		goto done;
	}

	ret = nouveau_cli_init(drm, name, cli);
	if (ret)
		goto done;

	cli->base.super = false;

	fpriv->driver_priv = cli;

	mutex_lock(&drm->client.mutex);
	list_add(&cli->head, &drm->clients);
	mutex_unlock(&drm->client.mutex);

done:
	/* cli is NULL here only when kzalloc failed. */
	if (ret && cli) {
		nouveau_cli_fini(cli);
		kfree(cli);
	}

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}
1105
/* DRM postclose hook: destroy the per-file client created in
 * nouveau_drm_open().  The device is pinned awake, abi16 state is torn
 * down under the client mutex, the client is unlinked from drm->clients,
 * then fully destroyed and freed.
 */
static void
nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
{
	struct nouveau_cli *cli = nouveau_cli(fpriv);
	struct nouveau_drm *drm = nouveau_drm(dev);

	pm_runtime_get_sync(dev->dev);

	mutex_lock(&cli->mutex);
	if (cli->abi16)
		nouveau_abi16_fini(cli->abi16);
	mutex_unlock(&cli->mutex);

	mutex_lock(&drm->client.mutex);
	list_del(&cli->head);
	mutex_unlock(&drm->client.mutex);

	nouveau_cli_fini(cli);
	kfree(cli);
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
1128
/* Driver-private ioctl table.  All entries allow render nodes except
 * SETPARAM, which is retired and routed to drm_invalid_op.
 */
static const struct drm_ioctl_desc
nouveau_ioctls[] = {
	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_INIT, nouveau_svmm_init, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_BIND, nouveau_svmm_bind, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_RENDER_ALLOW),
};
1146
1147long
1148nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1149{
1150 struct drm_file *filp = file->private_data;
1151 struct drm_device *dev = filp->minor->dev;
1152 long ret;
1153
1154 ret = pm_runtime_get_sync(dev->dev);
1155 if (ret < 0 && ret != -EACCES) {
1156 pm_runtime_put_autosuspend(dev->dev);
1157 return ret;
1158 }
1159
1160 switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
1161 case DRM_NOUVEAU_NVIF:
1162 ret = usif_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd));
1163 break;
1164 default:
1165 ret = drm_ioctl(file, cmd, arg);
1166 break;
1167 }
1168
1169 pm_runtime_mark_last_busy(dev->dev);
1170 pm_runtime_put_autosuspend(dev->dev);
1171 return ret;
1172}
1173
/* File operations for /dev/dri nodes; ioctls are routed through
 * nouveau_drm_ioctl so the device is woken before dispatch.
 */
static const struct file_operations
nouveau_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = nouveau_drm_ioctl,
	.mmap = nouveau_ttm_mmap,
	.poll = drm_poll,
	.read = drm_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = nouveau_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
1188
/* Template drm_driver; copied into driver_pci and driver_platform in
 * nouveau_drm_init() (DRIVER_ATOMIC may be added to the PCI copy at probe
 * time depending on the "atomic" parameter).
 */
static struct drm_driver
driver_stub = {
	.driver_features =
		DRIVER_GEM | DRIVER_MODESET | DRIVER_RENDER
#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
		| DRIVER_KMS_LEGACY_CONTEXT
#endif
		,

	.open = nouveau_drm_open,
	.postclose = nouveau_drm_postclose,
	.lastclose = nouveau_vga_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = nouveau_drm_debugfs_init,
#endif

	.ioctls = nouveau_ioctls,
	.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
	.fops = &nouveau_driver_fops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,

	.dumb_create = nouveau_display_dumb_create,
	.dumb_map_offset = nouveau_display_dumb_map_offset,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
#ifdef GIT_REVISION
	.date = GIT_REVISION,
#else
	.date = DRIVER_DATE,
#endif
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
1228
/* Match any NVIDIA (or legacy NVIDIA/SGS) device in the display base
 * class; the exact-model check happens in nouveau_drm_probe() via the
 * detect-only nvkm device.
 */
static struct pci_device_id
nouveau_drm_pci_table[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
		.class = PCI_BASE_CLASS_DISPLAY << 16,
		.class_mask  = 0xff << 16,
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
		.class = PCI_BASE_CLASS_DISPLAY << 16,
		.class_mask  = 0xff << 16,
	},
	{}
};
1243
/* Dump the effective module parameters at load time (debug level only).
 * Some of the values referenced here are defined in other nouveau source
 * files.
 */
static void nouveau_display_options(void)
{
	DRM_DEBUG_DRIVER("Loading Nouveau with parameters:\n");

	DRM_DEBUG_DRIVER("... tv_disable   : %d\n", nouveau_tv_disable);
	DRM_DEBUG_DRIVER("... ignorelid    : %d\n", nouveau_ignorelid);
	DRM_DEBUG_DRIVER("... duallink     : %d\n", nouveau_duallink);
	DRM_DEBUG_DRIVER("... nofbaccel    : %d\n", nouveau_nofbaccel);
	DRM_DEBUG_DRIVER("... config       : %s\n", nouveau_config);
	DRM_DEBUG_DRIVER("... debug        : %s\n", nouveau_debug);
	DRM_DEBUG_DRIVER("... noaccel      : %d\n", nouveau_noaccel);
	DRM_DEBUG_DRIVER("... modeset      : %d\n", nouveau_modeset);
	DRM_DEBUG_DRIVER("... runpm        : %d\n", nouveau_runtime_pm);
	DRM_DEBUG_DRIVER("... vram_pushbuf : %d\n", nouveau_vram_pushbuf);
	DRM_DEBUG_DRIVER("... hdmimhz      : %d\n", nouveau_hdmimhz);
}
1260
/* Power-management callbacks: freeze/poweroff share the suspend-only
 * path (no PCI D-state change), thaw/restore share the resume paths.
 */
static const struct dev_pm_ops nouveau_pm_ops = {
	.suspend = nouveau_pmops_suspend,
	.resume = nouveau_pmops_resume,
	.freeze = nouveau_pmops_freeze,
	.thaw = nouveau_pmops_thaw,
	.poweroff = nouveau_pmops_freeze,
	.restore = nouveau_pmops_resume,
	.runtime_suspend = nouveau_pmops_runtime_suspend,
	.runtime_resume = nouveau_pmops_runtime_resume,
	.runtime_idle = nouveau_pmops_runtime_idle,
};
1272
/* PCI driver registration record; registered from nouveau_drm_init(). */
static struct pci_driver
nouveau_drm_pci_driver = {
	.name = "nouveau",
	.id_table = nouveau_drm_pci_table,
	.probe = nouveau_drm_probe,
	.remove = nouveau_drm_remove,
	.driver.pm = &nouveau_pm_ops,
};
1281
/* Create a DRM device on top of a Tegra platform device.
 *
 * @func:    Tegra-specific nvkm hooks
 * @pdev:    the platform device
 * @pdevice: out: the created nvkm device (deleted again on failure)
 *
 * Returns the new drm_device, or an ERR_PTR on failure.
 */
struct drm_device *
nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
			       struct platform_device *pdev,
			       struct nvkm_device **pdevice)
{
	struct drm_device *drm;
	int err;

	err = nvkm_device_tegra_new(func, pdev, nouveau_config, nouveau_debug,
				    true, true, ~0ULL, pdevice);
	if (err)
		goto err_free;

	drm = drm_dev_alloc(&driver_platform, &pdev->dev);
	if (IS_ERR(drm)) {
		err = PTR_ERR(drm);
		goto err_free;
	}

	err = nouveau_drm_device_init(drm);
	if (err)
		goto err_put;

	platform_set_drvdata(pdev, drm);

	return drm;

err_put:
	drm_dev_put(drm);
err_free:
	nvkm_device_del(pdevice);

	return ERR_PTR(err);
}
1316
/* Module init: specialise the driver templates, resolve the "modeset"
 * auto value (disabled when the VGA console is forced), then register the
 * platform driver, ACPI DSM handler, backlight hooks and finally the PCI
 * driver.  Returns 0 when modesetting is disabled or no PCI support is
 * built in.
 */
static int __init
nouveau_drm_init(void)
{
	driver_pci = driver_stub;
	driver_platform = driver_stub;

	nouveau_display_options();

	if (nouveau_modeset == -1) {
		if (vgacon_text_force())
			nouveau_modeset = 0;
	}

	if (!nouveau_modeset)
		return 0;

#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
	platform_driver_register(&nouveau_platform_driver);
#endif

	nouveau_register_dsm_handler();
	nouveau_backlight_ctor();

#ifdef CONFIG_PCI
	return pci_register_driver(&nouveau_drm_pci_driver);
#else
	return 0;
#endif
}
1346
/* Module exit: mirror nouveau_drm_init() in reverse; a no-op when the
 * driver never loaded (modeset disabled).  With SVM enabled, wait for any
 * outstanding MMU-notifier callbacks before the module text goes away.
 */
static void __exit
nouveau_drm_exit(void)
{
	if (!nouveau_modeset)
		return;

#ifdef CONFIG_PCI
	pci_unregister_driver(&nouveau_drm_pci_driver);
#endif
	nouveau_backlight_dtor();
	nouveau_unregister_dsm_handler();

#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
	platform_driver_unregister(&nouveau_platform_driver);
#endif
	if (IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM))
		mmu_notifier_synchronize();
}
1365
/* Module entry points and metadata. */
module_init(nouveau_drm_init);
module_exit(nouveau_drm_exit);

MODULE_DEVICE_TABLE(pci, nouveau_drm_pci_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
1373