1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/console.h>
26#include <linux/delay.h>
27#include <linux/module.h>
28#include <linux/pci.h>
29#include <linux/pm_runtime.h>
30#include <linux/vga_switcheroo.h>
31#include <linux/mmu_notifier.h>
32
33#include <drm/drm_aperture.h>
34#include <drm/drm_crtc_helper.h>
35#include <drm/drm_gem_ttm_helper.h>
36#include <drm/drm_ioctl.h>
37#include <drm/drm_vblank.h>
38
39#include <core/gpuobj.h>
40#include <core/option.h>
41#include <core/pci.h>
42#include <core/tegra.h>
43
44#include <nvif/driver.h>
45#include <nvif/fifo.h>
46#include <nvif/push006c.h>
47#include <nvif/user.h>
48
49#include <nvif/class.h>
50#include <nvif/cl0002.h>
51#include <nvif/cla06f.h>
52
53#include "nouveau_drv.h"
54#include "nouveau_dma.h"
55#include "nouveau_ttm.h"
56#include "nouveau_gem.h"
57#include "nouveau_vga.h"
58#include "nouveau_led.h"
59#include "nouveau_hwmon.h"
60#include "nouveau_acpi.h"
61#include "nouveau_bios.h"
62#include "nouveau_ioctl.h"
63#include "nouveau_abi16.h"
64#include "nouveau_fbcon.h"
65#include "nouveau_fence.h"
66#include "nouveau_debugfs.h"
67#include "nouveau_usif.h"
68#include "nouveau_connector.h"
69#include "nouveau_platform.h"
70#include "nouveau_svm.h"
71#include "nouveau_dmem.h"
72
73MODULE_PARM_DESC(config, "option string to pass to driver core");
74static char *nouveau_config;
75module_param_named(config, nouveau_config, charp, 0400);
76
77MODULE_PARM_DESC(debug, "debug string to pass to driver core");
78static char *nouveau_debug;
79module_param_named(debug, nouveau_debug, charp, 0400);
80
81MODULE_PARM_DESC(noaccel, "disable kernel/abi16 acceleration");
82static int nouveau_noaccel = 0;
83module_param_named(noaccel, nouveau_noaccel, int, 0400);
84
85MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
86 "0 = disabled, 1 = enabled, 2 = headless)");
87int nouveau_modeset = -1;
88module_param_named(modeset, nouveau_modeset, int, 0400);
89
90MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
91static int nouveau_atomic = 0;
92module_param_named(atomic, nouveau_atomic, int, 0400);
93
94MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
95static int nouveau_runtime_pm = -1;
96module_param_named(runpm, nouveau_runtime_pm, int, 0400);
97
98static struct drm_driver driver_stub;
99static struct drm_driver driver_pci;
100static struct drm_driver driver_platform;
101
102static u64
103nouveau_pci_name(struct pci_dev *pdev)
104{
105 u64 name = (u64)pci_domain_nr(pdev->bus) << 32;
106 name |= pdev->bus->number << 16;
107 name |= PCI_SLOT(pdev->devfn) << 8;
108 return name | PCI_FUNC(pdev->devfn);
109}
110
111static u64
112nouveau_platform_name(struct platform_device *platformdev)
113{
114 return platformdev->id;
115}
116
117static u64
118nouveau_name(struct drm_device *dev)
119{
120 if (dev_is_pci(dev->dev))
121 return nouveau_pci_name(to_pci_dev(dev->dev));
122 else
123 return nouveau_platform_name(to_platform_device(dev->dev));
124}
125
126static inline bool
127nouveau_cli_work_ready(struct dma_fence *fence)
128{
129 if (!dma_fence_is_signaled(fence))
130 return false;
131 dma_fence_put(fence);
132 return true;
133}
134
135static void
136nouveau_cli_work(struct work_struct *w)
137{
138 struct nouveau_cli *cli = container_of(w, typeof(*cli), work);
139 struct nouveau_cli_work *work, *wtmp;
140 mutex_lock(&cli->lock);
141 list_for_each_entry_safe(work, wtmp, &cli->worker, head) {
142 if (!work->fence || nouveau_cli_work_ready(work->fence)) {
143 list_del(&work->head);
144 work->func(work);
145 }
146 }
147 mutex_unlock(&cli->lock);
148}
149
150static void
151nouveau_cli_work_fence(struct dma_fence *fence, struct dma_fence_cb *cb)
152{
153 struct nouveau_cli_work *work = container_of(cb, typeof(*work), cb);
154 schedule_work(&work->cli->work);
155}
156
void
nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
		       struct nouveau_cli_work *work)
{
	/* Reference is dropped by nouveau_cli_work_ready() once the fence
	 * has signalled and the work item has been executed.
	 */
	work->fence = dma_fence_get(fence);
	work->cli = cli;
	mutex_lock(&cli->lock);
	list_add_tail(&work->head, &cli->worker);
	/* If the fence has already signalled, the callback isn't installed;
	 * invoke it directly so the worker still gets scheduled.
	 */
	if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
		nouveau_cli_work_fence(fence, &work->cb);
	mutex_unlock(&cli->lock);
}
169
static void
nouveau_cli_fini(struct nouveau_cli *cli)
{
	/* All our channels are dead now, which means all the fences they
	 * own are signalled, and all callback functions have been called.
	 *
	 * So, after flushing the workqueue, there should be nothing left
	 * on the worker list.
	 */
	flush_work(&cli->work);
	WARN_ON(!list_empty(&cli->worker));

	usif_client_fini(cli);
	nouveau_vmm_fini(&cli->svm);
	nouveau_vmm_fini(&cli->vmm);
	nvif_mmu_dtor(&cli->mmu);
	nvif_device_dtor(&cli->device);
	/* Client destruction is serialised against the master client. */
	mutex_lock(&cli->drm->master.lock);
	nvif_client_dtor(&cli->base);
	mutex_unlock(&cli->drm->master.lock);
}
190
/* Initialise a client: allocate the NVIF client/device objects and probe
 * for the newest supported MMU, VMM and MEM classes.  On any failure the
 * partially-constructed client is torn down before returning.
 */
static int
nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
		 struct nouveau_cli *cli)
{
	/* Class probe lists, ordered newest (most preferred) first. */
	static const struct nvif_mclass
	mems[] = {
		{ NVIF_CLASS_MEM_GF100, -1 },
		{ NVIF_CLASS_MEM_NV50 , -1 },
		{ NVIF_CLASS_MEM_NV04 , -1 },
		{}
	};
	static const struct nvif_mclass
	mmus[] = {
		{ NVIF_CLASS_MMU_GF100, -1 },
		{ NVIF_CLASS_MMU_NV50 , -1 },
		{ NVIF_CLASS_MMU_NV04 , -1 },
		{}
	};
	static const struct nvif_mclass
	vmms[] = {
		{ NVIF_CLASS_VMM_GP100, -1 },
		{ NVIF_CLASS_VMM_GM200, -1 },
		{ NVIF_CLASS_VMM_GF100, -1 },
		{ NVIF_CLASS_VMM_NV50 , -1 },
		{ NVIF_CLASS_VMM_NV04 , -1 },
		{}
	};
	u64 device = nouveau_name(drm->dev);
	int ret;

	snprintf(cli->name, sizeof(cli->name), "%s", sname);
	cli->drm = drm;
	mutex_init(&cli->mutex);
	usif_client_init(cli);

	INIT_WORK(&cli->work, nouveau_cli_work);
	INIT_LIST_HEAD(&cli->worker);
	mutex_init(&cli->lock);

	/* The master client is created directly via the driver interface;
	 * all other clients hang off the master, serialised by its lock.
	 */
	if (cli == &drm->master) {
		ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
				       cli->name, device, &cli->base);
	} else {
		mutex_lock(&drm->master.lock);
		ret = nvif_client_ctor(&drm->master.base, cli->name, device,
				       &cli->base);
		mutex_unlock(&drm->master.lock);
	}
	if (ret) {
		NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);
		goto done;
	}

	ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE,
			       &(struct nv_device_v0) {
					.device = ~0,
					.priv = true,
			       }, sizeof(struct nv_device_v0),
			       &cli->device);
	if (ret) {
		NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);
		goto done;
	}

	/* Select and instantiate the newest supported MMU class. */
	ret = nvif_mclass(&cli->device.object, mmus);
	if (ret < 0) {
		NV_PRINTK(err, cli, "No supported MMU class\n");
		goto done;
	}

	ret = nvif_mmu_ctor(&cli->device.object, "drmMmu", mmus[ret].oclass,
			    &cli->mmu);
	if (ret) {
		NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
		goto done;
	}

	/* Likewise for the VMM... */
	ret = nvif_mclass(&cli->mmu.object, vmms);
	if (ret < 0) {
		NV_PRINTK(err, cli, "No supported VMM class\n");
		goto done;
	}

	ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
	if (ret) {
		NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret);
		goto done;
	}

	/* ...and the memory class, which is only recorded, not allocated. */
	ret = nvif_mclass(&cli->mmu.object, mems);
	if (ret < 0) {
		NV_PRINTK(err, cli, "No supported MEM class\n");
		goto done;
	}

	cli->mem = &mems[ret];
	return 0;
done:
	if (ret)
		nouveau_cli_fini(cli);
	return ret;
}
293
static void
nouveau_accel_ce_fini(struct nouveau_drm *drm)
{
	/* Wait for outstanding work on the copy channel before teardown. */
	nouveau_channel_idle(drm->cechan);
	nvif_object_dtor(&drm->ttm.copy);
	nouveau_channel_del(&drm->cechan);
}
301
static void
nouveau_accel_ce_init(struct nouveau_drm *drm)
{
	struct nvif_device *device = &drm->client.device;
	int ret = 0;

	/* Allocate channel that has access to a (preferably async) copy
	 * engine, to use for TTM buffer moves.
	 */
	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		ret = nouveau_channel_new(drm, device,
					  nvif_fifo_runlist_ce(device), 0,
					  true, &drm->cechan);
	} else
	if (device->info.chipset >= 0xa3 &&
	    device->info.chipset != 0xaa &&
	    device->info.chipset != 0xac) {
		/* Prior to Kepler, there's only a single runlist, so all
		 * engines can be accessed from any channel.
		 *
		 * We still want to use a separate channel though.
		 */
		ret = nouveau_channel_new(drm, device, NvDmaFB, NvDmaTT, false,
					  &drm->cechan);
	}

	/* Non-fatal: buffer moves fall back to a non-CE path. */
	if (ret)
		NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
}
331
static void
nouveau_accel_gr_fini(struct nouveau_drm *drm)
{
	/* Drain the kernel channel, then destroy its notifier and the
	 * channel itself.
	 */
	nouveau_channel_idle(drm->channel);
	nvif_object_dtor(&drm->ntfy);
	nvkm_gpuobj_del(&drm->notify);
	nouveau_channel_del(&drm->channel);
}
340
static void
nouveau_accel_gr_init(struct nouveau_drm *drm)
{
	struct nvif_device *device = &drm->client.device;
	u32 arg0, arg1;
	int ret;

	/* No graphics channel on Ampere and newer. */
	if (device->info.family >= NV_DEVICE_INFO_V0_AMPERE)
		return;

	/* Allocate channel that has access to the graphics engine. */
	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		arg0 = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
		arg1 = 1;
	} else {
		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	}

	ret = nouveau_channel_new(drm, device, arg0, arg1, false,
				  &drm->channel);
	if (ret) {
		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
		nouveau_accel_gr_fini(drm);
		return;
	}

	/* A SW class is used on pre-NV50 HW to assist with handling the
	 * synchronisation of page flips, as well as to implement fences
	 * on TNT/TNT2 HW that lacks any kind of support in host.
	 */
	if (!drm->channel->nvsw.client && device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nvif_object_ctor(&drm->channel->user, "drmNvsw",
				       NVDRM_NVSW, nouveau_abi16_swclass(drm),
				       NULL, 0, &drm->channel->nvsw);
		if (ret == 0) {
			/* Bind the SW class to a subchannel. */
			struct nvif_push *push = drm->channel->chan.push;
			ret = PUSH_WAIT(push, 2);
			if (ret == 0)
				PUSH_NVSQ(push, NV_SW, 0x0000, drm->channel->nvsw.handle);
		}

		if (ret) {
			NV_ERROR(drm, "failed to allocate sw class, %d\n", ret);
			nouveau_accel_gr_fini(drm);
			return;
		}
	}

	/* NvMemoryToMemoryFormat requires a notifier ctxdma for some reason,
	 * even if notification is never requested, so, allocate a ctxdma on
	 * all older chipsets, and map the notifier into it.
	 */
	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		ret = nvkm_gpuobj_new(nvxx_device(device), 32, 0, false, NULL,
				      &drm->notify);
		if (ret) {
			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
			nouveau_accel_gr_fini(drm);
			return;
		}

		ret = nvif_object_ctor(&drm->channel->user, "drmM2mfNtfy",
				       NvNotify0, NV_DMA_IN_MEMORY,
				       &(struct nv_dma_v0) {
						.target = NV_DMA_V0_TARGET_VRAM,
						.access = NV_DMA_V0_ACCESS_RDWR,
						.start = drm->notify->addr,
						.limit = drm->notify->addr + 31
				       }, sizeof(struct nv_dma_v0),
				       &drm->ntfy);
		if (ret) {
			nouveau_accel_gr_fini(drm);
			return;
		}
	}
}
418
static void
nouveau_accel_fini(struct nouveau_drm *drm)
{
	/* Tear down acceleration state; safe to call even if init only
	 * partially succeeded (the sub-fini functions tolerate NULLs).
	 */
	nouveau_accel_ce_fini(drm);
	nouveau_accel_gr_fini(drm);
	if (drm->fence)
		nouveau_fence(drm)->dtor(drm);
}
427
static void
nouveau_accel_init(struct nouveau_drm *drm)
{
	struct nvif_device *device = &drm->client.device;
	struct nvif_sclass *sclass;
	int ret, i, n;

	if (nouveau_noaccel)
		return;

	/* Initialise global support for channels. */
	ret = nouveau_channels_init(drm);
	if (ret)
		return;

	/* Pick a fence implementation by scanning the channel classes the
	 * device exposes; newer matches overwrite earlier ones.
	 */
	ret = n = nvif_object_sclass_get(&device->object, &sclass);
	if (ret < 0)
		return;

	for (ret = -ENOSYS, i = 0; i < n; i++) {
		switch (sclass[i].oclass) {
		case NV03_CHANNEL_DMA:
			ret = nv04_fence_create(drm);
			break;
		case NV10_CHANNEL_DMA:
			ret = nv10_fence_create(drm);
			break;
		case NV17_CHANNEL_DMA:
		case NV40_CHANNEL_DMA:
			ret = nv17_fence_create(drm);
			break;
		case NV50_CHANNEL_GPFIFO:
			ret = nv50_fence_create(drm);
			break;
		case G82_CHANNEL_GPFIFO:
			ret = nv84_fence_create(drm);
			break;
		case FERMI_CHANNEL_GPFIFO:
		case KEPLER_CHANNEL_GPFIFO_A:
		case KEPLER_CHANNEL_GPFIFO_B:
		case MAXWELL_CHANNEL_GPFIFO_A:
		case PASCAL_CHANNEL_GPFIFO_A:
		case VOLTA_CHANNEL_GPFIFO_A:
		case TURING_CHANNEL_GPFIFO_A:
		case AMPERE_CHANNEL_GPFIFO_B:
			ret = nvc0_fence_create(drm);
			break;
		default:
			break;
		}
	}

	nvif_object_sclass_put(&sclass);
	if (ret) {
		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	/* Volta and newer require a usermode object for channel kickoff. */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_VOLTA) {
		ret = nvif_user_ctor(device, "drmUsermode");
		if (ret)
			return;
	}

	/* Allocate channels used for graphics and buffer copies. */
	nouveau_accel_gr_init(drm);
	nouveau_accel_ce_init(drm);

	/* Initialise accelerated TTM buffer moves. */
	nouveau_bo_move_init(drm);
}
504
505static void __printf(2, 3)
506nouveau_drm_errorf(struct nvif_object *object, const char *fmt, ...)
507{
508 struct nouveau_drm *drm = container_of(object->parent, typeof(*drm), parent);
509 struct va_format vaf;
510 va_list va;
511
512 va_start(va, fmt);
513 vaf.fmt = fmt;
514 vaf.va = &va;
515 NV_ERROR(drm, "%pV", &vaf);
516 va_end(va);
517}
518
519static void __printf(2, 3)
520nouveau_drm_debugf(struct nvif_object *object, const char *fmt, ...)
521{
522 struct nouveau_drm *drm = container_of(object->parent, typeof(*drm), parent);
523 struct va_format vaf;
524 va_list va;
525
526 va_start(va, fmt);
527 vaf.fmt = fmt;
528 vaf.va = &va;
529 NV_DEBUG(drm, "%pV", &vaf);
530 va_end(va);
531}
532
/* Hooks NVIF's message callbacks into nouveau's DRM logging. */
static const struct nvif_parent_func
nouveau_parent = {
	.debugf = nouveau_drm_debugf,
	.errorf = nouveau_drm_errorf,
};
538
/* Bring up all per-device driver state: clients, VGA, TTM, BIOS,
 * acceleration, display and runtime-PM.  Errors unwind in strict reverse
 * order through the goto chain at the bottom.
 */
static int
nouveau_drm_device_init(struct drm_device *dev)
{
	struct nouveau_drm *drm;
	int ret;

	if (!(drm = kzalloc(sizeof(*drm), GFP_KERNEL)))
		return -ENOMEM;
	dev->dev_private = drm;
	drm->dev = dev;

	nvif_parent_ctor(&nouveau_parent, &drm->parent);
	drm->master.base.object.parent = &drm->parent;

	ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
	if (ret)
		goto fail_alloc;

	ret = nouveau_cli_init(drm, "DRM", &drm->client);
	if (ret)
		goto fail_master;

	nvxx_client(&drm->client.base)->debug =
		nvkm_dbgopt(nouveau_debug, "DRM");

	INIT_LIST_HEAD(&drm->clients);
	spin_lock_init(&drm->tile.lock);

	/* workaround an odd issue on nvc1 by disabling the device's
	 * nosnoop capability.  hopefully won't cause issues until a
	 * better fix is found - assuming there is one...
	 */
	if (drm->client.device.info.chipset == 0xc1)
		nvif_mask(&drm->client.device.object, 0x00088080, 0x00000800, 0x00000000);

	nouveau_vga_init(drm);

	ret = nouveau_ttm_init(drm);
	if (ret)
		goto fail_ttm;

	ret = nouveau_bios_init(dev);
	if (ret)
		goto fail_bios;

	nouveau_accel_init(drm);

	ret = nouveau_display_create(dev);
	if (ret)
		goto fail_dispctor;

	if (dev->mode_config.num_crtc) {
		ret = nouveau_display_init(dev, false, false);
		if (ret)
			goto fail_dispinit;
	}

	nouveau_debugfs_init(drm);
	nouveau_hwmon_init(dev);
	nouveau_svm_init(drm);
	nouveau_dmem_init(drm);
	nouveau_fbcon_init(dev);
	nouveau_led_init(dev);

	/* Enable runtime PM with a 5s autosuspend delay where supported. */
	if (nouveau_pmops_runtime()) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put(dev->dev);
	}

	return 0;

fail_dispinit:
	nouveau_display_destroy(dev);
fail_dispctor:
	nouveau_accel_fini(drm);
	nouveau_bios_takedown(dev);
fail_bios:
	nouveau_ttm_fini(drm);
fail_ttm:
	nouveau_vga_fini(drm);
	nouveau_cli_fini(&drm->client);
fail_master:
	nouveau_cli_fini(&drm->master);
fail_alloc:
	nvif_parent_dtor(&drm->parent);
	kfree(drm);
	return ret;
}
631
/* Tear down everything nouveau_drm_device_init() created, in reverse. */
static void
nouveau_drm_device_fini(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	/* Wake the device and block further runtime suspends for teardown. */
	if (nouveau_pmops_runtime()) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	nouveau_led_fini(dev);
	nouveau_fbcon_fini(dev);
	nouveau_dmem_fini(drm);
	nouveau_svm_fini(drm);
	nouveau_hwmon_fini(dev);
	nouveau_debugfs_fini(drm);

	if (dev->mode_config.num_crtc)
		nouveau_display_fini(dev, false, false);
	nouveau_display_destroy(dev);

	nouveau_accel_fini(drm);
	nouveau_bios_takedown(dev);

	nouveau_ttm_fini(drm);
	nouveau_vga_fini(drm);

	nouveau_cli_fini(&drm->client);
	nouveau_cli_fini(&drm->master);
	nvif_parent_dtor(&drm->parent);
	kfree(drm);
}
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705static void quirk_broken_nv_runpm(struct pci_dev *pdev)
706{
707 struct drm_device *dev = pci_get_drvdata(pdev);
708 struct nouveau_drm *drm = nouveau_drm(dev);
709 struct pci_dev *bridge = pci_upstream_bridge(pdev);
710
711 if (!bridge || bridge->vendor != PCI_VENDOR_ID_INTEL)
712 return;
713
714 switch (bridge->device) {
715 case 0x1901:
716 drm->old_pm_cap = pdev->pm_cap;
717 pdev->pm_cap = 0;
718 NV_INFO(drm, "Disabling PCI power management to avoid bug\n");
719 break;
720 }
721}
722
static int nouveau_drm_probe(struct pci_dev *pdev,
			     const struct pci_device_id *pent)
{
	struct nvkm_device *device;
	struct drm_device *drm_dev;
	int ret;

	if (vga_switcheroo_client_probe_defer(pdev))
		return -EPROBE_DEFER;

	/* We need to check that the chipset is supported before booting
	 * fbdev off the hardware, as there's no way to put it back.
	 */
	ret = nvkm_device_pci_new(pdev, nouveau_config, "error",
				  true, false, 0, &device);
	if (ret)
		return ret;

	nvkm_device_del(&device);

	/* Remove conflicting generic framebuffer drivers. */
	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver_pci);
	if (ret)
		return ret;

	/* Now create the real device, with full detection enabled. */
	ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
				  true, true, ~0ULL, &device);
	if (ret)
		return ret;

	pci_set_master(pdev);

	if (nouveau_atomic)
		driver_pci.driver_features |= DRIVER_ATOMIC;

	drm_dev = drm_dev_alloc(&driver_pci, &pdev->dev);
	if (IS_ERR(drm_dev)) {
		ret = PTR_ERR(drm_dev);
		goto fail_nvkm;
	}

	ret = pci_enable_device(pdev);
	if (ret)
		goto fail_drm;

	pci_set_drvdata(pdev, drm_dev);

	ret = nouveau_drm_device_init(drm_dev);
	if (ret)
		goto fail_pci;

	ret = drm_dev_register(drm_dev, pent->driver_data);
	if (ret)
		goto fail_drm_dev_init;

	/* Must run after drvdata is set, as the quirk reads it. */
	quirk_broken_nv_runpm(pdev);
	return 0;

fail_drm_dev_init:
	nouveau_drm_device_fini(drm_dev);
fail_pci:
	pci_disable_device(pdev);
fail_drm:
	drm_dev_put(drm_dev);
fail_nvkm:
	nvkm_device_del(&device);
	return ret;
}
791
void
nouveau_drm_device_remove(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_client *client;
	struct nvkm_device *device;

	drm_dev_unregister(dev);

	/* Look up the nvkm device before fini destroys the client. */
	client = nvxx_client(&drm->client.base);
	device = nvkm_device_find(client->device);

	nouveau_drm_device_fini(dev);
	drm_dev_put(dev);
	nvkm_device_del(&device);
}
808
static void
nouveau_drm_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct nouveau_drm *drm = nouveau_drm(dev);

	/* Revert the PM-capability workaround applied by
	 * quirk_broken_nv_runpm() before handing the device back.
	 */
	if (drm->old_pm_cap)
		pdev->pm_cap = drm->old_pm_cap;
	nouveau_drm_device_remove(dev);
	pci_disable_device(pdev);
}
821
/* Common suspend path shared by system sleep and runtime PM.
 * @runtime distinguishes runtime suspend from system suspend for the
 * display code.  On failure, the display is resumed before returning.
 */
static int
nouveau_do_suspend(struct drm_device *dev, bool runtime)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct ttm_resource_manager *man;
	int ret;

	nouveau_svm_suspend(drm);
	nouveau_dmem_suspend(drm);
	nouveau_led_suspend(dev);

	if (dev->mode_config.num_crtc) {
		NV_DEBUG(drm, "suspending console...\n");
		nouveau_fbcon_set_suspend(dev, 1);
		NV_DEBUG(drm, "suspending display...\n");
		ret = nouveau_display_suspend(dev, runtime);
		if (ret)
			return ret;
	}

	NV_DEBUG(drm, "evicting buffers...\n");
	/* Evict everything out of VRAM before power is cut. */
	man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_resource_manager_evict_all(&drm->ttm.bdev, man);

	NV_DEBUG(drm, "waiting for kernel channels to go idle...\n");
	if (drm->cechan) {
		ret = nouveau_channel_idle(drm->cechan);
		if (ret)
			goto fail_display;
	}

	if (drm->channel) {
		ret = nouveau_channel_idle(drm->channel);
		if (ret)
			goto fail_display;
	}

	NV_DEBUG(drm, "suspending fence...\n");
	if (drm->fence && nouveau_fence(drm)->suspend) {
		if (!nouveau_fence(drm)->suspend(drm)) {
			ret = -ENOMEM;
			goto fail_display;
		}
	}

	NV_DEBUG(drm, "suspending object tree...\n");
	ret = nvif_client_suspend(&drm->master.base);
	if (ret)
		goto fail_client;

	return 0;

fail_client:
	if (drm->fence && nouveau_fence(drm)->resume)
		nouveau_fence(drm)->resume(drm);

fail_display:
	if (dev->mode_config.num_crtc) {
		NV_DEBUG(drm, "resuming display...\n");
		nouveau_display_resume(dev, runtime);
	}
	return ret;
}
886
/* Common resume path shared by system sleep and runtime PM; reverses
 * nouveau_do_suspend().
 */
static int
nouveau_do_resume(struct drm_device *dev, bool runtime)
{
	int ret = 0;
	struct nouveau_drm *drm = nouveau_drm(dev);

	NV_DEBUG(drm, "resuming object tree...\n");
	ret = nvif_client_resume(&drm->master.base);
	if (ret) {
		NV_ERROR(drm, "Client resume failed with error: %d\n", ret);
		return ret;
	}

	NV_DEBUG(drm, "resuming fence...\n");
	if (drm->fence && nouveau_fence(drm)->resume)
		nouveau_fence(drm)->resume(drm);

	nouveau_run_vbios_init(dev);

	if (dev->mode_config.num_crtc) {
		NV_DEBUG(drm, "resuming display...\n");
		nouveau_display_resume(dev, runtime);
		NV_DEBUG(drm, "resuming console...\n");
		nouveau_fbcon_set_suspend(dev, 0);
	}

	nouveau_led_resume(dev);
	nouveau_dmem_resume(drm);
	nouveau_svm_resume(drm);
	return 0;
}
918
int
nouveau_pmops_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int ret;

	/* Nothing to do if the switcheroo already powered us off. */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
	    drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
		return 0;

	ret = nouveau_do_suspend(drm_dev, false);
	if (ret)
		return ret;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	udelay(200);
	return 0;
}
940
int
nouveau_pmops_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int ret;

	/* Nothing to do if the switcheroo has the device powered off. */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
	    drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
		return 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	pci_set_master(pdev);

	ret = nouveau_do_resume(drm_dev, false);

	/* Monitors may have been connected / disconnected during suspend */
	nouveau_display_hpd_resume(drm_dev);

	return ret;
}
966
967static int
968nouveau_pmops_freeze(struct device *dev)
969{
970 struct pci_dev *pdev = to_pci_dev(dev);
971 struct drm_device *drm_dev = pci_get_drvdata(pdev);
972 return nouveau_do_suspend(drm_dev, false);
973}
974
975static int
976nouveau_pmops_thaw(struct device *dev)
977{
978 struct pci_dev *pdev = to_pci_dev(dev);
979 struct drm_device *drm_dev = pci_get_drvdata(pdev);
980 return nouveau_do_resume(drm_dev, false);
981}
982
983bool
984nouveau_pmops_runtime(void)
985{
986 if (nouveau_runtime_pm == -1)
987 return nouveau_is_optimus() || nouveau_is_v1_dsm();
988 return nouveau_runtime_pm == 1;
989}
990
static int
nouveau_pmops_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int ret;

	if (!nouveau_pmops_runtime()) {
		pm_runtime_forbid(dev);
		return -EBUSY;
	}

	nouveau_switcheroo_optimus_dsm();
	ret = nouveau_do_suspend(drm_dev, true);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	/* Hotplug events can't be delivered while the GPU is off. */
	pci_ignore_hotplug(pdev);
	pci_set_power_state(pdev, PCI_D3cold);
	drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
	return ret;
}
1012
static int
nouveau_pmops_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct nouveau_drm *drm = nouveau_drm(drm_dev);
	struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
	int ret;

	if (!nouveau_pmops_runtime()) {
		pm_runtime_forbid(dev);
		return -EBUSY;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	pci_set_master(pdev);

	ret = nouveau_do_resume(drm_dev, true);
	if (ret) {
		NV_ERROR(drm, "resume failed with: %d\n", ret);
		return ret;
	}

	/* do magic */
	nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;

	/* Monitors may have been connected / disconnected during suspend */
	nouveau_display_hpd_resume(drm_dev);

	return ret;
}
1049
static int
nouveau_pmops_runtime_idle(struct device *dev)
{
	if (!nouveau_pmops_runtime()) {
		pm_runtime_forbid(dev);
		return -EBUSY;
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_autosuspend(dev);
	/* Non-zero tells the PM core we scheduled the suspend ourselves. */
	return 1;
}
1063
/* DRM file-open hook: create a per-file nouveau client named after the
 * opening task, and register it on the device's client list.
 */
static int
nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli;
	char name[32], tmpname[TASK_COMM_LEN];
	int ret;

	/* need to bring up power immediately if opening device */
	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_autosuspend(dev->dev);
		return ret;
	}

	get_task_comm(tmpname, current);
	snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));

	if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) {
		ret = -ENOMEM;
		goto done;
	}

	ret = nouveau_cli_init(drm, name, cli);
	if (ret)
		goto done;

	fpriv->driver_priv = cli;

	mutex_lock(&drm->client.mutex);
	list_add(&cli->head, &drm->clients);
	mutex_unlock(&drm->client.mutex);

done:
	/* cli is NULL here if kzalloc failed, so the guard is needed. */
	if (ret && cli) {
		nouveau_cli_fini(cli);
		kfree(cli);
	}

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}
1107
/* DRM file-close hook: tear down the file's abi16 state and client. */
static void
nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
{
	struct nouveau_cli *cli = nouveau_cli(fpriv);
	struct nouveau_drm *drm = nouveau_drm(dev);

	/* Power up for teardown of hardware-backed state. */
	pm_runtime_get_sync(dev->dev);

	mutex_lock(&cli->mutex);
	if (cli->abi16)
		nouveau_abi16_fini(cli->abi16);
	mutex_unlock(&cli->mutex);

	mutex_lock(&drm->client.mutex);
	list_del(&cli->head);
	mutex_unlock(&drm->client.mutex);

	nouveau_cli_fini(cli);
	kfree(cli);
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
1130
/* Driver-private ioctl table.  All entries allow render nodes except the
 * deprecated SETPARAM, which is wired to drm_invalid_op and only ever
 * returns an error to legacy userspace.
 */
static const struct drm_ioctl_desc
nouveau_ioctls[] = {
	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_INIT, nouveau_svmm_init, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_BIND, nouveau_svmm_bind, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_RENDER_ALLOW),
};
1148
long
nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct drm_file *filp = file->private_data;
	struct drm_device *dev = filp->minor->dev;
	long ret;

	/* Keep the device powered for the duration of the ioctl. */
	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_autosuspend(dev->dev);
		return ret;
	}

	switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
	case DRM_NOUVEAU_NVIF:
		/* NVIF commands are routed directly to usif, bypassing the
		 * DRM core's ioctl dispatch.
		 */
		ret = usif_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd));
		break;
	default:
		ret = drm_ioctl(file, cmd, arg);
		break;
	}

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}
1175
/* File operations; ioctls go through nouveau_drm_ioctl() so runtime PM
 * references are taken around each call.
 */
static const struct file_operations
nouveau_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = nouveau_drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = nouveau_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
1190
/* Template drm_driver; copied into driver_pci / driver_platform at module
 * init so each bus variant can be tweaked independently (e.g. the atomic
 * feature flag added in nouveau_drm_probe()).
 */
static struct drm_driver
driver_stub = {
	.driver_features =
		DRIVER_GEM | DRIVER_MODESET | DRIVER_RENDER
#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
		| DRIVER_KMS_LEGACY_CONTEXT
#endif
	,

	.open = nouveau_drm_open,
	.postclose = nouveau_drm_postclose,
	.lastclose = nouveau_vga_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = nouveau_drm_debugfs_init,
#endif

	.ioctls = nouveau_ioctls,
	.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
	.fops = &nouveau_driver_fops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
	.gem_prime_mmap = drm_gem_prime_mmap,

	.dumb_create = nouveau_display_dumb_create,
	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
#ifdef GIT_REVISION
	.date = GIT_REVISION,
#else
	.date = DRIVER_DATE,
#endif
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
1231
/* Bind to any NVIDIA (or legacy NVIDIA/SGS) display-class PCI device. */
static struct pci_device_id
nouveau_drm_pci_table[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
		.class = PCI_BASE_CLASS_DISPLAY << 16,
		.class_mask  = 0xff << 16,
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
		.class = PCI_BASE_CLASS_DISPLAY << 16,
		.class_mask  = 0xff << 16,
	},
	{}
};
1246
/* Dump all module parameters to the debug log at load time. */
static void nouveau_display_options(void)
{
	DRM_DEBUG_DRIVER("Loading Nouveau with parameters:\n");

	DRM_DEBUG_DRIVER("... tv_disable   : %d\n", nouveau_tv_disable);
	DRM_DEBUG_DRIVER("... ignorelid    : %d\n", nouveau_ignorelid);
	DRM_DEBUG_DRIVER("... duallink     : %d\n", nouveau_duallink);
	DRM_DEBUG_DRIVER("... nofbaccel    : %d\n", nouveau_nofbaccel);
	DRM_DEBUG_DRIVER("... config       : %s\n", nouveau_config);
	DRM_DEBUG_DRIVER("... debug        : %s\n", nouveau_debug);
	DRM_DEBUG_DRIVER("... noaccel      : %d\n", nouveau_noaccel);
	DRM_DEBUG_DRIVER("... modeset      : %d\n", nouveau_modeset);
	DRM_DEBUG_DRIVER("... runpm        : %d\n", nouveau_runtime_pm);
	DRM_DEBUG_DRIVER("... vram_pushbuf : %d\n", nouveau_vram_pushbuf);
	DRM_DEBUG_DRIVER("... hdmimhz      : %d\n", nouveau_hdmimhz);
}
1263
/* Power-management entry points; freeze/poweroff and thaw/restore share
 * the suspend/resume implementations.
 */
static const struct dev_pm_ops nouveau_pm_ops = {
	.suspend = nouveau_pmops_suspend,
	.resume = nouveau_pmops_resume,
	.freeze = nouveau_pmops_freeze,
	.thaw = nouveau_pmops_thaw,
	.poweroff = nouveau_pmops_freeze,
	.restore = nouveau_pmops_resume,
	.runtime_suspend = nouveau_pmops_runtime_suspend,
	.runtime_resume = nouveau_pmops_runtime_resume,
	.runtime_idle = nouveau_pmops_runtime_idle,
};
1275
/* PCI driver glue binding probe/remove and PM ops to the device table. */
static struct pci_driver
nouveau_drm_pci_driver = {
	.name = "nouveau",
	.id_table = nouveau_drm_pci_table,
	.probe = nouveau_drm_probe,
	.remove = nouveau_drm_remove,
	.driver.pm = &nouveau_pm_ops,
};
1284
/* Platform-bus (Tegra) counterpart of nouveau_drm_probe(): create the
 * nvkm device, allocate a drm_device, and initialise the driver.
 * Returns the drm_device or an ERR_PTR; on failure *pdevice is freed.
 */
struct drm_device *
nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
			       struct platform_device *pdev,
			       struct nvkm_device **pdevice)
{
	struct drm_device *drm;
	int err;

	err = nvkm_device_tegra_new(func, pdev, nouveau_config, nouveau_debug,
				    true, true, ~0ULL, pdevice);
	if (err)
		goto err_free;

	drm = drm_dev_alloc(&driver_platform, &pdev->dev);
	if (IS_ERR(drm)) {
		err = PTR_ERR(drm);
		goto err_free;
	}

	err = nouveau_drm_device_init(drm);
	if (err)
		goto err_put;

	platform_set_drvdata(pdev, drm);

	return drm;

err_put:
	drm_dev_put(drm);
err_free:
	nvkm_device_del(pdevice);

	return ERR_PTR(err);
}
1319
static int __init
nouveau_drm_init(void)
{
	/* Both bus variants start as copies of the shared stub. */
	driver_pci = driver_stub;
	driver_platform = driver_stub;

	nouveau_display_options();

	/* modeset=-1 (auto): disable the driver if a VGA console was
	 * forced on the kernel command line (e.g. nomodeset).
	 */
	if (nouveau_modeset == -1) {
		if (vgacon_text_force())
			nouveau_modeset = 0;
	}

	if (!nouveau_modeset)
		return 0;

#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
	platform_driver_register(&nouveau_platform_driver);
#endif

	nouveau_register_dsm_handler();
	nouveau_backlight_ctor();

#ifdef CONFIG_PCI
	return pci_register_driver(&nouveau_drm_pci_driver);
#else
	return 0;
#endif
}
1349
static void __exit
nouveau_drm_exit(void)
{
	/* Nothing was registered if modeset was disabled at init. */
	if (!nouveau_modeset)
		return;

#ifdef CONFIG_PCI
	pci_unregister_driver(&nouveau_drm_pci_driver);
#endif
	nouveau_backlight_dtor();
	nouveau_unregister_dsm_handler();

#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
	platform_driver_unregister(&nouveau_platform_driver);
#endif
	/* Wait for any in-flight MMU-notifier callbacks (SVM) to finish. */
	if (IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM))
		mmu_notifier_synchronize();
}
1368
1369module_init(nouveau_drm_init);
1370module_exit(nouveau_drm_exit);
1371
1372MODULE_DEVICE_TABLE(pci, nouveau_drm_pci_table);
1373MODULE_AUTHOR(DRIVER_AUTHOR);
1374MODULE_DESCRIPTION(DRIVER_DESC);
1375MODULE_LICENSE("GPL and additional rights");
1376