/*
 * Copyright © 2016-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OF THE
 * SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_guc_ct.h"

#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG_DRIVER(...)	DRM_DEBUG_DRIVER(__VA_ARGS__)
#else
#define CT_DEBUG_DRIVER(...)	do { } while (0)
#endif

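/*
 * A ct_request tracks a host-to-GuC request that still awaits a response.
 * It lives on ct->pending_requests from just before the message is written
 * to the send buffer until the response (or an error) has been processed.
 */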
struct ct_request {
	struct list_head link;
	u32 fence;
	u32 status;
	u32 response_len;
	u32 *response_buf;
};

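/*
 * A ct_incoming_request holds a copy of an asynchronous GuC-to-host request
 * message. Such messages arrive in interrupt context, so they are queued on
 * ct->incoming_requests and handled later from a worker.
 */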
struct ct_incoming_request {
	struct list_head link;
	u32 msg[];
};

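/* Indices of the send/receive buffers within a channel */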
enum { CTB_SEND = 0, CTB_RECV = 1 };

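/* The host is the only channel owner we currently use */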
enum { CTB_OWNER_HOST = 0 };

static void ct_incoming_request_worker_func(struct work_struct *w);

/**
 * intel_guc_ct_init_early - Initialize CT state without requiring device access
 * @ct: pointer to CT struct
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
	/* we're using static channel owners */
	ct->host_channel.owner = CTB_OWNER_HOST;

	spin_lock_init(&ct->lock);
	INIT_LIST_HEAD(&ct->pending_requests);
	INIT_LIST_HEAD(&ct->incoming_requests);
	INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
}

static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
	return container_of(ct, struct intel_guc, ct);
}

static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
	switch (type) {
	case INTEL_GUC_CT_BUFFER_TYPE_SEND:
		return "SEND";
	case INTEL_GUC_CT_BUFFER_TYPE_RECV:
		return "RECV";
	default:
		return "<invalid>";
	}
}

static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
				    u32 cmds_addr, u32 size, u32 owner)
{
	CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
			desc, cmds_addr, size, owner);
	memset(desc, 0, sizeof(*desc));
	desc->addr = cmds_addr;
	desc->size = size;
	desc->owner = owner;
}

static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
	CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
			desc, desc->head, desc->tail);
	desc->head = 0;
	desc->tail = 0;
	desc->is_in_error = 0;
}

static int guc_action_register_ct_buffer(struct intel_guc *guc,
					 u32 desc_addr,
					 u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
		desc_addr,
		sizeof(struct guc_ct_buffer_desc),
		type
	};
	int err;

	/* Can't use generic send(), CT registration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: register %s buffer failed; err=%d\n",
			  guc_ct_buffer_type_to_str(type), err);
	return err;
}

static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
					   u32 owner,
					   u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
		owner,
		type
	};
	int err;

	/* Can't use generic send(), CT deregistration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
			  guc_ct_buffer_type_to_str(type), owner, err);
	return err;
}

static int ctch_init(struct intel_guc *guc,
		     struct intel_guc_ct_channel *ctch)
{
	struct i915_vma *vma;
	void *blob;
	int err;
	int i;

	GEM_BUG_ON(ctch->vma);

	/* We allocate 1 page to hold both descriptors and both buffers.
	 *       ___________.....................
	 *      |desc (SEND)|                   :
	 *      |___________|                   PAGE/4
	 *      :___________....................:
	 *      |desc (RECV)|                   :
	 *      |___________|                   PAGE/4
	 *      :_______________________________:
	 *      |cmds (SEND)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *      |cmds (RECV)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *
	 * Each message can use a maximum of 32 dwords and we don't expect to
	 * have more than 1 in flight at any time, so we have enough space.
	 * Some logic further ahead will rely on the fact that there is only 1
	 * page and that it is always mapped, so if the size is changed the
	 * other code will need updating as well.
	 */

	/* allocate vma and map it */
	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_out;
	}
	ctch->vma = vma;

	/* map first page */
	blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(blob)) {
		err = PTR_ERR(blob);
		goto err_vma;
	}
	CT_DEBUG_DRIVER("CT: vma base=%#x\n",
			intel_guc_ggtt_offset(guc, ctch->vma));

	/* store pointers to desc and cmds for each buffer */
	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
		ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
	}

	return 0;

err_vma:
	i915_vma_unpin_and_release(&ctch->vma, 0);
err_out:
	CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
			ctch->owner, err);
	return err;
}

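/* The backing vma may only be released once the channel has been disabled */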
static void ctch_fini(struct intel_guc *guc,
		      struct intel_guc_ct_channel *ctch)
{
	GEM_BUG_ON(ctch->enabled);

	i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
}

static int ctch_enable(struct intel_guc *guc,
		       struct intel_guc_ct_channel *ctch)
{
	u32 base;
	int err;
	int i;

	GEM_BUG_ON(!ctch->vma);

	GEM_BUG_ON(ctch->enabled);

	/* vma should be already allocated and mapped */
	base = intel_guc_ggtt_offset(guc, ctch->vma);

	/* (re)initialize descriptors
	 * cmds buffers are in the second half of the blob page
	 */
	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
					base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
					PAGE_SIZE/4,
					ctch->owner);
	}

	/* register buffers, starting with RECV buffer
	 * descriptors are in first half of the blob
	 */
	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_RECV,
					    INTEL_GUC_CT_BUFFER_TYPE_RECV);
	if (unlikely(err))
		goto err_out;

	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_SEND,
					    INTEL_GUC_CT_BUFFER_TYPE_SEND);
	if (unlikely(err))
		goto err_deregister;

	ctch->enabled = true;

	return 0;

err_deregister:
	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
	DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
	return err;
}

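/* Stop using the channel, then deregister the SEND and RECV buffers with GuC */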
static void ctch_disable(struct intel_guc *guc,
			 struct intel_guc_ct_channel *ctch)
{
	GEM_BUG_ON(!ctch->enabled);

	ctch->enabled = false;

	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_SEND);
	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
}

static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
{
	/* For now it's trivial */
	return ++ctch->next_fence;
}

/**
 * DOC: CTB Host to GuC request
 *
 * Format of the CTB Host to GuC request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                  |
 *      +   HEADER   +---------+---------+---------+---------+
 *      |            |    0    |    1    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+
 *      |  len >= 1  |  FENCE  |     request specific data   |
 *      +------+-----+---------+---------+---------+---------+
 *
 *                   ^-----------------len-------------------^
 */
static int ctb_write(struct intel_guc_ct_buffer *ctb,
		     const u32 *action,
		     u32 len /* in dwords */,
		     u32 fence,
		     bool want_response)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 used;			/* in dwords */
	u32 header;
	u32 *cmds = ctb->cmds;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);

	/*
	 * tail == head condition indicates empty. GuC FW does not support
	 * using up the entire buffer to get tail == head meaning full.
	 */
	if (tail < head)
		used = (size - head) + tail;
	else
		used = tail - head;

	/* make sure there is a space including extra dw for the fence */
	if (unlikely(used + len + 1 >= size))
		return -ENOSPC;

	/*
	 * Write the message. The format is the following:
	 * DW0: header (including action code)
	 * DW1: fence
	 * DW2+: action data
	 */
	header = (len << GUC_CT_MSG_LEN_SHIFT) |
		 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
		 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

	CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
			4, &header, 4, &fence,
			4 * (len - 1), &action[1]);

	cmds[tail] = header;
	tail = (tail + 1) % size;

	cmds[tail] = fence;
	tail = (tail + 1) % size;

	for (i = 1; i < len; i++) {
		cmds[tail] = action[i];
		tail = (tail + 1) % size;
	}

	/* now update desc tail (back in bytes) */
	desc->tail = tail * 4;
	GEM_BUG_ON(desc->tail > desc->size);

	return 0;
}

/**
 * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
 * @desc:	buffer descriptor
 * @fence:	response fence
 * @status:	placeholder for status
 *
 * GuC will update CT buffer descriptor with new fence and status
 * after processing the command identified by the fence. Wait for
 * specified fence and then read from the descriptor status of the
 * command.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 * *	-EPROTO no response, CT buffer is in error
 */
static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
				    u32 fence,
				    u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to a 10ms timeout. Failure to complete within 10ms is considered
	 * an error.
	 */
#define done (READ_ONCE(desc->fence) == fence)
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err)) {
		DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
			  fence, desc->fence);

		if (WARN_ON(desc->is_in_error)) {
			/* Something went wrong with the messaging, try to
			 * reset the buffer and hope for the best
			 */
			guc_ct_buffer_desc_reset(desc);
			err = -EPROTO;
		}
	}

	*status = desc->status;
	return err;
}

/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @req:	pointer to pending request
 * @status:	placeholder for status
 *
 * For each sent request, GuC shall send back a CT response message.
 * Our message handler will update status of tracked request once
 * response message with given fence is received. Wait for specified
 * fence and then read from the tracked request status.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to a 10ms timeout. Failure to complete within 10ms is considered
	 * an error.
	 */
#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err))
		DRM_ERROR("CT: fence %u err %d\n", req->fence, err);

	*status = req->status;
	return err;
}

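/*
 * Send an action over the SEND buffer: add the request to the pending list,
 * write the message, notify GuC, then wait either for the response message
 * (when a response buffer was provided) or for the descriptor update, and
 * finally decode the returned status.
 */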
static int ctch_send(struct intel_guc_ct *ct,
		     struct intel_guc_ct_channel *ctch,
		     const u32 *action,
		     u32 len,
		     u32 *response_buf,
		     u32 response_buf_size,
		     u32 *status)
{
	struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
	struct guc_ct_buffer_desc *desc = ctb->desc;
	struct ct_request request;
	unsigned long flags;
	u32 fence;
	int err;

	GEM_BUG_ON(!ctch->enabled);
	GEM_BUG_ON(!len);
	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
	GEM_BUG_ON(!response_buf && response_buf_size);

	fence = ctch_get_next_fence(ctch);
	request.fence = fence;
	request.status = 0;
	request.response_len = response_buf_size;
	request.response_buf = response_buf;

	spin_lock_irqsave(&ct->lock, flags);
	list_add_tail(&request.link, &ct->pending_requests);
	spin_unlock_irqrestore(&ct->lock, flags);

	err = ctb_write(ctb, action, len, fence, !!response_buf);
	if (unlikely(err))
		goto unlink;

	intel_guc_notify(ct_to_guc(ct));

	if (response_buf)
		err = wait_for_ct_request_update(&request, status);
	else
		err = wait_for_ctb_desc_update(desc, fence, status);
	if (unlikely(err))
		goto unlink;

	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
		err = -EIO;
		goto unlink;
	}

	if (response_buf) {
		/* There shall be no data in the status */
		WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
		/* Return actual response len */
		err = request.response_len;
	} else {
		/* There shall be no response payload */
		WARN_ON(request.response_len);
		/* Return data decoded from the status dword */
		err = INTEL_GUC_MSG_TO_DATA(*status);
	}

unlink:
	spin_lock_irqsave(&ct->lock, flags);
	list_del(&request.link);
	spin_unlock_irqrestore(&ct->lock, flags);

	return err;
}

/*
 * Command Transport (CT) buffer based GuC send function.
 */
static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
			     u32 *response_buf, u32 response_buf_size)
{
	struct intel_guc_ct *ct = &guc->ct;
	struct intel_guc_ct_channel *ctch = &ct->host_channel;
	u32 status = ~0; /* undefined */
	int ret;

	mutex_lock(&guc->send_mutex);

	ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size,
			&status);
	if (unlikely(ret < 0)) {
		DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
			  action[0], ret, status);
	} else if (unlikely(ret)) {
		CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
				action[0], ret, ret);
	}

	mutex_unlock(&guc->send_mutex);
	return ret;
}

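/* Helpers for decoding the dword message header */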
static inline unsigned int ct_header_get_len(u32 header)
{
	return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
}

static inline unsigned int ct_header_get_action(u32 header)
{
	return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
}

static inline bool ct_header_is_response(u32 header)
{
	return !!(header & GUC_CT_MSG_IS_RESPONSE);
}

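/*
 * Read a single message from the RECV buffer into data[].
 * Returns 0 on success, -ENODATA when the buffer is empty, or -EPROTO
 * when the message header claims more dwords than are available.
 */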
static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 *cmds = ctb->cmds;
	s32 available;			/* in dwords */
	unsigned int len;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);
	GEM_BUG_ON(head >= size);

	/* tail == head condition indicates empty */
	available = tail - head;
	if (unlikely(available == 0))
		return -ENODATA;

	/* beware of buffer wrap case */
	if (unlikely(available < 0))
		available += size;
	CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
	GEM_BUG_ON(available < 0);

	data[0] = cmds[head];
	head = (head + 1) % size;

	/* message len with header */
	len = ct_header_get_len(data[0]) + 1;
	if (unlikely(len > (u32)available)) {
		DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
			  4, data,
			  4 * (head + available - 1 > size ?
			       size - head : available - 1), &cmds[head],
			  4 * (head + available - 1 > size ?
			       available - 1 - size + head : 0), &cmds[0]);
		return -EPROTO;
	}

	for (i = 1; i < len; i++) {
		data[i] = cmds[head];
		head = (head + 1) % size;
	}
	CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);

	desc->head = head * 4;
	return 0;
}

/**
 * DOC: CTB GuC to Host response
 *
 * Format of the CTB GuC to Host response message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                            |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	u32 fence;
	u32 status;
	u32 datalen;
	struct ct_request *req;
	bool found = false;

	GEM_BUG_ON(!ct_header_is_response(header));
	GEM_BUG_ON(!in_irq());

	/* Response payload shall at least include fence and status */
	if (unlikely(len < 2)) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	fence = msg[1];
	status = msg[2];
	datalen = len - 2;

	/* Format of the status follows RESPONSE message */
	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);

	spin_lock(&ct->lock);
	list_for_each_entry(req, &ct->pending_requests, link) {
		if (unlikely(fence != req->fence)) {
			CT_DEBUG_DRIVER("CT: request %u awaits response\n",
					req->fence);
			continue;
		}
		if (unlikely(datalen > req->response_len)) {
			DRM_ERROR("CT: response %u too long %*ph\n",
				  req->fence, 4 * msglen, msg);
			datalen = 0;
		}
		if (datalen)
			memcpy(req->response_buf, msg + 3, 4 * datalen);
		req->response_len = datalen;
		WRITE_ONCE(req->status, status);
		found = true;
		break;
	}
	spin_unlock(&ct->lock);

	if (!found)
		DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
	return 0;
}

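/* Dispatch a single GuC-to-host request to the matching action handler */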
static void ct_process_request(struct intel_guc_ct *ct,
			       u32 action, u32 len, const u32 *payload)
{
	struct intel_guc *guc = ct_to_guc(ct);
	int ret;

	CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);

	switch (action) {
	case INTEL_GUC_ACTION_DEFAULT:
		ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
		if (unlikely(ret))
			goto fail_unexpected;
		break;

	default:
fail_unexpected:
		DRM_ERROR("CT: unexpected request %x %*ph\n",
			  action, 4 * len, payload);
		break;
	}
}

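/*
 * Pop one queued request off ct->incoming_requests and process it.
 * Returns true when the queue has been drained.
 */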
static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
	unsigned long flags;
	struct ct_incoming_request *request;
	u32 header;
	u32 *payload;
	bool done;

	spin_lock_irqsave(&ct->lock, flags);
	request = list_first_entry_or_null(&ct->incoming_requests,
					   struct ct_incoming_request, link);
	if (request)
		list_del(&request->link);
	done = !!list_empty(&ct->incoming_requests);
	spin_unlock_irqrestore(&ct->lock, flags);

	if (!request)
		return true;

	header = request->msg[0];
	payload = &request->msg[1];
	ct_process_request(ct,
			   ct_header_get_action(header),
			   ct_header_get_len(header),
			   payload);

	kfree(request);
	return done;
}

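/*
 * Process one request per worker invocation; if more requests are pending,
 * requeue the worker instead of looping here.
 */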
static void ct_incoming_request_worker_func(struct work_struct *w)
{
	struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
	bool done;

	done = ct_process_incoming_requests(ct);
	if (!done)
		queue_work(system_unbound_wq, &ct->worker);
}

/**
 * DOC: CTB GuC to Host request
 *
 * Format of the CTB GuC to Host request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                            |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |     len    |            request specific data                |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	struct ct_incoming_request *request;
	unsigned long flags;

	GEM_BUG_ON(ct_header_is_response(header));

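	/*
	 * We may be called from an interrupt handler, so the allocation must
	 * be atomic; dropping the request is preferable to sleeping here.
	 */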
	request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
	if (unlikely(!request)) {
		DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
		return 0;
	}
	memcpy(request->msg, msg, 4 * msglen);

	spin_lock_irqsave(&ct->lock, flags);
	list_add_tail(&request->link, &ct->incoming_requests);
	spin_unlock_irqrestore(&ct->lock, flags);

	queue_work(system_unbound_wq, &ct->worker);
	return 0;
}

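/* Drain the RECV buffer, dispatching responses and queueing requests */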
static void ct_process_host_channel(struct intel_guc_ct *ct)
{
	struct intel_guc_ct_channel *ctch = &ct->host_channel;
	struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV];
	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
	int err = 0;

	if (!ctch->enabled)
		return;

	do {
		err = ctb_read(ctb, msg);
		if (err)
			break;

		if (ct_header_is_response(msg[0]))
			err = ct_handle_response(ct, msg);
		else
			err = ct_handle_request(ct, msg);
	} while (!err);

	if (GEM_WARN_ON(err == -EPROTO)) {
		DRM_ERROR("CT: corrupted message detected!\n");
		ctb->desc->is_in_error = 1;
	}
}


/*
 * When we're communicating with the GuC over CT, GuC uses events
 * to notify us about new messages being posted on the RECV buffer.
 */
static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
{
	struct intel_guc_ct *ct = &guc->ct;

	ct_process_host_channel(ct);
}

/**
 * intel_guc_ct_init - Init CT communication
 * @ct: pointer to CT struct
 *
 * Allocate memory required for communication via
 * the CT channel.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_init(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct intel_guc_ct_channel *ctch = &ct->host_channel;
	int err;

	err = ctch_init(guc, ctch);
	if (unlikely(err)) {
		DRM_ERROR("CT: can't open channel %d; err=%d\n",
			  ctch->owner, err);
		return err;
	}

	GEM_BUG_ON(!ctch->vma);
	return 0;
}

/**
 * intel_guc_ct_fini - Fini CT communication
 * @ct: pointer to CT struct
 *
 * Deallocate memory required for communication via
 * the CT channel.
 */
void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct intel_guc_ct_channel *ctch = &ct->host_channel;

	ctch_fini(guc, ctch);
}

/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct intel_guc_ct_channel *ctch = &ct->host_channel;
	int err;

	if (ctch->enabled)
		return 0;

	err = ctch_enable(guc, ctch);
	if (unlikely(err))
		return err;

	/* Switch into cmd transport buffer based send() */
	guc->send = intel_guc_send_ct;
	guc->handler = intel_guc_to_host_event_handler_ct;
	DRM_INFO("CT: %s\n", enableddisabled(true));
	return 0;
}


/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct intel_guc_ct_channel *ctch = &ct->host_channel;

	if (!ctch->enabled)
		return;

	ctch_disable(guc, ctch);

	/* Disable send */
	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
	DRM_INFO("CT: %s\n", enableddisabled(false));
}