/*
 * s390 PCI instructions
 */
#include "qemu/osdep.h"
#include "exec/memop.h"
#include "exec/memory-internal.h"
#include "qemu/error-report.h"
#include "sysemu/hw_accel.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/s390-pci-kvm.h"
#include "hw/s390x/s390-pci-vfio.h"
#include "hw/s390x/tod.h"

#ifndef DEBUG_S390PCI_INST
#define DEBUG_S390PCI_INST 0
#endif

#define DPRINTF(fmt, ...)                                           \
    do {                                                            \
        if (DEBUG_S390PCI_INST) {                                   \
            fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__);  \
        }                                                           \
    } while (0)

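/*
 * Adjust the counter of available DMA mappings when a DMA limit (e.g. one
 * imposed by vfio) is in effect; both helpers are no-ops when no limit has
 * been set for this IOMMU.
 */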
static inline void inc_dma_avail(S390PCIIOMMU *iommu)
{
    if (iommu->dma_limit) {
        iommu->dma_limit->avail++;
    }
}

static inline void dec_dma_avail(S390PCIIOMMU *iommu)
{
    if (iommu->dma_limit) {
        iommu->dma_limit->avail--;
    }
}

static void s390_set_status_code(CPUS390XState *env,
                                 uint8_t r, uint64_t status_code)
{
    env->regs[r] &= ~0xff000000ULL;
    env->regs[r] |= (status_code & 0xff) << 24;
}

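/*
 * CLP List PCI: fill the response block with one entry per available zPCI
 * function, starting either at the first available device or at the device
 * indicated by a non-zero resume token from a previous invocation.
 */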
static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
    S390PCIBusDevice *pbdev = NULL;
    S390pciState *s = s390_get_phb();
    uint32_t res_code, initial_l2, g_l2;
    int rc, i;
    uint64_t resume_token;

    rc = 0;
    if (lduw_p(&rrb->request.hdr.len) != 32) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
        res_code = CLP_RC_FMT;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
        ldq_p(&rrb->request.reserved1) != 0) {
        res_code = CLP_RC_RESNOT0;
        rc = -EINVAL;
        goto out;
    }

    resume_token = ldq_p(&rrb->request.resume_token);

    if (resume_token) {
        pbdev = s390_pci_find_dev_by_idx(s, resume_token);
        if (!pbdev) {
            res_code = CLP_RC_LISTPCI_BADRT;
            rc = -EINVAL;
            goto out;
        }
    } else {
        pbdev = s390_pci_find_next_avail_dev(s, NULL);
    }

    if (lduw_p(&rrb->response.hdr.len) < 48) {
        res_code = CLP_RC_8K;
        rc = -EINVAL;
        goto out;
    }

    initial_l2 = lduw_p(&rrb->response.hdr.len);
    if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
        != 0) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        *cc = 3;
        goto out;
    }

    stl_p(&rrb->response.fmt, 0);
    stq_p(&rrb->response.reserved1, 0);
    stl_p(&rrb->response.mdd, FH_MASK_SHM);
    stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
    rrb->response.flags = UID_CHECKING_ENABLED;
    rrb->response.entry_size = sizeof(ClpFhListEntry);

    i = 0;
    g_l2 = LIST_PCI_HDR_LEN;
    while (g_l2 < initial_l2 && pbdev) {
        stw_p(&rrb->response.fh_list[i].device_id,
              pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
        stw_p(&rrb->response.fh_list[i].vendor_id,
              pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
        /* Ignore RESERVED devices. */
        stl_p(&rrb->response.fh_list[i].config,
              pbdev->state == ZPCI_FS_STANDBY ? 0 : 1 << 31);
        stl_p(&rrb->response.fh_list[i].fid, pbdev->fid);
        stl_p(&rrb->response.fh_list[i].fh, pbdev->fh);

        g_l2 += sizeof(ClpFhListEntry);

        DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
                g_l2,
                lduw_p(&rrb->response.fh_list[i].vendor_id),
                lduw_p(&rrb->response.fh_list[i].device_id),
                ldl_p(&rrb->response.fh_list[i].fid),
                ldl_p(&rrb->response.fh_list[i].fh));
        pbdev = s390_pci_find_next_avail_dev(s, pbdev);
        i++;
    }

    if (!pbdev) {
        resume_token = 0;
    } else {
        resume_token = pbdev->fh & FH_MASK_INDEX;
    }
    stq_p(&rrb->response.resume_token, resume_token);
    stw_p(&rrb->response.hdr.len, g_l2);
    stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
    if (rc) {
        DPRINTF("list pci failed rc 0x%x\n", rc);
        stw_p(&rrb->response.hdr.rsp, res_code);
    }
    return rc;
}

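/*
 * CLP service call handler: read the request block from guest memory,
 * validate the request and response lengths, dispatch on the CLP command
 * (list/set/query PCI function, query PCI function group) and write the
 * combined request/response block back to the guest.
 */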
int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
{
    ClpReqHdr *reqh;
    ClpRspHdr *resh;
    S390PCIBusDevice *pbdev;
    uint32_t req_len;
    uint32_t res_len;
    uint8_t buffer[4096 * 2];
    uint8_t cc = 0;
    CPUS390XState *env = &cpu->env;
    S390pciState *s = s390_get_phb();
    int i;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    reqh = (ClpReqHdr *)buffer;
    req_len = lduw_p(&reqh->len);
    if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + sizeof(*resh))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    resh = (ClpRspHdr *)(buffer + req_len);
    res_len = lduw_p(&resh->len);
    if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }
    if ((req_len + res_len) > 8192) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + res_len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    if (req_len != 32) {
        stw_p(&resh->rsp, CLP_RC_LEN);
        goto out;
    }

    switch (lduw_p(&reqh->cmd)) {
    case CLP_LIST_PCI: {
        ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
        list_pci(rrb, &cc);
        break;
    }
    case CLP_SET_PCI_FN: {
        ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
        ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqsetpci->fh));
        if (!pbdev) {
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        switch (reqsetpci->oc) {
        case CLP_SET_ENABLE_PCI_FN:
            switch (reqsetpci->ndas) {
            case 0:
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_DMAAS);
                goto out;
            case 1:
                break;
            default:
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_RES);
                goto out;
            }

            if (pbdev->fh & FH_MASK_ENABLE) {
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }

            /*
             * When load/store interpretation is in use, refresh the function
             * handle from the host before enabling: parts of the handle may
             * have changed while the device was disabled to the guest.
             */
            if (pbdev->interp) {
                /* The host handle must already report the function enabled */
                if (!s390_pci_get_host_fh(pbdev, &pbdev->fh) ||
                    !(pbdev->fh & FH_MASK_ENABLE)) {
                    stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
                    goto out;
                }
            }
            pbdev->fh |= FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_ENABLED;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        case CLP_SET_DISABLE_PCI_FN:
            if (!(pbdev->fh & FH_MASK_ENABLE)) {
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }
            device_legacy_reset(DEVICE(pbdev));
            pbdev->fh &= ~FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_DISABLED;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        default:
            DPRINTF("unknown set pci command\n");
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
            break;
        }
        break;
    }
    case CLP_QUERY_PCI_FN: {
        ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
        ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqquery->fh));
        if (!pbdev) {
            DPRINTF("query pci no pci dev\n");
            stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        stq_p(&resquery->sdma, pbdev->zpci_fn.sdma);
        stq_p(&resquery->edma, pbdev->zpci_fn.edma);
        stw_p(&resquery->pchid, pbdev->zpci_fn.pchid);
        stw_p(&resquery->vfn, pbdev->zpci_fn.vfn);
        resquery->flags = pbdev->zpci_fn.flags;
        resquery->pfgid = pbdev->zpci_fn.pfgid;
        resquery->pft = pbdev->zpci_fn.pft;
        resquery->fmbl = pbdev->zpci_fn.fmbl;
        stl_p(&resquery->fid, pbdev->zpci_fn.fid);
        stl_p(&resquery->uid, pbdev->zpci_fn.uid);
        memcpy(resquery->pfip, pbdev->zpci_fn.pfip, CLP_PFIP_NR_SEGMENTS);
        memcpy(resquery->util_str, pbdev->zpci_fn.util_str, CLP_UTIL_STR_LEN);

        for (i = 0; i < PCI_BAR_COUNT; i++) {
            uint32_t data = pci_get_long(pbdev->pdev->config +
                                         PCI_BASE_ADDRESS_0 + (i * 4));

            stl_p(&resquery->bar[i], data);
            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
            DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 " barsize 0x%x\n", i,
                    ldl_p(&resquery->bar[i]),
                    pbdev->pdev->io_regions[i].size,
                    resquery->bar_size[i]);
        }

        stw_p(&resquery->hdr.rsp, CLP_RC_OK);
        break;
    }
    case CLP_QUERY_PCI_FNGRP: {
        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;

        ClpReqQueryPciGrp *reqgrp = (ClpReqQueryPciGrp *)reqh;
        S390PCIGroup *group;

        group = s390_group_find(reqgrp->g);
        if (!group) {
            /* We do not allow access to unknown groups */
            /* The group must have been obtained with a vfio device */
            stw_p(&resgrp->hdr.rsp, CLP_RC_QUERYPCIFG_PFGID);
            goto out;
        }
        resgrp->fr = group->zpci_group.fr;
        stq_p(&resgrp->dasm, group->zpci_group.dasm);
        stq_p(&resgrp->msia, group->zpci_group.msia);
        stw_p(&resgrp->mui, group->zpci_group.mui);
        stw_p(&resgrp->i, group->zpci_group.i);
        stw_p(&resgrp->maxstbl, group->zpci_group.maxstbl);
        resgrp->version = group->zpci_group.version;
        resgrp->dtsm = group->zpci_group.dtsm;
        stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
        break;
    }
    default:
        DPRINTF("unknown clp command\n");
        stw_p(&resh->rsp, CLP_RC_CMD);
        break;
    }

out:
    if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
                                req_len + res_len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    setcc(cpu, cc);
    return 0;
}

/**
 * Swap data contained in s390x big endian registers to little endian
 * PCI bars.
 *
 * @ptr: a pointer to a uint64_t data field
 * @len: the length of the valid data, must be 1, 2, 4 or 8
 */
static int zpci_endian_swap(uint64_t *ptr, uint8_t len)
{
    uint64_t data = *ptr;

    switch (len) {
    case 1:
        break;
    case 2:
        data = bswap16(data);
        break;
    case 4:
        data = bswap32(data);
        break;
    case 8:
        data = bswap64(data);
        break;
    default:
        return -EINVAL;
    }
    *ptr = data;
    return 0;
}

static MemoryRegion *s390_get_subregion(MemoryRegion *mr, uint64_t offset,
                                        uint8_t len)
{
    MemoryRegion *subregion;
    uint64_t subregion_size;

    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        subregion_size = int128_get64(subregion->size);
        if ((offset >= subregion->addr) &&
            (offset + len) <= (subregion->addr + subregion_size)) {
            mr = subregion;
            break;
        }
    }
    return mr;
}

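/*
 * Dispatch a BAR read to the memory region (or matching subregion) backing
 * the given PCI address space, using a big-endian memory op of the
 * requested length.
 */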
static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
                                 uint64_t offset, uint64_t *data, uint8_t len)
{
    MemoryRegion *mr;

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;
    return memory_region_dispatch_read(mr, offset, data,
                                       size_memop(len) | MO_BE,
                                       MEMTXATTRS_UNSPECIFIED);
}

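/*
 * PCI Load (PCILG): load up to 8 bytes from a PCI BAR or from the
 * function's configuration space into the register designated by r1.
 * Errors are reported through the condition code and status code.
 */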
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    uint64_t offset;
    uint64_t data;
    MemTxResult result;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("pcilg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    switch (pcias) {
    case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
        if (!len || (len > (8 - (offset & 0x7)))) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        result = zpci_read_bar(pbdev, pcias, offset, &data, len);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    case ZPCI_CONFIG_BAR:
        if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        data = pci_host_config_read_common(
                   pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);

        if (zpci_endian_swap(&data, len)) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    default:
        DPRINTF("pcilg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    pbdev->fmb.counter[ZPCI_FMB_CNT_LD]++;

    env->regs[r1] = data;
    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
                                  uint64_t offset, uint64_t data, uint8_t len)
{
    MemoryRegion *mr;

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;
    return memory_region_dispatch_write(mr, offset, data,
                                        size_memop(len) | MO_BE,
                                        MEMTXATTRS_UNSPECIFIED);
}

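/*
 * PCI Store (PCISTG): store up to 8 bytes from the register designated by
 * r1 into a PCI BAR or into the function's configuration space.  Length
 * and alignment checks mirror the ones performed for PCI Load.
 */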
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint64_t offset, data;
    S390PCIBusDevice *pbdev;
    MemTxResult result;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];
    data = env->regs[r1];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("pcistg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    /*
     * ZPCI_FS_RESERVED, ZPCI_FS_STANDBY and ZPCI_FS_DISABLED are already
     * covered by the FH_MASK_ENABLE check above
     */
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    switch (pcias) {
        /* A ZPCI PCI card may use any BAR from 0 to 5 */
    case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
        /*
         * Check length:
         * A length of 0 is invalid and length should not cross a double word
         */
        if (!len || (len > (8 - (offset & 0x7)))) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }

        result = zpci_write_bar(pbdev, pcias, offset, data, len);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    case ZPCI_CONFIG_BAR:
        /* ZPCI uses the pseudo BAR number 15 as configuration space */
        /* possible access lengths are 1, 2, 4 and must not cross a word */
        if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        /* len is 1, 2 or 4 here, so the swap cannot fail */
        zpci_endian_swap(&data, len);
        pci_host_config_write_common(pbdev->pdev, offset,
                                     pci_config_size(pbdev->pdev),
                                     data, len);
        break;
    default:
        DPRINTF("pcistg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    pbdev->fmb.counter[ZPCI_FMB_CNT_ST]++;

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

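/*
 * Update the shadow IOTLB for one guest DMA translation entry and notify
 * the IOMMU memory region so that any listener (e.g. vfio) can adjust its
 * mappings.  Returns the number of remaining available DMA mappings, or 1
 * when no DMA limit is in effect.
 */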
static uint32_t s390_pci_update_iotlb(S390PCIIOMMU *iommu,
                                      S390IOTLBEntry *entry)
{
    S390IOTLBEntry *cache = g_hash_table_lookup(iommu->iotlb, &entry->iova);
    IOMMUTLBEvent event = {
        .type = entry->perm ? IOMMU_NOTIFIER_MAP : IOMMU_NOTIFIER_UNMAP,
        .entry = {
            .target_as = &address_space_memory,
            .iova = entry->iova,
            .translated_addr = entry->translated_addr,
            .perm = entry->perm,
            .addr_mask = ~TARGET_PAGE_MASK,
        },
    };

    if (event.type == IOMMU_NOTIFIER_UNMAP) {
        if (!cache) {
            goto out;
        }
        g_hash_table_remove(iommu->iotlb, &entry->iova);
        inc_dma_avail(iommu);
    } else {
        if (cache) {
            if (cache->perm == entry->perm &&
                cache->translated_addr == entry->translated_addr) {
                goto out;
            }

            event.type = IOMMU_NOTIFIER_UNMAP;
            event.entry.perm = IOMMU_NONE;
            memory_region_notify_iommu(&iommu->iommu_mr, 0, event);
            event.type = IOMMU_NOTIFIER_MAP;
            event.entry.perm = entry->perm;
        }

        cache = g_new(S390IOTLBEntry, 1);
        cache->iova = entry->iova;
        cache->translated_addr = entry->translated_addr;
        cache->len = TARGET_PAGE_SIZE;
        cache->perm = entry->perm;
        g_hash_table_replace(iommu->iotlb, &cache->iova, cache);
        dec_dma_avail(iommu);
    }

    memory_region_notify_iommu(&iommu->iommu_mr, 0, event);

out:
    return iommu->dma_limit ? iommu->dma_limit->avail : 1;
}

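/*
 * Refresh PCI Translations (RPCIT): walk the guest DMA translation tables
 * for the requested IOVA range and update the shadow IOTLB accordingly.
 * If the DMA mapping quota runs out mid-range, remaining unmap requests are
 * still processed and the whole range is walked again if space was freed in
 * the meantime.
 */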
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    uint16_t error = 0;
    S390PCIBusDevice *pbdev;
    S390PCIIOMMU *iommu;
    S390IOTLBEntry entry;
    hwaddr start, end, sstart;
    uint32_t dma_avail;
    bool again;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    sstart = start = env->regs[r2];
    end = start + env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("rpcit no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_MOD_ST_ERROR_RECOVER);
        return 0;
    default:
        break;
    }

    iommu = pbdev->iommu;
    if (iommu->dma_limit) {
        dma_avail = iommu->dma_limit->avail;
    } else {
        dma_avail = 1;
    }
    if (!iommu->g_iota) {
        error = ERR_EVENT_INVALAS;
        goto err;
    }

    if (end < iommu->pba || start > iommu->pal) {
        error = ERR_EVENT_OORANGE;
        goto err;
    }

retry:
    start = sstart;
    again = false;
    while (start < end) {
        error = s390_guest_io_table_walk(iommu->g_iota, start, &entry);
        if (error) {
            break;
        }

        start += entry.len;
        while (entry.iova < start && entry.iova < end) {
            if (dma_avail > 0 || entry.perm == IOMMU_NONE) {
                dma_avail = s390_pci_update_iotlb(iommu, &entry);
                entry.iova += TARGET_PAGE_SIZE;
                entry.translated_addr += TARGET_PAGE_SIZE;
            } else {
                /*
                 * We are unable to make a new mapping at this time, continue
                 * on and hopefully free up more space.  Then attempt another
                 * pass.
                 */
                again = true;
                break;
            }
        }
    }
    if (again && dma_avail > 0) {
        goto retry;
    }
err:
    if (error) {
        pbdev->state = ZPCI_FS_ERROR;
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_FUNC_IN_ERR);
        s390_pci_generate_error_event(error, pbdev->fh, pbdev->fid, start, 0);
    } else {
        pbdev->fmb.counter[ZPCI_FMB_CNT_RPCIT]++;
        if (dma_avail > 0) {
            setcc(cpu, ZPCI_PCI_LS_OK);
        } else {
            /* DMA mappings are exhausted, report insufficient resources */
            setcc(cpu, ZPCI_PCI_LS_ERR);
            s390_set_status_code(env, r1, ZPCI_RPCIT_ST_INSUFF_RES);
        }
    }
    return 0;
}

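/*
 * PCI Store Block (PCISTB): copy a block of up to maxstbl bytes from guest
 * memory into an I/O BAR, in 8-byte chunks.  The offset, length and guest
 * address are validated before any data is written.
 */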
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
                        uint8_t ar, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    MemTxResult result;
    uint64_t offset;
    int i;
    uint32_t fh;
    uint8_t pcias;
    uint16_t len;
    uint8_t buffer[128];

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    pcias = (env->regs[r1] >> 16) & 0xf;
    len = env->regs[r1] & 0x1fff;
    offset = env->regs[r3];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    if (pcias > ZPCI_IO_BAR_MAX) {
        DPRINTF("pcistb invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    /* Verify the address, offset and length */
    /* offset must be a multiple of 8 */
    if (offset % 8) {
        goto specification_error;
    }
    /* Length must be greater than 8, a multiple of 8 */
    /* and not greater than maxstbl */
    if ((len <= 8) || (len % 8) ||
        (len > pbdev->pci_group->zpci_group.maxstbl)) {
        goto specification_error;
    }
    /* Do not cross a 4K-byte boundary */
    if (((offset & 0xfff) + len) > 0x1000) {
        goto specification_error;
    }
    /* Guest address must be double word aligned */
    if (gaddr & 0x07UL) {
        goto specification_error;
    }

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;

    for (i = 0; i < len; i += 8) {
        if (!memory_region_access_valid(mr, offset + i, 8, true,
                                        MEMTXATTRS_UNSPECIFIED)) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
    }

    if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    for (i = 0; i < len / 8; i++) {
        result = memory_region_dispatch_write(mr, offset + i * 8,
                                              ldq_p(buffer + i * 8),
                                              MO_64, MEMTXATTRS_UNSPECIFIED);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
    }

    pbdev->fmb.counter[ZPCI_FMB_CNT_STB]++;

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;

specification_error:
    s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    return 0;
}

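/*
 * Register the guest's adapter interruption vectors: map the summary and
 * interrupt indicator areas described by the FIB and record the adapter
 * routing information used to deliver adapter interrupts to the guest.
 */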
static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    int ret, len;
    uint8_t isc = FIB_DATA_ISC(ldl_p(&fib.data));

    pbdev->routes.adapter.adapter_id = css_get_adapter_id(
                                       CSS_IO_ADAPTER_PCI, isc);
    pbdev->summary_ind = get_indicator(ldq_p(&fib.aisb), sizeof(uint64_t));
    len = BITS_TO_LONGS(FIB_DATA_NOI(ldl_p(&fib.data))) * sizeof(unsigned long);
    pbdev->indicator = get_indicator(ldq_p(&fib.aibv), len);

    ret = map_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    if (ret) {
        goto out;
    }

    ret = map_indicator(&pbdev->routes.adapter, pbdev->indicator);
    if (ret) {
        goto out;
    }

    pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
    pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
    pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
    pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
    pbdev->isc = isc;
    pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
    pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));

    DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
out:
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);
    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    return ret;
}

int pci_dereg_irqs(S390PCIBusDevice *pbdev)
{
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);

    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    pbdev->routes.adapter.summary_addr = 0;
    pbdev->routes.adapter.summary_offset = 0;
    pbdev->routes.adapter.ind_addr = 0;
    pbdev->routes.adapter.ind_offset = 0;
    pbdev->isc = 0;
    pbdev->noi = 0;
    pbdev->sum = 0;

    DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

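/*
 * Register the guest's I/O address translation parameters: validate the
 * PCI base/limit addresses against the function's DMA window and enable
 * the IOMMU region for the guest-managed translation tables.
 */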
static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib,
                    uintptr_t ra)
{
    S390PCIIOMMU *iommu = pbdev->iommu;
    uint64_t pba = ldq_p(&fib.pba);
    uint64_t pal = ldq_p(&fib.pal);
    uint64_t g_iota = ldq_p(&fib.iota);
    uint8_t dt = (g_iota >> 2) & 0x7;
    uint8_t t = (g_iota >> 11) & 0x1;

    pba &= ~0xfff;
    pal |= 0xfff;
    if (pba > pal || pba < pbdev->zpci_fn.sdma || pal > pbdev->zpci_fn.edma) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return -EINVAL;
    }

    /* currently we only support designation type 1 with translation */
    if (!(dt == ZPCI_IOTA_RTTO && t)) {
        error_report("unsupported ioat dt %d t %d", dt, t);
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return -EINVAL;
    }

    iommu->pba = pba;
    iommu->pal = pal;
    iommu->g_iota = g_iota;

    s390_pci_iommu_enable(iommu);

    return 0;
}

void pci_dereg_ioat(S390PCIIOMMU *iommu)
{
    s390_pci_iommu_disable(iommu);
    iommu->pba = 0;
    iommu->pal = 0;
    iommu->g_iota = 0;
}

void fmb_timer_free(S390PCIBusDevice *pbdev)
{
    if (pbdev->fmb_timer) {
        timer_free(pbdev->fmb_timer);
        pbdev->fmb_timer = NULL;
    }
    pbdev->fmb_addr = 0;
    memset(&pbdev->fmb, 0, sizeof(ZpciFmb));
}

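/*
 * Store a single field of the function measurement block (FMB) into guest
 * memory at the configured FMB address.  On failure an error event is
 * generated and measurement is stopped.
 */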
static int fmb_do_update(S390PCIBusDevice *pbdev, int offset, uint64_t val,
                         int len)
{
    MemTxResult ret;
    uint64_t dst = pbdev->fmb_addr + offset;

    switch (len) {
    case 8:
        address_space_stq_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &ret);
        break;
    case 4:
        address_space_stl_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &ret);
        break;
    case 2:
        address_space_stw_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &ret);
        break;
    case 1:
        address_space_stb(&address_space_memory, dst, val,
                          MEMTXATTRS_UNSPECIFIED,
                          &ret);
        break;
    default:
        ret = MEMTX_ERROR;
        break;
    }
    if (ret != MEMTX_OK) {
        s390_pci_generate_error_event(ERR_EVENT_FMBA, pbdev->fh, pbdev->fid,
                                      pbdev->fmb_addr, 0);
        fmb_timer_free(pbdev);
    }

    return ret;
}

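/*
 * Periodic FMB update: set the update (U) bit, copy the sample count and
 * all instruction counters to the guest FMB, then clear the U bit, store
 * the TOD-format update time and re-arm the timer.
 */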
static void fmb_update(void *opaque)
{
    S390PCIBusDevice *pbdev = opaque;
    int64_t t = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    int i;

    /* Update U bit */
    pbdev->fmb.last_update *= 2;
    pbdev->fmb.last_update |= UPDATE_U_BIT;
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update),
                      pbdev->fmb.last_update,
                      sizeof(pbdev->fmb.last_update))) {
        return;
    }

    /* Update FMB sample count */
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, sample),
                      pbdev->fmb.sample++,
                      sizeof(pbdev->fmb.sample))) {
        return;
    }

    /* Update FMB counters */
    for (i = 0; i < ZPCI_FMB_CNT_MAX; i++) {
        if (fmb_do_update(pbdev, offsetof(ZpciFmb, counter[i]),
                          pbdev->fmb.counter[i],
                          sizeof(pbdev->fmb.counter[0]))) {
            return;
        }
    }

    /* Clear U bit and update the time */
    pbdev->fmb.last_update = time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
    pbdev->fmb.last_update *= 2;
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update),
                      pbdev->fmb.last_update,
                      sizeof(pbdev->fmb.last_update))) {
        return;
    }
    timer_mod(pbdev->fmb_timer, t + pbdev->pci_group->zpci_group.mui);
}

static int mpcifc_reg_int_interp(S390PCIBusDevice *pbdev, ZpciFib *fib)
{
    int rc;

    rc = s390_pci_kvm_aif_enable(pbdev, fib, pbdev->forwarding_assist);
    if (rc) {
        DPRINTF("Failed to enable interrupt forwarding\n");
        return rc;
    }

    return 0;
}

static int mpcifc_dereg_int_interp(S390PCIBusDevice *pbdev, ZpciFib *fib)
{
    int rc;

    rc = s390_pci_kvm_aif_disable(pbdev);
    if (rc) {
        DPRINTF("Failed to disable interrupt forwarding\n");
        return rc;
    }

    return 0;
}

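/*
 * Modify PCI Function Controls (MPCIFC): dispatch on the operation control
 * to register/deregister interrupts and I/O address translation, reset
 * error or blocked state, or set up function measurement.  When load/store
 * interpretation is active, interrupt registration is forwarded to KVM.
 */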
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
                        uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint8_t oc, dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    oc = env->regs[r1] & 0xff;
    dmaas = (env->regs[r1] >> 16) & 0xff;
    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    default:
        break;
    }

    if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    if (fib.fmt != 0) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    switch (oc) {
    case ZPCI_MOD_FC_REG_INT:
        if (pbdev->interp) {
            if (mpcifc_reg_int_interp(pbdev, &fib)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
            }
        } else if (pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_irqs(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_RES_NOT_AVAIL);
        }
        break;
    case ZPCI_MOD_FC_DEREG_INT:
        if (pbdev->interp) {
            if (mpcifc_dereg_int_interp(pbdev, &fib)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
            }
        } else if (!pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_irqs(pbdev);
        }
        break;
    case ZPCI_MOD_FC_REG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_ioat(env, pbdev, fib, ra)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
        }
        break;
    case ZPCI_MOD_FC_DEREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev->iommu);
        }
        break;
    case ZPCI_MOD_FC_REREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev->iommu);
            if (reg_ioat(env, pbdev, fib, ra)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
            }
        }
        break;
    case ZPCI_MOD_FC_RESET_ERROR:
        switch (pbdev->state) {
        case ZPCI_FS_BLOCKED:
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_ENABLED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_RESET_BLOCK:
        switch (pbdev->state) {
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_BLOCKED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_SET_MEASURE: {
        uint64_t fmb_addr = ldq_p(&fib.fmb_addr);

        if (fmb_addr & FMBK_MASK) {
            cc = ZPCI_PCI_LS_ERR;
            s390_pci_generate_error_event(ERR_EVENT_FMBPRO, pbdev->fh,
                                          pbdev->fid, fmb_addr, 0);
            fmb_timer_free(pbdev);
            break;
        }

        if (!fmb_addr) {
            /* Stop updating FMB. */
            fmb_timer_free(pbdev);
            break;
        }

        if (!pbdev->fmb_timer) {
            pbdev->fmb_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                            fmb_update, pbdev);
        } else if (timer_pending(pbdev->fmb_timer)) {
            /* Remove pending timer to update FMB address. */
            timer_del(pbdev->fmb_timer);
        }
        pbdev->fmb_addr = fmb_addr;
        timer_mod(pbdev->fmb_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                  pbdev->pci_group->zpci_group.mui);
        break;
    }
    default:
        s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
        cc = ZPCI_PCI_LS_ERR;
    }

    setcc(cpu, cc);
    return 0;
}

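/*
 * Store PCI Function Controls (STPCIFC): build a FIB describing the current
 * function state (enable/error/blocked bits, IOMMU parameters, interrupt
 * routing and FMB address) and store it at the guest address in fiba.
 */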
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
                         uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint8_t dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint32_t data;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    dmaas = (env->regs[r1] >> 16) & 0xff;

    if (dmaas) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_INVAL_DMAAS);
        return 0;
    }

    if (fiba & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_idx(s390_get_phb(), fh & FH_MASK_INDEX);
    if (!pbdev) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    memset(&fib, 0, sizeof(fib));

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_DISABLED:
        if (fh & FH_MASK_ENABLE) {
            setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
            return 0;
        }
        goto out;
    /*
     * BLOCKED bit is set to one coincident with the setting of ERROR bit.
     * FH Enabled bit is set to one in states of ENABLED, BLOCKED or ERROR.
     */
    case ZPCI_FS_ERROR:
        fib.fc |= 0x20;
        /* fallthrough */
    case ZPCI_FS_BLOCKED:
        fib.fc |= 0x40;
        /* fallthrough */
    case ZPCI_FS_ENABLED:
        fib.fc |= 0x80;
        if (pbdev->iommu->enabled) {
            fib.fc |= 0x10;
        }
        if (!(fh & FH_MASK_ENABLE)) {
            env->regs[r1] |= 1ULL << 63;
        }
        break;
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_PERM_ERROR);
        return 0;
    }

    stq_p(&fib.pba, pbdev->iommu->pba);
    stq_p(&fib.pal, pbdev->iommu->pal);
    stq_p(&fib.iota, pbdev->iommu->g_iota);
    stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
    stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
    stq_p(&fib.fmb_addr, pbdev->fmb_addr);

    data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
           ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
           ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
    stl_p(&fib.data, data);

out:
    if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    setcc(cpu, cc);
    return 0;
}