linux/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/completion.h>
#include <linux/circ_buf.h>
#include <linux/list.h>

#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
#include "a6xx_gpu.h"

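/*
 * Map each HFI message ID to its name with the preprocessor stringizing
 * operator so that errors can be logged with a readable message name
 * instead of a raw number.
 */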
#define HFI_MSG_ID(val) [val] = #val

static const char * const a6xx_hfi_msg_id[] = {
	HFI_MSG_ID(HFI_H2F_MSG_INIT),
	HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
	HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_TEST),
	HFI_MSG_ID(HFI_H2F_MSG_START),
	HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
	HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
	HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
};

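/*
 * Read the next message from an HFI queue into 'data'. Returns the size of
 * the message in dwords, or 0 if the queue is empty, in which case
 * rx_request is set to ask the GMU for an interrupt when new data arrives.
 */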
static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, hdr, index = header->read_index;

	if (header->read_index == header->write_index) {
		header->rx_request = 1;
		return 0;
	}

	hdr = queue->data[index];

	/*
	 * If we are to assume that the GMU firmware is in fact a rational actor
	 * and is programmed to not send us a larger response than we expect
	 * then we can also assume that if the header size is unexpectedly large
	 * that it is due to memory corruption and/or hardware failure. In this
	 * case the only reasonable course of action is to BUG() to help harden
	 * the failure.
	 */

	BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);

	for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
		data[i] = queue->data[index];
		index = (index + 1) % header->size;
	}

	if (!gmu->legacy)
		index = ALIGN(index, 4) % header->size;

	header->read_index = index;
	return HFI_HEADER_SIZE(hdr);
}

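/*
 * Write a message into an HFI queue and ring the host-to-GMU interrupt
 * "doorbell" so the firmware knows new data is available. Returns -ENOSPC
 * if the message does not fit in the ring buffer.
 */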
static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, space, index = header->write_index;

	spin_lock(&queue->lock);

	space = CIRC_SPACE(header->write_index, header->read_index,
		header->size);
	if (space < dwords) {
		header->dropped++;
		spin_unlock(&queue->lock);
		return -ENOSPC;
	}

	for (i = 0; i < dwords; i++) {
		queue->data[index] = data[i];
		index = (index + 1) % header->size;
	}

	/* Fill any unused space at the end of the message with a cookie value */
	if (!gmu->legacy) {
		for (; index % 4; index = (index + 1) % header->size)
			queue->data[index] = 0xfafafafa;
	}

	header->write_index = index;
	spin_unlock(&queue->lock);

	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
	return 0;
}

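/*
 * Wait for the GMU to raise the response queue interrupt, then walk the
 * queue looking for the ack matching 'seqnum'. Firmware error messages
 * and stale responses are logged and skipped along the way.
 */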
static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
		u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
	u32 val;
	int ret;

	/* Wait for a response */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev,
			"Message %s id %d timed out waiting for response\n",
			a6xx_hfi_msg_id[id], seqnum);
		return -ETIMEDOUT;
	}

	/* Clear the interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
		A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);

	for (;;) {
		struct a6xx_hfi_msg_response resp;

		/* Get the next packet */
		ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
			sizeof(resp) >> 2);

		/* If the queue is empty our response never made it */
		if (!ret) {
			DRM_DEV_ERROR(gmu->dev,
				"The HFI response queue is unexpectedly empty\n");

			return -ENOENT;
		}

		if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
			struct a6xx_hfi_msg_error *error =
				(struct a6xx_hfi_msg_error *) &resp;

			DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
				error->code);
			continue;
		}

		if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
			DRM_DEV_ERROR(gmu->dev,
				"Unexpected message seqnum %d on the response queue\n",
				HFI_HEADER_SEQNUM(resp.ret_header));
			continue;
		}

		if (resp.error) {
			DRM_DEV_ERROR(gmu->dev,
				"Message %s id %d returned error %d\n",
				a6xx_hfi_msg_id[id], seqnum, resp.error);
			return -EINVAL;
		}

		/* All is well, copy over the buffer */
		if (payload && payload_size)
			memcpy(payload, resp.payload,
				min_t(u32, payload_size, sizeof(resp.payload)));

		return 0;
	}
}

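/*
 * Every HFI message begins with a header dword, packed by
 * a6xx_hfi_send_msg() below as:
 *
 *   [31:20] sequence number
 *   [19:16] message type (HFI_MSG_CMD)
 *   [15:8]  message size in dwords, including this header
 *   [7:0]   message id
 */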
static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
		void *data, u32 size, u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
	int ret, dwords = size >> 2;
	u32 seqnum;

	seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;

	/* First dword of the message is the message header - fill it in */
	*((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
		(dwords << 8) | id;

	ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
			a6xx_hfi_msg_id[id], seqnum);
		return ret;
	}

	return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}

static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
{
	struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };

	msg.dbg_buffer_addr = (u32) gmu->debug.iova;
	msg.dbg_buffer_size = (u32) gmu->debug.size;
	msg.boot_state = boot_state;

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
		NULL, 0);
}

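/*
 * The supported_version dword appears to encode the version as
 * (major << 28) | (minor << 16); version 1.1 is requested below. The GMU
 * returns its own version in the response payload.
 */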
static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
{
	struct a6xx_hfi_msg_fw_version msg = { 0 };

	/* Currently supporting version 1.1 */
	msg.supported_version = (1 << 28) | (1 << 16);

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
		version, sizeof(*version));
}

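/*
 * Send the table of GPU (GX) and GMU (CX) power levels: an ARC vote plus a
 * frequency in kHz for each level. The non-legacy variant below also
 * carries an ACD value per GX level.
 */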
static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_perf_table_v1 msg = { 0 };
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_perf_table msg = { 0 };
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
		msg.gx_votes[i].acd = 0xffffffff;
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

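/*
 * The *_build_bw_table() helpers below hardcode the bus command addresses
 * and vote values for each supported target. Each one sends only a single
 * "off" bandwidth level, either because the GMU on that target doesn't do
 * bus scaling or because bus scaling is still a TODO.
 */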
static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5003c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] =  0x40000000;
	msg->ddr_cmds_data[0][1] =  0x40000000;
	msg->ddr_cmds_data[0][2] =  0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x5007c;
	msg->cnoc_cmds_data[0][0] =  0x40000000;
	msg->cnoc_cmds_data[1][0] =  0x60000001;
}

static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5003c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] =  0x40000000;
	msg->ddr_cmds_data[0][1] =  0x40000000;
	msg->ddr_cmds_data[0][2] =  0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 3;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x50034;
	msg->cnoc_cmds_addrs[1] = 0x5007c;
	msg->cnoc_cmds_addrs[2] = 0x5004c;

	msg->cnoc_cmds_data[0][0] =  0x40000000;
	msg->cnoc_cmds_data[0][1] =  0x00000000;
	msg->cnoc_cmds_data[0][2] =  0x40000000;

	msg->cnoc_cmds_data[1][0] =  0x60000001;
	msg->cnoc_cmds_data[1][1] =  0x20000001;
	msg->cnoc_cmds_data[1][2] =  0x60000001;
}

static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x50004;
	msg->ddr_cmds_addrs[2] = 0x5007c;

	msg->ddr_cmds_data[0][0] =  0x40000000;
	msg->ddr_cmds_data[0][1] =  0x40000000;
	msg->ddr_cmds_data[0][2] =  0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x500a4;
	msg->cnoc_cmds_data[0][0] =  0x40000000;
	msg->cnoc_cmds_data[1][0] =  0x60000001;
}

static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x07;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5005c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] =  0x40000000;
	msg->ddr_cmds_data[0][1] =  0x40000000;
	msg->ddr_cmds_data[0][2] =  0x40000000;

	/*
	 * These are the CX (CNOC) votes. These are used by the GMU, but the
	 * values for the sdm845 GMU are known and fixed so we can hard code
	 * them.
	 */

	msg->cnoc_cmds_num = 3;
	msg->cnoc_wait_bitmask = 0x05;

	msg->cnoc_cmds_addrs[0] = 0x50034;
	msg->cnoc_cmds_addrs[1] = 0x5007c;
	msg->cnoc_cmds_addrs[2] = 0x5004c;

	msg->cnoc_cmds_data[0][0] =  0x40000000;
	msg->cnoc_cmds_data[0][1] =  0x00000000;
	msg->cnoc_cmds_data[0][2] =  0x40000000;

	msg->cnoc_cmds_data[1][0] =  0x60000001;
	msg->cnoc_cmds_data[1][1] =  0x20000001;
	msg->cnoc_cmds_data[1][2] =  0x60000001;
}

static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_bw_table msg = { 0 };
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;

	if (adreno_is_a618(adreno_gpu))
		a618_build_bw_table(&msg);
	else if (adreno_is_a640(adreno_gpu))
		a640_build_bw_table(&msg);
	else if (adreno_is_a650(adreno_gpu))
		a650_build_bw_table(&msg);
	else
		a6xx_build_bw_table(&msg);

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_test msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_start(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_start msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_core_fw_start msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg,
		sizeof(msg), NULL, 0);
}

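/*
 * Vote for a GPU performance level. 'index' selects an entry in the perf
 * table previously sent with HFI_H2F_MSG_PERF_TABLE; the GMU performs the
 * actual clock and voltage switch.
 */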
int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
{
	struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };

	msg.ack_type = 1; /* blocking */
	msg.freq = index;
	msg.bw = 0; /* TODO: bus scaling */

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
		sizeof(msg), NULL, 0);
}

int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_prep_slumber_cmd msg = { 0 };

	/* TODO: should freq and bw fields be non-zero ? */

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg,
		sizeof(msg), NULL, 0);
}

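/*
 * Legacy (v1) firmware boot sequence: INIT, FW_VERSION, PERF_TABLE and
 * BW_TABLE, closed out with a TEST message.
 */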
static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
	if (ret)
		return ret;

	ret = a6xx_hfi_get_fw_version(gmu, NULL);
	if (ret)
		return ret;

	/*
	 * We have to exchange version numbers per the boot sequence, but at
	 * this point the kernel driver doesn't need to know the exact version
	 * of the GMU firmware
	 */

	ret = a6xx_hfi_send_perf_table_v1(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	/*
	 * Let the GMU know that there won't be any more HFI messages until next
	 * boot
	 */
	a6xx_hfi_send_test(gmu);

	return 0;
}

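/*
 * Current firmware boot sequence: PERF_TABLE, BW_TABLE, CORE_FW_START and
 * START. Legacy firmware is handed off to the v1 sequence above.
 */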
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	if (gmu->legacy)
		return a6xx_hfi_start_v1(gmu, boot_state);

	ret = a6xx_hfi_send_perf_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_core_fw_start(gmu);
	if (ret)
		return ret;

	/*
	 * The downstream driver sends this in its "a6xx_hw_init" equivalent,
	 * but there seems to be no harm in sending it here
	 */
	ret = a6xx_hfi_send_start(gmu);
	if (ret)
		return ret;

	return 0;
}

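/*
 * Shut down HFI: warn about any queue that still holds unread messages,
 * then reset the read and write indexes so the queues are clean for the
 * next boot.
 */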
void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
		struct a6xx_hfi_queue *queue = &gmu->queues[i];

		if (!queue->header)
			continue;

		if (queue->header->read_index != queue->header->write_index)
			DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);

		queue->header->read_index = 0;
		queue->header->write_index = 0;
	}
}

static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
		struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
		u32 id)
{
	spin_lock_init(&queue->lock);
	queue->header = header;
	queue->data = virt;
	atomic_set(&queue->seqnum, 0);

	/* Set up the shared memory header */
	header->iova = iova;
	header->type = 10 << 8 | id;
	header->status = 1;
	header->size = SZ_4K >> 2;
	header->msg_size = 0;
	header->dropped = 0;
	header->rx_watermark = 1;
	header->tx_watermark = 1;
	header->rx_request = 1;
	header->tx_request = 0;
	header->read_index = 0;
	header->write_index = 0;
}

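/*
 * Layout of the HFI buffer as set up below (offsets follow from the SZ_4K
 * queue size used here):
 *
 *   0x0000: queue table header followed by the per-queue headers
 *   0x1000: command queue data (host to GMU)
 *   0x2000: response queue data (GMU to host)
 */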
void a6xx_hfi_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gmu_bo *hfi = &gmu->hfi;
	struct a6xx_hfi_queue_table_header *table = hfi->virt;
	struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
	u64 offset;
	int table_size;

	/*
	 * The table size is the size of the table header plus all of the queue
	 * headers
	 */
	table_size = sizeof(*table);
	table_size += (ARRAY_SIZE(gmu->queues) *
		sizeof(struct a6xx_hfi_queue_header));

	table->version = 0;
	table->size = table_size;
	/* First queue header is located immediately after the table header */
	table->qhdr0_offset = sizeof(*table) >> 2;
	table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
	table->num_queues = ARRAY_SIZE(gmu->queues);
	table->active_queues = ARRAY_SIZE(gmu->queues);

	/* Command queue */
	offset = SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
		hfi->iova + offset, 0);

	/* GMU response queue */
	offset += SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
		hfi->iova + offset, gmu->legacy ? 4 : 1);
}