linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
/*
 *  linux/drivers/net/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"

static struct ehea_bmap *ehea_bmap;

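/*
 * Return the queue entry at the current queue page and advance the
 * iterator by one hardware page.  Returns NULL once the end of the
 * queue is reached, or if the entry is not page aligned (which would
 * indicate a corrupted queue).
 */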
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
        void *retvalue = hw_qeit_get(queue);

        queue->current_q_offset += queue->pagesize;
        if (queue->current_q_offset > queue->queue_length) {
                queue->current_q_offset -= queue->pagesize;
                retvalue = NULL;
        } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
                pr_err("not on page boundary\n");
                retvalue = NULL;
        }
        return retvalue;
}

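/*
 * Allocate the backing store for a hardware queue.  The queue is built
 * from whole kernel pages, each of which is carved into one or more HEA
 * queue pages.  For illustration (values not taken from this file): with
 * a 64 KB kernel PAGE_SIZE and a 4 KB EHEA page size, pages_per_kpage is
 * 16, so sixteen consecutive queue_pages entries point into the same
 * kernel page.
 */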
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
                          const u32 pagesize, const u32 qe_size)
{
        int pages_per_kpage = PAGE_SIZE / pagesize;
        int i, k;

        if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
                pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
                       (int)PAGE_SIZE, (int)pagesize);
                return -EINVAL;
        }

        queue->queue_length = nr_of_pages * pagesize;
        queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
        if (!queue->queue_pages) {
                pr_err("no mem for queue_pages\n");
                return -ENOMEM;
        }

        /*
         * allocate pages for queue:
         * outer loop allocates whole kernel pages (page aligned) and
         * inner loop divides a kernel page into smaller hea queue pages
         */
        i = 0;
        while (i < nr_of_pages) {
                u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
                if (!kpage)
                        goto out_nomem;
                for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
                        (queue->queue_pages)[i] = (struct ehea_page *)kpage;
                        kpage += pagesize;
                        i++;
                }
        }

        queue->current_q_offset = 0;
        queue->qe_size = qe_size;
        queue->pagesize = pagesize;
        queue->toggle_state = 1;

        return 0;
out_nomem:
        for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
                if (!(queue->queue_pages)[i])
                        break;
                free_page((unsigned long)(queue->queue_pages)[i]);
        }
        return -ENOMEM;
}

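/*
 * Free a queue allocated by hw_queue_ctor().  Only every
 * pages_per_kpage-th entry is passed to free_page(), since the entries
 * in between point into the same kernel page.
 */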
static void hw_queue_dtor(struct hw_queue *queue)
{
        int pages_per_kpage;
        int i, nr_pages;

        if (!queue || !queue->queue_pages)
                return;

        pages_per_kpage = PAGE_SIZE / queue->pagesize;
        nr_pages = queue->queue_length / queue->pagesize;

        for (i = 0; i < nr_pages; i += pages_per_kpage)
                free_page((unsigned long)(queue->queue_pages)[i]);

        kfree(queue->queue_pages);
}

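/*
 * Create a completion queue: allocate the firmware resource, back it
 * with kernel memory via hw_queue_ctor() and register every queue page
 * with the hypervisor.  Intermediate pages are acknowledged with
 * H_PAGE_REGISTERED and the final page with H_SUCCESS, which is why the
 * last iteration is checked separately.
 */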
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
                               int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
        struct ehea_cq *cq;
        u64 hret, rpage;
        u32 counter;
        int ret;
        void *vpage;

        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq) {
                pr_err("no mem for cq\n");
                goto out_nomem;
        }

        cq->attr.max_nr_of_cqes = nr_of_cqe;
        cq->attr.cq_token = cq_token;
        cq->attr.eq_handle = eq_handle;

        cq->adapter = adapter;

        hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
                                        &cq->fw_handle, &cq->epas);
        if (hret != H_SUCCESS) {
                pr_err("alloc_resource_cq failed\n");
                goto out_freemem;
        }

        ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
                            EHEA_PAGESIZE, sizeof(struct ehea_cqe));
        if (ret)
                goto out_freeres;

        for (counter = 0; counter < cq->attr.nr_pages; counter++) {
                vpage = hw_qpageit_get_inc(&cq->hw_queue);
                if (!vpage) {
                        pr_err("hw_qpageit_get_inc failed\n");
                        goto out_kill_hwq;
                }

                rpage = virt_to_abs(vpage);
                hret = ehea_h_register_rpage(adapter->handle,
                                             0, EHEA_CQ_REGISTER_ORIG,
                                             cq->fw_handle, rpage, 1);
                if (hret < H_SUCCESS) {
                        pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
                               cq, hret, counter, cq->attr.nr_pages);
                        goto out_kill_hwq;
                }

                if (counter == (cq->attr.nr_pages - 1)) {
                        vpage = hw_qpageit_get_inc(&cq->hw_queue);

                        if ((hret != H_SUCCESS) || (vpage)) {
                                pr_err("registration of pages not complete hret=%llx\n",
                                       hret);
                                goto out_kill_hwq;
                        }
                } else {
                        if (hret != H_PAGE_REGISTERED) {
                                pr_err("CQ: registration of page failed hret=%llx\n",
                                       hret);
                                goto out_kill_hwq;
                        }
                }
        }

        hw_qeit_reset(&cq->hw_queue);
        ehea_reset_cq_ep(cq);
        ehea_reset_cq_n1(cq);

        return cq;

out_kill_hwq:
        hw_queue_dtor(&cq->hw_queue);

out_freeres:
        ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
        kfree(cq);

out_nomem:
        return NULL;
}

static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
        u64 hret;
        u64 adapter_handle = cq->adapter->handle;

        /* deregister all previously registered pages */
        hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&cq->hw_queue);
        kfree(cq);

        return hret;
}

int ehea_destroy_cq(struct ehea_cq *cq)
{
        u64 hret, aer, aerr;

        if (!cq)
                return 0;

        hcp_epas_dtor(&cq->epas);
        hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
        if (hret == H_R_STATE) {
                ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
                hret = ehea_destroy_cq_res(cq, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                pr_err("destroy CQ failed\n");
                return -EIO;
        }

        return 0;
}

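/*
 * Create an event queue.  The flow mirrors ehea_create_cq(): allocate
 * the firmware resource, construct the backing queue and register each
 * page with the hypervisor, treating the final page specially.
 */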
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
                               const enum ehea_eq_type type,
                               const u32 max_nr_of_eqes, const u8 eqe_gen)
{
        int ret, i;
        u64 hret, rpage;
        void *vpage;
        struct ehea_eq *eq;

        eq = kzalloc(sizeof(*eq), GFP_KERNEL);
        if (!eq) {
                pr_err("no mem for eq\n");
                return NULL;
        }

        eq->adapter = adapter;
        eq->attr.type = type;
        eq->attr.max_nr_of_eqes = max_nr_of_eqes;
        eq->attr.eqe_gen = eqe_gen;
        spin_lock_init(&eq->spinlock);

        hret = ehea_h_alloc_resource_eq(adapter->handle,
                                        &eq->attr, &eq->fw_handle);
        if (hret != H_SUCCESS) {
                pr_err("alloc_resource_eq failed\n");
                goto out_freemem;
        }

        ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
                            EHEA_PAGESIZE, sizeof(struct ehea_eqe));
        if (ret) {
                pr_err("can't allocate eq pages\n");
                goto out_freeres;
        }

        for (i = 0; i < eq->attr.nr_pages; i++) {
                vpage = hw_qpageit_get_inc(&eq->hw_queue);
                if (!vpage) {
                        pr_err("hw_qpageit_get_inc failed\n");
                        hret = H_RESOURCE;
                        goto out_kill_hwq;
                }

                rpage = virt_to_abs(vpage);

                hret = ehea_h_register_rpage(adapter->handle, 0,
                                             EHEA_EQ_REGISTER_ORIG,
                                             eq->fw_handle, rpage, 1);

                if (i == (eq->attr.nr_pages - 1)) {
                        /* last page */
                        vpage = hw_qpageit_get_inc(&eq->hw_queue);
                        if ((hret != H_SUCCESS) || (vpage))
                                goto out_kill_hwq;
                } else {
                        if (hret != H_PAGE_REGISTERED)
                                goto out_kill_hwq;
                }
        }

        hw_qeit_reset(&eq->hw_queue);
        return eq;

out_kill_hwq:
        hw_queue_dtor(&eq->hw_queue);

out_freeres:
        ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
        kfree(eq);
        return NULL;
}

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
        struct ehea_eqe *eqe;
        unsigned long flags;

        spin_lock_irqsave(&eq->spinlock, flags);
        eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
        spin_unlock_irqrestore(&eq->spinlock, flags);

        return eqe;
}

static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
        u64 hret;
        unsigned long flags;

        spin_lock_irqsave(&eq->spinlock, flags);
        hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
        spin_unlock_irqrestore(&eq->spinlock, flags);

        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&eq->hw_queue);
        kfree(eq);

        return hret;
}

int ehea_destroy_eq(struct ehea_eq *eq)
{
        u64 hret, aer, aerr;

        if (!eq)
                return 0;

        hcp_epas_dtor(&eq->epas);

        hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
        if (hret == H_R_STATE) {
                ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
                hret = ehea_destroy_eq_res(eq, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                pr_err("destroy EQ failed\n");
                return -EIO;
        }

        return 0;
}

/*
 * Allocate memory for a queue and register its pages with the
 * hypervisor (phyp).
 */
static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
                           int nr_pages, int wqe_size, int act_nr_sges,
                           struct ehea_adapter *adapter, int h_call_q_selector)
{
        u64 hret, rpage;
        int ret, cnt;
        void *vpage;

        ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
        if (ret)
                return ret;

        for (cnt = 0; cnt < nr_pages; cnt++) {
                vpage = hw_qpageit_get_inc(hw_queue);
                if (!vpage) {
                        pr_err("hw_qpageit_get_inc failed\n");
                        goto out_kill_hwq;
                }
                rpage = virt_to_abs(vpage);
                hret = ehea_h_register_rpage(adapter->handle,
                                             0, h_call_q_selector,
                                             qp->fw_handle, rpage, 1);
                if (hret < H_SUCCESS) {
                        pr_err("register_rpage_qp failed\n");
                        goto out_kill_hwq;
                }
        }
        hw_qeit_reset(hw_queue);
        return 0;

out_kill_hwq:
        hw_queue_dtor(hw_queue);
        return -EIO;
}

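/*
 * Decode a WQE size encoding as used by the QP init attributes: the
 * encoded value is a shift relative to 128 bytes, so 0 -> 128 B,
 * 1 -> 256 B, 2 -> 512 B, 3 -> 1024 B, and so on.
 */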
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
        return 128 << wqe_enc_size;
}

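/*
 * Create a queue pair: allocate the firmware QP resource, then build
 * and register the send queue, receive queue 1 and, depending on
 * init_attr->rq_count, receive queues 2 and 3.
 */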
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
                               u32 pd, struct ehea_qp_init_attr *init_attr)
{
        int ret;
        u64 hret;
        struct ehea_qp *qp;
        u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
        u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp) {
                pr_err("no mem for qp\n");
                return NULL;
        }

        qp->adapter = adapter;

        hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
                                        &qp->fw_handle, &qp->epas);
        if (hret != H_SUCCESS) {
                pr_err("ehea_h_alloc_resource_qp failed\n");
                goto out_freemem;
        }

        wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
        wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
        wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
        wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

        ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
                                     wqe_size_in_bytes_sq,
                                     init_attr->act_wqe_size_enc_sq, adapter,
                                     0);
        if (ret) {
                pr_err("can't register for sq ret=%x\n", ret);
                goto out_freeres;
        }

        ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
                                     init_attr->nr_rq1_pages,
                                     wqe_size_in_bytes_rq1,
                                     init_attr->act_wqe_size_enc_rq1,
                                     adapter, 1);
        if (ret) {
                pr_err("can't register for rq1 ret=%x\n", ret);
                goto out_kill_hwsq;
        }

        if (init_attr->rq_count > 1) {
                ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
                                             init_attr->nr_rq2_pages,
                                             wqe_size_in_bytes_rq2,
                                             init_attr->act_wqe_size_enc_rq2,
                                             adapter, 2);
                if (ret) {
                        pr_err("can't register for rq2 ret=%x\n", ret);
                        goto out_kill_hwr1q;
                }
        }

        if (init_attr->rq_count > 2) {
                ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
                                             init_attr->nr_rq3_pages,
                                             wqe_size_in_bytes_rq3,
                                             init_attr->act_wqe_size_enc_rq3,
                                             adapter, 3);
                if (ret) {
                        pr_err("can't register for rq3 ret=%x\n", ret);
                        goto out_kill_hwr2q;
                }
        }

        qp->init_attr = *init_attr;

        return qp;

out_kill_hwr2q:
        hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
        hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
        hw_queue_dtor(&qp->hw_squeue);

out_freeres:
        ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
        ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);

out_freemem:
        kfree(qp);
        return NULL;
}

static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
        u64 hret;
        struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

        ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
        hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&qp->hw_squeue);
        hw_queue_dtor(&qp->hw_rqueue1);

        if (qp_attr->rq_count > 1)
                hw_queue_dtor(&qp->hw_rqueue2);
        if (qp_attr->rq_count > 2)
                hw_queue_dtor(&qp->hw_rqueue3);
        kfree(qp);

        return hret;
}

int ehea_destroy_qp(struct ehea_qp *qp)
{
        u64 hret, aer, aerr;

        if (!qp)
                return 0;

        hcp_epas_dtor(&qp->epas);

        hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
        if (hret == H_R_STATE) {
                ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
                hret = ehea_destroy_qp_res(qp, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                pr_err("destroy QP failed\n");
                return -EIO;
        }

        return 0;
}

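/*
 * The busmap translates kernel addresses into the contiguous bus
 * address space that the HEA memory region is registered with.  It is
 * a three-level radix tree over memory sections: a section index is
 * split into top, dir and idx parts via ehea_calc_index() below, and
 * each leaf entry holds the assigned bus address of one section.  As a
 * sketch (the shift/mask constants live in ehea_qmr.h, not shown here):
 * section i maps to
 *   ehea_bmap->top[top]->dir[dir]->ent[idx]
 * with top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT),
 *      dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT),
 *      idx = i & EHEA_INDEX_MASK.
 */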
static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
        return (i >> s) & EHEA_INDEX_MASK;
}

static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
                                     int dir)
{
        if (!ehea_top_bmap->dir[dir]) {
                ehea_top_bmap->dir[dir] =
                        kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
                if (!ehea_top_bmap->dir[dir])
                        return -ENOMEM;
        }
        return 0;
}

static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
        if (!ehea_bmap->top[top]) {
                ehea_bmap->top[top] =
                        kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
                if (!ehea_bmap->top[top])
                        return -ENOMEM;
        }
        return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}

static DEFINE_MUTEX(ehea_busmap_mutex);
static unsigned long ehea_mr_len;

#define EHEA_BUSMAP_ADD_SECT 1
#define EHEA_BUSMAP_REM_SECT 0

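/*
 * Walk the whole busmap and assign ascending bus addresses (starting at
 * EHEA_BUSMAP_START) to every section that is still marked valid.
 * Directory and top-level nodes that no longer contain any valid
 * entries are freed along the way.
 */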
static void ehea_rebuild_busmap(void)
{
        u64 vaddr = EHEA_BUSMAP_START;
        int top, dir, idx;

        for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
                struct ehea_top_bmap *ehea_top;
                int valid_dir_entries = 0;

                if (!ehea_bmap->top[top])
                        continue;
                ehea_top = ehea_bmap->top[top];
                for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
                        struct ehea_dir_bmap *ehea_dir;
                        int valid_entries = 0;

                        if (!ehea_top->dir[dir])
                                continue;
                        valid_dir_entries++;
                        ehea_dir = ehea_top->dir[dir];
                        for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
                                if (!ehea_dir->ent[idx])
                                        continue;
                                valid_entries++;
                                ehea_dir->ent[idx] = vaddr;
                                vaddr += EHEA_SECTSIZE;
                        }
                        if (!valid_entries) {
                                ehea_top->dir[dir] = NULL;
                                kfree(ehea_dir);
                        }
                }
                if (!valid_dir_entries) {
                        ehea_bmap->top[top] = NULL;
                        kfree(ehea_top);
                }
        }
}

static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages,
                              int add)
{
        unsigned long i, start_section, end_section;

        if (!nr_pages)
                return 0;

        if (!ehea_bmap) {
                ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
                if (!ehea_bmap)
                        return -ENOMEM;
        }

        start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
        end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
        /* Mark entries as valid or invalid only; addresses are assigned later */
        for (i = start_section; i < end_section; i++) {
                u64 flag;
                int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
                int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
                int idx = i & EHEA_INDEX_MASK;

                if (add) {
                        int ret = ehea_init_bmap(ehea_bmap, top, dir);
                        if (ret)
                                return ret;
                        flag = 1; /* valid */
                        ehea_mr_len += EHEA_SECTSIZE;
                } else {
                        if (!ehea_bmap->top[top])
                                continue;
                        if (!ehea_bmap->top[top]->dir[dir])
                                continue;
                        flag = 0; /* invalid */
                        ehea_mr_len -= EHEA_SECTSIZE;
                }

                ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
        }
        ehea_rebuild_busmap(); /* Assign contiguous addresses for the MR */
        return 0;
}

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
        int ret;

        mutex_lock(&ehea_busmap_mutex);
        ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
        mutex_unlock(&ehea_busmap_mutex);
        return ret;
}

int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
        int ret;

        mutex_lock(&ehea_busmap_mutex);
        ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
        mutex_unlock(&ehea_busmap_mutex);
        return ret;
}

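/*
 * A pfn starts a hugepage iff it is aligned to the hugepage boundary
 * (no bits set under EHEA_HUGEPAGE_PFN_MASK) and the backing compound
 * page has exactly the hugepage order, i.e.
 * page_order + PAGE_SHIFT == EHEA_HUGEPAGESHIFT (16 GB hugepages, per
 * the caller's comment below).
 */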
static int ehea_is_hugepage(unsigned long pfn)
{
        int page_order;

        if (pfn & EHEA_HUGEPAGE_PFN_MASK)
                return 0;

        page_order = compound_order(pfn_to_page(pfn));
        if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
                return 0;

        return 1;
}

static int ehea_create_busmap_callback(unsigned long initial_pfn,
                                       unsigned long total_nr_pages, void *arg)
{
        int ret;
        unsigned long pfn, start_pfn, end_pfn, nr_pages;

        if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
                return ehea_update_busmap(initial_pfn, total_nr_pages,
                                          EHEA_BUSMAP_ADD_SECT);

        /* Given chunk is >= 16GB -> check for hugepages */
        start_pfn = initial_pfn;
        end_pfn = initial_pfn + total_nr_pages;
        pfn = start_pfn;

        while (pfn < end_pfn) {
                if (ehea_is_hugepage(pfn)) {
                        /* Add mem found in front of the hugepage */
                        nr_pages = pfn - start_pfn;
                        ret = ehea_update_busmap(start_pfn, nr_pages,
                                                 EHEA_BUSMAP_ADD_SECT);
                        if (ret)
                                return ret;

                        /* Skip the hugepage */
                        pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
                        start_pfn = pfn;
                } else {
                        pfn += (EHEA_SECTSIZE / PAGE_SIZE);
                }
        }

        /* Add mem found behind the hugepage(s) */
        nr_pages = pfn - start_pfn;
        return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
}

int ehea_create_busmap(void)
{
        int ret;

        mutex_lock(&ehea_busmap_mutex);
        ehea_mr_len = 0;
        ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
                                    ehea_create_busmap_callback);
        mutex_unlock(&ehea_busmap_mutex);
        return ret;
}

void ehea_destroy_busmap(void)
{
        int top, dir;

        mutex_lock(&ehea_busmap_mutex);
        if (!ehea_bmap)
                goto out_destroy;

        for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
                if (!ehea_bmap->top[top])
                        continue;

                for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
                        if (!ehea_bmap->top[top]->dir[dir])
                                continue;

                        kfree(ehea_bmap->top[top]->dir[dir]);
                }

                kfree(ehea_bmap->top[top]);
        }

        kfree(ehea_bmap);
        ehea_bmap = NULL;
out_destroy:
        mutex_unlock(&ehea_busmap_mutex);
}

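/*
 * Translate a kernel address into the bus address space of the memory
 * region.  The absolute address is broken into a section index plus an
 * offset within the section; the section's bus address is looked up in
 * the busmap and the offset is OR'ed back in.  EHEA_INVAL_ADDR is
 * returned for anything that was never mapped.
 */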
u64 ehea_map_vaddr(void *caddr)
{
        int top, dir, idx;
        unsigned long index, offset;

        if (!ehea_bmap)
                return EHEA_INVAL_ADDR;

        index = virt_to_abs(caddr) >> SECTION_SIZE_BITS;
        top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
        if (!ehea_bmap->top[top])
                return EHEA_INVAL_ADDR;

        dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
        if (!ehea_bmap->top[top]->dir[dir])
                return EHEA_INVAL_ADDR;

        idx = index & EHEA_INDEX_MASK;
        if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
                return EHEA_INVAL_ADDR;

        offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
        return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
}

static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
        unsigned long ret = idx;

        ret |= dir << EHEA_DIR_INDEX_SHIFT;
        ret |= top << EHEA_TOP_INDEX_SHIFT;
        return abs_to_virt(ret << SECTION_SIZE_BITS);
}

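/*
 * Register one section of the memory region with the hypervisor.  The
 * pages of the section are registered in batches: the page table 'pt'
 * is filled with EHEA_MAX_RPAGE absolute page addresses at a time and
 * handed to ehea_h_register_rpage_mr() until the whole section
 * (EHEA_PAGES_PER_SECTION pages) is covered.
 */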
static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
                               struct ehea_adapter *adapter,
                               struct ehea_mr *mr)
{
        void *pg;
        u64 j, m, hret;
        unsigned long k = 0;
        u64 pt_abs = virt_to_abs(pt);

        void *sectbase = ehea_calc_sectbase(top, dir, idx);

        for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {
                for (m = 0; m < EHEA_MAX_RPAGE; m++) {
                        pg = sectbase + ((k++) * EHEA_PAGESIZE);
                        pt[m] = virt_to_abs(pg);
                }
                hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
                                                0, pt_abs, EHEA_MAX_RPAGE);

                if ((hret != H_SUCCESS) &&
                    (hret != H_PAGE_REGISTERED)) {
                        ehea_h_free_resource(adapter->handle, mr->handle,
                                             FORCE_FREE);
                        pr_err("register_rpage_mr failed\n");
                        return hret;
                }
        }
        return hret;
}

static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
                                struct ehea_adapter *adapter,
                                struct ehea_mr *mr)
{
        u64 hret = H_SUCCESS;
        int idx;

        for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
                if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
                        continue;

                hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
                if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
                        return hret;
        }
        return hret;
}

static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
                                    struct ehea_adapter *adapter,
                                    struct ehea_mr *mr)
{
        u64 hret = H_SUCCESS;
        int dir;

        for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
                if (!ehea_bmap->top[top]->dir[dir])
                        continue;

                hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
                if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
                        return hret;
        }
        return hret;
}

int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
        int ret;
        u64 *pt;
        u64 hret;
        u32 acc_ctrl = EHEA_MR_ACC_CTRL;
        unsigned long top;

        pt = (void *)get_zeroed_page(GFP_KERNEL);
        if (!pt) {
                pr_err("no mem\n");
                ret = -ENOMEM;
                goto out;
        }

        hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
                                        ehea_mr_len, acc_ctrl, adapter->pd,
                                        &mr->handle, &mr->lkey);
        if (hret != H_SUCCESS) {
                pr_err("alloc_resource_mr failed\n");
                ret = -EIO;
                goto out;
        }

        if (!ehea_bmap) {
                ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
                pr_err("no busmap available\n");
                ret = -EIO;
                goto out;
        }

        for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
                if (!ehea_bmap->top[top])
                        continue;

                hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
                if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
                        break;
        }

        if (hret != H_SUCCESS) {
                ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
                pr_err("registering mr failed\n");
                ret = -EIO;
                goto out;
        }

        mr->vaddr = EHEA_BUSMAP_START;
        mr->adapter = adapter;
        ret = 0;
out:
        free_page((unsigned long)pt);
        return ret;
}

int ehea_rem_mr(struct ehea_mr *mr)
{
        u64 hret;

        if (!mr || !mr->adapter)
                return -EINVAL;

        hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
                                    FORCE_FREE);
        if (hret != H_SUCCESS) {
                pr_err("destroy MR failed\n");
                return -EIO;
        }

        return 0;
}

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
                 struct ehea_mr *shared_mr)
{
        u64 hret;

        hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
                                   old_mr->vaddr, EHEA_MR_ACC_CTRL,
                                   adapter->pd, shared_mr);
        if (hret != H_SUCCESS)
                return -EIO;

        shared_mr->adapter = adapter;

        return 0;
}

static void print_error_data(u64 *data)
{
        int length;
        u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
        u64 resource = data[1];

        length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);

        if (length > EHEA_PAGESIZE)
                length = EHEA_PAGESIZE;

        if (type == EHEA_AER_RESTYPE_QP)
                pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
                       resource, data[6], data[12], data[22]);
        else if (type == EHEA_AER_RESTYPE_CQ)
                pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
                       resource, data[6]);
        else if (type == EHEA_AER_RESTYPE_EQ)
                pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
                       resource, data[6]);

        ehea_dump(data, length, "error data");
}

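/*
 * Fetch the error data block for a resource from the hypervisor into a
 * freshly zeroed page.  On H_SUCCESS the resource type is taken from
 * rblock[2] and the AER/AERR registers from rblock[6] and rblock[12],
 * matching the offsets used by print_error_data() above.
 */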
u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
                    u64 *aer, u64 *aerr)
{
        unsigned long ret;
        u64 *rblock;
        u64 type = 0;

        rblock = (void *)get_zeroed_page(GFP_KERNEL);
        if (!rblock) {
                pr_err("Cannot allocate rblock memory\n");
                goto out;
        }

        ret = ehea_h_error_data(adapter->handle, res_handle, rblock);

        if (ret == H_SUCCESS) {
                type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
                *aer = rblock[6];
                *aerr = rblock[12];
                print_error_data(rblock);
        } else if (ret == H_R_STATE) {
                pr_err("No error data available: %llX\n", res_handle);
        } else {
                pr_err("Error data could not be fetched: %llX\n", res_handle);
        }

        free_page((unsigned long)rblock);
out:
        return type;
}