/*
 *  linux/drivers/net/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"

struct ehea_bmap *ehea_bmap;

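/*
 * Return the current queue page and advance the cursor by one hardware
 * page. Returns NULL at the end of the queue, or when the cursor is not
 * page aligned (which would indicate a driver bug).
 */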
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
        void *retvalue = hw_qeit_get(queue);

        queue->current_q_offset += queue->pagesize;
        if (queue->current_q_offset > queue->queue_length) {
                queue->current_q_offset -= queue->pagesize;
                retvalue = NULL;
        } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
                pr_err("not on page boundary\n");
                retvalue = NULL;
        }
        return retvalue;
}

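/*
 * Allocate the backing store for a hardware queue: nr_of_pages queue
 * pages of pagesize bytes each, carved out of zeroed kernel pages. A
 * queue page must not be larger than a kernel page.
 */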
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
                         const u32 pagesize, const u32 qe_size)
{
        int pages_per_kpage = PAGE_SIZE / pagesize;
        int i, k;

        if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
                pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
                       (int)PAGE_SIZE, (int)pagesize);
                return -EINVAL;
        }

        queue->queue_length = nr_of_pages * pagesize;
        /* kcalloc rather than kmalloc: the error path below relies on
         * unused slots being NULL when unwinding a partial allocation.
         */
        queue->queue_pages = kcalloc(nr_of_pages, sizeof(void *), GFP_KERNEL);
        if (!queue->queue_pages) {
                pr_err("no mem for queue_pages\n");
                return -ENOMEM;
        }

        /*
         * allocate pages for queue:
         * outer loop allocates whole kernel pages (page aligned) and
         * inner loop divides a kernel page into smaller hea queue pages
         */
        i = 0;
        while (i < nr_of_pages) {
                u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
                if (!kpage)
                        goto out_nomem;
                for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
                        (queue->queue_pages)[i] = (struct ehea_page *)kpage;
                        kpage += pagesize;
                        i++;
                }
        }

        queue->current_q_offset = 0;
        queue->qe_size = qe_size;
        queue->pagesize = pagesize;
        queue->toggle_state = 1;

        return 0;
out_nomem:
        for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
                if (!(queue->queue_pages)[i])
                        break;
                free_page((unsigned long)(queue->queue_pages)[i]);
        }
        return -ENOMEM;
}

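/*
 * Free the kernel pages backing a hardware queue as well as the page
 * pointer array allocated by hw_queue_ctor(). Safe to call on a queue
 * that was never fully constructed.
 */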
static void hw_queue_dtor(struct hw_queue *queue)
{
        int pages_per_kpage;
        int i, nr_pages;

        if (!queue || !queue->queue_pages)
                return;

        /* queue->pagesize must not be read before the check above */
        pages_per_kpage = PAGE_SIZE / queue->pagesize;
        nr_pages = queue->queue_length / queue->pagesize;

        for (i = 0; i < nr_pages; i += pages_per_kpage)
                free_page((unsigned long)(queue->queue_pages)[i]);

        kfree(queue->queue_pages);
}

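/*
 * Create a completion queue: allocate the CQ resource from the
 * hypervisor, build the backing queue in kernel memory and register
 * every queue page with the firmware. Returns NULL on failure.
 */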
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
                               int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
        struct ehea_cq *cq;
        u64 hret, rpage;
        u32 counter;
        int ret;
        void *vpage;

        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq) {
                pr_err("no mem for cq\n");
                goto out_nomem;
        }

        cq->attr.max_nr_of_cqes = nr_of_cqe;
        cq->attr.cq_token = cq_token;
        cq->attr.eq_handle = eq_handle;

        cq->adapter = adapter;

        hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
                                        &cq->fw_handle, &cq->epas);
        if (hret != H_SUCCESS) {
                pr_err("alloc_resource_cq failed\n");
                goto out_freemem;
        }

        ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
                            EHEA_PAGESIZE, sizeof(struct ehea_cqe));
        if (ret)
                goto out_freeres;

        for (counter = 0; counter < cq->attr.nr_pages; counter++) {
                vpage = hw_qpageit_get_inc(&cq->hw_queue);
                if (!vpage) {
                        pr_err("hw_qpageit_get_inc failed\n");
                        goto out_kill_hwq;
                }

                rpage = virt_to_abs(vpage);
                hret = ehea_h_register_rpage(adapter->handle,
                                             0, EHEA_CQ_REGISTER_ORIG,
                                             cq->fw_handle, rpage, 1);
                if (hret < H_SUCCESS) {
                        pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
                               cq, hret, counter, cq->attr.nr_pages);
                        goto out_kill_hwq;
                }

                if (counter == (cq->attr.nr_pages - 1)) {
                        vpage = hw_qpageit_get_inc(&cq->hw_queue);

                        if ((hret != H_SUCCESS) || (vpage)) {
                                pr_err("registration of pages not complete hret=%llx\n",
                                       hret);
                                goto out_kill_hwq;
                        }
                } else {
                        if (hret != H_PAGE_REGISTERED) {
                                pr_err("CQ: registration of page failed hret=%llx\n",
                                       hret);
                                goto out_kill_hwq;
                        }
                }
        }

        hw_qeit_reset(&cq->hw_queue);
        ehea_reset_cq_ep(cq);
        ehea_reset_cq_n1(cq);

        return cq;

out_kill_hwq:
        hw_queue_dtor(&cq->hw_queue);

out_freeres:
        ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
        kfree(cq);

out_nomem:
        return NULL;
}

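/*
 * Free the hypervisor resource behind a CQ and release its kernel
 * memory. With force == FORCE_FREE the resource is freed even if the
 * firmware still reports it as busy.
 */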
u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
        u64 hret;
        u64 adapter_handle = cq->adapter->handle;

        /* deregister all previously registered pages */
        hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&cq->hw_queue);
        kfree(cq);

        return hret;
}

int ehea_destroy_cq(struct ehea_cq *cq)
{
        u64 hret, aer, aerr;

        if (!cq)
                return 0;

        hcp_epas_dtor(&cq->epas);
        hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
        if (hret == H_R_STATE) {
                ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
                hret = ehea_destroy_cq_res(cq, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                pr_err("destroy CQ failed\n");
                return -EIO;
        }

        return 0;
}

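/*
 * Create an event queue and register its pages with the firmware,
 * mirroring ehea_create_cq() above. Returns NULL on failure.
 */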
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
                               const enum ehea_eq_type type,
                               const u32 max_nr_of_eqes, const u8 eqe_gen)
{
        int ret, i;
        u64 hret, rpage;
        void *vpage;
        struct ehea_eq *eq;

        eq = kzalloc(sizeof(*eq), GFP_KERNEL);
        if (!eq) {
                pr_err("no mem for eq\n");
                return NULL;
        }

        eq->adapter = adapter;
        eq->attr.type = type;
        eq->attr.max_nr_of_eqes = max_nr_of_eqes;
        eq->attr.eqe_gen = eqe_gen;
        spin_lock_init(&eq->spinlock);

        hret = ehea_h_alloc_resource_eq(adapter->handle,
                                        &eq->attr, &eq->fw_handle);
        if (hret != H_SUCCESS) {
                pr_err("alloc_resource_eq failed\n");
                goto out_freemem;
        }

        ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
                            EHEA_PAGESIZE, sizeof(struct ehea_eqe));
        if (ret) {
                pr_err("can't allocate eq pages\n");
                goto out_freeres;
        }

        for (i = 0; i < eq->attr.nr_pages; i++) {
                vpage = hw_qpageit_get_inc(&eq->hw_queue);
                if (!vpage) {
                        pr_err("hw_qpageit_get_inc failed\n");
                        hret = H_RESOURCE;
                        goto out_kill_hwq;
                }

                rpage = virt_to_abs(vpage);

                hret = ehea_h_register_rpage(adapter->handle, 0,
                                             EHEA_EQ_REGISTER_ORIG,
                                             eq->fw_handle, rpage, 1);

                if (i == (eq->attr.nr_pages - 1)) {
                        /* last page */
                        vpage = hw_qpageit_get_inc(&eq->hw_queue);
                        if ((hret != H_SUCCESS) || (vpage))
                                goto out_kill_hwq;
                } else {
                        if (hret != H_PAGE_REGISTERED)
                                goto out_kill_hwq;
                }
        }

        hw_qeit_reset(&eq->hw_queue);
        return eq;

out_kill_hwq:
        hw_queue_dtor(&eq->hw_queue);

out_freeres:
        ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
        kfree(eq);
        return NULL;
}

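/*
 * Fetch the next valid event queue entry, or NULL if the queue is
 * empty. The spinlock serialises concurrent pollers.
 */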
struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
        struct ehea_eqe *eqe;
        unsigned long flags;

        spin_lock_irqsave(&eq->spinlock, flags);
        eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
        spin_unlock_irqrestore(&eq->spinlock, flags);

        return eqe;
}

u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
        u64 hret;
        unsigned long flags;

        spin_lock_irqsave(&eq->spinlock, flags);
        hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
        spin_unlock_irqrestore(&eq->spinlock, flags);

        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&eq->hw_queue);
        kfree(eq);

        return hret;
}

int ehea_destroy_eq(struct ehea_eq *eq)
{
        u64 hret, aer, aerr;

        if (!eq)
                return 0;

        hcp_epas_dtor(&eq->epas);

        hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
        if (hret == H_R_STATE) {
                ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
                hret = ehea_destroy_eq_res(eq, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                pr_err("destroy EQ failed\n");
                return -EIO;
        }

        return 0;
}

/*
 * Allocate memory for a queue and register its pages with phyp.
 */
int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
                           int nr_pages, int wqe_size, int act_nr_sges,
                           struct ehea_adapter *adapter, int h_call_q_selector)
{
        u64 hret, rpage;
        int ret, cnt;
        void *vpage;

        ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
        if (ret)
                return ret;

        for (cnt = 0; cnt < nr_pages; cnt++) {
                vpage = hw_qpageit_get_inc(hw_queue);
                if (!vpage) {
                        pr_err("hw_qpageit_get_inc failed\n");
                        goto out_kill_hwq;
                }
                rpage = virt_to_abs(vpage);
                hret = ehea_h_register_rpage(adapter->handle,
                                             0, h_call_q_selector,
                                             qp->fw_handle, rpage, 1);
                if (hret < H_SUCCESS) {
                        pr_err("register_rpage_qp failed\n");
                        goto out_kill_hwq;
                }
        }
        hw_qeit_reset(hw_queue);
        return 0;

out_kill_hwq:
        hw_queue_dtor(hw_queue);
        return -EIO;
}

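/*
 * Decode an encoded WQE size: the firmware encodes WQE sizes as a
 * power-of-two multiple of 128 bytes.
 */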
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
        return 128 << wqe_enc_size;
}

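/*
 * Create a queue pair: one send queue and up to three receive queues,
 * each built and registered via ehea_qp_alloc_register(). Returns NULL
 * on failure.
 */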
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
                               u32 pd, struct ehea_qp_init_attr *init_attr)
{
        int ret;
        u64 hret;
        struct ehea_qp *qp;
        u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
        u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp) {
                pr_err("no mem for qp\n");
                return NULL;
        }

        qp->adapter = adapter;

        hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
                                        &qp->fw_handle, &qp->epas);
        if (hret != H_SUCCESS) {
                pr_err("ehea_h_alloc_resource_qp failed\n");
                goto out_freemem;
        }

        wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
        wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
        wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
        wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

        ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
                                     wqe_size_in_bytes_sq,
                                     init_attr->act_wqe_size_enc_sq, adapter,
                                     0);
        if (ret) {
                pr_err("can't register for sq ret=%x\n", ret);
                goto out_freeres;
        }

        ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
                                     init_attr->nr_rq1_pages,
                                     wqe_size_in_bytes_rq1,
                                     init_attr->act_wqe_size_enc_rq1,
                                     adapter, 1);
        if (ret) {
                pr_err("can't register for rq1 ret=%x\n", ret);
                goto out_kill_hwsq;
        }

        if (init_attr->rq_count > 1) {
                ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
                                             init_attr->nr_rq2_pages,
                                             wqe_size_in_bytes_rq2,
                                             init_attr->act_wqe_size_enc_rq2,
                                             adapter, 2);
                if (ret) {
                        pr_err("can't register for rq2 ret=%x\n", ret);
                        goto out_kill_hwr1q;
                }
        }

        if (init_attr->rq_count > 2) {
                ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
                                             init_attr->nr_rq3_pages,
                                             wqe_size_in_bytes_rq3,
                                             init_attr->act_wqe_size_enc_rq3,
                                             adapter, 3);
                if (ret) {
                        pr_err("can't register for rq3 ret=%x\n", ret);
                        goto out_kill_hwr2q;
                }
        }

        qp->init_attr = *init_attr;

        return qp;

out_kill_hwr2q:
        hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
        hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
        hw_queue_dtor(&qp->hw_squeue);

out_freeres:
        ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
        ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);

out_freemem:
        kfree(qp);
        return NULL;
}

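/*
 * Disable the QP, free its hypervisor resource and tear down all of its
 * hardware queues; how many receive queues exist follows from the
 * stored init attributes.
 */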
u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
        u64 hret;
        struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

        ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
        hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&qp->hw_squeue);
        hw_queue_dtor(&qp->hw_rqueue1);

        if (qp_attr->rq_count > 1)
                hw_queue_dtor(&qp->hw_rqueue2);
        if (qp_attr->rq_count > 2)
                hw_queue_dtor(&qp->hw_rqueue3);
        kfree(qp);

        return hret;
}

int ehea_destroy_qp(struct ehea_qp *qp)
{
        u64 hret, aer, aerr;

        if (!qp)
                return 0;

        hcp_epas_dtor(&qp->epas);

        hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
        if (hret == H_R_STATE) {
                ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
                hret = ehea_destroy_qp_res(qp, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                pr_err("destroy QP failed\n");
                return -EIO;
        }

        return 0;
}

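/*
 * The busmap maps kernel memory sections into the contiguous bus
 * address space that the HEA memory region is registered with. It is a
 * three-level table (top/dir/idx) indexed by section number; each leaf
 * entry holds the bus address assigned to one section.
 */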
static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
        return (i >> s) & EHEA_INDEX_MASK;
}

static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
                                     int dir)
{
        if (!ehea_top_bmap->dir[dir]) {
                ehea_top_bmap->dir[dir] =
                        kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
                if (!ehea_top_bmap->dir[dir])
                        return -ENOMEM;
        }
        return 0;
}

static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
        if (!ehea_bmap->top[top]) {
                ehea_bmap->top[top] =
                        kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
                if (!ehea_bmap->top[top])
                        return -ENOMEM;
        }
        return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}

static DEFINE_MUTEX(ehea_busmap_mutex);
static unsigned long ehea_mr_len;

#define EHEA_BUSMAP_ADD_SECT 1
#define EHEA_BUSMAP_REM_SECT 0

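/*
 * Hand out ascending bus addresses, starting at EHEA_BUSMAP_START, to
 * every valid section entry in the busmap. Table levels that no longer
 * contain any valid entry are freed along the way.
 */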
static void ehea_rebuild_busmap(void)
{
        u64 vaddr = EHEA_BUSMAP_START;
        int top, dir, idx;

        for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
                struct ehea_top_bmap *ehea_top;
                int valid_dir_entries = 0;

                if (!ehea_bmap->top[top])
                        continue;
                ehea_top = ehea_bmap->top[top];
                for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
                        struct ehea_dir_bmap *ehea_dir;
                        int valid_entries = 0;

                        if (!ehea_top->dir[dir])
                                continue;
                        valid_dir_entries++;
                        ehea_dir = ehea_top->dir[dir];
                        for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
                                if (!ehea_dir->ent[idx])
                                        continue;
                                valid_entries++;
                                ehea_dir->ent[idx] = vaddr;
                                vaddr += EHEA_SECTSIZE;
                        }
                        if (!valid_entries) {
                                ehea_top->dir[dir] = NULL;
                                kfree(ehea_dir);
                        }
                }
                if (!valid_dir_entries) {
                        ehea_bmap->top[top] = NULL;
                        kfree(ehea_top);
                }
        }
}

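/*
 * Mark the sections covered by [pfn, pfn + nr_pages) as valid (add) or
 * invalid (remove), then let ehea_rebuild_busmap() assign the actual
 * bus addresses. The busmap itself is allocated on first use.
 */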
static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
{
        unsigned long i, start_section, end_section;

        if (!nr_pages)
                return 0;

        if (!ehea_bmap) {
                ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
                if (!ehea_bmap)
                        return -ENOMEM;
        }

        start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
        end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
        /* Mark entries as valid or invalid only; address is assigned later */
        for (i = start_section; i < end_section; i++) {
                u64 flag;
                int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
                int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
                int idx = i & EHEA_INDEX_MASK;

                if (add) {
                        int ret = ehea_init_bmap(ehea_bmap, top, dir);
                        if (ret)
                                return ret;
                        flag = 1; /* valid */
                        ehea_mr_len += EHEA_SECTSIZE;
                } else {
                        if (!ehea_bmap->top[top])
                                continue;
                        if (!ehea_bmap->top[top]->dir[dir])
                                continue;
                        flag = 0; /* invalid */
                        ehea_mr_len -= EHEA_SECTSIZE;
                }

                ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
        }
        ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
        return 0;
}

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
        int ret;

        mutex_lock(&ehea_busmap_mutex);
        ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
        mutex_unlock(&ehea_busmap_mutex);
        return ret;
}

int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
        int ret;

        mutex_lock(&ehea_busmap_mutex);
        ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
        mutex_unlock(&ehea_busmap_mutex);
        return ret;
}

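/*
 * A pfn starts a 16GB hugepage if it is aligned to the hugepage size
 * and heads a compound page of the corresponding order.
 */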
static int ehea_is_hugepage(unsigned long pfn)
{
        int page_order;

        if (pfn & EHEA_HUGEPAGE_PFN_MASK)
                return 0;

        page_order = compound_order(pfn_to_page(pfn));
        if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
                return 0;

        return 1;
}

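/*
 * walk_system_ram_range() callback: add a chunk of system memory to the
 * busmap section by section, skipping over any 16GB hugepages that the
 * chunk contains.
 */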
static int ehea_create_busmap_callback(unsigned long initial_pfn,
                                       unsigned long total_nr_pages, void *arg)
{
        int ret;
        unsigned long pfn, start_pfn, end_pfn, nr_pages;

        if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
                return ehea_update_busmap(initial_pfn, total_nr_pages,
                                          EHEA_BUSMAP_ADD_SECT);

        /* Given chunk is >= 16GB -> check for hugepages */
        start_pfn = initial_pfn;
        end_pfn = initial_pfn + total_nr_pages;
        pfn = start_pfn;

        while (pfn < end_pfn) {
                if (ehea_is_hugepage(pfn)) {
                        /* Add mem found in front of the hugepage */
                        nr_pages = pfn - start_pfn;
                        ret = ehea_update_busmap(start_pfn, nr_pages,
                                                 EHEA_BUSMAP_ADD_SECT);
                        if (ret)
                                return ret;

                        /* Skip the hugepage */
                        pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
                        start_pfn = pfn;
                } else {
                        pfn += (EHEA_SECTSIZE / PAGE_SIZE);
                }
        }

        /* Add mem found behind the hugepage(s) */
        nr_pages = pfn - start_pfn;
        return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
}

int ehea_create_busmap(void)
{
        int ret;

        mutex_lock(&ehea_busmap_mutex);
        ehea_mr_len = 0;
        ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
                                    ehea_create_busmap_callback);
        mutex_unlock(&ehea_busmap_mutex);
        return ret;
}

void ehea_destroy_busmap(void)
{
        int top, dir;

        mutex_lock(&ehea_busmap_mutex);
        if (!ehea_bmap)
                goto out_destroy;

        for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
                if (!ehea_bmap->top[top])
                        continue;

                for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
                        if (!ehea_bmap->top[top]->dir[dir])
                                continue;

                        kfree(ehea_bmap->top[top]->dir[dir]);
                }

                kfree(ehea_bmap->top[top]);
        }

        kfree(ehea_bmap);
        ehea_bmap = NULL;
out_destroy:
        mutex_unlock(&ehea_busmap_mutex);
}

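/*
 * Translate a kernel virtual address into the bus address space of the
 * memory region by walking the three busmap levels. Returns
 * EHEA_INVAL_ADDR if the address is not covered by the busmap.
 */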
u64 ehea_map_vaddr(void *caddr)
{
        int top, dir, idx;
        unsigned long index, offset;

        if (!ehea_bmap)
                return EHEA_INVAL_ADDR;

        index = virt_to_abs(caddr) >> SECTION_SIZE_BITS;
        top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
        if (!ehea_bmap->top[top])
                return EHEA_INVAL_ADDR;

        dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
        if (!ehea_bmap->top[top]->dir[dir])
                return EHEA_INVAL_ADDR;

        idx = index & EHEA_INDEX_MASK;
        if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
                return EHEA_INVAL_ADDR;

        offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
        return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
}

static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
        unsigned long ret = idx;
        ret |= dir << EHEA_DIR_INDEX_SHIFT;
        ret |= top << EHEA_TOP_INDEX_SHIFT;
        return abs_to_virt(ret << SECTION_SIZE_BITS);
}

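/*
 * Register all pages of one memory section with the firmware, batching
 * EHEA_MAX_RPAGE page addresses per hypervisor call through the page
 * table scratch buffer pt.
 */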
static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
                               struct ehea_adapter *adapter,
                               struct ehea_mr *mr)
{
        void *pg;
        u64 j, m, hret;
        unsigned long k = 0;
        u64 pt_abs = virt_to_abs(pt);

        void *sectbase = ehea_calc_sectbase(top, dir, idx);

        for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {
                for (m = 0; m < EHEA_MAX_RPAGE; m++) {
                        pg = sectbase + ((k++) * EHEA_PAGESIZE);
                        pt[m] = virt_to_abs(pg);
                }
                hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
                                                0, pt_abs, EHEA_MAX_RPAGE);

                if ((hret != H_SUCCESS) &&
                    (hret != H_PAGE_REGISTERED)) {
                        ehea_h_free_resource(adapter->handle, mr->handle,
                                             FORCE_FREE);
                        pr_err("register_rpage_mr failed\n");
                        return hret;
                }
        }
        return hret;
}

static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
                                struct ehea_adapter *adapter,
                                struct ehea_mr *mr)
{
        u64 hret = H_SUCCESS;
        int idx;

        for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
                if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
                        continue;

                hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
                if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
                        return hret;
        }
        return hret;
}

static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
                                    struct ehea_adapter *adapter,
                                    struct ehea_mr *mr)
{
        u64 hret = H_SUCCESS;
        int dir;

        for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
                if (!ehea_bmap->top[top]->dir[dir])
                        continue;

                hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
                if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
                        return hret;
        }
        return hret;
}

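/*
 * Register one kernel memory region covering every valid section in the
 * busmap. On any failure the MR resource is freed again and -EIO is
 * returned.
 */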
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
        int ret;
        u64 *pt;
        u64 hret;
        u32 acc_ctrl = EHEA_MR_ACC_CTRL;
        unsigned long top;

        pt = (void *)get_zeroed_page(GFP_KERNEL);
        if (!pt) {
                pr_err("no mem\n");
                ret = -ENOMEM;
                goto out;
        }

        hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
                                        ehea_mr_len, acc_ctrl, adapter->pd,
                                        &mr->handle, &mr->lkey);
        if (hret != H_SUCCESS) {
                pr_err("alloc_resource_mr failed\n");
                ret = -EIO;
                goto out;
        }

        if (!ehea_bmap) {
                ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
                pr_err("no busmap available\n");
                ret = -EIO;
                goto out;
        }

        for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
                if (!ehea_bmap->top[top])
                        continue;

                hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
                if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
                        break;
        }

        if (hret != H_SUCCESS) {
                ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
                pr_err("registering mr failed\n");
                ret = -EIO;
                goto out;
        }

        mr->vaddr = EHEA_BUSMAP_START;
        mr->adapter = adapter;
        ret = 0;
out:
        free_page((unsigned long)pt);
        return ret;
}

int ehea_rem_mr(struct ehea_mr *mr)
{
        u64 hret;

        if (!mr || !mr->adapter)
                return -EINVAL;

        hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
                                    FORCE_FREE);
        if (hret != H_SUCCESS) {
                pr_err("destroy MR failed\n");
                return -EIO;
        }

        return 0;
}

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
                 struct ehea_mr *shared_mr)
{
        u64 hret;

        hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
                                   old_mr->vaddr, EHEA_MR_ACC_CTRL,
                                   adapter->pd, shared_mr);
        if (hret != H_SUCCESS)
                return -EIO;

        shared_mr->adapter = adapter;

        return 0;
}

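/*
 * Decode and log an error data block fetched from the hypervisor; the
 * offsets below (data[6], data[12], data[22]) index the AER, AERR and
 * port words of the resource state block.
 */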
void print_error_data(u64 *data)
{
        int length;
        u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
        u64 resource = data[1];

        length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);
        if (length > EHEA_PAGESIZE)
                length = EHEA_PAGESIZE;

        if (type == EHEA_AER_RESTYPE_QP)
                pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
                       resource, data[6], data[12], data[22]);
        else if (type == EHEA_AER_RESTYPE_CQ)
                pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
                       resource, data[6]);
        else if (type == EHEA_AER_RESTYPE_EQ)
                pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
                       resource, data[6]);

        ehea_dump(data, length, "error data");
}

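/*
 * Fetch and dump the error data block for a resource handle. Returns
 * the resource type taken from the block, or 0 if no data could be
 * retrieved.
 */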
u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
                    u64 *aer, u64 *aerr)
{
        unsigned long ret;
        u64 *rblock;
        u64 type = 0;

        rblock = (void *)get_zeroed_page(GFP_KERNEL);
        if (!rblock) {
                pr_err("Cannot allocate rblock memory\n");
                goto out;
        }

        ret = ehea_h_error_data(adapter->handle, res_handle, rblock);

        if (ret == H_SUCCESS) {
                type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
                *aer = rblock[6];
                *aerr = rblock[12];
                print_error_data(rblock);
        } else if (ret == H_R_STATE) {
                pr_err("No error data available: %llX\n", res_handle);
        } else {
                pr_err("Error data could not be fetched: %llX\n", res_handle);
        }

        free_page((unsigned long)rblock);
out:
        return type;
}
1032