linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"

static struct ehea_bmap *ehea_bmap;

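/*
 * Return the current queue page and advance the iterator by one HW
 * page.  Yields NULL when the end of the queue is reached or when the
 * entry is not aligned to an eHEA page boundary.
 */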
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
        void *retvalue = hw_qeit_get(queue);

        queue->current_q_offset += queue->pagesize;
        if (queue->current_q_offset > queue->queue_length) {
                queue->current_q_offset -= queue->pagesize;
                retvalue = NULL;
        } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
                pr_err("not on page boundary\n");
                retvalue = NULL;
        }
        return retvalue;
}

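/*
 * Build the backing store for a hardware queue: nr_of_pages queue
 * pages of the given pagesize, carved out of zeroed kernel pages.
 */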
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
                         const u32 pagesize, const u32 qe_size)
{
        int pages_per_kpage = PAGE_SIZE / pagesize;
        int i, k;

        if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
                pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
                       (int)PAGE_SIZE, (int)pagesize);
                return -EINVAL;
        }

        queue->queue_length = nr_of_pages * pagesize;
        /* zeroed so the error path below can spot unallocated slots */
        queue->queue_pages = kcalloc(nr_of_pages, sizeof(void *), GFP_KERNEL);
        if (!queue->queue_pages)
                return -ENOMEM;

        /*
         * allocate pages for queue:
         * outer loop allocates whole kernel pages (page aligned) and
         * inner loop divides a kernel page into smaller hea queue pages
         */
        i = 0;
        while (i < nr_of_pages) {
                u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
                if (!kpage)
                        goto out_nomem;
                for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
                        (queue->queue_pages)[i] = (struct ehea_page *)kpage;
                        kpage += pagesize;
                        i++;
                }
        }

        queue->current_q_offset = 0;
        queue->qe_size = qe_size;
        queue->pagesize = pagesize;
        queue->toggle_state = 1;

        return 0;
out_nomem:
        for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
                if (!(queue->queue_pages)[i])
                        break;
                free_page((unsigned long)(queue->queue_pages)[i]);
        }
        return -ENOMEM;
}

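/* Free the kernel pages backing a hardware queue */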
static void hw_queue_dtor(struct hw_queue *queue)
{
        int pages_per_kpage;
        int i, nr_pages;

        if (!queue || !queue->queue_pages)
                return;

        pages_per_kpage = PAGE_SIZE / queue->pagesize;

        nr_pages = queue->queue_length / queue->pagesize;

        for (i = 0; i < nr_pages; i += pages_per_kpage)
                free_page((unsigned long)(queue->queue_pages)[i]);

        kfree(queue->queue_pages);
}

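/*
 * Create a completion queue: allocate the firmware resource, build the
 * backing hw_queue and register every queue page with the hypervisor.
 * Each intermediate page must return H_PAGE_REGISTERED; the last page
 * completes the sequence with H_SUCCESS and must exhaust the iterator.
 */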
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
                               int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
        struct ehea_cq *cq;
        u64 hret, rpage;
        u32 counter;
        int ret;
        void *vpage;

        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq)
                goto out_nomem;

        cq->attr.max_nr_of_cqes = nr_of_cqe;
        cq->attr.cq_token = cq_token;
        cq->attr.eq_handle = eq_handle;

        cq->adapter = adapter;

        hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
                                        &cq->fw_handle, &cq->epas);
        if (hret != H_SUCCESS) {
                pr_err("alloc_resource_cq failed\n");
                goto out_freemem;
        }

        ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
                            EHEA_PAGESIZE, sizeof(struct ehea_cqe));
        if (ret)
                goto out_freeres;

        for (counter = 0; counter < cq->attr.nr_pages; counter++) {
                vpage = hw_qpageit_get_inc(&cq->hw_queue);
                if (!vpage) {
                        pr_err("hw_qpageit_get_inc failed\n");
                        goto out_kill_hwq;
                }

                rpage = __pa(vpage);
                hret = ehea_h_register_rpage(adapter->handle,
                                             0, EHEA_CQ_REGISTER_ORIG,
                                             cq->fw_handle, rpage, 1);
                if (hret < H_SUCCESS) {
                        pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
                               cq, hret, counter, cq->attr.nr_pages);
                        goto out_kill_hwq;
                }

                if (counter == (cq->attr.nr_pages - 1)) {
                        /* last page: the queue iterator must be exhausted */
                        vpage = hw_qpageit_get_inc(&cq->hw_queue);

                        if ((hret != H_SUCCESS) || (vpage)) {
                                pr_err("registration of pages not complete hret=%llx\n",
                                       hret);
                                goto out_kill_hwq;
                        }
                } else {
                        if (hret != H_PAGE_REGISTERED) {
                                pr_err("CQ: registration of page failed hret=%llx\n",
                                       hret);
                                goto out_kill_hwq;
                        }
                }
        }

        hw_qeit_reset(&cq->hw_queue);
        ehea_reset_cq_ep(cq);
        ehea_reset_cq_n1(cq);

        return cq;

out_kill_hwq:
        hw_queue_dtor(&cq->hw_queue);

out_freeres:
        ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
        kfree(cq);

out_nomem:
        return NULL;
}

static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
        u64 hret;
        u64 adapter_handle = cq->adapter->handle;

        /* deregister all previously registered pages */
        hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&cq->hw_queue);
        kfree(cq);

        return hret;
}

int ehea_destroy_cq(struct ehea_cq *cq)
{
        u64 hret, aer, aerr;

        if (!cq)
                return 0;

        hcp_epas_dtor(&cq->epas);
        hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
        if (hret == H_R_STATE) {
                ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
                hret = ehea_destroy_cq_res(cq, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                pr_err("destroy CQ failed\n");
                return -EIO;
        }

        return 0;
}

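/*
 * Create an event queue and register its pages with the hypervisor;
 * the registration protocol mirrors ehea_create_cq().
 */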
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
                               const enum ehea_eq_type type,
                               const u32 max_nr_of_eqes, const u8 eqe_gen)
{
        int ret, i;
        u64 hret, rpage;
        void *vpage;
        struct ehea_eq *eq;

        eq = kzalloc(sizeof(*eq), GFP_KERNEL);
        if (!eq)
                return NULL;

        eq->adapter = adapter;
        eq->attr.type = type;
        eq->attr.max_nr_of_eqes = max_nr_of_eqes;
        eq->attr.eqe_gen = eqe_gen;
        spin_lock_init(&eq->spinlock);

        hret = ehea_h_alloc_resource_eq(adapter->handle,
                                        &eq->attr, &eq->fw_handle);
        if (hret != H_SUCCESS) {
                pr_err("alloc_resource_eq failed\n");
                goto out_freemem;
        }

        ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
                            EHEA_PAGESIZE, sizeof(struct ehea_eqe));
        if (ret) {
                pr_err("can't allocate eq pages\n");
                goto out_freeres;
        }

        for (i = 0; i < eq->attr.nr_pages; i++) {
                vpage = hw_qpageit_get_inc(&eq->hw_queue);
                if (!vpage) {
                        pr_err("hw_qpageit_get_inc failed\n");
                        goto out_kill_hwq;
                }

                rpage = __pa(vpage);

                hret = ehea_h_register_rpage(adapter->handle, 0,
                                             EHEA_EQ_REGISTER_ORIG,
                                             eq->fw_handle, rpage, 1);

                if (i == (eq->attr.nr_pages - 1)) {
                        /* last page */
                        vpage = hw_qpageit_get_inc(&eq->hw_queue);
                        if ((hret != H_SUCCESS) || (vpage))
                                goto out_kill_hwq;
                } else {
                        if (hret != H_PAGE_REGISTERED)
                                goto out_kill_hwq;
                }
        }

        hw_qeit_reset(&eq->hw_queue);
        return eq;

out_kill_hwq:
        hw_queue_dtor(&eq->hw_queue);

out_freeres:
        ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
        kfree(eq);
        return NULL;
}

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
        struct ehea_eqe *eqe;
        unsigned long flags;

        spin_lock_irqsave(&eq->spinlock, flags);
        eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
        spin_unlock_irqrestore(&eq->spinlock, flags);

        return eqe;
}

static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
        u64 hret;
        unsigned long flags;

        spin_lock_irqsave(&eq->spinlock, flags);

        hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
        spin_unlock_irqrestore(&eq->spinlock, flags);

        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&eq->hw_queue);
        kfree(eq);

        return hret;
}

int ehea_destroy_eq(struct ehea_eq *eq)
{
        u64 hret, aer, aerr;

        if (!eq)
                return 0;

        hcp_epas_dtor(&eq->epas);

        hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
        if (hret == H_R_STATE) {
                ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
                hret = ehea_destroy_eq_res(eq, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                pr_err("destroy EQ failed\n");
                return -EIO;
        }

        return 0;
}

/* allocates memory for a queue and registers pages in phyp */
static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
                                  int nr_pages, int wqe_size, int act_nr_sges,
                                  struct ehea_adapter *adapter,
                                  int h_call_q_selector)
{
        u64 hret, rpage;
        int ret, cnt;
        void *vpage;

        ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
        if (ret)
                return ret;

        for (cnt = 0; cnt < nr_pages; cnt++) {
                vpage = hw_qpageit_get_inc(hw_queue);
                if (!vpage) {
                        pr_err("hw_qpageit_get_inc failed\n");
                        goto out_kill_hwq;
                }
                rpage = __pa(vpage);
                hret = ehea_h_register_rpage(adapter->handle,
                                             0, h_call_q_selector,
                                             qp->fw_handle, rpage, 1);
                if (hret < H_SUCCESS) {
                        pr_err("register_rpage_qp failed\n");
                        goto out_kill_hwq;
                }
        }
        hw_qeit_reset(hw_queue);
        return 0;

out_kill_hwq:
        hw_queue_dtor(hw_queue);
        return -EIO;
}

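/* WQE sizes are encoded as a power-of-two multiple of 128 bytes */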
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
        return 128 << wqe_enc_size;
}

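/*
 * Create a queue pair: one send queue plus up to three receive queues,
 * each backed by its own hw_queue registered via ehea_qp_alloc_register().
 */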
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
                               u32 pd, struct ehea_qp_init_attr *init_attr)
{
        int ret;
        u64 hret;
        struct ehea_qp *qp;
        u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
        u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp)
                return NULL;

        qp->adapter = adapter;

        hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
                                        &qp->fw_handle, &qp->epas);
        if (hret != H_SUCCESS) {
                pr_err("ehea_h_alloc_resource_qp failed\n");
                goto out_freemem;
        }

        wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
        wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
        wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
        wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

        ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
                                     wqe_size_in_bytes_sq,
                                     init_attr->act_wqe_size_enc_sq, adapter,
                                     0);
        if (ret) {
                pr_err("can't register for sq ret=%x\n", ret);
                goto out_freeres;
        }

        ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
                                     init_attr->nr_rq1_pages,
                                     wqe_size_in_bytes_rq1,
                                     init_attr->act_wqe_size_enc_rq1,
                                     adapter, 1);
        if (ret) {
                pr_err("can't register for rq1 ret=%x\n", ret);
                goto out_kill_hwsq;
        }

        if (init_attr->rq_count > 1) {
                ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
                                             init_attr->nr_rq2_pages,
                                             wqe_size_in_bytes_rq2,
                                             init_attr->act_wqe_size_enc_rq2,
                                             adapter, 2);
                if (ret) {
                        pr_err("can't register for rq2 ret=%x\n", ret);
                        goto out_kill_hwr1q;
                }
        }

        if (init_attr->rq_count > 2) {
                ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
                                             init_attr->nr_rq3_pages,
                                             wqe_size_in_bytes_rq3,
                                             init_attr->act_wqe_size_enc_rq3,
                                             adapter, 3);
                if (ret) {
                        pr_err("can't register for rq3 ret=%x\n", ret);
                        goto out_kill_hwr2q;
                }
        }

        qp->init_attr = *init_attr;

        return qp;

out_kill_hwr2q:
        hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
        hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
        hw_queue_dtor(&qp->hw_squeue);

out_freeres:
        ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
        ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);

out_freemem:
        kfree(qp);
        return NULL;
}

static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
        u64 hret;
        struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

        ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
        hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&qp->hw_squeue);
        hw_queue_dtor(&qp->hw_rqueue1);

        if (qp_attr->rq_count > 1)
                hw_queue_dtor(&qp->hw_rqueue2);
        if (qp_attr->rq_count > 2)
                hw_queue_dtor(&qp->hw_rqueue3);
        kfree(qp);

        return hret;
}

int ehea_destroy_qp(struct ehea_qp *qp)
{
        u64 hret, aer, aerr;

        if (!qp)
                return 0;

        hcp_epas_dtor(&qp->epas);

        hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
        if (hret == H_R_STATE) {
                ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
                hret = ehea_destroy_qp_res(qp, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                pr_err("destroy QP failed\n");
                return -EIO;
        }

        return 0;
}

static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
        return (i >> s) & EHEA_INDEX_MASK;
}

static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
                                     int dir)
{
        if (!ehea_top_bmap->dir[dir]) {
                ehea_top_bmap->dir[dir] =
                        kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
                if (!ehea_top_bmap->dir[dir])
                        return -ENOMEM;
        }
        return 0;
}

static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
        if (!ehea_bmap->top[top]) {
                ehea_bmap->top[top] =
                        kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
                if (!ehea_bmap->top[top])
                        return -ENOMEM;
        }
        return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}

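/* Serializes updates to ehea_bmap and ehea_mr_len */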
static DEFINE_MUTEX(ehea_busmap_mutex);
static unsigned long ehea_mr_len;

#define EHEA_BUSMAP_ADD_SECT 1
#define EHEA_BUSMAP_REM_SECT 0

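/*
 * Walk the three-level busmap and assign a contiguous bus address,
 * starting at EHEA_BUSMAP_START, to every valid section; directories
 * and top-level entries left without valid sections are freed.
 */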
static void ehea_rebuild_busmap(void)
{
        u64 vaddr = EHEA_BUSMAP_START;
        int top, dir, idx;

        for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
                struct ehea_top_bmap *ehea_top;
                int valid_dir_entries = 0;

                if (!ehea_bmap->top[top])
                        continue;
                ehea_top = ehea_bmap->top[top];
                for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
                        struct ehea_dir_bmap *ehea_dir;
                        int valid_entries = 0;

                        if (!ehea_top->dir[dir])
                                continue;
                        valid_dir_entries++;
                        ehea_dir = ehea_top->dir[dir];
                        for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
                                if (!ehea_dir->ent[idx])
                                        continue;
                                valid_entries++;
                                ehea_dir->ent[idx] = vaddr;
                                vaddr += EHEA_SECTSIZE;
                        }
                        if (!valid_entries) {
                                ehea_top->dir[dir] = NULL;
                                kfree(ehea_dir);
                        }
                }
                if (!valid_dir_entries) {
                        ehea_bmap->top[top] = NULL;
                        kfree(ehea_top);
                }
        }
}

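/*
 * Mark the sections covering pfn..pfn+nr_pages as valid or invalid in
 * the busmap and rebuild the bus address layout.  All callers hold
 * ehea_busmap_mutex.
 */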
static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
{
        unsigned long i, start_section, end_section;

        if (!nr_pages)
                return 0;

        if (!ehea_bmap) {
                ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
                if (!ehea_bmap)
                        return -ENOMEM;
        }

        start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
        end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
        /* Mark entries as valid or invalid only; address is assigned later */
        for (i = start_section; i < end_section; i++) {
                u64 flag;
                int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
                int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
                int idx = i & EHEA_INDEX_MASK;

                if (add) {
                        int ret = ehea_init_bmap(ehea_bmap, top, dir);

                        if (ret)
                                return ret;
                        flag = 1; /* valid */
                        ehea_mr_len += EHEA_SECTSIZE;
                } else {
                        if (!ehea_bmap->top[top])
                                continue;
                        if (!ehea_bmap->top[top]->dir[dir])
                                continue;
                        flag = 0; /* invalid */
                        ehea_mr_len -= EHEA_SECTSIZE;
                }

                ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
        }
        ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
        return 0;
}

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
        int ret;

        mutex_lock(&ehea_busmap_mutex);
        ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
        mutex_unlock(&ehea_busmap_mutex);
        return ret;
}

int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
        int ret;

        mutex_lock(&ehea_busmap_mutex);
        ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
        mutex_unlock(&ehea_busmap_mutex);
        return ret;
}

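/*
 * A pfn starts a 16GB hugepage if it is hugepage aligned and heads a
 * compound page of the matching order.
 */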
static int ehea_is_hugepage(unsigned long pfn)
{
        int page_order;

        if (pfn & EHEA_HUGEPAGE_PFN_MASK)
                return 0;

        page_order = compound_order(pfn_to_page(pfn));
        if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
                return 0;

        return 1;
}

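/*
 * walk_system_ram_range() callback: add a chunk of system RAM to the
 * busmap, section by section, skipping any 16GB hugepages it contains.
 */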
static int ehea_create_busmap_callback(unsigned long initial_pfn,
                                       unsigned long total_nr_pages, void *arg)
{
        int ret;
        unsigned long pfn, start_pfn, end_pfn, nr_pages;

        if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
                return ehea_update_busmap(initial_pfn, total_nr_pages,
                                          EHEA_BUSMAP_ADD_SECT);

        /* Given chunk is >= 16GB -> check for hugepages */
        start_pfn = initial_pfn;
        end_pfn = initial_pfn + total_nr_pages;
        pfn = start_pfn;

        while (pfn < end_pfn) {
                if (ehea_is_hugepage(pfn)) {
                        /* Add mem found in front of the hugepage */
                        nr_pages = pfn - start_pfn;
                        ret = ehea_update_busmap(start_pfn, nr_pages,
                                                 EHEA_BUSMAP_ADD_SECT);
                        if (ret)
                                return ret;

                        /* Skip the hugepage */
                        pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
                        start_pfn = pfn;
                } else {
                        pfn += (EHEA_SECTSIZE / PAGE_SIZE);
                }
        }

        /* Add mem found behind the hugepage(s) */
        nr_pages = pfn - start_pfn;
        return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
}

int ehea_create_busmap(void)
{
        int ret;

        mutex_lock(&ehea_busmap_mutex);
        ehea_mr_len = 0;
        ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
                                    ehea_create_busmap_callback);
        mutex_unlock(&ehea_busmap_mutex);
        return ret;
}

void ehea_destroy_busmap(void)
{
        int top, dir;

        mutex_lock(&ehea_busmap_mutex);
        if (!ehea_bmap)
                goto out_destroy;

        for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
                if (!ehea_bmap->top[top])
                        continue;

                for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
                        if (!ehea_bmap->top[top]->dir[dir])
                                continue;

                        kfree(ehea_bmap->top[top]->dir[dir]);
                }

                kfree(ehea_bmap->top[top]);
        }

        kfree(ehea_bmap);
        ehea_bmap = NULL;
out_destroy:
        mutex_unlock(&ehea_busmap_mutex);
}

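/*
 * Translate a kernel virtual address into the bus address that was
 * assigned to its section, or EHEA_INVAL_ADDR if the section is not
 * part of the busmap.
 */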
u64 ehea_map_vaddr(void *caddr)
{
        int top, dir, idx;
        unsigned long index, offset;

        if (!ehea_bmap)
                return EHEA_INVAL_ADDR;

        index = __pa(caddr) >> SECTION_SIZE_BITS;
        top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
        if (!ehea_bmap->top[top])
                return EHEA_INVAL_ADDR;

        dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
        if (!ehea_bmap->top[top]->dir[dir])
                return EHEA_INVAL_ADDR;

        idx = index & EHEA_INDEX_MASK;
        if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
                return EHEA_INVAL_ADDR;

        offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
        return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
}

static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
        unsigned long ret = idx;

        ret |= dir << EHEA_DIR_INDEX_SHIFT;
        ret |= top << EHEA_TOP_INDEX_SHIFT;
        return __va(ret << SECTION_SIZE_BITS);
}

static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
                               struct ehea_adapter *adapter,
                               struct ehea_mr *mr)
{
        void *pg;
        u64 j, m, hret;
        unsigned long k = 0;
        u64 pt_abs = __pa(pt);

        void *sectbase = ehea_calc_sectbase(top, dir, idx);

        for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {
                for (m = 0; m < EHEA_MAX_RPAGE; m++) {
                        pg = sectbase + ((k++) * EHEA_PAGESIZE);
                        pt[m] = __pa(pg);
                }
                hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
                                                0, pt_abs, EHEA_MAX_RPAGE);

                if ((hret != H_SUCCESS) &&
                    (hret != H_PAGE_REGISTERED)) {
                        ehea_h_free_resource(adapter->handle, mr->handle,
                                             FORCE_FREE);
                        pr_err("register_rpage_mr failed\n");
                        return hret;
                }
        }
        return hret;
}

static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
                                struct ehea_adapter *adapter,
                                struct ehea_mr *mr)
{
        u64 hret = H_SUCCESS;
        int idx;

        for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
                if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
                        continue;

                hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
                if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
                        return hret;
        }
        return hret;
}

static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
                                    struct ehea_adapter *adapter,
                                    struct ehea_mr *mr)
{
        u64 hret = H_SUCCESS;
        int dir;

        for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
                if (!ehea_bmap->top[top]->dir[dir])
                        continue;

                hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
                if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
                        return hret;
        }
        return hret;
}

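/*
 * Register all valid busmap sections as a single memory region
 * spanning ehea_mr_len bytes from EHEA_BUSMAP_START.
 */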
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
        int ret;
        u64 *pt;
        u64 hret;
        u32 acc_ctrl = EHEA_MR_ACC_CTRL;
        unsigned long top;

        pt = (void *)get_zeroed_page(GFP_KERNEL);
        if (!pt) {
                pr_err("no mem\n");
                ret = -ENOMEM;
                goto out;
        }

        hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
                                        ehea_mr_len, acc_ctrl, adapter->pd,
                                        &mr->handle, &mr->lkey);

        if (hret != H_SUCCESS) {
                pr_err("alloc_resource_mr failed\n");
                ret = -EIO;
                goto out;
        }

        if (!ehea_bmap) {
                ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
                pr_err("no busmap available\n");
                ret = -EIO;
                goto out;
        }

        for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
                if (!ehea_bmap->top[top])
                        continue;

                hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
                if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
                        break;
        }

        if (hret != H_SUCCESS) {
                ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
                pr_err("registering mr failed\n");
                ret = -EIO;
                goto out;
        }

        mr->vaddr = EHEA_BUSMAP_START;
        mr->adapter = adapter;
        ret = 0;
out:
        free_page((unsigned long)pt);
        return ret;
}

int ehea_rem_mr(struct ehea_mr *mr)
{
        u64 hret;

        if (!mr || !mr->adapter)
                return -EINVAL;

        hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
                                    FORCE_FREE);
        if (hret != H_SUCCESS) {
                pr_err("destroy MR failed\n");
                return -EIO;
        }

        return 0;
}

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
                 struct ehea_mr *shared_mr)
{
        u64 hret;

        hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
                                   old_mr->vaddr, EHEA_MR_ACC_CTRL,
                                   adapter->pd, shared_mr);
        if (hret != H_SUCCESS)
                return -EIO;

        shared_mr->adapter = adapter;

        return 0;
}

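/* Decode and log the firmware error data block for a resource */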
static void print_error_data(u64 *data)
{
        int length;
        u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
        u64 resource = data[1];

        length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);

        if (length > EHEA_PAGESIZE)
                length = EHEA_PAGESIZE;

        if (type == EHEA_AER_RESTYPE_QP)
                pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
                       resource, data[6], data[12], data[22]);
        else if (type == EHEA_AER_RESTYPE_CQ)
                pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
                       resource, data[6]);
        else if (type == EHEA_AER_RESTYPE_EQ)
                pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
                       resource, data[6]);

        ehea_dump(data, length, "error data");
}

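/*
 * Fetch the error data block for a resource handle from firmware,
 * extract AER/AERR and dump the block.  Returns the resource type, or
 * 0 if no error data could be retrieved.
 */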
u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
                    u64 *aer, u64 *aerr)
{
        unsigned long ret;
        u64 *rblock;
        u64 type = 0;

        rblock = (void *)get_zeroed_page(GFP_KERNEL);
        if (!rblock) {
                pr_err("Cannot allocate rblock memory\n");
                goto out;
        }

        ret = ehea_h_error_data(adapter->handle, res_handle, rblock);

        if (ret == H_SUCCESS) {
                type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
                *aer = rblock[6];
                *aerr = rblock[12];
                print_error_data(rblock);
        } else if (ret == H_R_STATE) {
                pr_err("No error data available: %llX\n", res_handle);
        } else {
                pr_err("Error data could not be fetched: %llX\n", res_handle);
        }

        free_page((unsigned long)rblock);
out:
        return type;
}