linux/arch/s390/pci/pci_clp.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
#include <asm/clp.h>
#include <uapi/asm/clp.h>

bool zpci_unique_uid;

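/*
 * Cache the UID checking state reported by the most recent CLP List PCI
 * response in zpci_unique_uid and log any change to the zpci debug feature.
 */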
void update_uid_checking(bool new)
{
        if (zpci_unique_uid != new)
                zpci_dbg(1, "uid checking:%d\n", new);

        zpci_unique_uid = new;
}

static inline void zpci_err_clp(unsigned int rsp, int rc)
{
        struct {
                unsigned int rsp;
                int rc;
        } __packed data = {rsp, rc};

        zpci_err_hex(&data, sizeof(data));
}

/*
 * Call Logical Processor with c=1, lps=0 and command 1
 * to get the bit mask of installed logical processors
 */
static inline int clp_get_ilp(unsigned long *ilp)
{
        unsigned long mask;
        int cc = 3;

        asm volatile (
                "       .insn   rrf,0xb9a00000,%[mask],%[cmd],8,0\n"
                "0:     ipm     %[cc]\n"
                "       srl     %[cc],28\n"
                "1:\n"
                EX_TABLE(0b, 1b)
                : [cc] "+d" (cc), [mask] "=d" (mask) : [cmd] "a" (1)
                : "cc");
        *ilp = mask;
        return cc;
}
/*
 * Call Logical Processor with c=0, the given constant lps and an lpcb request.
 */
static inline int clp_req(void *data, unsigned int lps)
{
        struct { u8 _[CLP_BLK_SIZE]; } *req = data;
        u64 ignored;
        int cc = 3;

        asm volatile (
                "       .insn   rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n"
                "0:     ipm     %[cc]\n"
                "       srl     %[cc],28\n"
                "1:\n"
                EX_TABLE(0b, 1b)
                : [cc] "+d" (cc), [ign] "=d" (ignored), "+m" (*req)
                : [req] "a" (req), [lps] "i" (lps)
                : "cc");
        return cc;
}

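/*
 * CLP request/response blocks are CLP_BLK_SIZE bytes, so back them with
 * full pages from the page allocator.
 */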
static void *clp_alloc_block(gfp_t gfp_mask)
{
        return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
}

static void clp_free_block(void *ptr)
{
        free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}

static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
                                      struct clp_rsp_query_pci_grp *response)
{
        zdev->tlb_refresh = response->refresh;
        zdev->dma_mask = response->dasm;
        zdev->msi_addr = response->msia;
        zdev->max_msi = response->noi;
        zdev->fmb_update = response->mui;
        zdev->version = response->version;

        switch (response->version) {
        case 1:
                zdev->max_bus_speed = PCIE_SPEED_5_0GT;
                break;
        default:
                zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
                break;
        }
}

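/**
 * clp_query_pci_fngrp() - Query the function group of a zPCI function
 * @zdev: the zPCI function whose group is queried
 * @pfgid: the PCI function group ID to query
 *
 * Issues a CLP Query PCI Function Group request and stores the returned
 * group attributes in @zdev via clp_store_query_pci_fngrp().
 *
 * Return: 0 on success, -ENOMEM if no request block could be allocated,
 * -EIO if the CLP request failed.
 */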
static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
        struct clp_req_rsp_query_pci_grp *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        memset(rrb, 0, sizeof(*rrb));
        rrb->request.hdr.len = sizeof(rrb->request);
        rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
        rrb->response.hdr.len = sizeof(rrb->response);
        rrb->request.pfgid = pfgid;

        rc = clp_req(rrb, CLP_LPS_PCI);
        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
                clp_store_query_pci_fngrp(zdev, &rrb->response);
        else {
                zpci_err("Q PCI FGRP:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
        clp_free_block(rrb);
        return rc;
}

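/* Copy the relevant parts of a CLP Query PCI response into the zpci_dev */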
static int clp_store_query_pci_fn(struct zpci_dev *zdev,
                                  struct clp_rsp_query_pci *response)
{
        int i;

        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                zdev->bars[i].val = le32_to_cpu(response->bar[i]);
                zdev->bars[i].size = response->bar_size[i];
        }
        zdev->start_dma = response->sdma;
        zdev->end_dma = response->edma;
        zdev->pchid = response->pchid;
        zdev->pfgid = response->pfgid;
        zdev->pft = response->pft;
        zdev->vfn = response->vfn;
        zdev->port = response->port;
        zdev->uid = response->uid;
        zdev->fmb_length = sizeof(u32) * response->fmb_len;
        zdev->rid_available = response->rid_avail;
        zdev->is_physfn = response->is_physfn;
        if (!s390_pci_no_rid && zdev->rid_available)
                zdev->devfn = response->rid & ZPCI_RID_MASK_DEVFN;

        memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
        if (response->util_str_avail) {
                memcpy(zdev->util_str, response->util_str,
                       sizeof(zdev->util_str));
                zdev->util_str_avail = 1;
        }
        zdev->mio_capable = response->mio_addr_avail;
        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                if (!(response->mio.valid & (1 << (PCI_STD_NUM_BARS - i - 1))))
                        continue;

                zdev->bars[i].mio_wb = (void __iomem *) response->mio.addr[i].wb;
                zdev->bars[i].mio_wt = (void __iomem *) response->mio.addr[i].wt;
        }
        return 0;
}

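/**
 * clp_query_pci_fn() - Query a zPCI function and its function group
 * @zdev: the zPCI function to update
 * @fh: the function handle to query
 *
 * Issues a CLP Query PCI Function request for @fh, stores the result in
 * @zdev and then also queries the attributes of the function's group.
 *
 * Return: 0 on success, -ENOMEM if no request block could be allocated,
 * -EIO if a CLP request failed.
 */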
static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
{
        struct clp_req_rsp_query_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        memset(rrb, 0, sizeof(*rrb));
        rrb->request.hdr.len = sizeof(rrb->request);
        rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
        rrb->response.hdr.len = sizeof(rrb->response);
        rrb->request.fh = fh;

        rc = clp_req(rrb, CLP_LPS_PCI);
        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
                rc = clp_store_query_pci_fn(zdev, &rrb->response);
                if (rc)
                        goto out;
                rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
        } else {
                zpci_err("Q PCI FN:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
out:
        clp_free_block(rrb);
        return rc;
}

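/**
 * clp_add_pci_device() - Create and register a new zPCI function
 * @fid: the function ID of the new function
 * @fh: the current function handle of the new function
 * @configured: whether the function is already configured
 *
 * Allocates a struct zpci_dev, fills it by querying the function and its
 * group, sets the initial state and registers the device.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, otherwise a negative
 * error code from the query or from zpci_create_device().
 */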
int clp_add_pci_device(u32 fid, u32 fh, int configured)
{
        struct zpci_dev *zdev;
        int rc = -ENOMEM;

        zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
        zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
        if (!zdev)
                goto error;

        zdev->fh = fh;
        zdev->fid = fid;

        /* Query function properties and update zdev */
        rc = clp_query_pci_fn(zdev, fh);
        if (rc)
                goto error;

        if (configured)
                zdev->state = ZPCI_FN_STATE_CONFIGURED;
        else
                zdev->state = ZPCI_FN_STATE_STANDBY;

        rc = zpci_create_device(zdev);
        if (rc)
                goto error;
        return 0;

error:
        zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
        kfree(zdev);
        return rc;
}

/*
 * Enable/Disable a given PCI function and update its function handle if
 * necessary
 */
static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
{
        struct clp_req_rsp_set_pci *rrb;
        int rc, retries = 100;
        u32 fid = zdev->fid;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        do {
                memset(rrb, 0, sizeof(*rrb));
                rrb->request.hdr.len = sizeof(rrb->request);
                rrb->request.hdr.cmd = CLP_SET_PCI_FN;
                rrb->response.hdr.len = sizeof(rrb->response);
                rrb->request.fh = zdev->fh;
                rrb->request.oc = command;
                rrb->request.ndas = nr_dma_as;

                rc = clp_req(rrb, CLP_LPS_PCI);
                if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
                        retries--;
                        if (retries < 0)
                                break;
                        msleep(20);
                }
        } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

        if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
                zpci_err("Set PCI FN:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
        }

        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
                zdev->fh = rrb->response.fh;
        } else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY &&
                        rrb->response.fh == 0) {
                /* Function is already in desired state - update handle */
                rc = clp_rescan_pci_devices_simple(&fid);
        }
        clp_free_block(rrb);
        return rc;
}

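/**
 * clp_enable_fh() - Enable a zPCI function
 * @zdev: the zPCI function to enable
 * @nr_dma_as: the number of DMA address spaces to request
 *
 * Enables the function and, if MIO instructions are to be used for it,
 * additionally enables MIO. If enabling MIO fails the function is disabled
 * again.
 *
 * Return: 0 on success, a negative error code otherwise.
 */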
int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
{
        int rc;

        rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
        zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
        if (rc)
                goto out;

        if (zpci_use_mio(zdev)) {
                rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_MIO);
                zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
                                zdev->fid, zdev->fh, rc);
                if (rc)
                        clp_disable_fh(zdev);
        }
out:
        return rc;
}

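/**
 * clp_disable_fh() - Disable a zPCI function
 * @zdev: the zPCI function to disable
 *
 * Does nothing if the function is not currently enabled.
 *
 * Return: 0 on success or if the function was already disabled, a negative
 * error code otherwise.
 */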
int clp_disable_fh(struct zpci_dev *zdev)
{
        u32 fh = zdev->fh;
        int rc;

        if (!zdev_enabled(zdev))
                return 0;

        rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN);
        zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc);
        return rc;
}

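/**
 * clp_list_pci() - Iterate over all PCI functions known to the platform
 * @rrb: the request/response block to use
 * @data: opaque pointer passed to @cb
 * @cb: callback invoked for each function handle list entry
 *
 * Repeatedly issues CLP List PCI requests, following the resume token until
 * the complete list has been processed, and calls @cb for every entry.
 *
 * Return: 0 on success, -EIO if a CLP request failed.
 */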
static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, void *data,
                        void (*cb)(struct clp_fh_list_entry *, void *))
{
        u64 resume_token = 0;
        int entries, i, rc;

        do {
                memset(rrb, 0, sizeof(*rrb));
                rrb->request.hdr.len = sizeof(rrb->request);
                rrb->request.hdr.cmd = CLP_LIST_PCI;
                /* store as many entries as possible */
                rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
                rrb->request.resume_token = resume_token;

                /* Get PCI function handle list */
                rc = clp_req(rrb, CLP_LPS_PCI);
                if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
                        zpci_err("List PCI FN:\n");
                        zpci_err_clp(rrb->response.hdr.rsp, rc);
                        rc = -EIO;
                        goto out;
                }

                update_uid_checking(rrb->response.uid_checking);
                WARN_ON_ONCE(rrb->response.entry_size !=
                        sizeof(struct clp_fh_list_entry));

                entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
                        rrb->response.entry_size;

                resume_token = rrb->response.resume_token;
                for (i = 0; i < entries; i++)
                        cb(&rrb->response.fh_list[i], data);
        } while (resume_token);
out:
        return rc;
}

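/* clp_list_pci() callback: add a function that is not yet known to zPCI */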
static void __clp_add(struct clp_fh_list_entry *entry, void *data)
{
        struct zpci_dev *zdev;

        if (!entry->vendor_id)
                return;

        zdev = get_zdev_by_fid(entry->fid);
        if (!zdev)
                clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
}

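/*
 * clp_list_pci() callback: refresh the function handle of a known function.
 * If @data points to a fid, only the matching function is updated.
 */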
static void __clp_update(struct clp_fh_list_entry *entry, void *data)
{
        struct zpci_dev *zdev;
        u32 *fid = data;

        if (!entry->vendor_id)
                return;

        if (fid && *fid != entry->fid)
                return;

        zdev = get_zdev_by_fid(entry->fid);
        if (!zdev)
                return;

        zdev->fh = entry->fh;
}

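/* Scan the CLP PCI function list and add any functions not yet known */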
int clp_scan_pci_devices(void)
{
        struct clp_req_rsp_list_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        rc = clp_list_pci(rrb, NULL, __clp_add);

        clp_free_block(rrb);
        return rc;
}

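/*
 * Rescan for PCI functions: drop zPCI devices in the reserved state, then
 * add any functions reported by CLP List PCI that are not yet known.
 */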
int clp_rescan_pci_devices(void)
{
        struct clp_req_rsp_list_pci *rrb;
        int rc;

        zpci_remove_reserved_devices();

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        rc = clp_list_pci(rrb, NULL, __clp_add);

        clp_free_block(rrb);
        return rc;
}

/* Rescan PCI functions and refresh function handles. If fid is non-NULL only
 * refresh the handle of the function matching @fid
 */
int clp_rescan_pci_devices_simple(u32 *fid)
{
        struct clp_req_rsp_list_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_NOWAIT);
        if (!rrb)
                return -ENOMEM;

        rc = clp_list_pci(rrb, fid, __clp_update);

        clp_free_block(rrb);
        return rc;
}

struct clp_state_data {
        u32 fid;
        enum zpci_state state;
};

static void __clp_get_state(struct clp_fh_list_entry *entry, void *data)
{
        struct clp_state_data *sd = data;

        if (entry->fid != sd->fid)
                return;

        sd->state = entry->config_state;
}

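/**
 * clp_get_state() - Get the configuration state of a PCI function
 * @fid: the function ID to look up
 * @state: filled with the function's configuration state
 *
 * Walks the CLP function handle list; if @fid is not found, @state is left
 * at ZPCI_FN_STATE_RESERVED.
 *
 * Return: 0 on success, -ENOMEM or -EIO on failure.
 */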
int clp_get_state(u32 fid, enum zpci_state *state)
{
        struct clp_req_rsp_list_pci *rrb;
        struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        rc = clp_list_pci(rrb, &sd, __clp_get_state);
        if (!rc)
                *state = sd.state;

        clp_free_block(rrb);
        return rc;
}

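/*
 * The helpers below back the /dev/clp ioctl interface. They validate the
 * request and response header lengths (and reserved fields) of a user
 * supplied command block before forwarding it via clp_req().
 */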
static int clp_base_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
{
        unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

        if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
            lpcb->response.hdr.len > limit)
                return -EINVAL;
        return clp_req(lpcb, CLP_LPS_BASE) ? -EOPNOTSUPP : 0;
}

static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
        switch (lpcb->cmd) {
        case 0x0001: /* store logical-processor characteristics */
                return clp_base_slpc(req, (void *) lpcb);
        default:
                return -EINVAL;
        }
}

static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
{
        unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

        if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
            lpcb->response.hdr.len > limit)
                return -EINVAL;
        return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_list(struct clp_req *req, struct clp_req_rsp_list_pci *lpcb)
{
        unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

        if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
            lpcb->response.hdr.len > limit)
                return -EINVAL;
        if (lpcb->request.reserved2 != 0)
                return -EINVAL;
        return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_query(struct clp_req *req,
                         struct clp_req_rsp_query_pci *lpcb)
{
        unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

        if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
            lpcb->response.hdr.len > limit)
                return -EINVAL;
        if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0)
                return -EINVAL;
        return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_query_grp(struct clp_req *req,
                             struct clp_req_rsp_query_pci_grp *lpcb)
{
        unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

        if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
            lpcb->response.hdr.len > limit)
                return -EINVAL;
        if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0 ||
            lpcb->request.reserved4 != 0)
                return -EINVAL;
        return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
        switch (lpcb->cmd) {
        case 0x0001: /* store logical-processor characteristics */
                return clp_pci_slpc(req, (void *) lpcb);
        case 0x0002: /* list PCI functions */
                return clp_pci_list(req, (void *) lpcb);
        case 0x0003: /* query PCI function */
                return clp_pci_query(req, (void *) lpcb);
        case 0x0004: /* query PCI function group */
                return clp_pci_query_grp(req, (void *) lpcb);
        default:
                return -EINVAL;
        }
}

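/*
 * Handle a synchronous CLP request from user space: copy in one page,
 * validate and execute the embedded command, and copy the result back.
 */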
static int clp_normal_command(struct clp_req *req)
{
        struct clp_req_hdr *lpcb;
        void __user *uptr;
        int rc;

        rc = -EINVAL;
        if (req->lps != 0 && req->lps != 2)
                goto out;

        rc = -ENOMEM;
        lpcb = clp_alloc_block(GFP_KERNEL);
        if (!lpcb)
                goto out;

        rc = -EFAULT;
        uptr = (void __force __user *)(unsigned long) req->data_p;
        if (copy_from_user(lpcb, uptr, PAGE_SIZE) != 0)
                goto out_free;

        rc = -EINVAL;
        if (lpcb->fmt != 0 || lpcb->reserved1 != 0 || lpcb->reserved2 != 0)
                goto out_free;

        switch (req->lps) {
        case 0:
                rc = clp_base_command(req, lpcb);
                break;
        case 2:
                rc = clp_pci_command(req, lpcb);
                break;
        }
        if (rc)
                goto out_free;

        rc = -EFAULT;
        if (copy_to_user(uptr, lpcb, PAGE_SIZE) != 0)
                goto out_free;

        rc = 0;

out_free:
        clp_free_block(lpcb);
out:
        return rc;
}

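/*
 * Handle an immediate CLP request from user space: report either whether a
 * specific logical processor is installed or the full installed-processor
 * bit mask.
 */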
static int clp_immediate_command(struct clp_req *req)
{
        void __user *uptr;
        unsigned long ilp;
        int exists;

        if (req->cmd > 1 || clp_get_ilp(&ilp) != 0)
                return -EINVAL;

        uptr = (void __force __user *)(unsigned long) req->data_p;
        if (req->cmd == 0) {
                /* Command code 0: test for a specific processor */
                exists = test_bit_inv(req->lps, &ilp);
                return put_user(exists, (int __user *) uptr);
        }
        /* Command code 1: return bit mask of installed processors */
        return put_user(ilp, (unsigned long __user *) uptr);
}

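/* ioctl entry point of the /dev/clp misc device (CLP_SYNC only) */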
static long clp_misc_ioctl(struct file *filp, unsigned int cmd,
                           unsigned long arg)
{
        struct clp_req req;
        void __user *argp;

        if (cmd != CLP_SYNC)
                return -EINVAL;

        argp = is_compat_task() ? compat_ptr(arg) : (void __user *) arg;
        if (copy_from_user(&req, argp, sizeof(req)))
                return -EFAULT;
        if (req.r != 0)
                return -EINVAL;
        return req.c ? clp_immediate_command(&req) : clp_normal_command(&req);
}

static int clp_misc_release(struct inode *inode, struct file *filp)
{
        return 0;
}

static const struct file_operations clp_misc_fops = {
        .owner = THIS_MODULE,
        .open = nonseekable_open,
        .release = clp_misc_release,
        .unlocked_ioctl = clp_misc_ioctl,
        .compat_ioctl = clp_misc_ioctl,
        .llseek = no_llseek,
};

static struct miscdevice clp_misc_device = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "clp",
        .fops = &clp_misc_fops,
};

static int __init clp_misc_init(void)
{
        return misc_register(&clp_misc_device);
}

device_initcall(clp_misc_init);