linux/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Speed Select Interface: Common functions
 * Copyright (c) 2019, Intel Corporation.
 * All rights reserved.
 *
 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 */

#include <linux/cpufeature.h>
#include <linux/cpuhotplug.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <uapi/linux/isst_if.h>

#include "isst_if_common.h"

#define MSR_THREAD_ID_INFO      0x53
#define MSR_CPU_BUS_NUMBER      0x128

static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX];

static int punit_msr_white_list[] = {
        MSR_TURBO_RATIO_LIMIT,
        MSR_CONFIG_TDP_CONTROL,
        MSR_TURBO_RATIO_LIMIT1,
        MSR_TURBO_RATIO_LIMIT2,
};

struct isst_valid_cmd_ranges {
        u16 cmd;
        u16 sub_cmd_beg;
        u16 sub_cmd_end;
};

struct isst_cmd_set_req_type {
        u16 cmd;
        u16 sub_cmd;
        u16 param;
};

static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
        {0xD0, 0x00, 0x03},
        {0x7F, 0x00, 0x0B},
        {0x7F, 0x10, 0x12},
        {0x7F, 0x20, 0x23},
        {0x94, 0x03, 0x03},
        {0x95, 0x03, 0x03},
};

static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
        {0xD0, 0x00, 0x08},
        {0xD0, 0x01, 0x08},
        {0xD0, 0x02, 0x08},
        {0xD0, 0x03, 0x08},
        {0x7F, 0x02, 0x00},
        {0x7F, 0x08, 0x00},
        {0x95, 0x03, 0x03},
};

struct isst_cmd {
        struct hlist_node hnode;
        u64 data;
        u32 cmd;
        int cpu;
        int mbox_cmd_type;
        u32 param;
};

static DECLARE_HASHTABLE(isst_hash, 8);
static DEFINE_MUTEX(isst_hash_lock);

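/*
 * Allocate a new isst_cmd entry and add it to the hash table keyed by the
 * full command value. Caller must hold isst_hash_lock.
 */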
static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
                              u64 data)
{
        struct isst_cmd *sst_cmd;

        sst_cmd = kmalloc(sizeof(*sst_cmd), GFP_KERNEL);
        if (!sst_cmd)
                return -ENOMEM;

        sst_cmd->cpu = cpu;
        sst_cmd->cmd = cmd;
        sst_cmd->mbox_cmd_type = mbox_cmd_type;
        sst_cmd->param = param;
        sst_cmd->data = data;

        hash_add(isst_hash, &sst_cmd->hnode, sst_cmd->cmd);

        return 0;
}

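/* Free all stored commands and empty the hash table */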
static void isst_delete_hash(void)
{
        struct isst_cmd *sst_cmd;
        struct hlist_node *tmp;
        int i;

        hash_for_each_safe(isst_hash, i, tmp, sst_cmd, hnode) {
                hash_del(&sst_cmd->hnode);
                kfree(sst_cmd);
        }
}

/**
 * isst_store_cmd() - Store command to a hash table
 * @cmd: Mailbox command.
 * @sub_cmd: Mailbox sub-command or MSR id.
 * @cpu: Target logical CPU for the command.
 * @mbox_cmd_type: Mailbox or MSR command.
 * @param: Mailbox parameter.
 * @data: Mailbox request data or MSR data.
 *
 * Stores the command in a hash table if there is no such command already
 * stored. If the command is already stored, update its parameter and data.
 *
 * Return: 0 on successful store to the hash table, otherwise an error code.
 */
int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
                   u32 param, u64 data)
{
        struct isst_cmd *sst_cmd;
        int full_cmd, ret;

        full_cmd = (cmd & GENMASK_ULL(15, 0)) << 16;
        full_cmd |= (sub_cmd & GENMASK_ULL(15, 0));
        mutex_lock(&isst_hash_lock);
        hash_for_each_possible(isst_hash, sst_cmd, hnode, full_cmd) {
                if (sst_cmd->cmd == full_cmd && sst_cmd->cpu == cpu &&
                    sst_cmd->mbox_cmd_type == mbox_cmd_type) {
                        sst_cmd->param = param;
                        sst_cmd->data = data;
                        mutex_unlock(&isst_hash_lock);
                        return 0;
                }
        }

        ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);
        mutex_unlock(&isst_hash_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(isst_store_cmd);

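/*
 * Rebuild a mailbox command from a stored isst_cmd entry and replay it
 * through the registered mailbox callback (resume path, no re-storing).
 */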
static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
                                     struct isst_cmd *sst_cmd)
{
        struct isst_if_mbox_cmd mbox_cmd;
        int wr_only;

        mbox_cmd.command = (sst_cmd->cmd & GENMASK_ULL(31, 16)) >> 16;
        mbox_cmd.sub_command = sst_cmd->cmd & GENMASK_ULL(15, 0);
        mbox_cmd.parameter = sst_cmd->param;
        mbox_cmd.req_data = sst_cmd->data;
        mbox_cmd.logical_cpu = sst_cmd->cpu;
        (cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);
}

/**
 * isst_resume_common() - Process resume request
 *
 * On resume, replay all stored mailbox commands and MSR writes.
 *
 * Return: None.
 */
void isst_resume_common(void)
{
        struct isst_cmd *sst_cmd;
        int i;

        hash_for_each(isst_hash, i, sst_cmd, hnode) {
                struct isst_if_cmd_cb *cb;

                if (sst_cmd->mbox_cmd_type) {
                        cb = &punit_callbacks[ISST_IF_DEV_MBOX];
                        if (cb->registered)
                                isst_mbox_resume_command(cb, sst_cmd);
                } else {
                        wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
                                           sst_cmd->data);
                }
        }
}
EXPORT_SYMBOL_GPL(isst_resume_common);

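/*
 * Replay whitelisted MSR writes stored for this CPU. Called when a CPU
 * comes online so that MSR state is restored after hotplug.
 */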
static void isst_restore_msr_local(int cpu)
{
        struct isst_cmd *sst_cmd;
        int i;

        mutex_lock(&isst_hash_lock);
        for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
                if (!punit_msr_white_list[i])
                        break;

                hash_for_each_possible(isst_hash, sst_cmd, hnode,
                                       punit_msr_white_list[i]) {
                        if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
                                wrmsrl_safe(sst_cmd->cmd, sst_cmd->data);
                }
        }
        mutex_unlock(&isst_hash_lock);
}

/**
 * isst_if_mbox_cmd_invalid() - Check for invalid mailbox commands
 * @cmd: Pointer to the command structure to verify.
 *
 * An invalid command to the PUNIT may result in instability of the platform,
 * so commands are checked against a whitelist of allowed commands.
 *
 * Return: true if the command is invalid, else false.
 */
bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd)
{
        int i;

        if (cmd->logical_cpu >= nr_cpu_ids)
                return true;

        for (i = 0; i < ARRAY_SIZE(isst_valid_cmds); ++i) {
                if (cmd->command == isst_valid_cmds[i].cmd &&
                    (cmd->sub_command >= isst_valid_cmds[i].sub_cmd_beg &&
                     cmd->sub_command <= isst_valid_cmds[i].sub_cmd_end)) {
                        return false;
                }
        }

        return true;
}
EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_invalid);

/**
 * isst_if_mbox_cmd_set_req() - Check whether a mailbox command is a set request
 * @cmd: Pointer to the command structure to verify.
 *
 * Check if the given mailbox command is a set request and not a get request.
 *
 * Return: true if the command is a set request, else false.
 */
bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(isst_cmd_set_reqs); ++i) {
                if (cmd->command == isst_cmd_set_reqs[i].cmd &&
                    cmd->sub_command == isst_cmd_set_reqs[i].sub_cmd &&
                    cmd->parameter == isst_cmd_set_reqs[i].param) {
                        return true;
                }
        }

        return false;
}
EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req);

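/* Fill the platform/capability information and copy it to user space */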
static int isst_if_get_platform_info(void __user *argp)
{
        struct isst_if_platform_info info;

        info.api_version = ISST_IF_API_VERSION;
        info.driver_version = ISST_IF_DRIVER_VERSION;
        info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT;
        info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered;
        info.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered;

        if (copy_to_user(argp, &info, sizeof(info)))
                return -EFAULT;

        return 0;
}

struct isst_if_cpu_info {
        /* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
        int bus_info[2];
        struct pci_dev *pci_dev[2];
        int punit_cpu_id;
        int numa_node;
};

static struct isst_if_cpu_info *isst_cpu_info;
#define ISST_MAX_PCI_DOMAINS    8

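/*
 * Scan PCI domains for the device at the cached bus number and the given
 * device/function, preferring the one whose NUMA node matches the CPU.
 */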
static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
        struct pci_dev *matched_pci_dev = NULL;
        struct pci_dev *pci_dev = NULL;
        int no_matches = 0;
        int i, bus_number;

        if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
            cpu >= num_possible_cpus())
                return NULL;

        bus_number = isst_cpu_info[cpu].bus_info[bus_no];
        if (bus_number < 0)
                return NULL;

        for (i = 0; i < ISST_MAX_PCI_DOMAINS; ++i) {
                struct pci_dev *_pci_dev;
                int node;

                _pci_dev = pci_get_domain_bus_and_slot(i, bus_number, PCI_DEVFN(dev, fn));
                if (!_pci_dev)
                        continue;

                ++no_matches;
                if (!matched_pci_dev)
                        matched_pci_dev = _pci_dev;

                node = dev_to_node(&_pci_dev->dev);
                if (node == NUMA_NO_NODE) {
                        pr_info("Failed to get numa node for CPU:%d bus:%d dev:%d fn:%d\n",
                                cpu, bus_no, dev, fn);
                        continue;
                }

                if (node == isst_cpu_info[cpu].numa_node) {
                        pci_dev = _pci_dev;
                        break;
                }
        }

        /*
         * If there is no NUMA-matched pci_dev, the following cases are possible:
         * 1. CONFIG_NUMA is not defined: if there is only a single device match,
         *    NUMA information is not needed, so simply return that match.
         *    Otherwise return NULL.
         * 2. NUMA information is not exposed via the _SEG method: same handling
         *    as case 1.
         * 3. NUMA information doesn't match the CPU's NUMA node and there is
         *    more than one match: return NULL.
         */
        if (!pci_dev && no_matches == 1)
                pci_dev = matched_pci_dev;

        return pci_dev;
}

/**
 * isst_if_get_pci_dev() - Get the PCI device instance for a CPU
 * @cpu: Logical CPU number.
 * @bus_no: The bus number assigned by the hardware.
 * @dev: The device number assigned by the hardware.
 * @fn: The function number assigned by the hardware.
 *
 * Using cached bus information, find the PCI device for the given bus
 * number, device and function.
 *
 * Return: pci_dev pointer on success, NULL otherwise.
 */
struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
        struct pci_dev *pci_dev;

        if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
            cpu >= num_possible_cpus())
                return NULL;

        pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];

        if (pci_dev && pci_dev->devfn == PCI_DEVFN(dev, fn))
                return pci_dev;

        return _isst_if_get_pci_dev(cpu, bus_no, dev, fn);
}
EXPORT_SYMBOL_GPL(isst_if_get_pci_dev);

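/*
 * CPU hotplug online callback: cache the CPU's NUMA node, PUNIT bus
 * numbers/PCI devices and PUNIT CPU id, then restore any stored MSRs.
 */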
static int isst_if_cpu_online(unsigned int cpu)
{
        u64 data;
        int ret;

        isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);

        ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
        if (ret) {
                /* This is not a fatal error for an MSR mailbox-only interface */
                isst_cpu_info[cpu].bus_info[0] = -1;
                isst_cpu_info[cpu].bus_info[1] = -1;
        } else {
                isst_cpu_info[cpu].bus_info[0] = data & 0xff;
                isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
                isst_cpu_info[cpu].pci_dev[0] = _isst_if_get_pci_dev(cpu, 0, 0, 1);
                isst_cpu_info[cpu].pci_dev[1] = _isst_if_get_pci_dev(cpu, 1, 30, 1);
        }

        ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
        if (ret) {
                isst_cpu_info[cpu].punit_cpu_id = -1;
                return ret;
        }
        isst_cpu_info[cpu].punit_cpu_id = data;

        isst_restore_msr_local(cpu);

        return 0;
}

static int isst_if_online_id;

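/* Allocate per-CPU info and register the CPU hotplug online callback */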
static int isst_if_cpu_info_init(void)
{
        int ret;

        isst_cpu_info = kcalloc(num_possible_cpus(),
                                sizeof(*isst_cpu_info),
                                GFP_KERNEL);
        if (!isst_cpu_info)
                return -ENOMEM;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                "platform/x86/isst-if:online",
                                isst_if_cpu_online, NULL);
        if (ret < 0) {
                kfree(isst_cpu_info);
                return ret;
        }

        isst_if_online_id = ret;

        return 0;
}

static void isst_if_cpu_info_exit(void)
{
        cpuhp_remove_state(isst_if_online_id);
        kfree(isst_cpu_info);
}

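/* IOCTL helper: map a logical CPU number to its PUNIT CPU id */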
static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume)
{
        struct isst_if_cpu_map *cpu_map;

        cpu_map = (struct isst_if_cpu_map *)cmd_ptr;
        if (cpu_map->logical_cpu >= nr_cpu_ids ||
            cpu_map->logical_cpu >= num_possible_cpus())
                return -EINVAL;

        *write_only = 0;
        cpu_map->physical_cpu = isst_cpu_info[cpu_map->logical_cpu].punit_cpu_id;

        return 0;
}

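/* Return true if the MSR is in the allowed PUNIT MSR list */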
static bool match_punit_msr_white_list(int msr)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
                if (punit_msr_white_list[i] == msr)
                        return true;
        }

        return false;
}

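/*
 * IOCTL helper: execute one whitelisted MSR read or write request.
 * Writes require CAP_SYS_ADMIN and are stored for replay on resume.
 */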
static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
{
        struct isst_if_msr_cmd *msr_cmd;
        int ret;

        msr_cmd = (struct isst_if_msr_cmd *)cmd_ptr;

        if (!match_punit_msr_white_list(msr_cmd->msr))
                return -EINVAL;

        if (msr_cmd->logical_cpu >= nr_cpu_ids)
                return -EINVAL;

        if (msr_cmd->read_write) {
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;

                ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu,
                                         msr_cmd->msr,
                                         msr_cmd->data);
                *write_only = 1;
                if (!ret && !resume)
                        ret = isst_store_cmd(0, msr_cmd->msr,
                                             msr_cmd->logical_cpu,
                                             0, 0, msr_cmd->data);
        } else {
                u64 data;

                ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu,
                                         msr_cmd->msr, &data);
                if (!ret) {
                        msr_cmd->data = data;
                        *write_only = 0;
                }
        }

        return ret;
}

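/*
 * Copy each sub-command from the user buffer, pass it to the callback and,
 * for read requests, copy the result back. Returns the number of commands
 * processed, or an error code if the first command fails.
 */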
static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
{
        unsigned char __user *ptr;
        u32 cmd_count;
        u8 *cmd_ptr;
        long ret;
        int i;

        /* Each multi command has u32 command count as the first field */
        if (copy_from_user(&cmd_count, argp, sizeof(cmd_count)))
                return -EFAULT;

        if (!cmd_count || cmd_count > ISST_IF_CMD_LIMIT)
                return -EINVAL;

        cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
        if (!cmd_ptr)
                return -ENOMEM;

        /* cb->offset points to start of the command after the command count */
        ptr = argp + cb->offset;

        for (i = 0; i < cmd_count; ++i) {
                int wr_only;

                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
                        ret = -EFAULT;
                        break;
                }

                ret = cb->cmd_callback(cmd_ptr, &wr_only, 0);
                if (ret)
                        break;

                if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) {
                        ret = -EFAULT;
                        break;
                }

                ptr += cb->cmd_size;
        }

        kfree(cmd_ptr);

        return i ? i : ret;
}

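/* Dispatch IOCTLs to the matching registered callback or local handler */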
static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
                              unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        struct isst_if_cmd_cb cmd_cb;
        struct isst_if_cmd_cb *cb;
        long ret = -ENOTTY;

        switch (cmd) {
        case ISST_IF_GET_PLATFORM_INFO:
                ret = isst_if_get_platform_info(argp);
                break;
        case ISST_IF_GET_PHY_ID:
                cmd_cb.cmd_size = sizeof(struct isst_if_cpu_map);
                cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);
                cmd_cb.cmd_callback = isst_if_proc_phyid_req;
                ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
                break;
        case ISST_IF_IO_CMD:
                cb = &punit_callbacks[ISST_IF_DEV_MMIO];
                if (cb->registered)
                        ret = isst_if_exec_multi_cmd(argp, cb);
                break;
        case ISST_IF_MBOX_COMMAND:
                cb = &punit_callbacks[ISST_IF_DEV_MBOX];
                if (cb->registered)
                        ret = isst_if_exec_multi_cmd(argp, cb);
                break;
        case ISST_IF_MSR_COMMAND:
                cmd_cb.cmd_size = sizeof(struct isst_if_msr_cmd);
                cmd_cb.offset = offsetof(struct isst_if_msr_cmds, msr_cmd);
                cmd_cb.cmd_callback = isst_if_msr_cmd_req;
                ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
                break;
        default:
                break;
        }

        return ret;
}

static DEFINE_MUTEX(punit_misc_dev_lock);
static int misc_usage_count;
static int misc_device_ret;
static int misc_device_open;

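/*
 * On open, take a reference on every registered callback owner so the
 * backing modules cannot be unloaded while the device is in use.
 */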
static int isst_if_open(struct inode *inode, struct file *file)
{
        int i, ret = 0;

        /* Fail open, if a module is going away */
        mutex_lock(&punit_misc_dev_lock);
        for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
                struct isst_if_cmd_cb *cb = &punit_callbacks[i];

                if (cb->registered && !try_module_get(cb->owner)) {
                        ret = -ENODEV;
                        break;
                }
        }
        if (ret) {
                int j;

                for (j = 0; j < i; ++j) {
                        struct isst_if_cmd_cb *cb;

                        cb = &punit_callbacks[j];
                        if (cb->registered)
                                module_put(cb->owner);
                }
        } else {
                misc_device_open++;
        }
        mutex_unlock(&punit_misc_dev_lock);

        return ret;
}

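/* Drop the module references taken in isst_if_open() */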
static int isst_if_release(struct inode *inode, struct file *f)
{
        int i;

        mutex_lock(&punit_misc_dev_lock);
        misc_device_open--;
        for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
                struct isst_if_cmd_cb *cb = &punit_callbacks[i];

                if (cb->registered)
                        module_put(cb->owner);
        }
        mutex_unlock(&punit_misc_dev_lock);

        return 0;
}

static const struct file_operations isst_if_char_driver_ops = {
        .open = isst_if_open,
        .unlocked_ioctl = isst_if_def_ioctl,
        .release = isst_if_release,
};

static struct miscdevice isst_if_char_driver = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = "isst_interface",
        .fops           = &isst_if_char_driver_ops,
};

/**
 * isst_if_cdev_register() - Register callback for IOCTL
 * @device_type: The device type this callback handles.
 * @cb: Callback structure.
 *
 * This function registers a callback for a device type. On the very first
 * call it registers a misc device, which is used for the user-kernel
 * interface. Subsequent calls simply increment a reference count.
 * Registration fails if the user has already opened the misc device for
 * operation. Also, if misc device creation failed once, it is not retried
 * and all callers get the same failure code.
 *
 * Return: The result of misc device creation, or -EINVAL for an unsupported
 * device type.
 */
int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
{
        if (misc_device_ret)
                return misc_device_ret;

        if (device_type >= ISST_IF_DEV_MAX)
                return -EINVAL;

        mutex_lock(&punit_misc_dev_lock);
        if (misc_device_open) {
                mutex_unlock(&punit_misc_dev_lock);
                return -EAGAIN;
        }
        if (!misc_usage_count) {
                int ret;

                misc_device_ret = misc_register(&isst_if_char_driver);
                if (misc_device_ret)
                        goto unlock_exit;

                ret = isst_if_cpu_info_init();
                if (ret) {
                        misc_deregister(&isst_if_char_driver);
                        misc_device_ret = ret;
                        goto unlock_exit;
                }
        }
        memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
        punit_callbacks[device_type].registered = 1;
        misc_usage_count++;
unlock_exit:
        mutex_unlock(&punit_misc_dev_lock);

        return misc_device_ret;
}
EXPORT_SYMBOL_GPL(isst_if_cdev_register);

/**
 * isst_if_cdev_unregister() - Unregister callback for IOCTL
 * @device_type: The device type to unregister.
 *
 * This function unregisters the previously registered callback. If this
 * is the last callback being unregistered, the misc device is removed.
 *
 * Return: None.
 */
void isst_if_cdev_unregister(int device_type)
{
        mutex_lock(&punit_misc_dev_lock);
        misc_usage_count--;
        punit_callbacks[device_type].registered = 0;
        if (device_type == ISST_IF_DEV_MBOX)
                isst_delete_hash();
        if (!misc_usage_count && !misc_device_ret) {
                misc_deregister(&isst_if_char_driver);
                isst_if_cpu_info_exit();
        }
        mutex_unlock(&punit_misc_dev_lock);
}
EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);

MODULE_LICENSE("GPL v2");