linux/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Speed Select Interface: Common functions
 * Copyright (c) 2019, Intel Corporation.
 * All rights reserved.
 *
 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 */

#include <linux/cpufeature.h>
#include <linux/cpuhotplug.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <uapi/linux/isst_if.h>

#include "isst_if_common.h"

#define MSR_THREAD_ID_INFO      0x53
#define MSR_CPU_BUS_NUMBER      0x128

static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX];

static int punit_msr_white_list[] = {
        MSR_TURBO_RATIO_LIMIT,
        MSR_CONFIG_TDP_CONTROL,
        MSR_TURBO_RATIO_LIMIT1,
        MSR_TURBO_RATIO_LIMIT2,
};

struct isst_valid_cmd_ranges {
        u16 cmd;
        u16 sub_cmd_beg;
        u16 sub_cmd_end;
};

struct isst_cmd_set_req_type {
        u16 cmd;
        u16 sub_cmd;
        u16 param;
};

static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
        {0xD0, 0x00, 0x03},
        {0x7F, 0x00, 0x0B},
        {0x7F, 0x10, 0x12},
        {0x7F, 0x20, 0x23},
        {0x94, 0x03, 0x03},
        {0x95, 0x03, 0x03},
};

static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
        {0xD0, 0x00, 0x08},
        {0xD0, 0x01, 0x08},
        {0xD0, 0x02, 0x08},
        {0xD0, 0x03, 0x08},
        {0x7F, 0x02, 0x00},
        {0x7F, 0x08, 0x00},
        {0x95, 0x03, 0x03},
};

struct isst_cmd {
        struct hlist_node hnode;
        u64 data;
        u32 cmd;
        int cpu;
        int mbox_cmd_type;
        u32 param;
};

static DECLARE_HASHTABLE(isst_hash, 8);
static DEFINE_MUTEX(isst_hash_lock);

static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
                              u32 data)
{
        struct isst_cmd *sst_cmd;

        sst_cmd = kmalloc(sizeof(*sst_cmd), GFP_KERNEL);
        if (!sst_cmd)
                return -ENOMEM;

        sst_cmd->cpu = cpu;
        sst_cmd->cmd = cmd;
        sst_cmd->mbox_cmd_type = mbox_cmd_type;
        sst_cmd->param = param;
        sst_cmd->data = data;

        hash_add(isst_hash, &sst_cmd->hnode, sst_cmd->cmd);

        return 0;
}

static void isst_delete_hash(void)
{
        struct isst_cmd *sst_cmd;
        struct hlist_node *tmp;
        int i;

        hash_for_each_safe(isst_hash, i, tmp, sst_cmd, hnode) {
                hash_del(&sst_cmd->hnode);
                kfree(sst_cmd);
        }
}

/**
 * isst_store_cmd() - Store command to a hash table
 * @cmd: Mailbox command.
 * @sub_cmd: Mailbox sub-command or MSR id.
 * @cpu: Target CPU for the command.
 * @mbox_cmd_type: Mailbox or MSR command.
 * @param: Mailbox parameter.
 * @data: Mailbox request data or MSR data.
 *
 * Stores the command in a hash table if it is not already present. If the
 * command is already stored, only its parameter and data are updated.
 *
 * Return: 0 for success, otherwise an error code from the hash table store.
 */
int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
                   u32 param, u64 data)
{
        struct isst_cmd *sst_cmd;
        int full_cmd, ret;

        full_cmd = (cmd & GENMASK_ULL(15, 0)) << 16;
        full_cmd |= (sub_cmd & GENMASK_ULL(15, 0));
        mutex_lock(&isst_hash_lock);
        hash_for_each_possible(isst_hash, sst_cmd, hnode, full_cmd) {
                if (sst_cmd->cmd == full_cmd && sst_cmd->cpu == cpu &&
                    sst_cmd->mbox_cmd_type == mbox_cmd_type) {
                        sst_cmd->param = param;
                        sst_cmd->data = data;
                        mutex_unlock(&isst_hash_lock);
                        return 0;
                }
        }

        ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);
        mutex_unlock(&isst_hash_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(isst_store_cmd);
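
/*
 * Illustrative sketch (not part of the driver): how a PUNIT interface
 * driver is expected to record a successful mailbox "set" command so it
 * can be replayed by isst_resume_common(). The command and sub-command
 * are packed into a single hash key by isst_store_cmd() itself; callers
 * pass the raw fields. The 0xD0/0x00 values are taken from the command
 * tables above purely for illustration.
 *
 *	ret = isst_store_cmd(0xD0, 0x00, mbox_cmd->logical_cpu, 1,
 *			     mbox_cmd->parameter, mbox_cmd->req_data);
 *	if (ret)
 *		return ret;
 */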

static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
                                     struct isst_cmd *sst_cmd)
{
        struct isst_if_mbox_cmd mbox_cmd;
        int wr_only;

        mbox_cmd.command = (sst_cmd->cmd & GENMASK_ULL(31, 16)) >> 16;
        mbox_cmd.sub_command = sst_cmd->cmd & GENMASK_ULL(15, 0);
        mbox_cmd.parameter = sst_cmd->param;
        mbox_cmd.req_data = sst_cmd->data;
        mbox_cmd.logical_cpu = sst_cmd->cpu;
        (cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);
}

/**
 * isst_resume_common() - Process Resume request
 *
 * On resume replay all mailbox commands and MSRs.
 *
 * Return: None.
 */
void isst_resume_common(void)
{
        struct isst_cmd *sst_cmd;
        int i;

        hash_for_each(isst_hash, i, sst_cmd, hnode) {
                struct isst_if_cmd_cb *cb;

                if (sst_cmd->mbox_cmd_type) {
                        cb = &punit_callbacks[ISST_IF_DEV_MBOX];
                        if (cb->registered)
                                isst_mbox_resume_command(cb, sst_cmd);
                } else {
                        wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
                                           sst_cmd->data);
                }
        }
}
EXPORT_SYMBOL_GPL(isst_resume_common);

static void isst_restore_msr_local(int cpu)
{
        struct isst_cmd *sst_cmd;
        int i;

        mutex_lock(&isst_hash_lock);
        for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
                if (!punit_msr_white_list[i])
                        break;

                hash_for_each_possible(isst_hash, sst_cmd, hnode,
                                       punit_msr_white_list[i]) {
                        if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
                                wrmsrl_safe(sst_cmd->cmd, sst_cmd->data);
                }
        }
        mutex_unlock(&isst_hash_lock);
}

/**
 * isst_if_mbox_cmd_invalid() - Check invalid mailbox commands
 * @cmd: Pointer to the command structure to verify.
 *
 * An invalid command to the PUNIT may result in instability of the platform,
 * so incoming commands are checked against a whitelist of allowed commands.
 *
 * Return: true if the command is invalid, else false.
 */
bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd)
{
        int i;

        if (cmd->logical_cpu >= nr_cpu_ids)
                return true;

        for (i = 0; i < ARRAY_SIZE(isst_valid_cmds); ++i) {
                if (cmd->command == isst_valid_cmds[i].cmd &&
                    (cmd->sub_command >= isst_valid_cmds[i].sub_cmd_beg &&
                     cmd->sub_command <= isst_valid_cmds[i].sub_cmd_end)) {
                        return false;
                }
        }

        return true;
}
EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_invalid);
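
/*
 * Illustrative sketch, based on the isst_valid_cmds[] table above:
 * command 0x7F with sub-command 0x08 falls inside the 0x00-0x0B range and
 * is allowed, while sub-command 0x30 matches no range and is rejected.
 * The mbox_cmd variable is hypothetical.
 *
 *	mbox_cmd.command = 0x7F;
 *	mbox_cmd.sub_command = 0x08;
 *	isst_if_mbox_cmd_invalid(&mbox_cmd);	// false: command allowed
 *
 *	mbox_cmd.sub_command = 0x30;
 *	isst_if_mbox_cmd_invalid(&mbox_cmd);	// true: command blocked
 */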

/**
 * isst_if_mbox_cmd_set_req() - Check mailbox command is a set request
 * @cmd: Pointer to the command structure to verify.
 *
 * Check if the given mailbox command is a set request and not a get request.
 *
 * Return: true if the command is a set request, else false.
 */
bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(isst_cmd_set_reqs); ++i) {
                if (cmd->command == isst_cmd_set_reqs[i].cmd &&
                    cmd->sub_command == isst_cmd_set_reqs[i].sub_cmd &&
                    cmd->parameter == isst_cmd_set_reqs[i].param) {
                        return true;
                }
        }

        return false;
}
EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req);
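
/*
 * Illustrative sketch, based on the isst_cmd_set_reqs[] table above:
 * command 0xD0, sub-command 0x00 with parameter 0x08 is classified as a
 * set request, while the same command with a different parameter is
 * treated as a get request. The mbox_cmd variable is hypothetical.
 *
 *	mbox_cmd.command = 0xD0;
 *	mbox_cmd.sub_command = 0x00;
 *	mbox_cmd.parameter = 0x08;
 *	isst_if_mbox_cmd_set_req(&mbox_cmd);	// true: set request
 *
 *	mbox_cmd.parameter = 0x00;
 *	isst_if_mbox_cmd_set_req(&mbox_cmd);	// false: get request
 */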

static int isst_if_get_platform_info(void __user *argp)
{
        struct isst_if_platform_info info;

        info.api_version = ISST_IF_API_VERSION;
        info.driver_version = ISST_IF_DRIVER_VERSION;
        info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT;
        info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered;
        info.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered;

        if (copy_to_user(argp, &info, sizeof(info)))
                return -EFAULT;

        return 0;
}

struct isst_if_cpu_info {
        /* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
        int bus_info[2];
        struct pci_dev *pci_dev[2];
        int punit_cpu_id;
        int numa_node;
};

static struct isst_if_cpu_info *isst_cpu_info;
#define ISST_MAX_PCI_DOMAINS    8

static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
        struct pci_dev *matched_pci_dev = NULL;
        struct pci_dev *pci_dev = NULL;
        int no_matches = 0;
        int i, bus_number;

        if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
            cpu >= num_possible_cpus())
                return NULL;

        bus_number = isst_cpu_info[cpu].bus_info[bus_no];
        if (bus_number < 0)
                return NULL;

        for (i = 0; i < ISST_MAX_PCI_DOMAINS; ++i) {
                struct pci_dev *_pci_dev;
                int node;

                _pci_dev = pci_get_domain_bus_and_slot(i, bus_number, PCI_DEVFN(dev, fn));
                if (!_pci_dev)
                        continue;

                ++no_matches;
                if (!matched_pci_dev)
                        matched_pci_dev = _pci_dev;

                node = dev_to_node(&_pci_dev->dev);
                if (node == NUMA_NO_NODE) {
                        pr_info("Fail to get numa node for CPU:%d bus:%d dev:%d fn:%d\n",
                                cpu, bus_no, dev, fn);
                        continue;
                }

                if (node == isst_cpu_info[cpu].numa_node) {
                        pci_dev = _pci_dev;
                        break;
                }
        }

        /*
         * If there is no NUMA-matched pci_dev, the following cases are possible:
         * 1. CONFIG_NUMA is not defined: if there is only a single device
         *    match, NUMA information is not needed, so simply return that
         *    match. Otherwise return NULL.
         * 2. NUMA information is not exposed via the _SEG method: handled
         *    the same way as case 1.
         * 3. The NUMA information doesn't match the CPU's NUMA node and
         *    there is more than one match: return NULL.
         */
        if (!pci_dev && no_matches == 1)
                pci_dev = matched_pci_dev;

        return pci_dev;
}

/**
 * isst_if_get_pci_dev() - Get the PCI device instance for a CPU
 * @cpu: Logical CPU number.
 * @bus_no: PUNIT bus index (0 or 1) selecting the cached bus number.
 * @dev: The device number assigned by the hardware.
 * @fn: The function number assigned by the hardware.
 *
 * Using cached bus information, find the PCI device for the given bus
 * index, device and function.
 *
 * Return: Return pci_dev pointer or NULL.
 */
struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
        struct pci_dev *pci_dev;

        if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
            cpu >= num_possible_cpus())
                return NULL;

        pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];

        if (pci_dev && pci_dev->devfn == PCI_DEVFN(dev, fn))
                return pci_dev;

        return _isst_if_get_pci_dev(cpu, bus_no, dev, fn);
}
EXPORT_SYMBOL_GPL(isst_if_get_pci_dev);
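
/*
 * Illustrative sketch (hypothetical caller): an interface driver can look
 * up the PUNIT PCI device on cached bus 1, device 30, function 1 for the
 * CPU that issued a request, and fail gracefully when no device matches.
 *
 *	struct pci_dev *pdev;
 *
 *	pdev = isst_if_get_pci_dev(cpu, 1, 30, 1);
 *	if (!pdev)
 *		return -ENODEV;
 */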

static int isst_if_cpu_online(unsigned int cpu)
{
        u64 data;
        int ret;

        ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
        if (ret) {
                /* This is not a fatal error on MSR mailbox only I/F */
                isst_cpu_info[cpu].bus_info[0] = -1;
                isst_cpu_info[cpu].bus_info[1] = -1;
        } else {
                isst_cpu_info[cpu].bus_info[0] = data & 0xff;
                isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
                isst_cpu_info[cpu].pci_dev[0] = _isst_if_get_pci_dev(cpu, 0, 0, 1);
                isst_cpu_info[cpu].pci_dev[1] = _isst_if_get_pci_dev(cpu, 1, 30, 1);
        }

        ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
        if (ret) {
                isst_cpu_info[cpu].punit_cpu_id = -1;
                return ret;
        }
        isst_cpu_info[cpu].punit_cpu_id = data;
        isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);

        isst_restore_msr_local(cpu);

        return 0;
}

static int isst_if_online_id;

static int isst_if_cpu_info_init(void)
{
        int ret;

        isst_cpu_info = kcalloc(num_possible_cpus(),
                                sizeof(*isst_cpu_info),
                                GFP_KERNEL);
        if (!isst_cpu_info)
                return -ENOMEM;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                "platform/x86/isst-if:online",
                                isst_if_cpu_online, NULL);
        if (ret < 0) {
                kfree(isst_cpu_info);
                return ret;
        }

        isst_if_online_id = ret;

        return 0;
}

static void isst_if_cpu_info_exit(void)
{
        cpuhp_remove_state(isst_if_online_id);
        kfree(isst_cpu_info);
}

static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume)
{
        struct isst_if_cpu_map *cpu_map;

        cpu_map = (struct isst_if_cpu_map *)cmd_ptr;
        if (cpu_map->logical_cpu >= nr_cpu_ids ||
            cpu_map->logical_cpu >= num_possible_cpus())
                return -EINVAL;

        *write_only = 0;
        cpu_map->physical_cpu = isst_cpu_info[cpu_map->logical_cpu].punit_cpu_id;

        return 0;
}

static bool match_punit_msr_white_list(int msr)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
                if (punit_msr_white_list[i] == msr)
                        return true;
        }

        return false;
}

static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
{
        struct isst_if_msr_cmd *msr_cmd;
        int ret;

        msr_cmd = (struct isst_if_msr_cmd *)cmd_ptr;

        if (!match_punit_msr_white_list(msr_cmd->msr))
                return -EINVAL;

        if (msr_cmd->logical_cpu >= nr_cpu_ids)
                return -EINVAL;

        if (msr_cmd->read_write) {
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;

                ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu,
                                         msr_cmd->msr,
                                         msr_cmd->data);
                *write_only = 1;
                if (!ret && !resume)
                        ret = isst_store_cmd(0, msr_cmd->msr,
                                             msr_cmd->logical_cpu,
                                             0, 0, msr_cmd->data);
        } else {
                u64 data;

                ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu,
                                         msr_cmd->msr, &data);
                if (!ret) {
                        msr_cmd->data = data;
                        *write_only = 0;
                }
        }

        return ret;
}

static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
{
        unsigned char __user *ptr;
        u32 cmd_count;
        u8 *cmd_ptr;
        long ret;
        int i;

        /* Each multi command has u32 command count as the first field */
        if (copy_from_user(&cmd_count, argp, sizeof(cmd_count)))
                return -EFAULT;

        if (!cmd_count || cmd_count > ISST_IF_CMD_LIMIT)
                return -EINVAL;

        cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
        if (!cmd_ptr)
                return -ENOMEM;

        /* cb->offset points to start of the command after the command count */
        ptr = argp + cb->offset;

        for (i = 0; i < cmd_count; ++i) {
                int wr_only;

                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
                        ret = -EFAULT;
                        break;
                }

                ret = cb->cmd_callback(cmd_ptr, &wr_only, 0);
                if (ret)
                        break;

                if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) {
                        ret = -EFAULT;
                        break;
                }

                ptr += cb->cmd_size;
        }

        kfree(cmd_ptr);

        return i ? i : ret;
}
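
/*
 * Layout of the user buffer consumed above, shown for the MSR case as an
 * illustration. The struct definitions come from uapi/linux/isst_if.h;
 * only the fields referenced in this file are shown.
 *
 *	struct isst_if_msr_cmds {
 *		__u32 cmd_count;			// checked against ISST_IF_CMD_LIMIT
 *		struct isst_if_msr_cmd msr_cmd[1];	// cb->offset points here
 *	};
 *
 * Each of the cmd_count entries is copied in, handed to cb->cmd_callback(),
 * and copied back unless the callback marked it write-only. If at least one
 * command was processed, the number processed is returned (even when a
 * later one failed); otherwise the error code is returned.
 */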

static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
                              unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        struct isst_if_cmd_cb cmd_cb;
        struct isst_if_cmd_cb *cb;
        long ret = -ENOTTY;

        switch (cmd) {
        case ISST_IF_GET_PLATFORM_INFO:
                ret = isst_if_get_platform_info(argp);
                break;
        case ISST_IF_GET_PHY_ID:
                cmd_cb.cmd_size = sizeof(struct isst_if_cpu_map);
                cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);
                cmd_cb.cmd_callback = isst_if_proc_phyid_req;
                ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
                break;
        case ISST_IF_IO_CMD:
                cb = &punit_callbacks[ISST_IF_DEV_MMIO];
                if (cb->registered)
                        ret = isst_if_exec_multi_cmd(argp, cb);
                break;
        case ISST_IF_MBOX_COMMAND:
                cb = &punit_callbacks[ISST_IF_DEV_MBOX];
                if (cb->registered)
                        ret = isst_if_exec_multi_cmd(argp, cb);
                break;
        case ISST_IF_MSR_COMMAND:
                cmd_cb.cmd_size = sizeof(struct isst_if_msr_cmd);
                cmd_cb.offset = offsetof(struct isst_if_msr_cmds, msr_cmd);
                cmd_cb.cmd_callback = isst_if_msr_cmd_req;
                ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
                break;
        default:
                break;
        }

        return ret;
}
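
/*
 * Illustrative user-space sketch of the ioctl interface dispatched above,
 * mapping one logical CPU to its PUNIT CPU id (headers and error handling
 * omitted; the device node name comes from isst_if_char_driver below):
 *
 *	struct isst_if_cpu_maps map = { .cmd_count = 1 };
 *	int fd = open("/dev/isst_interface", O_RDWR);
 *
 *	map.cpu_map[0].logical_cpu = 0;
 *	ioctl(fd, ISST_IF_GET_PHY_ID, &map);
 *	// map.cpu_map[0].physical_cpu now holds the PUNIT CPU id
 */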

static DEFINE_MUTEX(punit_misc_dev_lock);
static int misc_usage_count;
static int misc_device_ret;
static int misc_device_open;

static int isst_if_open(struct inode *inode, struct file *file)
{
        int i, ret = 0;

        /* Fail open, if a module is going away */
        mutex_lock(&punit_misc_dev_lock);
        for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
                struct isst_if_cmd_cb *cb = &punit_callbacks[i];

                if (cb->registered && !try_module_get(cb->owner)) {
                        ret = -ENODEV;
                        break;
                }
        }
        if (ret) {
                int j;

                for (j = 0; j < i; ++j) {
                        struct isst_if_cmd_cb *cb;

                        cb = &punit_callbacks[j];
                        if (cb->registered)
                                module_put(cb->owner);
                }
        } else {
                misc_device_open++;
        }
        mutex_unlock(&punit_misc_dev_lock);

        return ret;
}

static int isst_if_release(struct inode *inode, struct file *f)
{
        int i;

        mutex_lock(&punit_misc_dev_lock);
        misc_device_open--;
        for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
                struct isst_if_cmd_cb *cb = &punit_callbacks[i];

                if (cb->registered)
                        module_put(cb->owner);
        }
        mutex_unlock(&punit_misc_dev_lock);

        return 0;
}

static const struct file_operations isst_if_char_driver_ops = {
        .open = isst_if_open,
        .unlocked_ioctl = isst_if_def_ioctl,
        .release = isst_if_release,
};

static struct miscdevice isst_if_char_driver = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = "isst_interface",
        .fops           = &isst_if_char_driver_ops,
};

/**
 * isst_if_cdev_register() - Register callback for IOCTL
 * @device_type: The device type this callback handles.
 * @cb: Callback structure.
 *
 * This function registers a callback for a device type. On the very first
 * call it registers a misc device, which is used for the user/kernel
 * interface. Subsequent calls simply increment a reference count.
 * Registration fails if a user has already opened the misc device for
 * operation. Also, if misc device creation failed, it is not retried and
 * all callers get the same failure code.
 *
 * Return: The return value from misc device creation, or -EINVAL for an
 * unsupported device type.
 */
int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
{
        if (misc_device_ret)
                return misc_device_ret;

        if (device_type >= ISST_IF_DEV_MAX)
                return -EINVAL;

        mutex_lock(&punit_misc_dev_lock);
        if (misc_device_open) {
                mutex_unlock(&punit_misc_dev_lock);
                return -EAGAIN;
        }
        if (!misc_usage_count) {
                int ret;

                misc_device_ret = misc_register(&isst_if_char_driver);
                if (misc_device_ret)
                        goto unlock_exit;

                ret = isst_if_cpu_info_init();
                if (ret) {
                        misc_deregister(&isst_if_char_driver);
                        misc_device_ret = ret;
                        goto unlock_exit;
                }
        }
        memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
        punit_callbacks[device_type].registered = 1;
        misc_usage_count++;
unlock_exit:
        mutex_unlock(&punit_misc_dev_lock);

        return misc_device_ret;
}
EXPORT_SYMBOL_GPL(isst_if_cdev_register);
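
/*
 * Illustrative sketch of a client registration (the callback name is
 * hypothetical; the mailbox structures come from uapi/linux/isst_if.h):
 *
 *	static struct isst_if_cmd_cb cb = {
 *		.cmd_size = sizeof(struct isst_if_mbox_cmd),
 *		.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd),
 *		.cmd_callback = isst_if_mbox_proc_cmd,
 *		.owner = THIS_MODULE,
 *	};
 *
 *	ret = isst_if_cdev_register(ISST_IF_DEV_MBOX, &cb);
 *	...
 *	isst_if_cdev_unregister(ISST_IF_DEV_MBOX);
 */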

/**
 * isst_if_cdev_unregister() - Unregister callback for IOCTL
 * @device_type: The device type to unregister.
 *
 * This function unregisters the previously registered callback. If this
 * is the last callback being unregistered, the misc device is removed.
 *
 * Return: None.
 */
void isst_if_cdev_unregister(int device_type)
{
        mutex_lock(&punit_misc_dev_lock);
        misc_usage_count--;
        punit_callbacks[device_type].registered = 0;
        if (device_type == ISST_IF_DEV_MBOX)
                isst_delete_hash();
        if (!misc_usage_count && !misc_device_ret) {
                misc_deregister(&isst_if_char_driver);
                isst_if_cpu_info_exit();
        }
        mutex_unlock(&punit_misc_dev_lock);
}
EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);

MODULE_LICENSE("GPL v2");