linux/arch/x86/kvm/i8259.c
/*
 * 8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2007 Intel Corporation
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Port from Qemu.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include "irq.h"

#include <linux/kvm_host.h>
#include "trace.h"

#define pr_pic_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm: pic: " fmt, ## __VA_ARGS__)

static void pic_irq_request(struct kvm *kvm, int level);

static void pic_lock(struct kvm_pic *s)
        __acquires(&s->lock)
{
        spin_lock(&s->lock);
}

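/*
 * Drop the PIC lock; if a wakeup was requested while the lock was held,
 * kick the first vCPU that can accept PIC interrupts so it reevaluates
 * its pending events.
 */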
static void pic_unlock(struct kvm_pic *s)
        __releases(&s->lock)
{
        bool wakeup = s->wakeup_needed;
        struct kvm_vcpu *vcpu;
        int i;

        s->wakeup_needed = false;

        spin_unlock(&s->lock);

        if (wakeup) {
                kvm_for_each_vcpu(i, vcpu, s->kvm) {
                        if (kvm_apic_accept_pic_intr(vcpu)) {
                                kvm_make_request(KVM_REQ_EVENT, vcpu);
                                kvm_vcpu_kick(vcpu);
                                return;
                        }
                }
        }
}

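/*
 * Clear the ISR bit for 'irq' and run any registered ack notifiers.
 * Pins on the slave PIC are reported with their global number (irq + 8).
 */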
static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
        s->isr &= ~(1 << irq);
        if (s != &s->pics_state->pics[0])
                irq += 8;
        /*
         * We are dropping lock while calling ack notifiers since ack
         * notifier callbacks for assigned devices call into PIC recursively.
         * Other interrupt may be delivered to PIC while lock is dropped but
         * it should be safe since PIC state is already updated at this stage.
         */
        pic_unlock(s->pics_state);
        kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
        pic_lock(s->pics_state);
}

/*
 * set irq level. If an edge is detected, then the IRR is set to 1
 */
static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
{
        int mask, ret = 1;
        mask = 1 << irq;
        if (s->elcr & mask)     /* level triggered */
                if (level) {
                        ret = !(s->irr & mask);
                        s->irr |= mask;
                        s->last_irr |= mask;
                } else {
                        s->irr &= ~mask;
                        s->last_irr &= ~mask;
                }
        else    /* edge triggered */
                if (level) {
                        if ((s->last_irr & mask) == 0) {
                                ret = !(s->irr & mask);
                                s->irr |= mask;
                        }
                        s->last_irr |= mask;
                } else
                        s->last_irr &= ~mask;

        return (s->imr & mask) ? -1 : ret;
}

/*
 * return the highest priority found in mask (highest = smallest
 * number). Return 8 if no irq
 */
static inline int get_priority(struct kvm_kpic_state *s, int mask)
{
        int priority;
        if (mask == 0)
                return 8;
        priority = 0;
        while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
                priority++;
        return priority;
}

/*
 * return the pic wanted interrupt. return -1 if none
 */
static int pic_get_irq(struct kvm_kpic_state *s)
{
        int mask, cur_priority, priority;

        mask = s->irr & ~s->imr;
        priority = get_priority(s, mask);
        if (priority == 8)
                return -1;
        /*
         * compute current priority. If special fully nested mode on the
         * master, the IRQ coming from the slave is not taken into account
         * for the priority computation.
         */
        mask = s->isr;
        if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
                mask &= ~(1 << 2);
        cur_priority = get_priority(s, mask);
        if (priority < cur_priority)
                /*
                 * higher priority found: an irq should be generated
                 */
                return (priority + s->priority_add) & 7;
        else
                return -1;
}

/*
 * raise irq to CPU if necessary. must be called every time the active
 * irq may change
 */
static void pic_update_irq(struct kvm_pic *s)
{
        int irq2, irq;

        irq2 = pic_get_irq(&s->pics[1]);
        if (irq2 >= 0) {
                /*
                 * if irq request by slave pic, signal master PIC
                 */
                pic_set_irq1(&s->pics[0], 2, 1);
                pic_set_irq1(&s->pics[0], 2, 0);
        }
        irq = pic_get_irq(&s->pics[0]);
        pic_irq_request(s->kvm, irq >= 0);
}

void kvm_pic_update_irq(struct kvm_pic *s)
{
        pic_lock(s);
        pic_update_irq(s);
        pic_unlock(s);
}

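/*
 * Set the level of PIC input pin 'irq' (0-15) for the given interrupt
 * source and reevaluate the PIC output. Returns -1 if the pin is masked,
 * 0 if the request was coalesced with one already pending, 1 otherwise.
 */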
int kvm_pic_set_irq(struct kvm_pic *s, int irq, int irq_source_id, int level)
{
        int ret, irq_level;

        BUG_ON(irq < 0 || irq >= PIC_NUM_PINS);

        pic_lock(s);
        irq_level = __kvm_irq_line_state(&s->irq_states[irq],
                                         irq_source_id, level);
        ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, irq_level);
        pic_update_irq(s);
        trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
                              s->pics[irq >> 3].imr, ret == 0);
        pic_unlock(s);

        return ret;
}

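/*
 * Clear the given interrupt source's contribution to every PIC pin.
 */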
void kvm_pic_clear_all(struct kvm_pic *s, int irq_source_id)
{
        int i;

        pic_lock(s);
        for (i = 0; i < PIC_NUM_PINS; i++)
                __clear_bit(irq_source_id, &s->irq_states[i]);
        pic_unlock(s);
}

/*
 * acknowledge interrupt 'irq'
 */
static inline void pic_intack(struct kvm_kpic_state *s, int irq)
{
        s->isr |= 1 << irq;
        /*
         * We don't clear a level sensitive interrupt here
         */
        if (!(s->elcr & (1 << irq)))
                s->irr &= ~(1 << irq);

        if (s->auto_eoi) {
                if (s->rotate_on_auto_eoi)
                        s->priority_add = (irq + 1) & 7;
                pic_clear_isr(s, irq);
        }

}

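/*
 * Emulate an INTA cycle: acknowledge the highest-priority pending
 * interrupt (cascading to the slave for IRQ2) and return the vector
 * number the guest CPU should see. Spurious interrupts yield IRQ7
 * of the relevant chip.
 */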
int kvm_pic_read_irq(struct kvm *kvm)
{
        int irq, irq2, intno;
        struct kvm_pic *s = kvm->arch.vpic;

        s->output = 0;

        pic_lock(s);
        irq = pic_get_irq(&s->pics[0]);
        if (irq >= 0) {
                pic_intack(&s->pics[0], irq);
                if (irq == 2) {
                        irq2 = pic_get_irq(&s->pics[1]);
                        if (irq2 >= 0)
                                pic_intack(&s->pics[1], irq2);
                        else
                                /*
                                 * spurious IRQ on slave controller
                                 */
                                irq2 = 7;
                        intno = s->pics[1].irq_base + irq2;
                        irq = irq2 + 8;
                } else
                        intno = s->pics[0].irq_base + irq;
        } else {
                /*
                 * spurious IRQ on host controller
                 */
                irq = 7;
                intno = s->pics[0].irq_base + irq;
        }
        pic_update_irq(s);
        pic_unlock(s);

        return intno;
}

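/*
 * Reset one PIC chip (triggered by an ICW1 write). Edge-triggered
 * requests pending in the IRR are dropped; if any vCPU accepts PIC
 * interrupts, ack notifiers are run for those dropped pins via
 * pic_clear_isr() so that device state stays in sync.
 */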
static void kvm_pic_reset(struct kvm_kpic_state *s)
{
        int irq, i;
        struct kvm_vcpu *vcpu;
        u8 edge_irr = s->irr & ~s->elcr;
        bool found = false;

        s->last_irr = 0;
        s->irr &= s->elcr;
        s->imr = 0;
        s->priority_add = 0;
        s->special_mask = 0;
        s->read_reg_select = 0;
        if (!s->init4) {
                s->special_fully_nested_mode = 0;
                s->auto_eoi = 0;
        }
        s->init_state = 1;

        kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
                if (kvm_apic_accept_pic_intr(vcpu)) {
                        found = true;
                        break;
                }

        if (!found)
                return;

        for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
                if (edge_irr & (1 << irq))
                        pic_clear_isr(s, irq);
}

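/*
 * Handle a guest write to a PIC command/data port: ICW1-ICW4 during
 * initialization, and OCW2/OCW3 (EOI, rotation, poll, register select,
 * special mask) or IMR updates in normal operation. Mask changes also
 * fire the registered irq mask notifiers.
 */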
static void pic_ioport_write(void *opaque, u32 addr, u32 val)
{
        struct kvm_kpic_state *s = opaque;
        int priority, cmd, irq;

        addr &= 1;
        if (addr == 0) {
                if (val & 0x10) {
                        s->init4 = val & 1;
                        if (val & 0x02)
                                pr_pic_unimpl("single mode not supported");
                        if (val & 0x08)
                                pr_pic_unimpl(
                                                "level sensitive irq not supported");
                        kvm_pic_reset(s);
                } else if (val & 0x08) {
                        if (val & 0x04)
                                s->poll = 1;
                        if (val & 0x02)
                                s->read_reg_select = val & 1;
                        if (val & 0x40)
                                s->special_mask = (val >> 5) & 1;
                } else {
                        cmd = val >> 5;
                        switch (cmd) {
                        case 0:
                        case 4:
                                s->rotate_on_auto_eoi = cmd >> 2;
                                break;
                        case 1: /* end of interrupt */
                        case 5:
                                priority = get_priority(s, s->isr);
                                if (priority != 8) {
                                        irq = (priority + s->priority_add) & 7;
                                        if (cmd == 5)
                                                s->priority_add = (irq + 1) & 7;
                                        pic_clear_isr(s, irq);
                                        pic_update_irq(s->pics_state);
                                }
                                break;
                        case 3:
                                irq = val & 7;
                                pic_clear_isr(s, irq);
                                pic_update_irq(s->pics_state);
                                break;
                        case 6:
                                s->priority_add = (val + 1) & 7;
                                pic_update_irq(s->pics_state);
                                break;
                        case 7:
                                irq = val & 7;
                                s->priority_add = (irq + 1) & 7;
                                pic_clear_isr(s, irq);
                                pic_update_irq(s->pics_state);
                                break;
                        default:
                                break;  /* no operation */
                        }
                }
        } else
                switch (s->init_state) {
                case 0: { /* normal mode */
                        u8 imr_diff = s->imr ^ val,
                                off = (s == &s->pics_state->pics[0]) ? 0 : 8;
                        s->imr = val;
                        for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
                                if (imr_diff & (1 << irq))
                                        kvm_fire_mask_notifiers(
                                                s->pics_state->kvm,
                                                SELECT_PIC(irq + off),
                                                irq + off,
                                                !!(s->imr & (1 << irq)));
                        pic_update_irq(s->pics_state);
                        break;
                }
                case 1:
                        s->irq_base = val & 0xf8;
                        s->init_state = 2;
                        break;
                case 2:
                        if (s->init4)
                                s->init_state = 3;
                        else
                                s->init_state = 0;
                        break;
                case 3:
                        s->special_fully_nested_mode = (val >> 4) & 1;
                        s->auto_eoi = (val >> 1) & 1;
                        s->init_state = 0;
                        break;
                }
}

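/*
 * Poll-mode read (OCW3 with the poll bit set): return the highest
 * pending IRQ and acknowledge it, or 0x07 if nothing is pending.
 * A poll on the slave also clears the cascade IRQ2 on the master.
 */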
static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
{
        int ret;

        ret = pic_get_irq(s);
        if (ret >= 0) {
                if (addr1 >> 7) {
                        s->pics_state->pics[0].isr &= ~(1 << 2);
                        s->pics_state->pics[0].irr &= ~(1 << 2);
                }
                s->irr &= ~(1 << ret);
                pic_clear_isr(s, ret);
                if (addr1 >> 7 || ret != 2)
                        pic_update_irq(s->pics_state);
        } else {
                ret = 0x07;
                pic_update_irq(s->pics_state);
        }

        return ret;
}

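/*
 * Handle a guest read from a PIC port: a poll-mode read if one was
 * armed, otherwise IRR or ISR (per the OCW3 register select) from the
 * command port, or the IMR from the data port.
 */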
static u32 pic_ioport_read(void *opaque, u32 addr)
{
        struct kvm_kpic_state *s = opaque;
        int ret;

        if (s->poll) {
                ret = pic_poll_read(s, addr);
                s->poll = 0;
        } else
                if ((addr & 1) == 0)
                        if (s->read_reg_select)
                                ret = s->isr;
                        else
                                ret = s->irr;
                else
                        ret = s->imr;
        return ret;
}

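/*
 * ELCR (edge/level control register) accessors; writes are limited to
 * the bits that are valid for each chip by elcr_mask.
 */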
static void elcr_ioport_write(void *opaque, u32 addr, u32 val)
{
        struct kvm_kpic_state *s = opaque;
        s->elcr = val & s->elcr_mask;
}

static u32 elcr_ioport_read(void *opaque, u32 addr1)
{
        struct kvm_kpic_state *s = opaque;
        return s->elcr;
}

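/*
 * I/O bus callbacks: dispatch single-byte accesses at 0x20/0x21
 * (master), 0xa0/0xa1 (slave) and 0x4d0/0x4d1 (ELCR) to the
 * corresponding chip under the PIC lock.
 */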
static int picdev_write(struct kvm_pic *s,
                         gpa_t addr, int len, const void *val)
{
        unsigned char data = *(unsigned char *)val;

        if (len != 1) {
                pr_pic_unimpl("non byte write\n");
                return 0;
        }
        switch (addr) {
        case 0x20:
        case 0x21:
        case 0xa0:
        case 0xa1:
                pic_lock(s);
                pic_ioport_write(&s->pics[addr >> 7], addr, data);
                pic_unlock(s);
                break;
        case 0x4d0:
        case 0x4d1:
                pic_lock(s);
                elcr_ioport_write(&s->pics[addr & 1], addr, data);
                pic_unlock(s);
                break;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static int picdev_read(struct kvm_pic *s,
                       gpa_t addr, int len, void *val)
{
        unsigned char *data = (unsigned char *)val;

        if (len != 1) {
                memset(val, 0, len);
                pr_pic_unimpl("non byte read\n");
                return 0;
        }
        switch (addr) {
        case 0x20:
        case 0x21:
        case 0xa0:
        case 0xa1:
                pic_lock(s);
                *data = pic_ioport_read(&s->pics[addr >> 7], addr);
                pic_unlock(s);
                break;
        case 0x4d0:
        case 0x4d1:
                pic_lock(s);
                *data = elcr_ioport_read(&s->pics[addr & 1], addr);
                pic_unlock(s);
                break;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                               gpa_t addr, int len, const void *val)
{
        return picdev_write(container_of(dev, struct kvm_pic, dev_master),
                            addr, len, val);
}

static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, int len, void *val)
{
        return picdev_read(container_of(dev, struct kvm_pic, dev_master),
                            addr, len, val);
}

static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, int len, const void *val)
{
        return picdev_write(container_of(dev, struct kvm_pic, dev_slave),
                            addr, len, val);
}

static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                             gpa_t addr, int len, void *val)
{
        return picdev_read(container_of(dev, struct kvm_pic, dev_slave),
                            addr, len, val);
}

static int picdev_eclr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                             gpa_t addr, int len, const void *val)
{
        return picdev_write(container_of(dev, struct kvm_pic, dev_eclr),
                            addr, len, val);
}

static int picdev_eclr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                            gpa_t addr, int len, void *val)
{
        return picdev_read(container_of(dev, struct kvm_pic, dev_eclr),
                            addr, len, val);
}

/*
 * callback when PIC0 irq status changed
 */
static void pic_irq_request(struct kvm *kvm, int level)
{
        struct kvm_pic *s = kvm->arch.vpic;

        if (!s->output)
                s->wakeup_needed = true;
        s->output = level;
}

static const struct kvm_io_device_ops picdev_master_ops = {
        .read     = picdev_master_read,
        .write    = picdev_master_write,
};

static const struct kvm_io_device_ops picdev_slave_ops = {
        .read     = picdev_slave_read,
        .write    = picdev_slave_write,
};

static const struct kvm_io_device_ops picdev_eclr_ops = {
        .read     = picdev_eclr_read,
        .write    = picdev_eclr_write,
};

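/*
 * Create the virtual PIC and register its master, slave and ELCR port
 * ranges on the PIO bus. On failure, everything registered so far is
 * torn down and an error code is returned.
 */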
int kvm_pic_init(struct kvm *kvm)
{
        struct kvm_pic *s;
        int ret;

        s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL_ACCOUNT);
        if (!s)
                return -ENOMEM;
        spin_lock_init(&s->lock);
        s->kvm = kvm;
        s->pics[0].elcr_mask = 0xf8;
        s->pics[1].elcr_mask = 0xde;
        s->pics[0].pics_state = s;
        s->pics[1].pics_state = s;

        /*
         * Initialize PIO device
         */
        kvm_iodevice_init(&s->dev_master, &picdev_master_ops);
        kvm_iodevice_init(&s->dev_slave, &picdev_slave_ops);
        kvm_iodevice_init(&s->dev_eclr, &picdev_eclr_ops);
        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x20, 2,
                                      &s->dev_master);
        if (ret < 0)
                goto fail_unlock;

        ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0xa0, 2, &s->dev_slave);
        if (ret < 0)
                goto fail_unreg_2;

        ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x4d0, 2, &s->dev_eclr);
        if (ret < 0)
                goto fail_unreg_1;

        mutex_unlock(&kvm->slots_lock);

        kvm->arch.vpic = s;

        return 0;

fail_unreg_1:
        kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_slave);

fail_unreg_2:
        kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_master);

fail_unlock:
        mutex_unlock(&kvm->slots_lock);

        kfree(s);

        return ret;
}

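/*
 * Unregister the PIC's PIO devices and free it.
 */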
void kvm_pic_destroy(struct kvm *kvm)
{
        struct kvm_pic *vpic = kvm->arch.vpic;

        if (!vpic)
                return;

        mutex_lock(&kvm->slots_lock);
        kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
        kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
        kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
        mutex_unlock(&kvm->slots_lock);

        kvm->arch.vpic = NULL;
        kfree(vpic);
}