qemu/target/s390x/excp_helper.c
/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

/* #define DEBUG_S390 */
/* #define DEBUG_S390_STDOUT */

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) { qemu_log(fmt, ##__VA_ARGS__); } } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_AUTO);
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

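/* Map a QEMU MMU index to the address-space-control (ASC) mode it models. */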
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

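/* Translate a guest virtual address and, on success, enter the mapping
   into the QEMU TLB.  Returns 0 on success; returns 1 after a program
   exception has been triggered for the faulting access. */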
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    vaddr = orig_vaddr;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
            return 1;
        }
    } else if (mmu_idx == MMU_REAL_IDX) {
        if (mmu_translate_real(env, vaddr, rw, &raddr, &prot)) {
            return 1;
        }
    } else {
        abort();
    }

    /* check out of RAM access */
    if (!address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, rw)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
            __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr & TARGET_PAGE_MASK, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

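/* Deliver a program interrupt: store the interruption data and the old PSW
   in the lowcore, then load the program-new PSW. */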
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

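/* Deliver a SUPERVISOR CALL interrupt via the svc old/new PSWs in the
   lowcore. */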
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

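/* Deliver the highest-priority pending external interrupt that is enabled
   both by the PSW external mask and by the subclass masks in CR0. */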
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if ((env->pending_int & INTERRUPT_EXT_SERVICE) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        /*
         * FIXME: floating IRQs should be considered by all CPUs and
         *        should not get cleared by CPU reset.
         */
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(env->service_param);
        lowcore->cpu_addr = 0;
        env->service_param = 0;
        env->pending_int &= ~INTERRUPT_EXT_SERVICE;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

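/* Deliver a pending I/O interrupt from the per-ISC queues, honoring the
   I/O interruption subclass mask in CR6. */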
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}

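/* Deliver a machine-check interrupt: save the architected register state
   in the lowcore save areas and load the mcck-new PSW. */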
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

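/* Main interrupt delivery entry point.  If no exception is already pending
   (exception_index == -1), select the highest-priority deliverable
   interrupt class, from machine check down to STOP. */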
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        break;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if (cs->exception_index == EXCP_HLT) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

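/* Called from the generic TCG execution loop; returns true if an interrupt
   was delivered. */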
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

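/* Convert a hit CPU watchpoint into a PER storage-alteration event and
   re-enter the translator. */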
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This holds true except when the MVCP
           and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered; it will call load_psw, which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }
    program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
}

#endif /* CONFIG_USER_ONLY */