linux/drivers/hwtracing/coresight/coresight-cpu-debug.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Linaro Limited. All rights reserved.
 *
 * Author: Leo Yan <leo.yan@linaro.org>
 */
#include <linux/amba/bus.h>
#include <linux/coresight.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include "coresight-priv.h"

#define EDPCSR                          0x0A0
#define EDCIDSR                         0x0A4
#define EDVIDSR                         0x0A8
#define EDPCSR_HI                       0x0AC
#define EDOSLAR                         0x300
#define EDPRCR                          0x310
#define EDPRSR                          0x314
#define EDDEVID1                        0xFC4
#define EDDEVID                         0xFC8

#define EDPCSR_PROHIBITED               0xFFFFFFFF

/* bits definition for EDPCSR */
#define EDPCSR_THUMB                    BIT(0)
#define EDPCSR_ARM_INST_MASK            GENMASK(31, 2)
#define EDPCSR_THUMB_INST_MASK          GENMASK(31, 1)

/* bits definition for EDPRCR */
#define EDPRCR_COREPURQ                 BIT(3)
#define EDPRCR_CORENPDRQ                BIT(0)

/* bits definition for EDPRSR */
#define EDPRSR_DLK                      BIT(6)
#define EDPRSR_PU                       BIT(0)

/* bits definition for EDVIDSR */
#define EDVIDSR_NS                      BIT(31)
#define EDVIDSR_E2                      BIT(30)
#define EDVIDSR_E3                      BIT(29)
#define EDVIDSR_HV                      BIT(28)
#define EDVIDSR_VMID                    GENMASK(7, 0)

/*
 * bits definition for EDDEVID1:PCSROffset
 *
 * NOTE: armv8 and armv7 define this field differently, so consolidate
 * the bits definition as below:
 *
 * 0b0000 - Sample offset applies based on the instruction state; we
 *          rely on EDDEVID to check whether EDPCSR is implemented
 * 0b0001 - No offset applies.
 * 0b0010 - No offset applies, but do not use in AArch32 mode
 *
 */
#define EDDEVID1_PCSR_OFFSET_MASK       GENMASK(3, 0)
#define EDDEVID1_PCSR_OFFSET_INS_SET    (0x0)
#define EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32     (0x2)

/* bits definition for EDDEVID */
#define EDDEVID_PCSAMPLE_MODE           GENMASK(3, 0)
#define EDDEVID_IMPL_EDPCSR             (0x1)
#define EDDEVID_IMPL_EDPCSR_EDCIDSR     (0x2)
#define EDDEVID_IMPL_FULL               (0x3)

#define DEBUG_WAIT_SLEEP                1000
#define DEBUG_WAIT_TIMEOUT              32000

struct debug_drvdata {
        void __iomem    *base;
        struct device   *dev;
        int             cpu;

        bool            edpcsr_present;
        bool            edcidsr_present;
        bool            edvidsr_present;
        bool            pc_has_offset;

        u32             edpcsr;
        u32             edpcsr_hi;
        u32             edprsr;
        u32             edvidsr;
        u32             edcidsr;
};

static DEFINE_MUTEX(debug_lock);
static DEFINE_PER_CPU(struct debug_drvdata *, debug_drvdata);
static int debug_count;
static struct dentry *debug_debugfs_dir;

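/*
 * On/off knob for the CPU debug functionality.  It is toggled at
 * runtime through the debugfs "enable" file created in
 * debug_func_init(), and can also be set at boot time via the module
 * parameter (e.g. "coresight_cpu_debug.enable=1" on the kernel command
 * line, assuming the default module name).
 */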
static bool debug_enable;
module_param_named(enable, debug_enable, bool, 0600);
MODULE_PARM_DESC(enable, "Control to enable coresight CPU debug functionality");

static void debug_os_unlock(struct debug_drvdata *drvdata)
{
        /* Unlocks the debug registers */
        writel_relaxed(0x0, drvdata->base + EDOSLAR);

        /* Make sure the registers are unlocked before accessing */
        wmb();
}
/*
 * According to ARM DDI 0487A.k, the access permission must be checked
 * before touching the external debug registers; if either of the
 * conditions below is met, the debug registers cannot be accessed
 * without risking a lockup:
 *
 * - The CPU power domain is powered off;
 * - The OS Double Lock is locked;
 *
 * Both conditions can be detected by reading EDPRSR.
 */
static bool debug_access_permitted(struct debug_drvdata *drvdata)
{
        /* CPU is powered off */
        if (!(drvdata->edprsr & EDPRSR_PU))
                return false;

        /* The OS Double Lock is locked */
        if (drvdata->edprsr & EDPRSR_DLK)
                return false;

        return true;
}

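/*
 * Request the power controller to power up the CPU debug power domain:
 * assert EDPRCR.COREPURQ, wait for EDPRSR.PU to be set, then also set
 * EDPRCR.CORENPDRQ so the core emulates power down instead of actually
 * losing power.  Retries if the domain drops again in between.
 */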
static void debug_force_cpu_powered_up(struct debug_drvdata *drvdata)
{
        u32 edprcr;

try_again:

        /*
         * Send a request to the power management controller by asserting
         * the DBGPWRUPREQ signal; a power management controller with a
         * sane implementation will enable the CPU power domain if the
         * CPU is in a low power state.
         */
        edprcr = readl_relaxed(drvdata->base + EDPRCR);
        edprcr |= EDPRCR_COREPURQ;
        writel_relaxed(edprcr, drvdata->base + EDPRCR);

        /* Wait for CPU to be powered up (timeout~=32ms) */
        if (readx_poll_timeout_atomic(readl_relaxed, drvdata->base + EDPRSR,
                        drvdata->edprsr, (drvdata->edprsr & EDPRSR_PU),
                        DEBUG_WAIT_SLEEP, DEBUG_WAIT_TIMEOUT)) {
                /*
                 * Unfortunately the CPU cannot be powered up, so bail
                 * out; the following register accesses will not be
                 * permitted.  In this case CPU low power states should
                 * be disabled to ensure the CPU power domain is enabled!
                 */
                dev_err(drvdata->dev, "%s: power up request for CPU%d failed\n",
                        __func__, drvdata->cpu);
                return;
        }

        /*
         * At this point the CPU is powered up, so set the no-powerdown
         * request bit so we don't lose power and emulate power down.
         */
        edprcr = readl_relaxed(drvdata->base + EDPRCR);
        edprcr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;
        writel_relaxed(edprcr, drvdata->base + EDPRCR);

        drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);

        /* The core power domain got switched off while in use, try again */
        if (unlikely(!(drvdata->edprsr & EDPRSR_PU)))
                goto try_again;
}

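/*
 * Snapshot the sampling registers for one CPU: unlock the debug
 * registers, make sure the core power domain is up, then read EDPCSR
 * (plus EDPCSR_HI, EDCIDSR and EDVIDSR when they are implemented).
 * EDPRCR is saved and restored so the power requests made here do not
 * leak out of this function.
 */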
static void debug_read_regs(struct debug_drvdata *drvdata)
{
        u32 save_edprcr;

        CS_UNLOCK(drvdata->base);

        /* Unlock the OS Lock */
        debug_os_unlock(drvdata);

        /* Save EDPRCR register */
        save_edprcr = readl_relaxed(drvdata->base + EDPRCR);

        /*
         * Ensure the CPU power domain is enabled so that the registers
         * are accessible.
         */
        debug_force_cpu_powered_up(drvdata);

        if (!debug_access_permitted(drvdata))
                goto out;

        drvdata->edpcsr = readl_relaxed(drvdata->base + EDPCSR);

        /*
         * As described in ARM DDI 0487A.k, if the processing
         * element (PE) is in debug state, or sample-based
         * profiling is prohibited, EDPCSR reads as 0xFFFFFFFF;
         * the EDCIDSR, EDVIDSR and EDPCSR_HI registers also hold
         * UNKNOWN values, so directly bail out in this case.
         */
        if (drvdata->edpcsr == EDPCSR_PROHIBITED)
                goto out;

        /*
         * A read of the EDPCSR normally has the side-effect of
         * indirectly writing to EDCIDSR, EDVIDSR and EDPCSR_HI;
         * at this point it's safe to read their values.
         */
        if (IS_ENABLED(CONFIG_64BIT))
                drvdata->edpcsr_hi = readl_relaxed(drvdata->base + EDPCSR_HI);

        if (drvdata->edcidsr_present)
                drvdata->edcidsr = readl_relaxed(drvdata->base + EDCIDSR);

        if (drvdata->edvidsr_present)
                drvdata->edvidsr = readl_relaxed(drvdata->base + EDVIDSR);

out:
        /* Restore EDPRCR register */
        writel_relaxed(save_edprcr, drvdata->base + EDPRCR);

        CS_LOCK(drvdata->base);
}

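/*
 * Convert the raw EDPCSR sample into a usable PC.  On 64-bit, EDPCSR_HI
 * holds the upper 32 bits of the sampled PC.  On 32-bit, the Thumb bit
 * is stripped and, when EDDEVID1 reports an offset, the architectural
 * prefetch offset (8 bytes for ARM, 4 bytes for Thumb) is subtracted.
 */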
#ifdef CONFIG_64BIT
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
        return (unsigned long)drvdata->edpcsr_hi << 32 |
               (unsigned long)drvdata->edpcsr;
}
#else
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
        unsigned long arm_inst_offset = 0, thumb_inst_offset = 0;
        unsigned long pc;

        pc = (unsigned long)drvdata->edpcsr;

        if (drvdata->pc_has_offset) {
                arm_inst_offset = 8;
                thumb_inst_offset = 4;
        }

        /* Handle thumb instruction */
        if (pc & EDPCSR_THUMB) {
                pc = (pc & EDPCSR_THUMB_INST_MASK) - thumb_inst_offset;
                return pc;
        }

        /*
         * Handle the ARM instruction offset: if the ARM instruction is
         * not 4-byte aligned, the sampled value is possibly
         * IMPLEMENTATION DEFINED; keep the original value in this case
         * and print a notice.
         */
        if (pc & BIT(1))
                dev_emerg(drvdata->dev,
                          "Instruction offset is implementation defined\n");
        else
                pc = (pc & EDPCSR_ARM_INST_MASK) - arm_inst_offset;

        return pc;
}
#endif

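/*
 * Decode and print the snapshot gathered by debug_read_regs() with
 * dev_emerg(); this is called from the panic notifier path, so it only
 * prints the registers it can trust.
 */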
static void debug_dump_regs(struct debug_drvdata *drvdata)
{
        struct device *dev = drvdata->dev;
        unsigned long pc;

        dev_emerg(dev, " EDPRSR:  %08x (Power:%s DLK:%s)\n",
                  drvdata->edprsr,
                  drvdata->edprsr & EDPRSR_PU ? "On" : "Off",
                  drvdata->edprsr & EDPRSR_DLK ? "Lock" : "Unlock");

        if (!debug_access_permitted(drvdata)) {
                dev_emerg(dev, "No permission to access debug registers!\n");
                return;
        }

        if (drvdata->edpcsr == EDPCSR_PROHIBITED) {
                dev_emerg(dev, "CPU is in Debug state or profiling is prohibited!\n");
                return;
        }

        pc = debug_adjust_pc(drvdata);
        dev_emerg(dev, " EDPCSR:  %pS\n", (void *)pc);

        if (drvdata->edcidsr_present)
                dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr);

        if (drvdata->edvidsr_present)
                dev_emerg(dev, " EDVIDSR: %08x (State:%s Mode:%s Width:%dbits VMID:%x)\n",
                          drvdata->edvidsr,
                          drvdata->edvidsr & EDVIDSR_NS ?
                          "Non-secure" : "Secure",
                          drvdata->edvidsr & EDVIDSR_E3 ? "EL3" :
                                (drvdata->edvidsr & EDVIDSR_E2 ?
                                 "EL2" : "EL1/0"),
                          drvdata->edvidsr & EDVIDSR_HV ? 64 : 32,
                          drvdata->edvidsr & (u32)EDVIDSR_VMID);
}

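/*
 * Discover the implemented feature set from EDDEVID and EDDEVID1.
 * This must run on the CPU that owns this debug unit, so the probe
 * path invokes it through smp_call_function_single().
 */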
static void debug_init_arch_data(void *info)
{
        struct debug_drvdata *drvdata = info;
        u32 mode, pcsr_offset;
        u32 eddevid, eddevid1;

        CS_UNLOCK(drvdata->base);

        /* Read device info */
        eddevid  = readl_relaxed(drvdata->base + EDDEVID);
        eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);

        CS_LOCK(drvdata->base);

        /* Parse implementation feature */
        mode = eddevid & EDDEVID_PCSAMPLE_MODE;
        pcsr_offset = eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;

        drvdata->edpcsr_present  = false;
        drvdata->edcidsr_present = false;
        drvdata->edvidsr_present = false;
        drvdata->pc_has_offset   = false;

        switch (mode) {
        case EDDEVID_IMPL_FULL:
                drvdata->edvidsr_present = true;
                /* Fall through */
        case EDDEVID_IMPL_EDPCSR_EDCIDSR:
                drvdata->edcidsr_present = true;
                /* Fall through */
        case EDDEVID_IMPL_EDPCSR:
                /*
                 * In ARM DDI 0487A.k, EDDEVID1.PCSROffset defines
                 * whether an offset applies to the PC sampling value;
                 * if EDDEVID1.PCSROffset reads back as 0x2, the debug
                 * module does not sample the instruction set state when
                 * an armv8 CPU is in AArch32 state.
                 */
                drvdata->edpcsr_present =
                        ((IS_ENABLED(CONFIG_64BIT) && pcsr_offset != 0) ||
                         (pcsr_offset != EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32));

                drvdata->pc_has_offset =
                        (pcsr_offset == EDDEVID1_PCSR_OFFSET_INS_SET);
                break;
        default:
                break;
        }
}

/*
 * Dump out information on panic.
 */
static int debug_notifier_call(struct notifier_block *self,
                               unsigned long v, void *p)
{
        int cpu;
        struct debug_drvdata *drvdata;

        mutex_lock(&debug_lock);

        /* Bail out if the functionality is disabled */
        if (!debug_enable)
                goto skip_dump;

        pr_emerg("ARM external debug module:\n");

        for_each_possible_cpu(cpu) {
                drvdata = per_cpu(debug_drvdata, cpu);
                if (!drvdata)
                        continue;

                dev_emerg(drvdata->dev, "CPU[%d]:\n", drvdata->cpu);

                debug_read_regs(drvdata);
                debug_dump_regs(drvdata);
        }

skip_dump:
        mutex_unlock(&debug_lock);
        return 0;
}

static struct notifier_block debug_notifier = {
        .notifier_call = debug_notifier_call,
};

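/*
 * Take a runtime PM reference on every registered CPU debug unit so
 * that the debug power domains stay powered on.  If any CPU fails, the
 * references taken so far are dropped again.
 */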
static int debug_enable_func(void)
{
        struct debug_drvdata *drvdata;
        int cpu, ret = 0;
        cpumask_t mask;

        /*
         * Use a cpumask to track which debug power domains have been
         * powered on, so the failure path can roll them back.
         */
        cpumask_clear(&mask);

        for_each_possible_cpu(cpu) {
                drvdata = per_cpu(debug_drvdata, cpu);
                if (!drvdata)
                        continue;

                ret = pm_runtime_get_sync(drvdata->dev);
                if (ret < 0)
                        goto err;
                else
                        cpumask_set_cpu(cpu, &mask);
        }

        return 0;

err:
        /*
         * If pm_runtime_get_sync() has failed, roll back all the other
         * CPUs that were enabled before the failure.
         */
        for_each_cpu(cpu, &mask) {
                drvdata = per_cpu(debug_drvdata, cpu);
                pm_runtime_put_noidle(drvdata->dev);
        }

        return ret;
}

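/*
 * Drop the runtime PM reference on every registered CPU debug unit.
 * The last error seen is returned, but all CPUs are still processed.
 */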
static int debug_disable_func(void)
{
        struct debug_drvdata *drvdata;
        int cpu, ret, err = 0;

        /*
         * Disable the debug power domains; if an error is encountered,
         * record it and keep iterating over the remaining CPUs.
         */
        for_each_possible_cpu(cpu) {
                drvdata = per_cpu(debug_drvdata, cpu);
                if (!drvdata)
                        continue;

                ret = pm_runtime_put(drvdata->dev);
                if (ret < 0)
                        err = ret;
        }

        return err;
}

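/*
 * debugfs "enable" file handlers: writing a boolean toggles the debug
 * functionality (powering the debug domains up or down accordingly),
 * reading returns the current value of debug_enable.
 */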
static ssize_t debug_func_knob_write(struct file *f,
                const char __user *buf, size_t count, loff_t *ppos)
{
        u8 val;
        int ret;

        ret = kstrtou8_from_user(buf, count, 2, &val);
        if (ret)
                return ret;

        mutex_lock(&debug_lock);

        if (val == debug_enable)
                goto out;

        if (val)
                ret = debug_enable_func();
        else
                ret = debug_disable_func();

        if (ret) {
                pr_err("%s: unable to %s debug function: %d\n",
                       __func__, val ? "enable" : "disable", ret);
                goto err;
        }

        debug_enable = val;
out:
        ret = count;
err:
        mutex_unlock(&debug_lock);
        return ret;
}

static ssize_t debug_func_knob_read(struct file *f,
                char __user *ubuf, size_t count, loff_t *ppos)
{
        ssize_t ret;
        char buf[3];

        mutex_lock(&debug_lock);
        snprintf(buf, sizeof(buf), "%d\n", debug_enable);
        mutex_unlock(&debug_lock);

        ret = simple_read_from_buffer(ubuf, count, ppos, buf, sizeof(buf));
        return ret;
}

static const struct file_operations debug_func_knob_fops = {
        .open   = simple_open,
        .read   = debug_func_knob_read,
        .write  = debug_func_knob_write,
};

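/*
 * One-time setup, called when the first device probes: create the
 * debugfs knob (typically /sys/kernel/debug/coresight_cpu_debug/enable)
 * and register the panic notifier that dumps the sampled registers.
 */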
static int debug_func_init(void)
{
        int ret;

        /* Create debugfs node */
        debug_debugfs_dir = debugfs_create_dir("coresight_cpu_debug", NULL);
        debugfs_create_file("enable", 0644, debug_debugfs_dir, NULL,
                            &debug_func_knob_fops);

        /* Register function to be called for panic */
        ret = atomic_notifier_chain_register(&panic_notifier_list,
                                             &debug_notifier);
        if (ret) {
                pr_err("%s: unable to register notifier: %d\n",
                       __func__, ret);
                goto err;
        }

        return 0;

err:
        debugfs_remove_recursive(debug_debugfs_dir);
        return ret;
}

static void debug_func_exit(void)
{
        atomic_notifier_chain_unregister(&panic_notifier_list,
                                         &debug_notifier);
        debugfs_remove_recursive(debug_debugfs_dir);
}

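/*
 * AMBA probe for one per-CPU debug unit: map the registers, discover
 * the implemented features on the owning CPU, set up the global
 * debugfs/panic infrastructure on first use, and release the runtime
 * PM reference if the functionality is currently disabled.
 */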
static int debug_probe(struct amba_device *adev, const struct amba_id *id)
{
        void __iomem *base;
        struct device *dev = &adev->dev;
        struct debug_drvdata *drvdata;
        struct resource *res = &adev->res;
        int ret;

        drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;

        drvdata->cpu = coresight_get_cpu(dev);
        if (drvdata->cpu < 0)
                return drvdata->cpu;

        if (per_cpu(debug_drvdata, drvdata->cpu)) {
                dev_err(dev, "CPU%d drvdata has already been initialized\n",
                        drvdata->cpu);
                return -EBUSY;
        }

        drvdata->dev = &adev->dev;
        amba_set_drvdata(adev, drvdata);

        /* Validity for the resource is already checked by the AMBA core */
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        drvdata->base = base;

        get_online_cpus();
        per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
        ret = smp_call_function_single(drvdata->cpu, debug_init_arch_data,
                                       drvdata, 1);
        put_online_cpus();

        if (ret) {
                dev_err(dev, "CPU%d debug arch init failed\n", drvdata->cpu);
                goto err;
        }

        if (!drvdata->edpcsr_present) {
                dev_err(dev, "CPU%d sample-based profiling isn't implemented\n",
                        drvdata->cpu);
                ret = -ENXIO;
                goto err;
        }

        if (!debug_count++) {
                ret = debug_func_init();
                if (ret)
                        goto err_func_init;
        }

        mutex_lock(&debug_lock);
        /* Turn off debug power domain if debugging is disabled */
        if (!debug_enable)
                pm_runtime_put(dev);
        mutex_unlock(&debug_lock);

        dev_info(dev, "Coresight debug-CPU%d initialized\n", drvdata->cpu);
        return 0;

err_func_init:
        debug_count--;
err:
        per_cpu(debug_drvdata, drvdata->cpu) = NULL;
        return ret;
}

static int debug_remove(struct amba_device *adev)
{
        struct device *dev = &adev->dev;
        struct debug_drvdata *drvdata = amba_get_drvdata(adev);

        per_cpu(debug_drvdata, drvdata->cpu) = NULL;

        mutex_lock(&debug_lock);
        /* Turn off the debug power domain before the module is removed */
        if (debug_enable)
                pm_runtime_put(dev);
        mutex_unlock(&debug_lock);

        if (!--debug_count)
                debug_func_exit();

        return 0;
}

static const struct amba_id debug_ids[] = {
        {       /* Debug for Cortex-A53 */
                .id     = 0x000bbd03,
                .mask   = 0x000fffff,
        },
        {       /* Debug for Cortex-A57 */
                .id     = 0x000bbd07,
                .mask   = 0x000fffff,
        },
        {       /* Debug for Cortex-A72 */
                .id     = 0x000bbd08,
                .mask   = 0x000fffff,
        },
        {       /* Debug for Cortex-A73 */
                .id     = 0x000bbd09,
                .mask   = 0x000fffff,
        },
        { 0, 0 },
};

static struct amba_driver debug_driver = {
        .drv = {
                .name   = "coresight-cpu-debug",
                .suppress_bind_attrs = true,
        },
        .probe          = debug_probe,
        .remove         = debug_remove,
        .id_table       = debug_ids,
};

module_amba_driver(debug_driver);

MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
MODULE_DESCRIPTION("ARM Coresight CPU Debug Driver");
MODULE_LICENSE("GPL");