linux/drivers/cpufreq/qcom-cpufreq-hw.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define LUT_MAX_ENTRIES			40U
#define LUT_SRC				GENMASK(31, 30)
#define LUT_L_VAL			GENMASK(7, 0)
#define LUT_CORE_COUNT			GENMASK(18, 16)
#define LUT_VOLT			GENMASK(11, 0)
#define CLK_HW_DIV			2
#define LUT_TURBO_IND			1

#define HZ_PER_KHZ			1000

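/*
 * Each LUT row is a pair of 32-bit words. The frequency word packs the
 * clock source select (LUT_SRC), the number of cores the entry applies
 * to (LUT_CORE_COUNT) and the PLL L-value (LUT_L_VAL); the voltage word
 * carries millivolts in LUT_VOLT. When LUT_SRC is non-zero the frequency
 * in kHz is lval * xo_rate / 1000: e.g. with a 19.2 MHz XO and an
 * illustrative lval of 80, 19200000 * 80 / 1000 = 1536000 kHz (1.536 GHz).
 * When LUT_SRC is zero the entry runs at the fixed alternate-clock rate
 * (cpu_hw_rate).
 */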
struct qcom_cpufreq_soc_data {
	u32 reg_enable;
	u32 reg_freq_lut;
	u32 reg_volt_lut;
	u32 reg_current_vote;
	u32 reg_perf_state;
	u8 lut_row_size;
};

struct qcom_cpufreq_data {
	void __iomem *base;
	struct resource *res;
	const struct qcom_cpufreq_soc_data *soc_data;

	/*
	 * Mutex to synchronize between de-init sequence and re-starting LMh
	 * polling/interrupts
	 */
	struct mutex throttle_lock;
	int throttle_irq;
	char irq_name[15];
	bool cancel_throttle;
	struct delayed_work throttle_work;
	struct cpufreq_policy *policy;
};

static unsigned long cpu_hw_rate, xo_rate;
static bool icc_scaling_enabled;

static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy,
			       unsigned long freq_khz)
{
	unsigned long freq_hz = freq_khz * 1000;
	struct dev_pm_opp *opp;
	struct device *dev;
	int ret;

	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);
	return ret;
}

static int qcom_cpufreq_update_opp(struct device *cpu_dev,
				   unsigned long freq_khz,
				   unsigned long volt)
{
	unsigned long freq_hz = freq_khz * 1000;
	int ret;

	/* Skip voltage update if the opp table is not available */
	if (!icc_scaling_enabled)
		return dev_pm_opp_add(cpu_dev, freq_hz, volt);

	ret = dev_pm_opp_adjust_voltage(cpu_dev, freq_hz, volt, volt, volt);
	if (ret) {
		dev_err(cpu_dev, "Voltage update failed freq=%lu\n", freq_khz);
		return ret;
	}

	return dev_pm_opp_enable(cpu_dev, freq_hz);
}

static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
					unsigned int index)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
	unsigned long freq = policy->freq_table[index].frequency;

	writel_relaxed(index, data->base + soc_data->reg_perf_state);

	if (icc_scaling_enabled)
		qcom_cpufreq_set_bw(policy, freq);

	return 0;
}

static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
	struct qcom_cpufreq_data *data;
	const struct qcom_cpufreq_soc_data *soc_data;
	struct cpufreq_policy *policy;
	unsigned int index;

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy)
		return 0;

	data = policy->driver_data;
	soc_data = data->soc_data;

	index = readl_relaxed(data->base + soc_data->reg_perf_state);
	index = min(index, LUT_MAX_ENTRIES - 1);

	return policy->freq_table[index].frequency;
}

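/*
 * Fast switching is only advertised when no OPP table is found in the
 * device tree (see qcom_cpufreq_hw_read_lut()), i.e. when
 * icc_scaling_enabled is false, so this path never has to vote on
 * interconnect bandwidth and can simply write the perf-state index.
 */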
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
						unsigned int target_freq)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
	unsigned int index;

	index = policy->cached_resolved_idx;
	writel_relaxed(index, data->base + soc_data->reg_perf_state);

	return policy->freq_table[index].frequency;
}

static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
				    struct cpufreq_policy *policy)
{
	u32 data, src, lval, i, core_count, prev_freq = 0, freq;
	u32 volt;
	struct cpufreq_frequency_table *table;
	struct dev_pm_opp *opp;
	unsigned long rate;
	int ret;
	struct qcom_cpufreq_data *drv_data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = drv_data->soc_data;

	table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (!ret) {
		/* Disable all opps and cross-validate against LUT later */
		icc_scaling_enabled = true;
		for (rate = 0; ; rate++) {
			opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
			if (IS_ERR(opp))
				break;

			dev_pm_opp_put(opp);
			dev_pm_opp_disable(cpu_dev, rate);
		}
	} else if (ret != -ENODEV) {
		dev_err(cpu_dev, "Invalid opp table in device tree\n");
		kfree(table);
		return ret;
	} else {
		policy->fast_switch_possible = true;
		icc_scaling_enabled = false;
	}

	for (i = 0; i < LUT_MAX_ENTRIES; i++) {
		data = readl_relaxed(drv_data->base + soc_data->reg_freq_lut +
				     i * soc_data->lut_row_size);
		src = FIELD_GET(LUT_SRC, data);
		lval = FIELD_GET(LUT_L_VAL, data);
		core_count = FIELD_GET(LUT_CORE_COUNT, data);

		data = readl_relaxed(drv_data->base + soc_data->reg_volt_lut +
				     i * soc_data->lut_row_size);
		volt = FIELD_GET(LUT_VOLT, data) * 1000;

		if (src)
			freq = xo_rate * lval / 1000;
		else
			freq = cpu_hw_rate / 1000;

		if (freq != prev_freq && core_count != LUT_TURBO_IND) {
			if (!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) {
				table[i].frequency = freq;
				dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n",
					i, freq, core_count);
			} else {
				dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq);
				table[i].frequency = CPUFREQ_ENTRY_INVALID;
			}

		} else if (core_count == LUT_TURBO_IND) {
			table[i].frequency = CPUFREQ_ENTRY_INVALID;
		}

		/*
		 * Two identical frequencies with the same core count mark
		 * the end of the table.
		 */
		if (i > 0 && prev_freq == freq) {
			struct cpufreq_frequency_table *prev = &table[i - 1];

			/*
			 * Only treat the last frequency that might be a boost
			 * as the boost frequency
			 */
			if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
				if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) {
					prev->frequency = prev_freq;
					prev->flags = CPUFREQ_BOOST_FREQ;
				} else {
					dev_warn(cpu_dev, "failed to update OPP for freq=%d\n",
						 freq);
				}
			}

			break;
		}

		prev_freq = freq;
	}

	table[i].frequency = CPUFREQ_TABLE_END;
	policy->freq_table = table;
	dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);

	return 0;
}

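/*
 * CPUs sharing a frequency domain reference the same domain index via
 * the "qcom,freq-domain" phandle. An illustrative (not SoC-accurate)
 * device tree fragment:
 *
 *	cpu@0 {
 *		...
 *		qcom,freq-domain = <&cpufreq_hw 0>;
 *	};
 */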
static void qcom_get_related_cpus(int index, struct cpumask *m)
{
	struct device_node *cpu_np;
	struct of_phandle_args args;
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		cpu_np = of_cpu_device_node_get(cpu);
		if (!cpu_np)
			continue;

		ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
						 "#freq-domain-cells", 0,
						 &args);
		of_node_put(cpu_np);
		if (ret < 0)
			continue;

		if (index == args.args[0])
			cpumask_set_cpu(cpu, m);
	}
}

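/*
 * The lower 10 bits of the current-vote register hold the LMh frequency
 * vote; multiplying by 19200 yields kHz, on the assumption that the vote
 * is expressed in units of the 19.2 MHz XO clock.
 */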
static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
{
	unsigned int val = readl_relaxed(data->base + data->soc_data->reg_current_vote);

	return (val & 0x3FF) * 19200;
}

static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{
	struct cpufreq_policy *policy = data->policy;
	int cpu = cpumask_first(policy->cpus);
	struct device *dev = get_cpu_device(cpu);
	unsigned long freq_hz, throttled_freq;
	struct dev_pm_opp *opp;
	unsigned int freq;

	/*
	 * Get the h/w throttled frequency, normalize it using the
	 * registered opp table and use it to calculate thermal pressure.
	 */
	freq = qcom_lmh_get_throttle_freq(data);
	freq_hz = freq * HZ_PER_KHZ;

	opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
	if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
		opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);

	/* The lookups above take a reference on the OPP; drop it */
	if (!IS_ERR(opp))
		dev_pm_opp_put(opp);

	throttled_freq = freq_hz / HZ_PER_KHZ;

	/* Update thermal pressure (the boost frequencies are accepted) */
	arch_update_thermal_pressure(policy->related_cpus, throttled_freq);

	/*
	 * In the unlikely case the policy is unregistered, do not enable
	 * polling or the h/w interrupt.
	 */
	mutex_lock(&data->throttle_lock);
	if (data->cancel_throttle)
		goto out;

	/*
	 * If the h/w throttled frequency is higher than what cpufreq has
	 * requested, stop polling and switch back to the interrupt
	 * mechanism.
	 */
	if (throttled_freq >= qcom_cpufreq_hw_get(cpu))
		enable_irq(data->throttle_irq);
	else
		mod_delayed_work(system_highpri_wq, &data->throttle_work,
				 msecs_to_jiffies(10));

out:
	mutex_unlock(&data->throttle_lock);
}

static void qcom_lmh_dcvs_poll(struct work_struct *work)
{
	struct qcom_cpufreq_data *data;

	data = container_of(work, struct qcom_cpufreq_data, throttle_work.work);
	qcom_lmh_dcvs_notify(data);
}

static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
{
	struct qcom_cpufreq_data *c_data = data;

	/* Disable interrupt and enable polling */
	disable_irq_nosync(c_data->throttle_irq);
	schedule_delayed_work(&c_data->throttle_work, 0);

	return IRQ_HANDLED;
}

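/*
 * Register layouts for the two generations of the IP, commonly referred
 * to as OSM ("qcom,cpufreq-hw") and EPSS ("qcom,cpufreq-epss"): OSM uses
 * interleaved 32-byte LUT rows, while EPSS keeps separate frequency and
 * voltage tables with a 4-byte stride and has no current-vote register.
 */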
static const struct qcom_cpufreq_soc_data qcom_soc_data = {
	.reg_enable = 0x0,
	.reg_freq_lut = 0x110,
	.reg_volt_lut = 0x114,
	.reg_current_vote = 0x704,
	.reg_perf_state = 0x920,
	.lut_row_size = 32,
};

static const struct qcom_cpufreq_soc_data epss_soc_data = {
	.reg_enable = 0x0,
	.reg_freq_lut = 0x100,
	.reg_volt_lut = 0x200,
	.reg_perf_state = 0x320,
	.lut_row_size = 4,
};

static const struct of_device_id qcom_cpufreq_hw_match[] = {
	{ .compatible = "qcom,cpufreq-hw", .data = &qcom_soc_data },
	{ .compatible = "qcom,cpufreq-epss", .data = &epss_soc_data },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);

static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct platform_device *pdev = cpufreq_get_driver_data();
	int ret;

	/*
	 * Look for LMh interrupt. If no interrupt line is specified /
	 * if there is an error, allow cpufreq to be enabled as usual.
	 */
	data->throttle_irq = platform_get_irq_optional(pdev, index);
	if (data->throttle_irq == -ENXIO)
		return 0;
	if (data->throttle_irq < 0)
		return data->throttle_irq;

	data->cancel_throttle = false;
	data->policy = policy;

	mutex_init(&data->throttle_lock);
	INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);

	snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
	ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
				   IRQF_ONESHOT | IRQF_NO_AUTOEN, data->irq_name, data);
	if (ret) {
		dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret);
		return 0;
	}

	ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
	if (ret)
		dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
			data->irq_name, data->throttle_irq);

	return 0;
}

static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
{
	if (data->throttle_irq <= 0)
		return;

	mutex_lock(&data->throttle_lock);
	data->cancel_throttle = true;
	mutex_unlock(&data->throttle_lock);

	cancel_delayed_work_sync(&data->throttle_work);
	free_irq(data->throttle_irq, data);
}

static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
	struct platform_device *pdev = cpufreq_get_driver_data();
	struct device *dev = &pdev->dev;
	struct of_phandle_args args;
	struct device_node *cpu_np;
	struct device *cpu_dev;
	struct resource *res;
	void __iomem *base;
	struct qcom_cpufreq_data *data;
	int ret, index;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
	}

	cpu_np = of_cpu_device_node_get(policy->cpu);
	if (!cpu_np)
		return -EINVAL;

	ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
					 "#freq-domain-cells", 0, &args);
	of_node_put(cpu_np);
	if (ret)
		return ret;

	index = args.args[0];

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	if (!res) {
		dev_err(dev, "failed to get mem resource %d\n", index);
		return -ENODEV;
	}

	if (!request_mem_region(res->start, resource_size(res), res->name)) {
		dev_err(dev, "failed to request resource %pR\n", res);
		return -EBUSY;
	}

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		dev_err(dev, "failed to map resource %pR\n", res);
		ret = -ENOMEM;
		goto release_region;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto unmap_base;
	}

	data->soc_data = of_device_get_match_data(&pdev->dev);
	data->base = base;
	data->res = res;

	/* HW should be in enabled state to proceed */
	if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) {
		dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
		ret = -ENODEV;
		goto error;
	}

	qcom_get_related_cpus(index, policy->cpus);
	if (!cpumask_weight(policy->cpus)) {
		dev_err(dev, "Domain-%d failed to get related CPUs\n", index);
		ret = -ENOENT;
		goto error;
	}

	policy->driver_data = data;
	policy->dvfs_possible_from_any_cpu = true;

	ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
	if (ret) {
		dev_err(dev, "Domain-%d failed to read LUT\n", index);
		goto error;
	}

	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_err(cpu_dev, "Failed to add OPPs\n");
		ret = -ENODEV;
		goto error;
	}

	if (policy_has_boost_freq(policy)) {
		ret = cpufreq_enable_boost_support();
		if (ret)
			dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
	}

	ret = qcom_cpufreq_hw_lmh_init(policy, index);
	if (ret)
		goto error;

	return 0;
error:
	kfree(data);
unmap_base:
	iounmap(base);
release_region:
	release_mem_region(res->start, resource_size(res));
	return ret;
}

static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct resource *res = data->res;
	void __iomem *base = data->base;

	dev_pm_opp_remove_all_dynamic(cpu_dev);
	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
	qcom_cpufreq_hw_lmh_exit(data);
	kfree(policy->freq_table);
	kfree(data);
	iounmap(base);
	release_mem_region(res->start, resource_size(res));

	return 0;
}

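/*
 * The LMh interrupt is requested with IRQF_NO_AUTOEN in
 * qcom_cpufreq_hw_lmh_init() and so starts off masked; unmask it only
 * once cpufreq declares the policy ready.
 */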
static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;

	if (data->throttle_irq > 0)
		enable_irq(data->throttle_irq);
}

static struct freq_attr *qcom_cpufreq_hw_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_scaling_boost_freqs,
	NULL
};

static struct cpufreq_driver cpufreq_qcom_hw_driver = {
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK |
			  CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
			  CPUFREQ_IS_COOLING_DEV,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= qcom_cpufreq_hw_target_index,
	.get		= qcom_cpufreq_hw_get,
	.init		= qcom_cpufreq_hw_cpu_init,
	.exit		= qcom_cpufreq_hw_cpu_exit,
	.register_em	= cpufreq_register_em_with_opp,
	.fast_switch	= qcom_cpufreq_hw_fast_switch,
	.name		= "qcom-cpufreq-hw",
	.attr		= qcom_cpufreq_hw_attr,
	.ready		= qcom_cpufreq_ready,
};

static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
	struct device *cpu_dev;
	struct clk *clk;
	int ret;

	clk = clk_get(&pdev->dev, "xo");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	xo_rate = clk_get_rate(clk);
	clk_put(clk);

	clk = clk_get(&pdev->dev, "alternate");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
	clk_put(clk);

	cpufreq_qcom_hw_driver.driver_data = pdev;

	/* Check for optional interconnect paths on CPU0 */
	cpu_dev = get_cpu_device(0);
	if (!cpu_dev)
		return -EPROBE_DEFER;

	ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
	if (ret)
		return ret;

	ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
	if (ret)
		dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
	else
		dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n");

	return ret;
}

static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
	return cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
}

static struct platform_driver qcom_cpufreq_hw_driver = {
	.probe = qcom_cpufreq_hw_driver_probe,
	.remove = qcom_cpufreq_hw_driver_remove,
	.driver = {
		.name = "qcom-cpufreq-hw",
		.of_match_table = qcom_cpufreq_hw_match,
	},
};

static int __init qcom_cpufreq_hw_init(void)
{
	return platform_driver_register(&qcom_cpufreq_hw_driver);
}
postcore_initcall(qcom_cpufreq_hw_init);

static void __exit qcom_cpufreq_hw_exit(void)
{
	platform_driver_unregister(&qcom_cpufreq_hw_driver);
}
module_exit(qcom_cpufreq_hw_exit);

MODULE_DESCRIPTION("QCOM CPUFREQ HW Driver");
MODULE_LICENSE("GPL v2");