linux/drivers/net/ethernet/ti/am65-cpts.c
// SPDX-License-Identifier: GPL-2.0
/* TI K3 AM65x Common Platform Time Sync
 *
 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
 *
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>

#include "am65-cpts.h"

struct am65_genf_regs {
        u32 comp_lo;    /* Comparison Low Value 0:31 */
        u32 comp_hi;    /* Comparison High Value 32:63 */
        u32 control;    /* control */
        u32 length;     /* Length */
        u32 ppm_low;    /* PPM Load Low Value 0:31 */
        u32 ppm_hi;     /* PPM Load High Value 32:63 */
        u32 ts_nudge;   /* Nudge value */
} __aligned(32) __packed;

#define AM65_CPTS_GENF_MAX_NUM 9
#define AM65_CPTS_ESTF_MAX_NUM 8

struct am65_cpts_regs {
        u32 idver;              /* Identification and version */
        u32 control;            /* Time sync control */
        u32 rftclk_sel;         /* Reference Clock Select Register */
        u32 ts_push;            /* Time stamp event push */
        u32 ts_load_val_lo;     /* Time Stamp Load Low Value 0:31 */
        u32 ts_load_en;         /* Time stamp load enable */
        u32 ts_comp_lo;         /* Time Stamp Comparison Low Value 0:31 */
        u32 ts_comp_length;     /* Time Stamp Comparison Length */
        u32 intstat_raw;        /* Time sync interrupt status raw */
        u32 intstat_masked;     /* Time sync interrupt status masked */
        u32 int_enable;         /* Time sync interrupt enable */
        u32 ts_comp_nudge;      /* Time Stamp Comparison Nudge Value */
        u32 event_pop;          /* Event interrupt pop */
        u32 event_0;            /* Event Time Stamp lo 0:31 */
        u32 event_1;            /* Event Type Fields */
        u32 event_2;            /* Event Type Fields domain */
        u32 event_3;            /* Event Time Stamp hi 32:63 */
        u32 ts_load_val_hi;     /* Time Stamp Load High Value 32:63 */
        u32 ts_comp_hi;         /* Time Stamp Comparison High Value 32:63 */
        u32 ts_add_val;         /* Time Stamp Add value */
        u32 ts_ppm_low;         /* Time Stamp PPM Load Low Value 0:31 */
        u32 ts_ppm_hi;          /* Time Stamp PPM Load High Value 32:63 */
        u32 ts_nudge;           /* Time Stamp Nudge value */
        u32 reserv[33];
        struct am65_genf_regs genf[AM65_CPTS_GENF_MAX_NUM];
        struct am65_genf_regs estf[AM65_CPTS_ESTF_MAX_NUM];
};

/* CONTROL_REG */
#define AM65_CPTS_CONTROL_EN                    BIT(0)
#define AM65_CPTS_CONTROL_INT_TEST              BIT(1)
#define AM65_CPTS_CONTROL_TS_COMP_POLARITY      BIT(2)
#define AM65_CPTS_CONTROL_TSTAMP_EN             BIT(3)
#define AM65_CPTS_CONTROL_SEQUENCE_EN           BIT(4)
#define AM65_CPTS_CONTROL_64MODE                BIT(5)
#define AM65_CPTS_CONTROL_TS_COMP_TOG           BIT(6)
#define AM65_CPTS_CONTROL_TS_PPM_DIR            BIT(7)
#define AM65_CPTS_CONTROL_HW1_TS_PUSH_EN        BIT(8)
#define AM65_CPTS_CONTROL_HW2_TS_PUSH_EN        BIT(9)
#define AM65_CPTS_CONTROL_HW3_TS_PUSH_EN        BIT(10)
#define AM65_CPTS_CONTROL_HW4_TS_PUSH_EN        BIT(11)
#define AM65_CPTS_CONTROL_HW5_TS_PUSH_EN        BIT(12)
#define AM65_CPTS_CONTROL_HW6_TS_PUSH_EN        BIT(13)
#define AM65_CPTS_CONTROL_HW7_TS_PUSH_EN        BIT(14)
#define AM65_CPTS_CONTROL_HW8_TS_PUSH_EN        BIT(15)
#define AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET    (8)

#define AM65_CPTS_CONTROL_TS_SYNC_SEL_MASK      (0xF)
#define AM65_CPTS_CONTROL_TS_SYNC_SEL_SHIFT     (28)

/* RFTCLK_SEL_REG */
#define AM65_CPTS_RFTCLK_SEL_MASK               (0x1F)

/* TS_PUSH_REG */
#define AM65_CPTS_TS_PUSH                       BIT(0)

/* TS_LOAD_EN_REG */
#define AM65_CPTS_TS_LOAD_EN                    BIT(0)

/* INTSTAT_RAW_REG */
#define AM65_CPTS_INTSTAT_RAW_TS_PEND           BIT(0)

/* INTSTAT_MASKED_REG */
#define AM65_CPTS_INTSTAT_MASKED_TS_PEND        BIT(0)

/* INT_ENABLE_REG */
#define AM65_CPTS_INT_ENABLE_TS_PEND_EN         BIT(0)

/* TS_COMP_NUDGE_REG */
#define AM65_CPTS_TS_COMP_NUDGE_MASK            (0xFF)

/* EVENT_POP_REG */
#define AM65_CPTS_EVENT_POP                     BIT(0)

/* EVENT_1_REG */
#define AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK      GENMASK(15, 0)

#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK     GENMASK(19, 16)
#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT    (16)

#define AM65_CPTS_EVENT_1_EVENT_TYPE_MASK       GENMASK(23, 20)
#define AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT      (20)

#define AM65_CPTS_EVENT_1_PORT_NUMBER_MASK      GENMASK(28, 24)
#define AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT     (24)

/* EVENT_2_REG */
#define AM65_CPTS_EVENT_2_REG_DOMAIN_MASK       (0xFF)
#define AM65_CPTS_EVENT_2_REG_DOMAIN_SHIFT      (0)

enum {
        AM65_CPTS_EV_PUSH,      /* Time Stamp Push Event */
        AM65_CPTS_EV_ROLL,      /* Time Stamp Rollover Event */
        AM65_CPTS_EV_HALF,      /* Time Stamp Half Rollover Event */
        AM65_CPTS_EV_HW,        /* Hardware Time Stamp Push Event */
        AM65_CPTS_EV_RX,        /* Ethernet Receive Event */
        AM65_CPTS_EV_TX,        /* Ethernet Transmit Event */
        AM65_CPTS_EV_TS_COMP,   /* Time Stamp Compare Event */
        AM65_CPTS_EV_HOST,      /* Host Transmit Event */
};

struct am65_cpts_event {
        struct list_head list;
        unsigned long tmo;
        u32 event1;
        u32 event2;
        u64 timestamp;
};

#define AM65_CPTS_FIFO_DEPTH            (16)
#define AM65_CPTS_MAX_EVENTS            (32)
#define AM65_CPTS_EVENT_RX_TX_TIMEOUT   (20) /* ms */
#define AM65_CPTS_SKB_TX_WORK_TIMEOUT   1 /* jiffies */
#define AM65_CPTS_MIN_PPM               0x400

struct am65_cpts {
        struct device *dev;
        struct am65_cpts_regs __iomem *reg;
        struct ptp_clock_info ptp_info;
        struct ptp_clock *ptp_clock;
        int phc_index;
        struct clk_hw *clk_mux_hw;
        struct device_node *clk_mux_np;
        struct clk *refclk;
        u32 refclk_freq;
        struct list_head events;
        struct list_head pool;
        struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
        spinlock_t lock; /* protects events lists */
        u32 ext_ts_inputs;
        u32 genf_num;
        u32 ts_add_val;
        int irq;
        struct mutex ptp_clk_lock; /* PHC access sync */
        u64 timestamp;
        u32 genf_enable;
        u32 hw_ts_enable;
        struct sk_buff_head txq;
};

struct am65_cpts_skb_cb_data {
        unsigned long tmo;
        u32 skb_mtype_seqid;
};

#define am65_cpts_write32(c, v, r) writel(v, &(c)->reg->r)
#define am65_cpts_read32(c, r) readl(&(c)->reg->r)

static void am65_cpts_settime(struct am65_cpts *cpts, u64 start_tstamp)
{
        u32 val;

        val = upper_32_bits(start_tstamp);
        am65_cpts_write32(cpts, val, ts_load_val_hi);
        val = lower_32_bits(start_tstamp);
        am65_cpts_write32(cpts, val, ts_load_val_lo);

        am65_cpts_write32(cpts, AM65_CPTS_TS_LOAD_EN, ts_load_en);
}

static void am65_cpts_set_add_val(struct am65_cpts *cpts)
{
        /* select coefficient according to the rate */
        cpts->ts_add_val = (NSEC_PER_SEC / cpts->refclk_freq - 1) & 0x7;
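        /* e.g. (illustrative, assuming a 200 MHz refclk): NSEC_PER_SEC /
         * 200000000 - 1 = 4, i.e. add_val 4 for a 5 ns reference clock period
         */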

        am65_cpts_write32(cpts, cpts->ts_add_val, ts_add_val);
}

static void am65_cpts_disable(struct am65_cpts *cpts)
{
        am65_cpts_write32(cpts, 0, control);
        am65_cpts_write32(cpts, 0, int_enable);
}

static int am65_cpts_event_get_port(struct am65_cpts_event *event)
{
        return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
                AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT;
}

static int am65_cpts_event_get_type(struct am65_cpts_event *event)
{
        return (event->event1 & AM65_CPTS_EVENT_1_EVENT_TYPE_MASK) >>
                AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
}

static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
{
        struct list_head *this, *next;
        struct am65_cpts_event *event;
        int removed = 0;

        list_for_each_safe(this, next, &cpts->events) {
                event = list_entry(this, struct am65_cpts_event, list);
                if (time_after(jiffies, event->tmo)) {
                        list_del_init(&event->list);
                        list_add(&event->list, &cpts->pool);
                        ++removed;
                }
        }

        if (removed)
                dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
        return removed ? 0 : -1;
}

static bool am65_cpts_fifo_pop_event(struct am65_cpts *cpts,
                                     struct am65_cpts_event *event)
{
        u32 r = am65_cpts_read32(cpts, intstat_raw);

        if (r & AM65_CPTS_INTSTAT_RAW_TS_PEND) {
                event->timestamp = am65_cpts_read32(cpts, event_0);
                event->event1 = am65_cpts_read32(cpts, event_1);
                event->event2 = am65_cpts_read32(cpts, event_2);
                event->timestamp |= (u64)am65_cpts_read32(cpts, event_3) << 32;
                am65_cpts_write32(cpts, AM65_CPTS_EVENT_POP, event_pop);
                return false;
        }
        return true;
}

static int am65_cpts_fifo_read(struct am65_cpts *cpts)
{
        struct ptp_clock_event pevent;
        struct am65_cpts_event *event;
        bool schedule = false;
        int i, type, ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&cpts->lock, flags);
        for (i = 0; i < AM65_CPTS_FIFO_DEPTH; i++) {
                event = list_first_entry_or_null(&cpts->pool,
                                                 struct am65_cpts_event, list);

                if (!event) {
                        if (am65_cpts_cpts_purge_events(cpts)) {
                                dev_err(cpts->dev, "cpts: event pool empty\n");
                                ret = -1;
                                goto out;
                        }
                        continue;
                }

                if (am65_cpts_fifo_pop_event(cpts, event))
                        break;

                type = am65_cpts_event_get_type(event);
                switch (type) {
                case AM65_CPTS_EV_PUSH:
                        cpts->timestamp = event->timestamp;
                        dev_dbg(cpts->dev, "AM65_CPTS_EV_PUSH t:%llu\n",
                                cpts->timestamp);
                        break;
                case AM65_CPTS_EV_RX:
                case AM65_CPTS_EV_TX:
                        event->tmo = jiffies +
                                msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);

                        list_del_init(&event->list);
                        list_add_tail(&event->list, &cpts->events);

                        dev_dbg(cpts->dev,
                                "AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
                                event->event1, event->event2,
                                event->timestamp);
                        schedule = true;
                        break;
                case AM65_CPTS_EV_HW:
                        pevent.index = am65_cpts_event_get_port(event) - 1;
                        pevent.timestamp = event->timestamp;
                        pevent.type = PTP_CLOCK_EXTTS;
                        dev_dbg(cpts->dev, "AM65_CPTS_EV_HW p:%d t:%llu\n",
                                pevent.index, event->timestamp);

                        ptp_clock_event(cpts->ptp_clock, &pevent);
                        break;
                case AM65_CPTS_EV_HOST:
                        break;
                case AM65_CPTS_EV_ROLL:
                case AM65_CPTS_EV_HALF:
                case AM65_CPTS_EV_TS_COMP:
                        dev_dbg(cpts->dev,
                                "AM65_CPTS_EVT: %d e1:%08x e2:%08x t:%lld\n",
                                type,
                                event->event1, event->event2,
                                event->timestamp);
                        break;
                default:
                        dev_err(cpts->dev, "cpts: unknown event type\n");
                        ret = -1;
                        goto out;
                }
        }

out:
        spin_unlock_irqrestore(&cpts->lock, flags);

        if (schedule)
                ptp_schedule_worker(cpts->ptp_clock, 0);

        return ret;
}

static u64 am65_cpts_gettime(struct am65_cpts *cpts,
                             struct ptp_system_timestamp *sts)
{
        unsigned long flags;
        u64 val = 0;

        /* temporarily disable the cpts interrupt to avoid an unintended
         * doubled read. An already in-flight interrupt is OK.
         */
        am65_cpts_write32(cpts, 0, int_enable);

        /* use spin_lock_irqsave() here as it has to run very fast */
        spin_lock_irqsave(&cpts->lock, flags);
        ptp_read_system_prets(sts);
        am65_cpts_write32(cpts, AM65_CPTS_TS_PUSH, ts_push);
        am65_cpts_read32(cpts, ts_push);
        ptp_read_system_postts(sts);
        spin_unlock_irqrestore(&cpts->lock, flags);

        am65_cpts_fifo_read(cpts);

        am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);

        val = cpts->timestamp;

        return val;
}

static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id)
{
        struct am65_cpts *cpts = dev_id;

        if (am65_cpts_fifo_read(cpts))
                dev_dbg(cpts->dev, "cpts: unable to obtain a time stamp\n");

        return IRQ_HANDLED;
}

/* PTP clock operations */
static int am65_cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
        struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
        int neg_adj = 0;
        u64 adj_period;
        u32 val;

        if (ppb < 0) {
                neg_adj = 1;
                ppb = -ppb;
        }

        /* base freq = 1GHz = 1 000 000 000
         * ppb_norm = ppb * base_freq / clock_freq;
         * ppm_norm = ppb_norm / 1000
         * adj_period = 1 000 000 / ppm_norm
         * adj_period = 1 000 000 000 / ppb_norm
         * adj_period = 1 000 000 000 / (ppb * base_freq / clock_freq)
         * adj_period = (1 000 000 000 * clock_freq) / (ppb * base_freq)
         * adj_period = clock_freq / ppb
         */
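        /* Worked example (illustrative, assuming clock_freq = 200 MHz):
         * ppb = 100 -> adj_period = 200000000 / 100 = 2000000, i.e. per the
         * derivation above one correction every 2000000 refclk periods gives
         * a 100 ppb offset; the TS_PPM_DIR bit below selects its direction.
         */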
        adj_period = div_u64(cpts->refclk_freq, ppb);

        mutex_lock(&cpts->ptp_clk_lock);

        val = am65_cpts_read32(cpts, control);
        if (neg_adj)
                val |= AM65_CPTS_CONTROL_TS_PPM_DIR;
        else
                val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR;
        am65_cpts_write32(cpts, val, control);

        val = upper_32_bits(adj_period) & 0x3FF;
        am65_cpts_write32(cpts, val, ts_ppm_hi);
        val = lower_32_bits(adj_period);
        am65_cpts_write32(cpts, val, ts_ppm_low);

        mutex_unlock(&cpts->ptp_clk_lock);

        return 0;
}

static int am65_cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
        struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
        s64 ns;

        mutex_lock(&cpts->ptp_clk_lock);
        ns = am65_cpts_gettime(cpts, NULL);
        ns += delta;
        am65_cpts_settime(cpts, ns);
        mutex_unlock(&cpts->ptp_clk_lock);

        return 0;
}

static int am65_cpts_ptp_gettimex(struct ptp_clock_info *ptp,
                                  struct timespec64 *ts,
                                  struct ptp_system_timestamp *sts)
{
        struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
        u64 ns;

        mutex_lock(&cpts->ptp_clk_lock);
        ns = am65_cpts_gettime(cpts, sts);
        mutex_unlock(&cpts->ptp_clk_lock);
        *ts = ns_to_timespec64(ns);

        return 0;
}

u64 am65_cpts_ns_gettime(struct am65_cpts *cpts)
{
        u64 ns;

        /* reuse ptp_clk_lock as it serializes ts push */
        mutex_lock(&cpts->ptp_clk_lock);
        ns = am65_cpts_gettime(cpts, NULL);
        mutex_unlock(&cpts->ptp_clk_lock);

        return ns;
}
EXPORT_SYMBOL_GPL(am65_cpts_ns_gettime);

static int am65_cpts_ptp_settime(struct ptp_clock_info *ptp,
                                 const struct timespec64 *ts)
{
        struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
        u64 ns;

        ns = timespec64_to_ns(ts);
        mutex_lock(&cpts->ptp_clk_lock);
        am65_cpts_settime(cpts, ns);
        mutex_unlock(&cpts->ptp_clk_lock);

        return 0;
}

static void am65_cpts_extts_enable_hw(struct am65_cpts *cpts, u32 index, int on)
{
        u32 v;

        v = am65_cpts_read32(cpts, control);
        if (on) {
                v |= BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
                cpts->hw_ts_enable |= BIT(index);
        } else {
                v &= ~BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
                cpts->hw_ts_enable &= ~BIT(index);
        }
        am65_cpts_write32(cpts, v, control);
}

static int am65_cpts_extts_enable(struct am65_cpts *cpts, u32 index, int on)
{
        if (!!(cpts->hw_ts_enable & BIT(index)) == !!on)
                return 0;

        mutex_lock(&cpts->ptp_clk_lock);
        am65_cpts_extts_enable_hw(cpts, index, on);
        mutex_unlock(&cpts->ptp_clk_lock);

        dev_dbg(cpts->dev, "%s: ExtTS:%u %s\n",
                __func__, index, on ? "enabled" : "disabled");

        return 0;
}

int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
                          struct am65_cpts_estf_cfg *cfg)
{
        u64 cycles;
        u32 val;

        cycles = cfg->ns_period * cpts->refclk_freq;
        cycles = DIV_ROUND_UP(cycles, NSEC_PER_SEC);
        if (cycles > U32_MAX)
                return -EINVAL;

        /* according to TRM should be zeroed */
        am65_cpts_write32(cpts, 0, estf[idx].length);

        val = upper_32_bits(cfg->ns_start);
        am65_cpts_write32(cpts, val, estf[idx].comp_hi);
        val = lower_32_bits(cfg->ns_start);
        am65_cpts_write32(cpts, val, estf[idx].comp_lo);
        val = lower_32_bits(cycles);
        am65_cpts_write32(cpts, val, estf[idx].length);

        dev_dbg(cpts->dev, "%s: ESTF:%u enabled\n", __func__, idx);

        return 0;
}
EXPORT_SYMBOL_GPL(am65_cpts_estf_enable);

void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
{
        am65_cpts_write32(cpts, 0, estf[idx].length);

        dev_dbg(cpts->dev, "%s: ESTF:%u disabled\n", __func__, idx);
}
EXPORT_SYMBOL_GPL(am65_cpts_estf_disable);
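
/* Usage sketch (illustrative only; the caller, variable names and values are
 * assumptions, not taken from this driver):
 *
 *      struct am65_cpts_estf_cfg cfg = {
 *              .ns_start  = admin_base_time_ns,
 *              .ns_period = cycle_time_ns,
 *      };
 *
 *      ret = am65_cpts_estf_enable(cpts, tx_ch_idx, &cfg);
 *      ...
 *      am65_cpts_estf_disable(cpts, tx_ch_idx);
 *
 * am65_cpts_estf_enable() returns -EINVAL when the requested period does not
 * fit the 32-bit ESTF length register at the current reference clock rate.
 */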

static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
                                       struct ptp_perout_request *req, int on)
{
        u64 ns_period, ns_start, cycles;
        struct timespec64 ts;
        u32 val;

        if (on) {
                ts.tv_sec = req->period.sec;
                ts.tv_nsec = req->period.nsec;
                ns_period = timespec64_to_ns(&ts);

                cycles = (ns_period * cpts->refclk_freq) / NSEC_PER_SEC;
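                /* e.g. (illustrative) a 1 s period with an assumed 200 MHz
                 * refclk gives cycles = 200000000, the GenF period in refclk
                 * ticks written to the length register below
                 */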

                ts.tv_sec = req->start.sec;
                ts.tv_nsec = req->start.nsec;
                ns_start = timespec64_to_ns(&ts);

                val = upper_32_bits(ns_start);
                am65_cpts_write32(cpts, val, genf[req->index].comp_hi);
                val = lower_32_bits(ns_start);
                am65_cpts_write32(cpts, val, genf[req->index].comp_lo);
                val = lower_32_bits(cycles);
                am65_cpts_write32(cpts, val, genf[req->index].length);

                cpts->genf_enable |= BIT(req->index);
        } else {
                am65_cpts_write32(cpts, 0, genf[req->index].length);

                cpts->genf_enable &= ~BIT(req->index);
        }
}

static int am65_cpts_perout_enable(struct am65_cpts *cpts,
                                   struct ptp_perout_request *req, int on)
{
        if (!!(cpts->genf_enable & BIT(req->index)) == !!on)
                return 0;

        mutex_lock(&cpts->ptp_clk_lock);
        am65_cpts_perout_enable_hw(cpts, req, on);
        mutex_unlock(&cpts->ptp_clk_lock);

        dev_dbg(cpts->dev, "%s: GenF:%u %s\n",
                __func__, req->index, on ? "enabled" : "disabled");

        return 0;
}

static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp,
                                struct ptp_clock_request *rq, int on)
{
        struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);

        switch (rq->type) {
        case PTP_CLK_REQ_EXTTS:
                return am65_cpts_extts_enable(cpts, rq->extts.index, on);
        case PTP_CLK_REQ_PEROUT:
                return am65_cpts_perout_enable(cpts, &rq->perout, on);
        default:
                break;
        }

        return -EOPNOTSUPP;
}
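
/* Note (informational): once the clock is registered, EXTTS and PEROUT
 * requests reach the callback above from userspace through the PTP character
 * device /dev/ptpN (N = am65_cpts_phc_index()), e.g. via the
 * PTP_EXTTS_REQUEST and PTP_PEROUT_REQUEST ioctls.
 */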

static long am65_cpts_ts_work(struct ptp_clock_info *ptp);

static struct ptp_clock_info am65_ptp_info = {
        .owner          = THIS_MODULE,
        .name           = "CTPS timer",
        .adjfreq        = am65_cpts_ptp_adjfreq,
        .adjtime        = am65_cpts_ptp_adjtime,
        .gettimex64     = am65_cpts_ptp_gettimex,
        .settime64      = am65_cpts_ptp_settime,
        .enable         = am65_cpts_ptp_enable,
        .do_aux_work    = am65_cpts_ts_work,
};

static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
                                  struct am65_cpts_event *event)
{
        struct sk_buff_head txq_list;
        struct sk_buff *skb, *tmp;
        unsigned long flags;
        bool found = false;
        u32 mtype_seqid;

        mtype_seqid = event->event1 &
                      (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
                       AM65_CPTS_EVENT_1_EVENT_TYPE_MASK |
                       AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);

        __skb_queue_head_init(&txq_list);

        spin_lock_irqsave(&cpts->txq.lock, flags);
        skb_queue_splice_init(&cpts->txq, &txq_list);
        spin_unlock_irqrestore(&cpts->txq.lock, flags);

        /* no need to grab txq.lock as access is always done under cpts->lock */
        skb_queue_walk_safe(&txq_list, skb, tmp) {
                struct skb_shared_hwtstamps ssh;
                struct am65_cpts_skb_cb_data *skb_cb =
                                        (struct am65_cpts_skb_cb_data *)skb->cb;

                if (mtype_seqid == skb_cb->skb_mtype_seqid) {
                        u64 ns = event->timestamp;

                        memset(&ssh, 0, sizeof(ssh));
                        ssh.hwtstamp = ns_to_ktime(ns);
                        skb_tstamp_tx(skb, &ssh);
                        found = true;
                        __skb_unlink(skb, &txq_list);
                        dev_consume_skb_any(skb);
                        dev_dbg(cpts->dev,
                                "match tx timestamp mtype_seqid %08x\n",
                                mtype_seqid);
                        break;
                }

                if (time_after(jiffies, skb_cb->tmo)) {
                        /* timeout any expired skbs over 100 ms */
                        dev_dbg(cpts->dev,
                                "expiring tx timestamp mtype_seqid %08x\n",
                                mtype_seqid);
                        __skb_unlink(skb, &txq_list);
                        dev_consume_skb_any(skb);
                }
        }

        spin_lock_irqsave(&cpts->txq.lock, flags);
        skb_queue_splice(&txq_list, &cpts->txq);
        spin_unlock_irqrestore(&cpts->txq.lock, flags);

        return found;
}

static void am65_cpts_find_ts(struct am65_cpts *cpts)
{
        struct am65_cpts_event *event;
        struct list_head *this, *next;
        LIST_HEAD(events_free);
        unsigned long flags;
        LIST_HEAD(events);

        spin_lock_irqsave(&cpts->lock, flags);
        list_splice_init(&cpts->events, &events);
        spin_unlock_irqrestore(&cpts->lock, flags);

        list_for_each_safe(this, next, &events) {
                event = list_entry(this, struct am65_cpts_event, list);
                if (am65_cpts_match_tx_ts(cpts, event) ||
                    time_after(jiffies, event->tmo)) {
                        list_del_init(&event->list);
                        list_add(&event->list, &events_free);
                }
        }

        spin_lock_irqsave(&cpts->lock, flags);
        list_splice_tail(&events, &cpts->events);
        list_splice_tail(&events_free, &cpts->pool);
        spin_unlock_irqrestore(&cpts->lock, flags);
}

static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
{
        struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
        unsigned long flags;
        long delay = -1;

        am65_cpts_find_ts(cpts);

        spin_lock_irqsave(&cpts->txq.lock, flags);
        if (!skb_queue_empty(&cpts->txq))
                delay = AM65_CPTS_SKB_TX_WORK_TIMEOUT;
        spin_unlock_irqrestore(&cpts->txq.lock, flags);

        return delay;
}

/**
 * am65_cpts_rx_enable - enable rx timestamping
 * @cpts: cpts handle
 * @en: enable/disable rx timestamping
 *
 * This function enables rx packet timestamping. The CPTS can timestamp all
 * rx packets.
 */
void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
{
        u32 val;

        mutex_lock(&cpts->ptp_clk_lock);
        val = am65_cpts_read32(cpts, control);
        if (en)
                val |= AM65_CPTS_CONTROL_TSTAMP_EN;
        else
                val &= ~AM65_CPTS_CONTROL_TSTAMP_EN;
        am65_cpts_write32(cpts, val, control);
        mutex_unlock(&cpts->ptp_clk_lock);
}
EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
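
/* Illustrative only (caller names are assumptions): a MAC driver would
 * typically call this from its SIOCSHWTSTAMP handler, e.g.
 *
 *      am65_cpts_rx_enable(common->cpts,
 *                          cfg.rx_filter != HWTSTAMP_FILTER_NONE);
 */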

static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
        unsigned int ptp_class = ptp_classify_raw(skb);
        u8 *msgtype, *data = skb->data;
        unsigned int offset = 0;
        __be16 *seqid;

        if (ptp_class == PTP_CLASS_NONE)
                return 0;

        if (ptp_class & PTP_CLASS_VLAN)
                offset += VLAN_HLEN;

        switch (ptp_class & PTP_CLASS_PMASK) {
        case PTP_CLASS_IPV4:
                offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
                break;
        case PTP_CLASS_IPV6:
                offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
                break;
        case PTP_CLASS_L2:
                offset += ETH_HLEN;
                break;
        default:
                return 0;
        }

        if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
                return 0;

        if (unlikely(ptp_class & PTP_CLASS_V1))
                msgtype = data + offset + OFF_PTP_CONTROL;
        else
                msgtype = data + offset;

        seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
        *mtype_seqid = (*msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) &
                        AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK;
        *mtype_seqid |= (ntohs(*seqid) & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);

        return 1;
}
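
/* Layout example (derived from the masks above): a PTPv2 Sync message
 * (msgtype 0x0) with sequence id 0x1234 yields *mtype_seqid == 0x00001234;
 * callers then OR in the expected event type, e.g. AM65_CPTS_EV_TX << 20,
 * so it can be compared directly against the EVENT_1 register fields.
 */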

/**
 * am65_cpts_tx_timestamp - save tx packet for timestamping
 * @cpts: cpts handle
 * @skb: packet
 *
 * This function saves the tx packet for timestamping if the packet can be
 * timestamped. Further processing is done by the PTP auxiliary worker.
 */
void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
{
        struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;

        if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
                return;

        /* add frame to queue for processing later.
         * The periodic FIFO check will handle this.
         */
        skb_get(skb);
        /* get the timestamp for timeouts */
        skb_cb->tmo = jiffies + msecs_to_jiffies(100);
        skb_queue_tail(&cpts->txq, skb);
        ptp_schedule_worker(cpts->ptp_clock, 0);
}
EXPORT_SYMBOL_GPL(am65_cpts_tx_timestamp);

/**
 * am65_cpts_prep_tx_timestamp - check and prepare tx packet for timestamping
 * @cpts: cpts handle
 * @skb: packet
 *
 * This function should be called from .xmit(). It checks whether the packet
 * can be timestamped, fills internal cpts data in skb->cb and marks the
 * packet as SKBTX_IN_PROGRESS.
 */
void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
{
        struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
        int ret;

        if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
                return;

        ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
        if (!ret)
                return;
        skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_TX <<
                                   AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);

        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}
EXPORT_SYMBOL_GPL(am65_cpts_prep_tx_timestamp);
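
/* Typical TX timestamping flow in a client MAC driver (illustrative sketch;
 * the caller's structure and field names are assumptions):
 *
 *      // in .ndo_start_xmit(), before queueing the frame
 *      if (port->tx_ts_enabled)
 *              am65_cpts_prep_tx_timestamp(common->cpts, skb);
 *
 *      // on TX completion
 *      am65_cpts_tx_timestamp(common->cpts, skb);
 *
 * am65_cpts_tx_timestamp() takes its own skb reference (skb_get()) and the
 * PTP auxiliary worker later matches it against CPTS TX FIFO events.
 */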

int am65_cpts_phc_index(struct am65_cpts *cpts)
{
        return cpts->phc_index;
}
EXPORT_SYMBOL_GPL(am65_cpts_phc_index);

static void cpts_free_clk_mux(void *data)
{
        struct am65_cpts *cpts = data;

        of_clk_del_provider(cpts->clk_mux_np);
        clk_hw_unregister_mux(cpts->clk_mux_hw);
        of_node_put(cpts->clk_mux_np);
}

static int cpts_of_mux_clk_setup(struct am65_cpts *cpts,
                                 struct device_node *node)
{
        unsigned int num_parents;
        const char **parent_names;
        char *clk_mux_name;
        void __iomem *reg;
        int ret = -EINVAL;

        cpts->clk_mux_np = of_get_child_by_name(node, "refclk-mux");
        if (!cpts->clk_mux_np)
                return 0;

        num_parents = of_clk_get_parent_count(cpts->clk_mux_np);
        if (num_parents < 1) {
                dev_err(cpts->dev, "mux-clock %pOF must have parents\n",
                        cpts->clk_mux_np);
                goto mux_fail;
        }

        parent_names = devm_kcalloc(cpts->dev, num_parents, sizeof(char *),
                                    GFP_KERNEL);
        if (!parent_names) {
                ret = -ENOMEM;
                goto mux_fail;
        }

        of_clk_parent_fill(cpts->clk_mux_np, parent_names, num_parents);

        clk_mux_name = devm_kasprintf(cpts->dev, GFP_KERNEL, "%s.%pOFn",
                                      dev_name(cpts->dev), cpts->clk_mux_np);
        if (!clk_mux_name) {
                ret = -ENOMEM;
                goto mux_fail;
        }

        reg = &cpts->reg->rftclk_sel;
        /* dev must be NULL to avoid recursive incrementing
         * of module refcnt
         */
        cpts->clk_mux_hw = clk_hw_register_mux(NULL, clk_mux_name,
                                               parent_names, num_parents,
                                               0, reg, 0, 5, 0, NULL);
        if (IS_ERR(cpts->clk_mux_hw)) {
                ret = PTR_ERR(cpts->clk_mux_hw);
                goto mux_fail;
        }

        ret = of_clk_add_hw_provider(cpts->clk_mux_np, of_clk_hw_simple_get,
                                     cpts->clk_mux_hw);
        if (ret)
                goto clk_hw_register;

        ret = devm_add_action_or_reset(cpts->dev, cpts_free_clk_mux, cpts);
        if (ret)
                dev_err(cpts->dev, "failed to add clkmux reset action %d\n", ret);

        return ret;

clk_hw_register:
        clk_hw_unregister_mux(cpts->clk_mux_hw);
mux_fail:
        of_node_put(cpts->clk_mux_np);
        return ret;
}

static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
{
        u32 prop[2];

        if (!of_property_read_u32(node, "ti,cpts-ext-ts-inputs", &prop[0]))
                cpts->ext_ts_inputs = prop[0];

        if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0]))
                cpts->genf_num = prop[0];

        return cpts_of_mux_clk_setup(cpts, node);
}
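
/* Example of the optional DT properties parsed above (illustrative snippet;
 * node name and values are placeholders):
 *
 *      cpts {
 *              compatible = "ti,am65-cpts";
 *              ...
 *              ti,cpts-ext-ts-inputs = <4>;
 *              ti,cpts-periodic-outputs = <2>;
 *              refclk-mux {
 *                      clocks = <...>, <...>;
 *              };
 *      };
 */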

static void am65_cpts_release(void *data)
{
        struct am65_cpts *cpts = data;

        ptp_clock_unregister(cpts->ptp_clock);
        am65_cpts_disable(cpts);
        clk_disable_unprepare(cpts->refclk);
}

struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
                                   struct device_node *node)
{
        struct am65_cpts *cpts;
        int ret, i;

        cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
        if (!cpts)
                return ERR_PTR(-ENOMEM);

        cpts->dev = dev;
        cpts->reg = (struct am65_cpts_regs __iomem *)regs;

        cpts->irq = of_irq_get_byname(node, "cpts");
        if (cpts->irq <= 0) {
                ret = cpts->irq ?: -ENXIO;
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "Failed to get IRQ number (err = %d)\n",
                                ret);
                return ERR_PTR(ret);
        }

        ret = am65_cpts_of_parse(cpts, node);
        if (ret)
                return ERR_PTR(ret);

        mutex_init(&cpts->ptp_clk_lock);
        INIT_LIST_HEAD(&cpts->events);
        INIT_LIST_HEAD(&cpts->pool);
        spin_lock_init(&cpts->lock);
        skb_queue_head_init(&cpts->txq);

        for (i = 0; i < AM65_CPTS_MAX_EVENTS; i++)
                list_add(&cpts->pool_data[i].list, &cpts->pool);

        cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
        if (IS_ERR(cpts->refclk)) {
                ret = PTR_ERR(cpts->refclk);
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "Failed to get refclk %d\n", ret);
                return ERR_PTR(ret);
        }

        ret = clk_prepare_enable(cpts->refclk);
        if (ret) {
                dev_err(dev, "Failed to enable refclk %d\n", ret);
                return ERR_PTR(ret);
        }

        cpts->refclk_freq = clk_get_rate(cpts->refclk);

        am65_ptp_info.max_adj = cpts->refclk_freq / AM65_CPTS_MIN_PPM;
        cpts->ptp_info = am65_ptp_info;

        if (cpts->ext_ts_inputs)
                cpts->ptp_info.n_ext_ts = cpts->ext_ts_inputs;
        if (cpts->genf_num)
                cpts->ptp_info.n_per_out = cpts->genf_num;

        am65_cpts_set_add_val(cpts);

        am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN | AM65_CPTS_CONTROL_64MODE,
                          control);
        am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);

        /* set time to the current system time */
        am65_cpts_settime(cpts, ktime_to_ns(ktime_get_real()));

        cpts->ptp_clock = ptp_clock_register(&cpts->ptp_info, cpts->dev);
        if (IS_ERR_OR_NULL(cpts->ptp_clock)) {
                dev_err(dev, "Failed to register ptp clk %ld\n",
                        PTR_ERR(cpts->ptp_clock));
                ret = cpts->ptp_clock ? PTR_ERR(cpts->ptp_clock) : -ENODEV;
                goto refclk_disable;
        }
        cpts->phc_index = ptp_clock_index(cpts->ptp_clock);

        ret = devm_add_action_or_reset(dev, am65_cpts_release, cpts);
        if (ret) {
                dev_err(dev, "failed to add ptpclk reset action %d\n", ret);
                return ERR_PTR(ret);
        }

        ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
                                        am65_cpts_interrupt,
                                        IRQF_ONESHOT, dev_name(dev), cpts);
        if (ret < 0) {
                dev_err(cpts->dev, "error attaching irq %d\n", ret);
                return ERR_PTR(ret);
        }

        dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u\n",
                 am65_cpts_read32(cpts, idver),
                 cpts->refclk_freq, cpts->ts_add_val);

        return cpts;

refclk_disable:
        clk_disable_unprepare(cpts->refclk);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(am65_cpts_create);
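
/* Illustrative only (caller names are assumptions): a host MAC driver embeds
 * CPTS support roughly as
 *
 *      common->cpts = am65_cpts_create(dev, cpts_reg_base, cpts_of_node);
 *      if (IS_ERR(common->cpts))
 *              return PTR_ERR(common->cpts);
 *
 * and typically reports am65_cpts_phc_index() through ethtool .get_ts_info().
 */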

static int am65_cpts_probe(struct platform_device *pdev)
{
        struct device_node *node = pdev->dev.of_node;
        struct device *dev = &pdev->dev;
        struct am65_cpts *cpts;
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpts");
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        cpts = am65_cpts_create(dev, base, node);
        return PTR_ERR_OR_ZERO(cpts);
}

static const struct of_device_id am65_cpts_of_match[] = {
        { .compatible = "ti,am65-cpts", },
        { .compatible = "ti,j721e-cpts", },
        {},
};
MODULE_DEVICE_TABLE(of, am65_cpts_of_match);

static struct platform_driver am65_cpts_driver = {
        .probe          = am65_cpts_probe,
        .driver         = {
                .name   = "am65-cpts",
                .of_match_table = am65_cpts_of_match,
        },
};
module_platform_driver(am65_cpts_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
MODULE_DESCRIPTION("TI K3 AM65 CPTS driver");