linux/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/clocksource.h>
#include <linux/highmem.h>
#include <rdma/mlx5-abi.h>
#include "lib/eq.h"
#include "en.h"
#include "clock.h"

enum {
        MLX5_CYCLES_SHIFT       = 23
};

enum {
        MLX5_PIN_MODE_IN                = 0x0,
        MLX5_PIN_MODE_OUT               = 0x1,
};

enum {
        MLX5_OUT_PATTERN_PULSE          = 0x0,
        MLX5_OUT_PATTERN_PERIODIC       = 0x1,
};

enum {
        MLX5_EVENT_MODE_DISABLE = 0x0,
        MLX5_EVENT_MODE_REPETETIVE      = 0x1,
        MLX5_EVENT_MODE_ONCE_TILL_ARM   = 0x2,
};

enum {
        MLX5_MTPPS_FS_ENABLE                    = BIT(0x0),
        MLX5_MTPPS_FS_PATTERN                   = BIT(0x2),
        MLX5_MTPPS_FS_PIN_MODE                  = BIT(0x3),
        MLX5_MTPPS_FS_TIME_STAMP                = BIT(0x4),
        MLX5_MTPPS_FS_OUT_PULSE_DURATION        = BIT(0x5),
        MLX5_MTPPS_FS_ENH_OUT_PER_ADJ           = BIT(0x7),
};

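/* Cyclecounter read callback: sample the free-running device timer and keep
 * only the bits covered by the cyclecounter mask.
 */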
static u64 read_internal_timer(const struct cyclecounter *cc)
{
        struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
        struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
                                                  clock);

        return mlx5_read_internal_timer(mdev, NULL) & cc->mask;
}

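/* Propagate the software clock state to the clock info page shared with
 * userspace (e.g. through mlx5_ib). The sign field acts as a sequence
 * counter: MLX5_IB_CLOCK_INFO_KERNEL_UPDATING is set while the fields are
 * rewritten and the counter is advanced once the update completes, so
 * readers can detect a concurrent update and retry.
 */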
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
        struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
        struct mlx5_clock *clock = &mdev->clock;
        u32 sign;

        if (!clock_info)
                return;

        sign = smp_load_acquire(&clock_info->sign);
        smp_store_mb(clock_info->sign,
                     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

        clock_info->cycles = clock->tc.cycle_last;
        clock_info->mult   = clock->cycles.mult;
        clock_info->nsec   = clock->tc.nsec;
        clock_info->frac   = clock->tc.frac;

        smp_store_release(&clock_info->sign,
                          sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}

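/* Work item armed from the PPS event handler: program the queued start
 * timestamp of every pending output pin into the MTPPS register, clearing
 * the per-pin start value under the clock lock as it goes.
 */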
static void mlx5_pps_out(struct work_struct *work)
{
        struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
                                                 out_work);
        struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
                                                pps_info);
        struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
                                                  clock);
        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
        unsigned long flags;
        int i;

        for (i = 0; i < clock->ptp_info.n_pins; i++) {
                u64 tstart;

                write_seqlock_irqsave(&clock->lock, flags);
                tstart = clock->pps_info.start[i];
                clock->pps_info.start[i] = 0;
                write_sequnlock_irqrestore(&clock->lock, flags);
                if (!tstart)
                        continue;

                MLX5_SET(mtpps_reg, in, pin, i);
                MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
                MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
                mlx5_set_mtpps(mdev, in, sizeof(in));
        }
}

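/* Overflow watchdog: periodically read the timecounter so cycle-counter
 * wrap-around is folded in before it can be missed, and mirror the refreshed
 * state to the clock info page. Reschedules itself with overflow_period.
 */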
static void mlx5_timestamp_overflow(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
                                                overflow_work);
        unsigned long flags;

        write_seqlock_irqsave(&clock->lock, flags);
        timecounter_read(&clock->tc);
        mlx5_update_clock_info_page(clock->mdev);
        write_sequnlock_irqrestore(&clock->lock, flags);
        schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
}

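/* PHC .settime64 callback: restart the timecounter at the requested time and
 * publish the new state to the clock info page.
 */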
static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
                            const struct timespec64 *ts)
{
        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
                                                ptp_info);
        u64 ns = timespec64_to_ns(ts);
        unsigned long flags;

        write_seqlock_irqsave(&clock->lock, flags);
        timecounter_init(&clock->tc, &clock->cycles, ns);
        mlx5_update_clock_info_page(clock->mdev);
        write_sequnlock_irqrestore(&clock->lock, flags);

        return 0;
}

static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
                             struct ptp_system_timestamp *sts)
{
        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
                                                ptp_info);
        struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
                                                  clock);
        unsigned long flags;
        u64 cycles, ns;

        write_seqlock_irqsave(&clock->lock, flags);
        cycles = mlx5_read_internal_timer(mdev, sts);
        ns = timecounter_cyc2time(&clock->tc, cycles);
        write_sequnlock_irqrestore(&clock->lock, flags);

        *ts = ns_to_timespec64(ns);

        return 0;
}

static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
                                                ptp_info);
        unsigned long flags;

        write_seqlock_irqsave(&clock->lock, flags);
        timecounter_adjtime(&clock->tc, delta);
        mlx5_update_clock_info_page(clock->mdev);
        write_sequnlock_irqrestore(&clock->lock, flags);

        return 0;
}

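/* PHC .adjfreq callback: delta is in parts per billion. Scale the nominal
 * multiplier by delta, fold the elapsed time into the timecounter at the old
 * rate, then apply the adjusted multiplier.
 */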
static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
        u64 adj;
        u32 diff;
        unsigned long flags;
        int neg_adj = 0;
        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
                                                ptp_info);

        if (delta < 0) {
                neg_adj = 1;
                delta = -delta;
        }

        adj = clock->nominal_c_mult;
        adj *= delta;
        diff = div_u64(adj, 1000000000ULL);

        write_seqlock_irqsave(&clock->lock, flags);
        timecounter_read(&clock->tc);
        clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
                                       clock->nominal_c_mult + diff;
        mlx5_update_clock_info_page(clock->mdev);
        write_sequnlock_irqrestore(&clock->lock, flags);

        return 0;
}

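/* Handle an external timestamp (EXTTS) request: validate the request flags
 * and pin, then program pin mode, edge pattern and enable bits through the
 * MTPPS register and arm the pin event via MTPPSE.
 */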
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
                                struct ptp_clock_request *rq,
                                int on)
{
        struct mlx5_clock *clock =
                        container_of(ptp, struct mlx5_clock, ptp_info);
        struct mlx5_core_dev *mdev =
                        container_of(clock, struct mlx5_core_dev, clock);
        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
        u32 field_select = 0;
        u8 pin_mode = 0;
        u8 pattern = 0;
        int pin = -1;
        int err = 0;

        if (!MLX5_PPS_CAP(mdev))
                return -EOPNOTSUPP;

        /* Reject requests with unsupported flags */
        if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
                                PTP_RISING_EDGE |
                                PTP_FALLING_EDGE |
                                PTP_STRICT_FLAGS))
                return -EOPNOTSUPP;

        /* Reject requests to enable time stamping on both edges. */
        if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
            (rq->extts.flags & PTP_ENABLE_FEATURE) &&
            (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
                return -EOPNOTSUPP;

        if (rq->extts.index >= clock->ptp_info.n_pins)
                return -EINVAL;

        if (on) {
                pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
                if (pin < 0)
                        return -EBUSY;
                pin_mode = MLX5_PIN_MODE_IN;
                pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
                field_select = MLX5_MTPPS_FS_PIN_MODE |
                               MLX5_MTPPS_FS_PATTERN |
                               MLX5_MTPPS_FS_ENABLE;
        } else {
                pin = rq->extts.index;
                field_select = MLX5_MTPPS_FS_ENABLE;
        }

        MLX5_SET(mtpps_reg, in, pin, pin);
        MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
        MLX5_SET(mtpps_reg, in, pattern, pattern);
        MLX5_SET(mtpps_reg, in, enable, on);
        MLX5_SET(mtpps_reg, in, field_select, field_select);

        err = mlx5_set_mtpps(mdev, in, sizeof(in));
        if (err)
                return err;

        return mlx5_set_mtppse(mdev, pin, 0,
                               MLX5_EVENT_MODE_REPETETIVE & on);
}

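/* Handle a periodic output (PEROUT) request: only a 1 second period is
 * accepted. The requested start time is converted from nanoseconds to a
 * device cycle count and programmed, together with pin mode and pattern,
 * through the MTPPS/MTPPSE registers.
 */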
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
                                 struct ptp_clock_request *rq,
                                 int on)
{
        struct mlx5_clock *clock =
                        container_of(ptp, struct mlx5_clock, ptp_info);
        struct mlx5_core_dev *mdev =
                        container_of(clock, struct mlx5_core_dev, clock);
        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
        u64 nsec_now, nsec_delta, time_stamp = 0;
        u64 cycles_now, cycles_delta;
        struct timespec64 ts;
        unsigned long flags;
        u32 field_select = 0;
        u8 pin_mode = 0;
        u8 pattern = 0;
        int pin = -1;
        int err = 0;
        s64 ns;

        if (!MLX5_PPS_CAP(mdev))
                return -EOPNOTSUPP;

        /* Reject requests with unsupported flags */
        if (rq->perout.flags)
                return -EOPNOTSUPP;

        if (rq->perout.index >= clock->ptp_info.n_pins)
                return -EINVAL;

        if (on) {
                pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
                                   rq->perout.index);
                if (pin < 0)
                        return -EBUSY;

                pin_mode = MLX5_PIN_MODE_OUT;
                pattern = MLX5_OUT_PATTERN_PERIODIC;
                ts.tv_sec = rq->perout.period.sec;
                ts.tv_nsec = rq->perout.period.nsec;
                ns = timespec64_to_ns(&ts);

                if ((ns >> 1) != 500000000LL)
                        return -EINVAL;

                ts.tv_sec = rq->perout.start.sec;
                ts.tv_nsec = rq->perout.start.nsec;
                ns = timespec64_to_ns(&ts);
                cycles_now = mlx5_read_internal_timer(mdev, NULL);
                write_seqlock_irqsave(&clock->lock, flags);
                nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
                nsec_delta = ns - nsec_now;
                cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
                                         clock->cycles.mult);
                write_sequnlock_irqrestore(&clock->lock, flags);
                time_stamp = cycles_now + cycles_delta;
                field_select = MLX5_MTPPS_FS_PIN_MODE |
                               MLX5_MTPPS_FS_PATTERN |
                               MLX5_MTPPS_FS_ENABLE |
                               MLX5_MTPPS_FS_TIME_STAMP;
        } else {
                pin = rq->perout.index;
                field_select = MLX5_MTPPS_FS_ENABLE;
        }

        MLX5_SET(mtpps_reg, in, pin, pin);
        MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
        MLX5_SET(mtpps_reg, in, pattern, pattern);
        MLX5_SET(mtpps_reg, in, enable, on);
        MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
        MLX5_SET(mtpps_reg, in, field_select, field_select);

        err = mlx5_set_mtpps(mdev, in, sizeof(in));
        if (err)
                return err;

        return mlx5_set_mtppse(mdev, pin, 0,
                               MLX5_EVENT_MODE_REPETETIVE & on);
}

static int mlx5_pps_configure(struct ptp_clock_info *ptp,
                              struct ptp_clock_request *rq,
                              int on)
{
        struct mlx5_clock *clock =
                        container_of(ptp, struct mlx5_clock, ptp_info);

        clock->pps_info.enabled = !!on;
        return 0;
}

static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
                           struct ptp_clock_request *rq,
                           int on)
{
        switch (rq->type) {
        case PTP_CLK_REQ_EXTTS:
                return mlx5_extts_configure(ptp, rq, on);
        case PTP_CLK_REQ_PEROUT:
                return mlx5_perout_configure(ptp, rq, on);
        case PTP_CLK_REQ_PPS:
                return mlx5_pps_configure(ptp, rq, on);
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
                           enum ptp_pin_function func, unsigned int chan)
{
        return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
}

static const struct ptp_clock_info mlx5_ptp_clock_info = {
        .owner          = THIS_MODULE,
        .name           = "mlx5_p2p",
        .max_adj        = 100000000,
        .n_alarm        = 0,
        .n_ext_ts       = 0,
        .n_per_out      = 0,
        .n_pins         = 0,
        .pps            = 0,
        .adjfreq        = mlx5_ptp_adjfreq,
        .adjtime        = mlx5_ptp_adjtime,
        .gettimex64     = mlx5_ptp_gettimex,
        .settime64      = mlx5_ptp_settime,
        .enable         = NULL,
        .verify         = NULL,
};

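/* Allocate and name the ptp_pin_desc table ("mlx5_pps%d") exposed to the PTP
 * core, and hook up the .enable/.verify callbacks and PPS support once the
 * device reports pins.
 */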
static int mlx5_init_pin_config(struct mlx5_clock *clock)
{
        int i;

        clock->ptp_info.pin_config =
                        kcalloc(clock->ptp_info.n_pins,
                                sizeof(*clock->ptp_info.pin_config),
                                GFP_KERNEL);
        if (!clock->ptp_info.pin_config)
                return -ENOMEM;
        clock->ptp_info.enable = mlx5_ptp_enable;
        clock->ptp_info.verify = mlx5_ptp_verify;
        clock->ptp_info.pps = 1;

        for (i = 0; i < clock->ptp_info.n_pins; i++) {
                snprintf(clock->ptp_info.pin_config[i].name,
                         sizeof(clock->ptp_info.pin_config[i].name),
                         "mlx5_pps%d", i);
                clock->ptp_info.pin_config[i].index = i;
                clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
                clock->ptp_info.pin_config[i].chan = i;
        }

        return 0;
}

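/* Query the MTPPS register for the number of PPS pins and the per-pin mode
 * capabilities advertised by the device.
 */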
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
        struct mlx5_clock *clock = &mdev->clock;
        u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

        mlx5_query_mtpps(mdev, out, sizeof(out));

        clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
                                          cap_number_of_pps_pins);
        clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
                                            cap_max_num_of_pps_in_pins);
        clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
                                             cap_max_num_of_pps_out_pins);

        clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
        clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
        clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
        clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
        clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
        clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
        clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
        clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}

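/* Notifier callback for PPS events delivered over the EQ. For EXTTS pins the
 * hardware timestamp is converted to nanoseconds and reported to the PTP
 * core (as a PPS event when PPS is enabled). For PEROUT pins the start of
 * the next pulse is set to the next full second and mlx5_pps_out is
 * scheduled to program it.
 */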
static int mlx5_pps_event(struct notifier_block *nb,
                          unsigned long type, void *data)
{
        struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
        struct mlx5_core_dev *mdev = clock->mdev;
        struct ptp_clock_event ptp_event;
        u64 cycles_now, cycles_delta;
        u64 nsec_now, nsec_delta, ns;
        struct mlx5_eqe *eqe = data;
        int pin = eqe->data.pps.pin;
        struct timespec64 ts;
        unsigned long flags;

        switch (clock->ptp_info.pin_config[pin].func) {
        case PTP_PF_EXTTS:
                ptp_event.index = pin;
                ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
                                        be64_to_cpu(eqe->data.pps.time_stamp));
                if (clock->pps_info.enabled) {
                        ptp_event.type = PTP_CLOCK_PPSUSR;
                        ptp_event.pps_times.ts_real =
                                        ns_to_timespec64(ptp_event.timestamp);
                } else {
                        ptp_event.type = PTP_CLOCK_EXTTS;
                }
                /* TODO: clock->ptp can be NULL if ptp_clock_register fails */
                ptp_clock_event(clock->ptp, &ptp_event);
                break;
        case PTP_PF_PEROUT:
                mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
                cycles_now = mlx5_read_internal_timer(mdev, NULL);
                ts.tv_sec += 1;
                ts.tv_nsec = 0;
                ns = timespec64_to_ns(&ts);
                write_seqlock_irqsave(&clock->lock, flags);
                nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
                nsec_delta = ns - nsec_now;
                cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
                                         clock->cycles.mult);
                clock->pps_info.start[pin] = cycles_now + cycles_delta;
                schedule_work(&clock->pps_info.out_work);
                write_sequnlock_irqrestore(&clock->lock, flags);
                break;
        default:
                mlx5_core_err(mdev, "Unhandled clock PPS event, func %d\n",
                              clock->ptp_info.pin_config[pin].func);
        }

        return NOTIFY_OK;
}

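/* Set up the software clock for the device: initialize the cyclecounter from
 * the reported device frequency, start the overflow watchdog, publish the
 * clock info page, register the PTP clock and subscribe to PPS events.
 */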
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
        struct mlx5_clock *clock = &mdev->clock;
        u64 overflow_cycles;
        u64 ns;
        u64 frac = 0;
        u32 dev_freq;

        dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
        if (!dev_freq) {
                mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
                return;
        }
        seqlock_init(&clock->lock);
        clock->cycles.read = read_internal_timer;
        clock->cycles.shift = MLX5_CYCLES_SHIFT;
        clock->cycles.mult = clocksource_khz2mult(dev_freq,
                                                  clock->cycles.shift);
        clock->nominal_c_mult = clock->cycles.mult;
        clock->cycles.mask = CLOCKSOURCE_MASK(41);
        clock->mdev = mdev;

        timecounter_init(&clock->tc, &clock->cycles,
                         ktime_to_ns(ktime_get_real()));

        /* Calculate period in seconds to call the overflow watchdog - to make
         * sure counter is checked at least twice every wrap around.
         * The period is calculated as the minimum between max HW cycles count
         * (The clock source mask) and max amount of cycles that can be
         * multiplied by clock multiplier where the result doesn't exceed
         * 64bits.
         */
        overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
        overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));

        ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
                                 frac, &frac);
        do_div(ns, NSEC_PER_SEC / HZ);
        clock->overflow_period = ns;

        mdev->clock_info =
                (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
        if (mdev->clock_info) {
                mdev->clock_info->nsec = clock->tc.nsec;
                mdev->clock_info->cycles = clock->tc.cycle_last;
                mdev->clock_info->mask = clock->cycles.mask;
                mdev->clock_info->mult = clock->nominal_c_mult;
                mdev->clock_info->shift = clock->cycles.shift;
                mdev->clock_info->frac = clock->tc.frac;
                mdev->clock_info->overflow_period = clock->overflow_period;
        }

        INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
        INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
        if (clock->overflow_period)
                schedule_delayed_work(&clock->overflow_work, 0);
        else
                mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");

        /* Configure the PHC */
        clock->ptp_info = mlx5_ptp_clock_info;

        /* Initialize 1PPS data structures */
        if (MLX5_PPS_CAP(mdev))
                mlx5_get_pps_caps(mdev);
        if (clock->ptp_info.n_pins)
                mlx5_init_pin_config(clock);

        clock->ptp = ptp_clock_register(&clock->ptp_info,
                                        &mdev->pdev->dev);
        if (IS_ERR(clock->ptp)) {
                mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
                               PTR_ERR(clock->ptp));
                clock->ptp = NULL;
        }

        MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
        mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}

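/* Unregister the PPS notifier and PTP clock, stop the work items and release
 * the resources allocated by mlx5_init_clock().
 */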
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
        struct mlx5_clock *clock = &mdev->clock;

        if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
                return;

        mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
        if (clock->ptp) {
                ptp_clock_unregister(clock->ptp);
                clock->ptp = NULL;
        }

        cancel_work_sync(&clock->pps_info.out_work);
        cancel_delayed_work_sync(&clock->overflow_work);

        if (mdev->clock_info) {
                free_page((unsigned long)mdev->clock_info);
                mdev->clock_info = NULL;
        }

        kfree(clock->ptp_info.pin_config);
}