   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * System Control and Management Interface (SCMI) Notification support
   4 *
   5 * Copyright (C) 2020-2021 ARM Ltd.
   6 */
   7/**
   8 * DOC: Theory of operation
   9 *
  10 * SCMI Protocol specification allows the platform to signal events to
  11 * interested agents via notification messages: this is an implementation
  12 * of the dispatch and delivery of such notifications to the interested users
  13 * inside the Linux kernel.
  14 *
  15 * An SCMI Notification core instance is initialized for each active platform
  16 * instance identified by the means of the usual &struct scmi_handle.
  17 *
  18 * Each SCMI Protocol implementation, during its initialization, registers with
  19 * this core its set of supported events using scmi_register_protocol_events():
  20 * all the needed descriptors are stored in the &struct registered_protocols and
  21 * &struct registered_events arrays.
  22 *
  23 * Kernel users interested in some specific event can register their callbacks
  24 * providing the usual notifier_block descriptor, since this core implements
  25 * events' delivery using the standard Kernel notification chains machinery.
  26 *
  27 * Given the number of possible events defined by SCMI and the extensibility
  28 * of the SCMI Protocol itself, the underlying notification chains are created
  29 * and destroyed dynamically on demand depending on the number of users
  30 * effectively registered for an event, so that no support structures or chains
  31 * are allocated until at least one user has registered a notifier_block for
  32 * such event. Similarly, events' generation itself is enabled at the platform
  33 * level only after at least one user has registered, and it is shutdown after
  34 * the last user for that event has gone.
  35 *
  36 * All users provided callbacks and allocated notification-chains are stored in
  37 * the @registered_events_handlers hashtable. Callbacks' registration requests
  38 * for still to be registered events are instead kept in the dedicated common
  39 * hashtable @pending_events_handlers.
  40 *
  41 * An event is identified univocally by the tuple (proto_id, evt_id, src_id)
  42 * and is served by its own dedicated notification chain; information contained
  43 * in such tuples is used, in a few different ways, to generate the needed
  44 * hash-keys.
  45 *
  46 * Here proto_id and evt_id are simply the protocol_id and message_id numbers
  47 * as described in the SCMI Protocol specification, while src_id represents an
  48 * optional, protocol dependent, source identifier (like domain_id, perf_id
  49 * or sensor_id and so forth).
  50 *
  51 * Upon reception of a notification message from the platform the SCMI RX ISR
  52 * passes the received message payload and some ancillary information (including
  53 * an arrival timestamp in nanoseconds) to the core via @scmi_notify() which
  54 * pushes the event-data itself on a protocol-dedicated kfifo queue for further
  55 * deferred processing as specified in @scmi_events_dispatcher().
  56 *
 * Each protocol has its own dedicated work_struct and worker which, once
 * kicked by the ISR, takes care to empty its own dedicated queue, delivering the
  59 * queued items into the proper notification-chain: notifications processing can
  60 * proceed concurrently on distinct workers only between events belonging to
  61 * different protocols while delivery of events within the same protocol is
  62 * still strictly sequentially ordered by time of arrival.
  63 *
  64 * Events' information is then extracted from the SCMI Notification messages and
  65 * conveyed, converted into a custom per-event report struct, as the void *data
  66 * param to the user callback provided by the registered notifier_block, so that
  67 * from the user perspective his callback will look invoked like:
  68 *
  69 * int user_cb(struct notifier_block *nb, unsigned long event_id, void *report)
  70 *
  71 */
  72
  73#define dev_fmt(fmt) "SCMI Notifications - " fmt
  74#define pr_fmt(fmt) "SCMI Notifications - " fmt
  75
  76#include <linux/bitfield.h>
  77#include <linux/bug.h>
  78#include <linux/compiler.h>
  79#include <linux/device.h>
  80#include <linux/err.h>
  81#include <linux/hashtable.h>
  82#include <linux/kernel.h>
  83#include <linux/ktime.h>
  84#include <linux/kfifo.h>
  85#include <linux/list.h>
  86#include <linux/mutex.h>
  87#include <linux/notifier.h>
  88#include <linux/refcount.h>
  89#include <linux/scmi_protocol.h>
  90#include <linux/slab.h>
  91#include <linux/types.h>
  92#include <linux/workqueue.h>
  93
  94#include "common.h"
  95#include "notify.h"
  96
#define SCMI_MAX_PROTO		256

/* Bitfield layout of the 32-bit hash-key built by MAKE_HASH_KEY() */
#define PROTO_ID_MASK		GENMASK(31, 24)
#define EVT_ID_MASK		GENMASK(23, 16)
#define SRC_ID_MASK		GENMASK(15, 0)

/*
 * Builds an unsigned 32bit key from the given input tuple to be used
 * as a key in hashtables.
 */
#define MAKE_HASH_KEY(p, e, s)			\
	(FIELD_PREP(PROTO_ID_MASK, (p)) |	\
	   FIELD_PREP(EVT_ID_MASK, (e)) |	\
	   FIELD_PREP(SRC_ID_MASK, (s)))

/*
 * Key for a handler registered against ALL the possible src_ids of an event:
 * the src_id field is filled with the all-ones pattern.
 */
#define MAKE_ALL_SRCS_KEY(p, e)		MAKE_HASH_KEY((p), (e), SRC_ID_MASK)

/*
 * Assumes that the stored obj includes its own hash-key in a field named 'key':
 * with this simplification this macro can be equally used for all the objects'
 * types hashed by this implementation.
 *
 * @__ht: The hashtable name
 * @__obj: A pointer to the object type to be retrieved from the hashtable;
 *         it will be used as a cursor while scanning the hashtable and it will
 *         be possibly left as NULL when @__k is not found
 * @__k: The key to search for
 */
#define KEY_FIND(__ht, __obj, __k)				\
({								\
	typeof(__k) k_ = __k;					\
	typeof(__obj) obj_;					\
								\
	hash_for_each_possible((__ht), obj_, hash, k_)		\
		if (obj_->key == k_)				\
			break;					\
	__obj = obj_;						\
})

/* Decompose a hash-key back into the original tuple components */
#define KEY_XTRACT_PROTO_ID(key)	FIELD_GET(PROTO_ID_MASK, (key))
#define KEY_XTRACT_EVT_ID(key)		FIELD_GET(EVT_ID_MASK, (key))
#define KEY_XTRACT_SRC_ID(key)		FIELD_GET(SRC_ID_MASK, (key))

/*
 * A set of macros used to access safely @registered_protocols and
 * @registered_events arrays; these are fixed in size and each entry is possibly
 * populated at protocols' registration time and then only read but NEVER
 * modified or removed.
 */
#define SCMI_GET_PROTO(__ni, __pid)					\
({									\
	typeof(__ni) ni_ = __ni;					\
	struct scmi_registered_events_desc *__pd = NULL;		\
									\
	if (ni_)							\
		__pd = READ_ONCE(ni_->registered_protocols[(__pid)]);	\
	__pd;								\
})

#define SCMI_GET_REVT_FROM_PD(__pd, __eid)				\
({									\
	typeof(__pd) pd_ = __pd;					\
	typeof(__eid) eid_ = __eid;					\
	struct scmi_registered_event *__revt = NULL;			\
									\
	if (pd_ && eid_ < pd_->num_events)				\
		__revt = READ_ONCE(pd_->registered_events[eid_]);	\
	__revt;								\
})

#define SCMI_GET_REVT(__ni, __pid, __eid)				\
({									\
	struct scmi_registered_event *__revt;				\
	struct scmi_registered_events_desc *__pd;			\
									\
	__pd = SCMI_GET_PROTO((__ni), (__pid));				\
	__revt = SCMI_GET_REVT_FROM_PD(__pd, (__eid));			\
	__revt;								\
})

/* A couple of utility macros to limit cruft when calling protocols' helpers */
#define REVT_NOTIFY_SET_STATUS(revt, eid, sid, state)		\
({								\
	typeof(revt) r = revt;					\
	r->proto->ops->set_notify_enabled(r->proto->ph,		\
					(eid), (sid), (state));	\
})

#define REVT_NOTIFY_ENABLE(revt, eid, sid)			\
	REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), true)

#define REVT_NOTIFY_DISABLE(revt, eid, sid)			\
	REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), false)

#define REVT_FILL_REPORT(revt, ...)				\
({								\
	typeof(revt) r = revt;					\
	r->proto->ops->fill_custom_report(r->proto->ph,		\
					  __VA_ARGS__);		\
})

/* Hashtable sizes, as passed to DECLARE_HASHTABLE() (i.e. in bits) */
#define SCMI_PENDING_HASH_SZ		4
#define SCMI_REGISTERED_HASH_SZ		6

struct scmi_registered_events_desc;
 202
/**
 * struct scmi_notify_instance  - Represents an instance of the notification
 * core
 * @gid: GroupID used for devres
 * @handle: A reference to the platform instance
 * @init_work: A work item to perform final initializations of pending handlers
 * @notify_wq: A reference to the allocated Kernel cmwq; shared by all the
 *             per-protocol events_queue descriptors of this instance
 * @pending_mtx: A mutex to protect @pending_events_handlers
 * @registered_protocols: A statically allocated array containing pointers to
 *                        all the registered protocol-level specific information
 *                        related to events' handling
 * @pending_events_handlers: An hashtable containing all pending events'
 *                           handlers descriptors
 *
 * Each platform instance, represented by a handle, has its own instance of
 * the notification subsystem represented by this structure.
 */
struct scmi_notify_instance {
	void			*gid;
	struct scmi_handle	*handle;
	struct work_struct	init_work;
	struct workqueue_struct	*notify_wq;
	/* lock to protect pending_events_handlers */
	struct mutex		pending_mtx;
	struct scmi_registered_events_desc	**registered_protocols;
	DECLARE_HASHTABLE(pending_events_handlers, SCMI_PENDING_HASH_SZ);
};
 230
/**
 * struct events_queue  - Describes a queue and its associated worker
 * @sz: Size in bytes of the related kfifo
 * @kfifo: A dedicated Kernel kfifo descriptor; accessed lock-less since the
 *         only writer is the RX ISR and the only reader is this queue's
 *         single work item (see scmi_events_dispatcher())
 * @notify_work: A custom work item bound to this queue
 * @wq: A reference to the associated workqueue
 *
 * Each protocol has its own dedicated events_queue descriptor.
 */
struct events_queue {
	size_t			sz;
	struct kfifo		kfifo;
	struct work_struct	notify_work;
	struct workqueue_struct	*wq;
};
 246
/**
 * struct scmi_event_header  - A utility header
 * @timestamp: The timestamp, in nanoseconds (boottime), which was associated
 *             to this event as soon as it entered the SCMI RX ISR
 * @payld_sz: Effective size of the embedded message payload which follows
 * @evt_id: Event ID (corresponds to the Event MsgID for this Protocol)
 * @payld: A reference to the embedded event payload (flexible array member)
 *
 * This header is prepended to each received event message payload before
 * queueing it on the related &struct events_queue.
 */
struct scmi_event_header {
	ktime_t timestamp;
	size_t payld_sz;
	unsigned char evt_id;
	unsigned char payld[];
};
 264
struct scmi_registered_event;

/**
 * struct scmi_registered_events_desc  - Protocol Specific information
 * @id: Protocol ID
 * @ops: Protocol specific and event-related operations
 * @equeue: The embedded per-protocol events_queue
 * @ni: A reference to the initialized instance descriptor
 * @eh: A reference to pre-allocated buffer to be used as a scratch area by the
 *      deferred worker when fetching data from the kfifo
 * @eh_sz: Size of the pre-allocated buffer @eh
 * @in_flight: A reference to an in flight &struct scmi_registered_event: set
 *             by the worker when an event header has been dequeued but its
 *             payload has not yet been read out of the kfifo, NULL once the
 *             payload has been processed (see scmi_events_dispatcher())
 * @num_events: Number of events in @registered_events
 * @registered_events: A dynamically allocated array holding all the registered
 *                     events' descriptors, whose fixed-size is determined at
 *                     compile time.
 * @registered_mtx: A mutex to protect @registered_events_handlers
 * @ph: SCMI protocol handle reference
 * @registered_events_handlers: An hashtable containing all events' handlers
 *                              descriptors registered for this protocol
 *
 * All protocols that register at least one event have their protocol-specific
 * information stored here, together with the embedded allocated events_queue.
 * These descriptors are stored in the @registered_protocols array at protocol
 * registration time.
 *
 * Once these descriptors are successfully registered, they are NEVER again
 * removed or modified since protocols do not unregister ever, so that, once
 * we safely grab a NON-NULL reference from the array we can keep it and use it.
 */
struct scmi_registered_events_desc {
	u8				id;
	const struct scmi_event_ops	*ops;
	struct events_queue		equeue;
	struct scmi_notify_instance	*ni;
	struct scmi_event_header	*eh;
	size_t				eh_sz;
	void				*in_flight;
	int				num_events;
	struct scmi_registered_event	**registered_events;
	/* mutex to protect registered_events_handlers */
	struct mutex			registered_mtx;
	const struct scmi_protocol_handle	*ph;
	DECLARE_HASHTABLE(registered_events_handlers, SCMI_REGISTERED_HASH_SZ);
};
 310
/**
 * struct scmi_registered_event  - Event Specific Information
 * @proto: A reference to the associated protocol descriptor
 * @evt: A reference to the associated event descriptor (as provided at
 *       registration time)
 * @report: A pre-allocated buffer used by the deferred worker to fill a
 *          customized event report
 * @num_sources: The number of possible sources for this event as stated at
 *               events' registration time
 * @sources: A reference to a dynamically allocated array used to refcount the
 *           events' enable requests for all the existing sources
 * @sources_mtx: A mutex to serialize the access to @sources
 *
 * All registered events are represented by one of these structures that are
 * stored in the @registered_events array at protocol registration time.
 *
 * Once these descriptors are successfully registered, they are NEVER again
 * removed or modified since protocols do not unregister ever, so that once we
 * safely grab a NON-NULL reference from the table we can keep it and use it.
 */
struct scmi_registered_event {
	struct scmi_registered_events_desc *proto;
	const struct scmi_event *evt;
	void		*report;
	u32		num_sources;
	refcount_t	*sources;
	/* locking to serialize the access to sources */
	struct mutex	sources_mtx;
};
 340
/**
 * struct scmi_event_handler  - Event handler information
 * @key: The used hashkey, as built by MAKE_HASH_KEY()
 * @users: A reference count for number of active users for this handler
 * @r_evt: A reference to the associated registered event; when this is NULL
 *         this handler is pending, which means that identifies a set of
 *         callbacks intended to be attached to an event which is still not
 *         known nor registered by any protocol at that point in time
 * @chain: The notification chain dedicated to this specific event tuple
 * @hash: The hlist_node used for collision handling
 * @enabled: A boolean which records if event's generation has been already
 *           enabled for this handler as a whole
 *
 * This structure collects all the information needed to process a received
 * event identified by the tuple (proto_id, evt_id, src_id).
 * These descriptors are stored in a per-protocol @registered_events_handlers
 * table using as a key a value derived from that tuple.
 */
struct scmi_event_handler {
	u32				key;
	refcount_t			users;
	struct scmi_registered_event	*r_evt;
	struct blocking_notifier_head	chain;
	struct hlist_node		hash;
	bool				enabled;
};
 367
/* A handler not yet bound to any registered event is considered pending */
#define IS_HNDL_PENDING(hndl)	(!(hndl)->r_evt)

/* Forward declarations of handler refcounting helpers defined further down */
static struct scmi_event_handler *
scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key);
static void scmi_put_active_handler(struct scmi_notify_instance *ni,
				    struct scmi_event_handler *hndl);
static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
				      struct scmi_event_handler *hndl);
 376
 377/**
 378 * scmi_lookup_and_call_event_chain()  - Lookup the proper chain and call it
 379 * @ni: A reference to the notification instance to use
 380 * @evt_key: The key to use to lookup the related notification chain
 381 * @report: The customized event-specific report to pass down to the callbacks
 382 *          as their *data parameter.
 383 */
 384static inline void
 385scmi_lookup_and_call_event_chain(struct scmi_notify_instance *ni,
 386                                 u32 evt_key, void *report)
 387{
 388        int ret;
 389        struct scmi_event_handler *hndl;
 390
 391        /*
 392         * Here ensure the event handler cannot vanish while using it.
 393         * It is legitimate, though, for an handler not to be found at all here,
 394         * e.g. when it has been unregistered by the user after some events had
 395         * already been queued.
 396         */
 397        hndl = scmi_get_active_handler(ni, evt_key);
 398        if (!hndl)
 399                return;
 400
 401        ret = blocking_notifier_call_chain(&hndl->chain,
 402                                           KEY_XTRACT_EVT_ID(evt_key),
 403                                           report);
 404        /* Notifiers are NOT supposed to cut the chain ... */
 405        WARN_ON_ONCE(ret & NOTIFY_STOP_MASK);
 406
 407        scmi_put_active_handler(ni, hndl);
 408}
 409
 410/**
 411 * scmi_process_event_header()  - Dequeue and process an event header
 412 * @eq: The queue to use
 413 * @pd: The protocol descriptor to use
 414 *
 415 * Read an event header from the protocol queue into the dedicated scratch
 416 * buffer and looks for a matching registered event; in case an anomalously
 417 * sized read is detected just flush the queue.
 418 *
 419 * Return:
 420 * * a reference to the matching registered event when found
 421 * * ERR_PTR(-EINVAL) when NO registered event could be found
 422 * * NULL when the queue is empty
 423 */
 424static inline struct scmi_registered_event *
 425scmi_process_event_header(struct events_queue *eq,
 426                          struct scmi_registered_events_desc *pd)
 427{
 428        unsigned int outs;
 429        struct scmi_registered_event *r_evt;
 430
 431        outs = kfifo_out(&eq->kfifo, pd->eh,
 432                         sizeof(struct scmi_event_header));
 433        if (!outs)
 434                return NULL;
 435        if (outs != sizeof(struct scmi_event_header)) {
 436                dev_err(pd->ni->handle->dev, "corrupted EVT header. Flush.\n");
 437                kfifo_reset_out(&eq->kfifo);
 438                return NULL;
 439        }
 440
 441        r_evt = SCMI_GET_REVT_FROM_PD(pd, pd->eh->evt_id);
 442        if (!r_evt)
 443                r_evt = ERR_PTR(-EINVAL);
 444
 445        return r_evt;
 446}
 447
/**
 * scmi_process_event_payload()  - Dequeue and process an event payload
 * @eq: The queue to use
 * @pd: The protocol descriptor to use
 * @r_evt: The registered event descriptor to use
 *
 * Read an event payload from the protocol queue into the dedicated scratch
 * buffer, fills a custom report and then look for matching event handlers and
 * call them; skip any unknown event (as marked by scmi_process_event_header())
 * and in case an anomalously sized read is detected just flush the queue.
 *
 * Return: False when the queue is empty
 */
static inline bool
scmi_process_event_payload(struct events_queue *eq,
			   struct scmi_registered_events_desc *pd,
			   struct scmi_registered_event *r_evt)
{
	u32 src_id, key;
	unsigned int outs;
	void *report = NULL;

	/* Payload not queued yet: leave the event in-flight for the next run */
	outs = kfifo_out(&eq->kfifo, pd->eh->payld, pd->eh->payld_sz);
	if (!outs)
		return false;

	/* Any in-flight event has now been officially processed */
	pd->in_flight = NULL;

	/* A short read means the queue content cannot be trusted: drop it all */
	if (outs != pd->eh->payld_sz) {
		dev_err(pd->ni->handle->dev, "corrupted EVT Payload. Flush.\n");
		kfifo_reset_out(&eq->kfifo);
		return false;
	}

	/* Header referenced an event unknown to this protocol: skip it */
	if (IS_ERR(r_evt)) {
		dev_warn(pd->ni->handle->dev,
			 "SKIP UNKNOWN EVT - proto:%X  evt:%d\n",
			 pd->id, pd->eh->evt_id);
		return true;
	}

	/*
	 * Build the custom report in the pre-allocated r_evt->report buffer;
	 * the protocol helper also extracts src_id from the raw payload.
	 */
	report = REVT_FILL_REPORT(r_evt, pd->eh->evt_id, pd->eh->timestamp,
				  pd->eh->payld, pd->eh->payld_sz,
				  r_evt->report, &src_id);
	if (!report) {
		dev_err(pd->ni->handle->dev,
			"report not available - proto:%X  evt:%d\n",
			pd->id, pd->eh->evt_id);
		return true;
	}

	/* At first search for a generic ALL src_ids handler... */
	key = MAKE_ALL_SRCS_KEY(pd->id, pd->eh->evt_id);
	scmi_lookup_and_call_event_chain(pd->ni, key, report);

	/* ...then search for any specific src_id */
	key = MAKE_HASH_KEY(pd->id, pd->eh->evt_id, src_id);
	scmi_lookup_and_call_event_chain(pd->ni, key, report);

	return true;
}
 510
 511/**
 512 * scmi_events_dispatcher()  - Common worker logic for all work items.
 513 * @work: The work item to use, which is associated to a dedicated events_queue
 514 *
 515 * Logic:
 516 *  1. dequeue one pending RX notification (queued in SCMI RX ISR context)
 517 *  2. generate a custom event report from the received event message
 518 *  3. lookup for any registered ALL_SRC_IDs handler:
 519 *    - > call the related notification chain passing in the report
 520 *  4. lookup for any registered specific SRC_ID handler:
 521 *    - > call the related notification chain passing in the report
 522 *
 523 * Note that:
 524 * * a dedicated per-protocol kfifo queue is used: in this way an anomalous
 525 *   flood of events cannot saturate other protocols' queues.
 526 * * each per-protocol queue is associated to a distinct work_item, which
 527 *   means, in turn, that:
 528 *   + all protocols can process their dedicated queues concurrently
 529 *     (since notify_wq:max_active != 1)
 530 *   + anyway at most one worker instance is allowed to run on the same queue
 531 *     concurrently: this ensures that we can have only one concurrent
 532 *     reader/writer on the associated kfifo, so that we can use it lock-less
 533 *
 534 * Context: Process context.
 535 */
 536static void scmi_events_dispatcher(struct work_struct *work)
 537{
 538        struct events_queue *eq;
 539        struct scmi_registered_events_desc *pd;
 540        struct scmi_registered_event *r_evt;
 541
 542        eq = container_of(work, struct events_queue, notify_work);
 543        pd = container_of(eq, struct scmi_registered_events_desc, equeue);
 544        /*
 545         * In order to keep the queue lock-less and the number of memcopies
 546         * to the bare minimum needed, the dispatcher accounts for the
 547         * possibility of per-protocol in-flight events: i.e. an event whose
 548         * reception could end up being split across two subsequent runs of this
 549         * worker, first the header, then the payload.
 550         */
 551        do {
 552                if (!pd->in_flight) {
 553                        r_evt = scmi_process_event_header(eq, pd);
 554                        if (!r_evt)
 555                                break;
 556                        pd->in_flight = r_evt;
 557                } else {
 558                        r_evt = pd->in_flight;
 559                }
 560        } while (scmi_process_event_payload(eq, pd, r_evt));
 561}
 562
 563/**
 564 * scmi_notify()  - Queues a notification for further deferred processing
 565 * @handle: The handle identifying the platform instance from which the
 566 *          dispatched event is generated
 567 * @proto_id: Protocol ID
 568 * @evt_id: Event ID (msgID)
 569 * @buf: Event Message Payload (without the header)
 570 * @len: Event Message Payload size
 571 * @ts: RX Timestamp in nanoseconds (boottime)
 572 *
 573 * Context: Called in interrupt context to queue a received event for
 574 * deferred processing.
 575 *
 576 * Return: 0 on Success
 577 */
 578int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
 579                const void *buf, size_t len, ktime_t ts)
 580{
 581        struct scmi_registered_event *r_evt;
 582        struct scmi_event_header eh;
 583        struct scmi_notify_instance *ni;
 584
 585        ni = scmi_notification_instance_data_get(handle);
 586        if (!ni)
 587                return 0;
 588
 589        r_evt = SCMI_GET_REVT(ni, proto_id, evt_id);
 590        if (!r_evt)
 591                return -EINVAL;
 592
 593        if (len > r_evt->evt->max_payld_sz) {
 594                dev_err(handle->dev, "discard badly sized message\n");
 595                return -EINVAL;
 596        }
 597        if (kfifo_avail(&r_evt->proto->equeue.kfifo) < sizeof(eh) + len) {
 598                dev_warn(handle->dev,
 599                         "queue full, dropping proto_id:%d  evt_id:%d  ts:%lld\n",
 600                         proto_id, evt_id, ktime_to_ns(ts));
 601                return -ENOMEM;
 602        }
 603
 604        eh.timestamp = ts;
 605        eh.evt_id = evt_id;
 606        eh.payld_sz = len;
 607        /*
 608         * Header and payload are enqueued with two distinct kfifo_in() (so non
 609         * atomic), but this situation is handled properly on the consumer side
 610         * with in-flight events tracking.
 611         */
 612        kfifo_in(&r_evt->proto->equeue.kfifo, &eh, sizeof(eh));
 613        kfifo_in(&r_evt->proto->equeue.kfifo, buf, len);
 614        /*
 615         * Don't care about return value here since we just want to ensure that
 616         * a work is queued all the times whenever some items have been pushed
 617         * on the kfifo:
 618         * - if work was already queued it will simply fail to queue a new one
 619         *   since it is not needed
 620         * - if work was not queued already it will be now, even in case work
 621         *   was in fact already running: this behavior avoids any possible race
 622         *   when this function pushes new items onto the kfifos after the
 623         *   related executing worker had already determined the kfifo to be
 624         *   empty and it was terminating.
 625         */
 626        queue_work(r_evt->proto->equeue.wq,
 627                   &r_evt->proto->equeue.notify_work);
 628
 629        return 0;
 630}
 631
/**
 * scmi_kfifo_free()  - Devres action helper to free the kfifo
 * @kfifo: The kfifo to free
 *
 * Registered via devm_add_action_or_reset() so the kfifo buffer is released
 * automatically on device teardown.
 */
static void scmi_kfifo_free(void *kfifo)
{
	/*
	 * NOTE(review): the cast looks redundant for a void * argument but is
	 * presumably required because kfifo_free() is a type-generic macro
	 * that dereferences its argument — confirm before removing.
	 */
	kfifo_free((struct kfifo *)kfifo);
}
 640
 641/**
 642 * scmi_initialize_events_queue()  - Allocate/Initialize a kfifo buffer
 643 * @ni: A reference to the notification instance to use
 644 * @equeue: The events_queue to initialize
 645 * @sz: Size of the kfifo buffer to allocate
 646 *
 647 * Allocate a buffer for the kfifo and initialize it.
 648 *
 649 * Return: 0 on Success
 650 */
 651static int scmi_initialize_events_queue(struct scmi_notify_instance *ni,
 652                                        struct events_queue *equeue, size_t sz)
 653{
 654        int ret;
 655
 656        if (kfifo_alloc(&equeue->kfifo, sz, GFP_KERNEL))
 657                return -ENOMEM;
 658        /* Size could have been roundup to power-of-two */
 659        equeue->sz = kfifo_size(&equeue->kfifo);
 660
 661        ret = devm_add_action_or_reset(ni->handle->dev, scmi_kfifo_free,
 662                                       &equeue->kfifo);
 663        if (ret)
 664                return ret;
 665
 666        INIT_WORK(&equeue->notify_work, scmi_events_dispatcher);
 667        equeue->wq = ni->notify_wq;
 668
 669        return ret;
 670}
 671
 672/**
 673 * scmi_allocate_registered_events_desc()  - Allocate a registered events'
 674 * descriptor
 675 * @ni: A reference to the &struct scmi_notify_instance notification instance
 676 *      to use
 677 * @proto_id: Protocol ID
 678 * @queue_sz: Size of the associated queue to allocate
 679 * @eh_sz: Size of the event header scratch area to pre-allocate
 680 * @num_events: Number of events to support (size of @registered_events)
 681 * @ops: Pointer to a struct holding references to protocol specific helpers
 682 *       needed during events handling
 683 *
 684 * It is supposed to be called only once for each protocol at protocol
 685 * initialization time, so it warns if the requested protocol is found already
 686 * registered.
 687 *
 688 * Return: The allocated and registered descriptor on Success
 689 */
static struct scmi_registered_events_desc *
scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni,
				     u8 proto_id, size_t queue_sz, size_t eh_sz,
				     int num_events,
				     const struct scmi_event_ops *ops)
{
	int ret;
	struct scmi_registered_events_desc *pd;

	/* Ensure protocols are up to date */
	smp_rmb();
	/* Each protocol is expected to register its events only once */
	if (WARN_ON(ni->registered_protocols[proto_id]))
		return ERR_PTR(-EINVAL);

	/*
	 * All allocations below are devm-managed, so error paths need no
	 * explicit unwinding: everything is released at device teardown.
	 */
	pd = devm_kzalloc(ni->handle->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);
	pd->id = proto_id;
	pd->ops = ops;
	pd->ni = ni;

	ret = scmi_initialize_events_queue(ni, &pd->equeue, queue_sz);
	if (ret)
		return ERR_PTR(ret);

	/* Scratch area used while decoding raw event headers */
	pd->eh = devm_kzalloc(ni->handle->dev, eh_sz, GFP_KERNEL);
	if (!pd->eh)
		return ERR_PTR(-ENOMEM);
	pd->eh_sz = eh_sz;

	pd->registered_events = devm_kcalloc(ni->handle->dev, num_events,
					     sizeof(char *), GFP_KERNEL);
	if (!pd->registered_events)
		return ERR_PTR(-ENOMEM);
	pd->num_events = num_events;

	/* Initialize per protocol handlers table */
	mutex_init(&pd->registered_mtx);
	hash_init(pd->registered_events_handlers);

	return pd;
}
 732
 733/**
 734 * scmi_register_protocol_events()  - Register Protocol Events with the core
 735 * @handle: The handle identifying the platform instance against which the
 736 *          protocol's events are registered
 737 * @proto_id: Protocol ID
 738 * @ph: SCMI protocol handle.
 739 * @ee: A structure describing the events supported by this protocol.
 740 *
 741 * Used by SCMI Protocols initialization code to register with the notification
 742 * core the list of supported events and their descriptors: takes care to
 743 * pre-allocate and store all needed descriptors, scratch buffers and event
 744 * queues.
 745 *
 746 * Return: 0 on Success
 747 */
int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
				  const struct scmi_protocol_handle *ph,
				  const struct scmi_protocol_events *ee)
{
	int i;
	unsigned int num_sources;
	size_t payld_sz = 0;
	struct scmi_registered_events_desc *pd;
	struct scmi_notify_instance *ni;
	const struct scmi_event *evt;

	if (!ee || !ee->ops || !ee->evts || !ph ||
	    (!ee->num_sources && !ee->ops->get_num_sources))
		return -EINVAL;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return -ENOMEM;

	/* num_sources cannot be <= 0 */
	if (ee->num_sources) {
		num_sources = ee->num_sources;
	} else {
		/* Sources count not known statically: query the protocol */
		int nsrc = ee->ops->get_num_sources(ph);

		if (nsrc <= 0)
			return -EINVAL;
		num_sources = nsrc;
	}

	/*
	 * Size the shared scratch buffer so it can hold the largest of this
	 * protocol's event payloads, plus the internal event header.
	 */
	evt = ee->evts;
	for (i = 0; i < ee->num_events; i++)
		payld_sz = max_t(size_t, payld_sz, evt[i].max_payld_sz);
	payld_sz += sizeof(struct scmi_event_header);

	pd = scmi_allocate_registered_events_desc(ni, proto_id, ee->queue_sz,
						  payld_sz, ee->num_events,
						  ee->ops);
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	/*
	 * All the allocations in this loop are devm-managed: nothing to
	 * unwind on the -ENOMEM early returns below.
	 */
	pd->ph = ph;
	for (i = 0; i < ee->num_events; i++, evt++) {
		struct scmi_registered_event *r_evt;

		r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt),
				     GFP_KERNEL);
		if (!r_evt)
			return -ENOMEM;
		r_evt->proto = pd;
		r_evt->evt = evt;

		/* One refcount per source tracks enabled-users per source */
		r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources,
					      sizeof(refcount_t), GFP_KERNEL);
		if (!r_evt->sources)
			return -ENOMEM;
		r_evt->num_sources = num_sources;
		mutex_init(&r_evt->sources_mtx);

		/* Per-event scratch area for building user-facing reports */
		r_evt->report = devm_kzalloc(ni->handle->dev,
					     evt->max_report_sz, GFP_KERNEL);
		if (!r_evt->report)
			return -ENOMEM;

		pd->registered_events[i] = r_evt;
		/* Ensure events are updated */
		smp_wmb();
		dev_dbg(handle->dev, "registered event - %lX\n",
			MAKE_ALL_SRCS_KEY(r_evt->proto->id, r_evt->evt->id));
	}

	/* Register protocol and events...it will never be removed */
	ni->registered_protocols[proto_id] = pd;
	/* Ensure protocols are updated */
	smp_wmb();

	/*
	 * Finalize any pending events' handler which could have been waiting
	 * for this protocol's events registration.
	 */
	schedule_work(&ni->init_work);

	return 0;
}
 832
 833/**
 834 * scmi_deregister_protocol_events  - Deregister protocol events with the core
 835 * @handle: The handle identifying the platform instance against which the
 836 *          protocol's events are registered
 837 * @proto_id: Protocol ID
 838 */
 839void scmi_deregister_protocol_events(const struct scmi_handle *handle,
 840                                     u8 proto_id)
 841{
 842        struct scmi_notify_instance *ni;
 843        struct scmi_registered_events_desc *pd;
 844
 845        ni = scmi_notification_instance_data_get(handle);
 846        if (!ni)
 847                return;
 848
 849        pd = ni->registered_protocols[proto_id];
 850        if (!pd)
 851                return;
 852
 853        ni->registered_protocols[proto_id] = NULL;
 854        /* Ensure protocols are updated */
 855        smp_wmb();
 856
 857        cancel_work_sync(&pd->equeue.notify_work);
 858}
 859
 860/**
 861 * scmi_allocate_event_handler()  - Allocate Event handler
 862 * @ni: A reference to the notification instance to use
 863 * @evt_key: 32bit key uniquely bind to the event identified by the tuple
 864 *           (proto_id, evt_id, src_id)
 865 *
 866 * Allocate an event handler and related notification chain associated with
 867 * the provided event handler key.
 868 * Note that, at this point, a related registered_event is still to be
 869 * associated to this handler descriptor (hndl->r_evt == NULL), so the handler
 870 * is initialized as pending.
 871 *
 872 * Context: Assumes to be called with @pending_mtx already acquired.
 873 * Return: the freshly allocated structure on Success
 874 */
 875static struct scmi_event_handler *
 876scmi_allocate_event_handler(struct scmi_notify_instance *ni, u32 evt_key)
 877{
 878        struct scmi_event_handler *hndl;
 879
 880        hndl = kzalloc(sizeof(*hndl), GFP_KERNEL);
 881        if (!hndl)
 882                return NULL;
 883        hndl->key = evt_key;
 884        BLOCKING_INIT_NOTIFIER_HEAD(&hndl->chain);
 885        refcount_set(&hndl->users, 1);
 886        /* New handlers are created pending */
 887        hash_add(ni->pending_events_handlers, &hndl->hash, hndl->key);
 888
 889        return hndl;
 890}
 891
 892/**
 893 * scmi_free_event_handler()  - Free the provided Event handler
 894 * @hndl: The event handler structure to free
 895 *
 896 * Context: Assumes to be called with proper locking acquired depending
 897 *          on the situation.
 898 */
static void scmi_free_event_handler(struct scmi_event_handler *hndl)
{
	/* Unhash from whichever table (pending/registered) holds it, then free */
	hash_del(&hndl->hash);
	kfree(hndl);
}
 904
 905/**
 906 * scmi_bind_event_handler()  - Helper to attempt binding an handler to an event
 907 * @ni: A reference to the notification instance to use
 908 * @hndl: The event handler to bind
 909 *
 910 * If an associated registered event is found, move the handler from the pending
 911 * into the registered table.
 912 *
 913 * Context: Assumes to be called with @pending_mtx already acquired.
 914 *
 915 * Return: 0 on Success
 916 */
static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni,
					  struct scmi_event_handler *hndl)
{
	struct scmi_registered_event *r_evt;

	/* No registered event yet: handler must stay pending */
	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(hndl->key),
			      KEY_XTRACT_EVT_ID(hndl->key));
	if (!r_evt)
		return -EINVAL;

	/*
	 * Remove from pending and insert into registered while getting hold
	 * of protocol instance.
	 */
	hash_del(&hndl->hash);
	/*
	 * Acquire protocols only for NON pending handlers, so as NOT to trigger
	 * protocol initialization when a notifier is registered against a still
	 * not registered protocol, since it would make little sense to force init
	 * protocols for which still no SCMI driver user exists: they wouldn't
	 * emit any event anyway till some SCMI driver starts using it.
	 */
	scmi_protocol_acquire(ni->handle, KEY_XTRACT_PROTO_ID(hndl->key));
	/* Non-NULL r_evt marks this handler as no longer pending */
	hndl->r_evt = r_evt;

	mutex_lock(&r_evt->proto->registered_mtx);
	hash_add(r_evt->proto->registered_events_handlers,
		 &hndl->hash, hndl->key);
	mutex_unlock(&r_evt->proto->registered_mtx);

	return 0;
}
 949
 950/**
 951 * scmi_valid_pending_handler()  - Helper to check pending status of handlers
 952 * @ni: A reference to the notification instance to use
 953 * @hndl: The event handler to check
 954 *
 955 * An handler is considered pending when its r_evt == NULL, because the related
 956 * event was still unknown at handler's registration time; anyway, since all
 957 * protocols register their supported events once for all at protocols'
 958 * initialization time, a pending handler cannot be considered valid anymore if
 959 * the underlying event (which it is waiting for), belongs to an already
 960 * initialized and registered protocol.
 961 *
 962 * Return: 0 on Success
 963 */
 964static inline int scmi_valid_pending_handler(struct scmi_notify_instance *ni,
 965                                             struct scmi_event_handler *hndl)
 966{
 967        struct scmi_registered_events_desc *pd;
 968
 969        if (!IS_HNDL_PENDING(hndl))
 970                return -EINVAL;
 971
 972        pd = SCMI_GET_PROTO(ni, KEY_XTRACT_PROTO_ID(hndl->key));
 973        if (pd)
 974                return -EINVAL;
 975
 976        return 0;
 977}
 978
 979/**
 980 * scmi_register_event_handler()  - Register whenever possible an Event handler
 981 * @ni: A reference to the notification instance to use
 982 * @hndl: The event handler to register
 983 *
 984 * At first try to bind an event handler to its associated event, then check if
 985 * it was at least a valid pending handler: if it was not bound nor valid return
 986 * false.
 987 *
 988 * Valid pending incomplete bindings will be periodically retried by a dedicated
 989 * worker which is kicked each time a new protocol completes its own
 990 * registration phase.
 991 *
 992 * Context: Assumes to be called with @pending_mtx acquired.
 993 *
 994 * Return: 0 on Success
 995 */
 996static int scmi_register_event_handler(struct scmi_notify_instance *ni,
 997                                       struct scmi_event_handler *hndl)
 998{
 999        int ret;
1000
1001        ret = scmi_bind_event_handler(ni, hndl);
1002        if (!ret) {
1003                dev_dbg(ni->handle->dev, "registered NEW handler - key:%X\n",
1004                        hndl->key);
1005        } else {
1006                ret = scmi_valid_pending_handler(ni, hndl);
1007                if (!ret)
1008                        dev_dbg(ni->handle->dev,
1009                                "registered PENDING handler - key:%X\n",
1010                                hndl->key);
1011        }
1012
1013        return ret;
1014}
1015
1016/**
1017 * __scmi_event_handler_get_ops()  - Utility to get or create an event handler
1018 * @ni: A reference to the notification instance to use
1019 * @evt_key: The event key to use
1020 * @create: A boolean flag to specify if a handler must be created when
1021 *          not already existent
1022 *
1023 * Search for the desired handler matching the key in both the per-protocol
1024 * registered table and the common pending table:
1025 * * if found adjust users refcount
1026 * * if not found and @create is true, create and register the new handler:
1027 *   handler could end up being registered as pending if no matching event
1028 *   could be found.
1029 *
1030 * An handler is guaranteed to reside in one and only one of the tables at
1031 * any one time; to ensure this the whole search and create is performed
1032 * holding the @pending_mtx lock, with @registered_mtx additionally acquired
1033 * if needed.
1034 *
1035 * Note that when a nested acquisition of these mutexes is needed the locking
1036 * order is always (same as in @init_work):
1037 * 1. pending_mtx
1038 * 2. registered_mtx
1039 *
1040 * Events generation is NOT enabled right after creation within this routine
1041 * since at creation time we usually want to have all setup and ready before
1042 * events really start flowing.
1043 *
1044 * Return: A properly refcounted handler on Success, NULL on Failure
1045 */
static inline struct scmi_event_handler *
__scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
			     u32 evt_key, bool create)
{
	struct scmi_registered_event *r_evt;
	struct scmi_event_handler *hndl = NULL;

	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
			      KEY_XTRACT_EVT_ID(evt_key));

	/* Locking order: pending_mtx first, registered_mtx nested inside */
	mutex_lock(&ni->pending_mtx);
	/* Search registered events at first ... if possible at all */
	if (r_evt) {
		mutex_lock(&r_evt->proto->registered_mtx);
		hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
				hndl, evt_key);
		if (hndl)
			refcount_inc(&hndl->users);
		mutex_unlock(&r_evt->proto->registered_mtx);
	}

	/* ...then amongst pending. */
	if (!hndl) {
		hndl = KEY_FIND(ni->pending_events_handlers, hndl, evt_key);
		if (hndl)
			refcount_inc(&hndl->users);
	}

	/* Create if still not found and required */
	if (!hndl && create) {
		hndl = scmi_allocate_event_handler(ni, evt_key);
		if (hndl && scmi_register_event_handler(ni, hndl)) {
			/* Neither bindable nor a valid pending one: drop it */
			dev_dbg(ni->handle->dev,
				"purging UNKNOWN handler - key:%X\n",
				hndl->key);
			/* this hndl can be only a pending one */
			scmi_put_handler_unlocked(ni, hndl);
			hndl = NULL;
		}
	}
	mutex_unlock(&ni->pending_mtx);

	return hndl;
}
1090
/* Lookup-only variant: get an existing handler for @evt_key, never create */
static struct scmi_event_handler *
scmi_get_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	return __scmi_event_handler_get_ops(ni, evt_key, false);
}
1096
/* Get a handler for @evt_key, creating (possibly pending) one if missing */
static struct scmi_event_handler *
scmi_get_or_create_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	return __scmi_event_handler_get_ops(ni, evt_key, true);
}
1102
1103/**
1104 * scmi_get_active_handler()  - Helper to get active handlers only
1105 * @ni: A reference to the notification instance to use
1106 * @evt_key: The event key to use
1107 *
1108 * Search for the desired handler matching the key only in the per-protocol
1109 * table of registered handlers: this is called only from the dispatching path
1110 * so want to be as quick as possible and do not care about pending.
1111 *
1112 * Return: A properly refcounted active handler
1113 */
1114static struct scmi_event_handler *
1115scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key)
1116{
1117        struct scmi_registered_event *r_evt;
1118        struct scmi_event_handler *hndl = NULL;
1119
1120        r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
1121                              KEY_XTRACT_EVT_ID(evt_key));
1122        if (r_evt) {
1123                mutex_lock(&r_evt->proto->registered_mtx);
1124                hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
1125                                hndl, evt_key);
1126                if (hndl)
1127                        refcount_inc(&hndl->users);
1128                mutex_unlock(&r_evt->proto->registered_mtx);
1129        }
1130
1131        return hndl;
1132}
1133
1134/**
1135 * __scmi_enable_evt()  - Enable/disable events generation
1136 * @r_evt: The registered event to act upon
1137 * @src_id: The src_id to act upon
1138 * @enable: The action to perform: true->Enable, false->Disable
1139 *
1140 * Takes care of proper refcounting while performing enable/disable: handles
1141 * the special case of ALL sources requests by itself.
1142 * Returns successfully if at least one of the required src_id has been
1143 * successfully enabled/disabled.
1144 *
1145 * Return: 0 on Success
1146 */
static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt,
				    u32 src_id, bool enable)
{
	int retvals = 0;
	u32 num_sources;
	refcount_t *sid;

	if (src_id == SRC_ID_MASK) {
		/* ALL-sources request: walk every known source from zero */
		src_id = 0;
		num_sources = r_evt->num_sources;
	} else if (src_id < r_evt->num_sources) {
		num_sources = 1;
	} else {
		return -EINVAL;
	}

	mutex_lock(&r_evt->sources_mtx);
	if (enable) {
		for (; num_sources; src_id++, num_sources--) {
			int ret = 0;

			sid = &r_evt->sources[src_id];
			if (refcount_read(sid) == 0) {
				/* First user: enable at the platform level */
				ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id,
							 src_id);
				if (!ret)
					refcount_set(sid, 1);
			} else {
				refcount_inc(sid);
			}
			/* Count the sources effectively enabled */
			retvals += !ret;
		}
	} else {
		for (; num_sources; src_id++, num_sources--) {
			sid = &r_evt->sources[src_id];
			/* Last user gone: disable at the platform level */
			if (refcount_dec_and_test(sid))
				REVT_NOTIFY_DISABLE(r_evt,
						    r_evt->evt->id, src_id);
		}
		/* Disabling is always considered successful */
		retvals = 1;
	}
	mutex_unlock(&r_evt->sources_mtx);

	return retvals ? 0 : -EINVAL;
}
1192
1193static int scmi_enable_events(struct scmi_event_handler *hndl)
1194{
1195        int ret = 0;
1196
1197        if (!hndl->enabled) {
1198                ret = __scmi_enable_evt(hndl->r_evt,
1199                                        KEY_XTRACT_SRC_ID(hndl->key), true);
1200                if (!ret)
1201                        hndl->enabled = true;
1202        }
1203
1204        return ret;
1205}
1206
1207static int scmi_disable_events(struct scmi_event_handler *hndl)
1208{
1209        int ret = 0;
1210
1211        if (hndl->enabled) {
1212                ret = __scmi_enable_evt(hndl->r_evt,
1213                                        KEY_XTRACT_SRC_ID(hndl->key), false);
1214                if (!ret)
1215                        hndl->enabled = false;
1216        }
1217
1218        return ret;
1219}
1220
1221/**
1222 * scmi_put_handler_unlocked()  - Put an event handler
1223 * @ni: A reference to the notification instance to use
1224 * @hndl: The event handler to act upon
1225 *
1226 * After having got exclusive access to the registered handlers hashtable,
1227 * update the refcount and if @hndl is no more in use by anyone:
1228 * * ask for events' generation disabling
1229 * * unregister and free the handler itself
1230 *
1231 * Context: Assumes all the proper locking has been managed by the caller.
1232 *
1233 * Return: True if handler was freed (users dropped to zero)
1234 */
1235static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
1236                                      struct scmi_event_handler *hndl)
1237{
1238        bool freed = false;
1239
1240        if (refcount_dec_and_test(&hndl->users)) {
1241                if (!IS_HNDL_PENDING(hndl))
1242                        scmi_disable_events(hndl);
1243                scmi_free_event_handler(hndl);
1244                freed = true;
1245        }
1246
1247        return freed;
1248}
1249
static void scmi_put_handler(struct scmi_notify_instance *ni,
			     struct scmi_event_handler *hndl)
{
	bool freed;
	u8 protocol_id;
	struct scmi_registered_event *r_evt = hndl->r_evt;

	/* Usual locking order: pending_mtx first, then registered_mtx */
	mutex_lock(&ni->pending_mtx);
	if (r_evt) {
		/* Cache the id now: hndl/r_evt may be freed by the put below */
		protocol_id = r_evt->proto->id;
		mutex_lock(&r_evt->proto->registered_mtx);
	}

	freed = scmi_put_handler_unlocked(ni, hndl);

	if (r_evt) {
		mutex_unlock(&r_evt->proto->registered_mtx);
		/*
		 * Only registered handler acquired protocol; must be here
		 * released only AFTER unlocking registered_mtx, since
		 * releasing a protocol can trigger its de-initialization
		 * (ie. including r_evt and registered_mtx)
		 */
		if (freed)
			scmi_protocol_release(ni->handle, protocol_id);
	}
	mutex_unlock(&ni->pending_mtx);
}
1278
1279static void scmi_put_active_handler(struct scmi_notify_instance *ni,
1280                                    struct scmi_event_handler *hndl)
1281{
1282        bool freed;
1283        struct scmi_registered_event *r_evt = hndl->r_evt;
1284        u8 protocol_id = r_evt->proto->id;
1285
1286        mutex_lock(&r_evt->proto->registered_mtx);
1287        freed = scmi_put_handler_unlocked(ni, hndl);
1288        mutex_unlock(&r_evt->proto->registered_mtx);
1289        if (freed)
1290                scmi_protocol_release(ni->handle, protocol_id);
1291}
1292
1293/**
1294 * scmi_event_handler_enable_events()  - Enable events associated to an handler
1295 * @hndl: The Event handler to act upon
1296 *
1297 * Return: 0 on Success
1298 */
1299static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl)
1300{
1301        if (scmi_enable_events(hndl)) {
1302                pr_err("Failed to ENABLE events for key:%X !\n", hndl->key);
1303                return -EINVAL;
1304        }
1305
1306        return 0;
1307}
1308
1309/**
1310 * scmi_notifier_register()  - Register a notifier_block for an event
1311 * @handle: The handle identifying the platform instance against which the
1312 *          callback is registered
1313 * @proto_id: Protocol ID
1314 * @evt_id: Event ID
1315 * @src_id: Source ID, when NULL register for events coming form ALL possible
1316 *          sources
1317 * @nb: A standard notifier block to register for the specified event
1318 *
1319 * Generic helper to register a notifier_block against a protocol event.
1320 *
1321 * A notifier_block @nb will be registered for each distinct event identified
1322 * by the tuple (proto_id, evt_id, src_id) on a dedicated notification chain
1323 * so that:
1324 *
1325 *      (proto_X, evt_Y, src_Z) --> chain_X_Y_Z
1326 *
1327 * @src_id meaning is protocol specific and identifies the origin of the event
1328 * (like domain_id, sensor_id and so forth).
1329 *
1330 * @src_id can be NULL to signify that the caller is interested in receiving
1331 * notifications from ALL the available sources for that protocol OR simply that
1332 * the protocol does not support distinct sources.
1333 *
1334 * As soon as one user for the specified tuple appears, an handler is created,
1335 * and that specific event's generation is enabled at the platform level, unless
1336 * an associated registered event is found missing, meaning that the needed
1337 * protocol is still to be initialized and the handler has just been registered
1338 * as still pending.
1339 *
1340 * Return: 0 on Success
1341 */
1342static int scmi_notifier_register(const struct scmi_handle *handle,
1343                                  u8 proto_id, u8 evt_id, const u32 *src_id,
1344                                  struct notifier_block *nb)
1345{
1346        int ret = 0;
1347        u32 evt_key;
1348        struct scmi_event_handler *hndl;
1349        struct scmi_notify_instance *ni;
1350
1351        ni = scmi_notification_instance_data_get(handle);
1352        if (!ni)
1353                return -ENODEV;
1354
1355        evt_key = MAKE_HASH_KEY(proto_id, evt_id,
1356                                src_id ? *src_id : SRC_ID_MASK);
1357        hndl = scmi_get_or_create_handler(ni, evt_key);
1358        if (!hndl)
1359                return -EINVAL;
1360
1361        blocking_notifier_chain_register(&hndl->chain, nb);
1362
1363        /* Enable events for not pending handlers */
1364        if (!IS_HNDL_PENDING(hndl)) {
1365                ret = scmi_event_handler_enable_events(hndl);
1366                if (ret)
1367                        scmi_put_handler(ni, hndl);
1368        }
1369
1370        return ret;
1371}
1372
1373/**
1374 * scmi_notifier_unregister()  - Unregister a notifier_block for an event
1375 * @handle: The handle identifying the platform instance against which the
1376 *          callback is unregistered
1377 * @proto_id: Protocol ID
1378 * @evt_id: Event ID
1379 * @src_id: Source ID
1380 * @nb: The notifier_block to unregister
1381 *
1382 * Takes care to unregister the provided @nb from the notification chain
1383 * associated to the specified event and, if there are no more users for the
1384 * event handler, frees also the associated event handler structures.
1385 * (this could possibly cause disabling of event's generation at platform level)
1386 *
1387 * Return: 0 on Success
1388 */
static int scmi_notifier_unregister(const struct scmi_handle *handle,
				    u8 proto_id, u8 evt_id, const u32 *src_id,
				    struct notifier_block *nb)
{
	u32 evt_key;
	struct scmi_event_handler *hndl;
	struct scmi_notify_instance *ni;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return -ENODEV;

	evt_key = MAKE_HASH_KEY(proto_id, evt_id,
				src_id ? *src_id : SRC_ID_MASK);
	hndl = scmi_get_handler(ni, evt_key);
	if (!hndl)
		return -EINVAL;

	/*
	 * Note that this chain unregistration call is safe on its own
	 * being internally protected by an rwsem.
	 */
	blocking_notifier_chain_unregister(&hndl->chain, nb);
	/* This put balances the get issued just above in this routine */
	scmi_put_handler(ni, hndl);

	/*
	 * This balances the initial get issued in @scmi_notifier_register.
	 * If this notifier_block happened to be the last known user callback
	 * for this event, the handler is here freed and the event's generation
	 * stopped.
	 *
	 * Note that, an ongoing concurrent lookup on the delivery workqueue
	 * path could still hold the refcount to 1 even after this routine
	 * completes: in such a case it will be the final put on the delivery
	 * path which will finally free this unused handler.
	 */
	scmi_put_handler(ni, hndl);

	return 0;
}
1429
/*
 * Devres-tracked descriptor of one managed notifier registration: holds
 * everything needed to redo the unregistration at device detach time and
 * to match the resource on explicit un-registration requests.
 */
struct scmi_notifier_devres {
	const struct scmi_handle *handle;
	u8 proto_id;
	u8 evt_id;
	u32 __src_id;			/* Backing storage for @src_id */
	u32 *src_id;			/* &__src_id, or NULL for ALL sources */
	struct notifier_block *nb;
};
1438
/* devres release callback: undo the notifier registration at detach time */
static void scmi_devm_release_notifier(struct device *dev, void *res)
{
	struct scmi_notifier_devres *dres = res;

	scmi_notifier_unregister(dres->handle, dres->proto_id, dres->evt_id,
				 dres->src_id, dres->nb);
}
1446
1447/**
1448 * scmi_devm_notifier_register()  - Managed registration of a notifier_block
1449 * for an event
1450 * @sdev: A reference to an scmi_device whose embedded struct device is to
1451 *        be used for devres accounting.
1452 * @proto_id: Protocol ID
1453 * @evt_id: Event ID
1454 * @src_id: Source ID, when NULL register for events coming form ALL possible
1455 *          sources
1456 * @nb: A standard notifier block to register for the specified event
1457 *
1458 * Generic devres managed helper to register a notifier_block against a
1459 * protocol event.
1460 *
1461 * Return: 0 on Success
1462 */
1463static int scmi_devm_notifier_register(struct scmi_device *sdev,
1464                                       u8 proto_id, u8 evt_id,
1465                                       const u32 *src_id,
1466                                       struct notifier_block *nb)
1467{
1468        int ret;
1469        struct scmi_notifier_devres *dres;
1470
1471        dres = devres_alloc(scmi_devm_release_notifier,
1472                            sizeof(*dres), GFP_KERNEL);
1473        if (!dres)
1474                return -ENOMEM;
1475
1476        ret = scmi_notifier_register(sdev->handle, proto_id,
1477                                     evt_id, src_id, nb);
1478        if (ret) {
1479                devres_free(dres);
1480                return ret;
1481        }
1482
1483        dres->handle = sdev->handle;
1484        dres->proto_id = proto_id;
1485        dres->evt_id = evt_id;
1486        dres->nb = nb;
1487        if (src_id) {
1488                dres->__src_id = *src_id;
1489                dres->src_id = &dres->__src_id;
1490        } else {
1491                dres->src_id = NULL;
1492        }
1493        devres_add(&sdev->dev, dres);
1494
1495        return ret;
1496}
1497
1498static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
1499{
1500        struct scmi_notifier_devres *dres = res;
1501        struct scmi_notifier_devres *xres = data;
1502
1503        if (WARN_ON(!dres || !xres))
1504                return 0;
1505
1506        return dres->proto_id == xres->proto_id &&
1507                dres->evt_id == xres->evt_id &&
1508                dres->nb == xres->nb &&
1509                ((!dres->src_id && !xres->src_id) ||
1510                  (dres->src_id && xres->src_id &&
1511                   dres->__src_id == xres->__src_id));
1512}
1513
1514/**
1515 * scmi_devm_notifier_unregister()  - Managed un-registration of a
1516 * notifier_block for an event
1517 * @sdev: A reference to an scmi_device whose embedded struct device is to
1518 *        be used for devres accounting.
1519 * @proto_id: Protocol ID
1520 * @evt_id: Event ID
 * @src_id: Source ID, when NULL register for events coming from ALL possible
 *          sources
1523 * @nb: A standard notifier block to register for the specified event
1524 *
1525 * Generic devres managed helper to explicitly un-register a notifier_block
1526 * against a protocol event, which was previously registered using the above
1527 * @scmi_devm_notifier_register.
1528 *
1529 * Return: 0 on Success
1530 */
1531static int scmi_devm_notifier_unregister(struct scmi_device *sdev,
1532                                         u8 proto_id, u8 evt_id,
1533                                         const u32 *src_id,
1534                                         struct notifier_block *nb)
1535{
1536        int ret;
1537        struct scmi_notifier_devres dres;
1538
1539        dres.handle = sdev->handle;
1540        dres.proto_id = proto_id;
1541        dres.evt_id = evt_id;
1542        if (src_id) {
1543                dres.__src_id = *src_id;
1544                dres.src_id = &dres.__src_id;
1545        } else {
1546                dres.src_id = NULL;
1547        }
1548
1549        ret = devres_release(&sdev->dev, scmi_devm_release_notifier,
1550                             scmi_devm_notifier_match, &dres);
1551
1552        WARN_ON(ret);
1553
1554        return ret;
1555}
1556
1557/**
1558 * scmi_protocols_late_init()  - Worker for late initialization
1559 * @work: The work item to use associated to the proper SCMI instance
1560 *
1561 * This kicks in whenever a new protocol has completed its own registration via
1562 * scmi_register_protocol_events(): it is in charge of scanning the table of
1563 * pending handlers (registered by users while the related protocol was still
1564 * not initialized) and finalizing their initialization whenever possible;
1565 * invalid pending handlers are purged at this point in time.
1566 */
static void scmi_protocols_late_init(struct work_struct *work)
{
	int bkt;
	struct scmi_event_handler *hndl;
	struct scmi_notify_instance *ni;
	struct hlist_node *tmp;

	ni = container_of(work, struct scmi_notify_instance, init_work);

	/*
	 * Ensure protocols and events are up to date: pairs with a write
	 * barrier on the protocol registration path — presumably in
	 * scmi_register_protocol_events(); confirm against that code.
	 */
	smp_rmb();

	mutex_lock(&ni->pending_mtx);
	/* _safe variant: handlers may be unhashed/freed while iterating */
	hash_for_each_safe(ni->pending_events_handlers, bkt, tmp, hndl, hash) {
		int ret;

		ret = scmi_bind_event_handler(ni, hndl);
		if (!ret) {
			dev_dbg(ni->handle->dev,
				"finalized PENDING handler - key:%X\n",
				hndl->key);
			/* Bound OK: now try to enable event generation */
			ret = scmi_event_handler_enable_events(hndl);
			if (ret) {
				/* Bound but cannot enable: drop it */
				dev_dbg(ni->handle->dev,
					"purging INVALID handler - key:%X\n",
					hndl->key);
				scmi_put_active_handler(ni, hndl);
			}
		} else {
			/* Still unbound: keep only if still plausibly valid */
			ret = scmi_valid_pending_handler(ni, hndl);
			if (ret) {
				dev_dbg(ni->handle->dev,
					"purging PENDING handler - key:%X\n",
					hndl->key);
				/* this hndl can be only a pending one */
				scmi_put_handler_unlocked(ni, hndl);
			}
		}
	}
	mutex_unlock(&ni->pending_mtx);
}
1608
1609/*
1610 * notify_ops are attached to the handle so that can be accessed
1611 * directly from an scmi_driver to register its own notifiers.
1612 */
1613static const struct scmi_notify_ops notify_ops = {
1614        .devm_event_notifier_register = scmi_devm_notifier_register,
1615        .devm_event_notifier_unregister = scmi_devm_notifier_unregister,
1616        .event_notifier_register = scmi_notifier_register,
1617        .event_notifier_unregister = scmi_notifier_unregister,
1618};
1619
1620/**
1621 * scmi_notification_init()  - Initializes Notification Core Support
1622 * @handle: The handle identifying the platform instance to initialize
1623 *
1624 * This function lays out all the basic resources needed by the notification
1625 * core instance identified by the provided handle: once done, all of the
1626 * SCMI Protocols can register their events with the core during their own
1627 * initializations.
1628 *
1629 * Note that failing to initialize the core notifications support does not
1630 * cause the whole SCMI Protocols stack to fail its initialization.
1631 *
1632 * SCMI Notification Initialization happens in 2 steps:
1633 * * initialization: basic common allocations (this function)
 * * registration: protocols asynchronously come to life and register their
 *                 own supported list of events with the core; this causes
1636 *                 further per-protocol allocations
1637 *
1638 * Any user's callback registration attempt, referring a still not registered
1639 * event, will be registered as pending and finalized later (if possible)
1640 * by scmi_protocols_late_init() work.
1641 * This allows for lazy initialization of SCMI Protocols due to late (or
1642 * missing) SCMI drivers' modules loading.
1643 *
1644 * Return: 0 on Success
1645 */
int scmi_notification_init(struct scmi_handle *handle)
{
	void *gid;
	struct scmi_notify_instance *ni;

	/*
	 * Open a dedicated devres group so that, on failure, everything
	 * allocated below can be torn down with one devres_release_group().
	 */
	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
	if (!gid)
		return -ENOMEM;

	ni = devm_kzalloc(handle->dev, sizeof(*ni), GFP_KERNEL);
	if (!ni)
		goto err;

	ni->gid = gid;
	ni->handle = handle;

	/* One slot per possible protocol; entries are filled on demand */
	ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
						sizeof(char *), GFP_KERNEL);
	if (!ni->registered_protocols)
		goto err;

	ni->notify_wq = alloc_workqueue(dev_name(handle->dev),
					WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
					0);
	if (!ni->notify_wq)
		goto err;

	mutex_init(&ni->pending_mtx);
	hash_init(ni->pending_events_handlers);

	/* Late-init work finalizes handlers pending on protocol init */
	INIT_WORK(&ni->init_work, scmi_protocols_late_init);

	scmi_notification_instance_data_set(handle, ni);
	handle->notify_ops = &notify_ops;
	/* Ensure handle is up to date before other CPUs can observe it */
	smp_wmb();

	dev_info(handle->dev, "Core Enabled.\n");

	devres_close_group(handle->dev, ni->gid);

	return 0;

err:
	/* All failures above are allocation failures: report -ENOMEM */
	dev_warn(handle->dev, "Initialization Failed.\n");
	devres_release_group(handle->dev, gid);
	return -ENOMEM;
}
1694
1695/**
1696 * scmi_notification_exit()  - Shutdown and clean Notification core
1697 * @handle: The handle identifying the platform instance to shutdown
1698 */
void scmi_notification_exit(struct scmi_handle *handle)
{
	struct scmi_notify_instance *ni;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return;
	/* Detach instance data first so no new users can find it */
	scmi_notification_instance_data_set(handle, NULL);

	/* Destroy while letting pending work complete */
	destroy_workqueue(ni->notify_wq);

	/* Releases everything allocated inside the group opened at init */
	devres_release_group(ni->handle->dev, ni->gid);
}
1713