// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/completion.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define PADATA_WORK_ONSTACK     1       /* Work's memory is on stack */

struct padata_work {
        struct work_struct      pw_work;
        struct list_head        pw_list;  /* padata_free_works linkage */
        void                    *pw_data;
};

static DEFINE_SPINLOCK(padata_works_lock);
static struct padata_work *padata_works;
static LIST_HEAD(padata_free_works);

struct padata_mt_job_state {
        spinlock_t              lock;
        struct completion       completion;
        struct padata_mt_job    *job;
        int                     nworks;
        int                     nworks_fini;
        unsigned long           chunk_size;
};

static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
        int cpu, target_cpu;

        target_cpu = cpumask_first(pd->cpumask.pcpu);
        for (cpu = 0; cpu < cpu_index; cpu++)
                target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

        return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
        /*
         * Hash the sequence numbers to the cpus by taking
         * seq_nr modulo the number of cpus in use.
         */
        int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

        return padata_index_to_cpu(pd, cpu_index);
}
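
/*
 * Worked example (illustrative): with pd->cpumask.pcpu = {0, 2, 3} and
 * seq_nr = 7, cpumask_weight() is 3, so cpu_index = 7 % 3 = 1 and
 * padata_index_to_cpu() walks one step past the first CPU in the mask,
 * returning CPU 2.
 */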

static struct padata_work *padata_work_alloc(void)
{
        struct padata_work *pw;

        lockdep_assert_held(&padata_works_lock);

        if (list_empty(&padata_free_works))
                return NULL;    /* No more work items allowed to be queued. */

        pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
        list_del(&pw->pw_list);
        return pw;
}

static void padata_work_init(struct padata_work *pw, work_func_t work_fn,
                             void *data, int flags)
{
        if (flags & PADATA_WORK_ONSTACK)
                INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
        else
                INIT_WORK(&pw->pw_work, work_fn);
        pw->pw_data = data;
}

static int __init padata_work_alloc_mt(int nworks, void *data,
                                       struct list_head *head)
{
        int i;

        spin_lock(&padata_works_lock);
        /* Start at 1 because the current task participates in the job. */
        for (i = 1; i < nworks; ++i) {
                struct padata_work *pw = padata_work_alloc();

                if (!pw)
                        break;
                padata_work_init(pw, padata_mt_helper, data, 0);
                list_add(&pw->pw_list, head);
        }
        spin_unlock(&padata_works_lock);

        return i;
}

static void padata_work_free(struct padata_work *pw)
{
        lockdep_assert_held(&padata_works_lock);
        list_add(&pw->pw_list, &padata_free_works);
}

static void __init padata_works_free(struct list_head *works)
{
        struct padata_work *cur, *next;

        if (list_empty(works))
                return;

        spin_lock(&padata_works_lock);
        list_for_each_entry_safe(cur, next, works, pw_list) {
                list_del(&cur->pw_list);
                padata_work_free(cur);
        }
        spin_unlock(&padata_works_lock);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
        struct padata_work *pw = container_of(parallel_work, struct padata_work,
                                              pw_work);
        struct padata_priv *padata = pw->pw_data;

        local_bh_disable();
        padata->parallel(padata);
        spin_lock(&padata_works_lock);
        padata_work_free(pw);
        spin_unlock(&padata_works_lock);
        local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on.  If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
                       struct padata_priv *padata, int *cb_cpu)
{
        struct padata_instance *pinst = ps->pinst;
        int i, cpu, cpu_index, err;
        struct parallel_data *pd;
        struct padata_work *pw;

        rcu_read_lock_bh();

        pd = rcu_dereference_bh(ps->pd);

        err = -EINVAL;
        if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
                goto out;

        if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
                if (!cpumask_weight(pd->cpumask.cbcpu))
                        goto out;

                /* Select an alternate fallback CPU and notify the caller. */
                cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

                cpu = cpumask_first(pd->cpumask.cbcpu);
                for (i = 0; i < cpu_index; i++)
                        cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

                *cb_cpu = cpu;
        }

        err = -EBUSY;
        if ((pinst->flags & PADATA_RESET))
                goto out;

        atomic_inc(&pd->refcnt);
        padata->pd = pd;
        padata->cb_cpu = *cb_cpu;

        rcu_read_unlock_bh();

        spin_lock(&padata_works_lock);
        padata->seq_nr = ++pd->seq_nr;
        pw = padata_work_alloc();
        spin_unlock(&padata_works_lock);
        if (pw) {
                padata_work_init(pw, padata_parallel_worker, padata, 0);
                queue_work(pinst->parallel_wq, &pw->pw_work);
        } else {
                /* Maximum works limit exceeded, run in the current task. */
                padata->parallel(padata);
        }

        return 0;
out:
        rcu_read_unlock_bh();

        return err;
}
EXPORT_SYMBOL(padata_do_parallel);
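
/*
 * Illustrative caller (a sketch; my_request, my_parallel and my_serial
 * are hypothetical names, not kernel APIs).  A user embeds struct
 * padata_priv in its own object, fills in the callbacks and submits it:
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		...
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						      struct my_request,
 *						      padata);
 *
 *		... expensive work, runs concurrently across CPUs ...
 *		padata_do_serial(padata);  // mandatory for every object
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		... runs in original submission order, BHs off ...
 *	}
 *
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;
 *	cb_cpu = ...;	// preferred CPU for my_serial()
 *	err = padata_do_parallel(ps, &req->padata, &cb_cpu);
 */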

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
                                            bool remove_object)
{
        struct padata_parallel_queue *next_queue;
        struct padata_priv *padata;
        struct padata_list *reorder;
        int cpu = pd->cpu;

        next_queue = per_cpu_ptr(pd->pqueue, cpu);
        reorder = &next_queue->reorder;

        spin_lock(&reorder->lock);
        if (list_empty(&reorder->list)) {
                spin_unlock(&reorder->lock);
                return NULL;
        }

        padata = list_entry(reorder->list.next, struct padata_priv, list);

        /*
         * Checks the rare case where two or more parallel jobs have hashed to
         * the same CPU and one of the later ones finishes first.
         */
        if (padata->seq_nr != pd->processed) {
                spin_unlock(&reorder->lock);
                return NULL;
        }

        if (remove_object) {
                list_del_init(&padata->list);
                ++pd->processed;
                pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
        }

        spin_unlock(&reorder->lock);
        return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
        struct padata_instance *pinst = pd->ps->pinst;
        int cb_cpu;
        struct padata_priv *padata;
        struct padata_serial_queue *squeue;
        struct padata_parallel_queue *next_queue;

        /*
         * We need to ensure that only one cpu can work on dequeueing of
         * the reorder queue at a time. Calculating in which percpu reorder
         * queue the next object will arrive takes some time. A spinlock
         * would be highly contended. Also it is not clear in which order
         * the objects arrive at the reorder queues. So a cpu could wait to
         * get the lock just to notice that there is nothing to do at the
         * moment. Therefore we use a trylock and let the holder of the lock
         * care for all the objects enqueued during the holdtime of the lock.
         */
        if (!spin_trylock_bh(&pd->lock))
                return;

        while (1) {
                padata = padata_find_next(pd, true);

                /*
                 * If the next object that needs serialization is parallel
                 * processed by another cpu and is still on its way to the
                 * cpu's reorder queue, nothing to do for now.
                 */
                if (!padata)
                        break;

                cb_cpu = padata->cb_cpu;
                squeue = per_cpu_ptr(pd->squeue, cb_cpu);

                spin_lock(&squeue->serial.lock);
                list_add_tail(&padata->list, &squeue->serial.list);
                spin_unlock(&squeue->serial.lock);

                queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
        }

        spin_unlock_bh(&pd->lock);

        /*
         * The next object that needs serialization might have arrived at
         * the reorder queues in the meantime.
         *
         * Ensure reorder queue is read after pd->lock is dropped so we see
         * new objects from another task in padata_do_serial.  Pairs with
         * smp_mb__after_atomic in padata_do_serial.
         */
        smp_mb();

        next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
        if (!list_empty(&next_queue->reorder.list) &&
            padata_find_next(pd, false))
                queue_work(pinst->serial_wq, &pd->reorder_work);
}

static void invoke_padata_reorder(struct work_struct *work)
{
        struct parallel_data *pd;

        local_bh_disable();
        pd = container_of(work, struct parallel_data, reorder_work);
        padata_reorder(pd);
        local_bh_enable();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
        struct padata_serial_queue *squeue;
        struct parallel_data *pd;
        LIST_HEAD(local_list);
        int cnt;

        local_bh_disable();
        squeue = container_of(serial_work, struct padata_serial_queue, work);
        pd = squeue->pd;

        spin_lock(&squeue->serial.lock);
        list_replace_init(&squeue->serial.list, &local_list);
        spin_unlock(&squeue->serial.lock);

        cnt = 0;

        while (!list_empty(&local_list)) {
                struct padata_priv *padata;

                padata = list_entry(local_list.next,
                                    struct padata_priv, list);

                list_del_init(&padata->list);

                padata->serial(padata);
                cnt++;
        }
        local_bh_enable();

        if (atomic_sub_and_test(cnt, &pd->refcnt))
                padata_free_pd(pd);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
        struct parallel_data *pd = padata->pd;
        int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
        struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
                                                           hashed_cpu);
        struct padata_priv *cur;

        spin_lock(&pqueue->reorder.lock);
        /* Sort in ascending order of sequence number. */
        list_for_each_entry_reverse(cur, &pqueue->reorder.list, list)
                if (cur->seq_nr < padata->seq_nr)
                        break;
        list_add(&padata->list, &cur->list);
        spin_unlock(&pqueue->reorder.lock);

        /*
         * Ensure the addition to the reorder list is ordered correctly
         * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
         * in padata_reorder.
         */
        smp_mb__after_atomic();

        padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
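
/*
 * Worked example (illustrative): three objects submitted in order get
 * seq_nr 5, 6 and 7.  If the job with seq_nr 7 finishes first, it sits
 * in its hashed CPU's reorder list until padata_find_next() has handed
 * 5 and 6 to the serial workqueue; only once pd->processed advances
 * past 6 is 7 released, so serial callbacks always run in 5, 6, 7
 * order.
 */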

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
        struct workqueue_attrs *attrs;
        int err;

        attrs = alloc_workqueue_attrs();
        if (!attrs)
                return -ENOMEM;

        /* Restrict parallel_wq workers to pd->cpumask.pcpu. */
        cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
        err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
        free_workqueue_attrs(attrs);

        return err;
}

static int pd_setup_cpumasks(struct parallel_data *pd,
                             const struct cpumask *pcpumask,
                             const struct cpumask *cbcpumask)
{
        int err = -ENOMEM;

        if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
                goto out;
        if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
                goto free_pcpu_mask;

        cpumask_copy(pd->cpumask.pcpu, pcpumask);
        cpumask_copy(pd->cpumask.cbcpu, cbcpumask);

        return 0;

free_pcpu_mask:
        free_cpumask_var(pd->cpumask.pcpu);
out:
        return err;
}

static void __init padata_mt_helper(struct work_struct *w)
{
        struct padata_work *pw = container_of(w, struct padata_work, pw_work);
        struct padata_mt_job_state *ps = pw->pw_data;
        struct padata_mt_job *job = ps->job;
        bool done;

        spin_lock(&ps->lock);

        while (job->size > 0) {
                unsigned long start, size, end;

                start = job->start;
                /* So end is chunk size aligned if enough work remains. */
                size = roundup(start + 1, ps->chunk_size) - start;
                size = min(size, job->size);
                end = start + size;

                job->start = end;
                job->size -= size;

                spin_unlock(&ps->lock);
                job->thread_fn(start, end, job->fn_arg);
                spin_lock(&ps->lock);
        }

        ++ps->nworks_fini;
        done = (ps->nworks_fini == ps->nworks);
        spin_unlock(&ps->lock);

        if (done)
                complete(&ps->completion);
}

/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
        /* In case threads finish at different times. */
        static const unsigned long load_balance_factor = 4;
        struct padata_work my_work, *pw;
        struct padata_mt_job_state ps;
        LIST_HEAD(works);
        int nworks;

        if (job->size == 0)
                return;

        /* Ensure at least one thread when size < min_chunk. */
        nworks = max(job->size / job->min_chunk, 1ul);
        nworks = min(nworks, job->max_threads);

        if (nworks == 1) {
                /* Single thread, no coordination needed, cut to the chase. */
                job->thread_fn(job->start, job->start + job->size, job->fn_arg);
                return;
        }

        spin_lock_init(&ps.lock);
        init_completion(&ps.completion);
        ps.job         = job;
        ps.nworks      = padata_work_alloc_mt(nworks, &ps, &works);
        ps.nworks_fini = 0;

        /*
         * Chunk size is the amount of work a helper does per call to the
         * thread function.  Load balance large jobs between threads by
         * increasing the number of chunks, guarantee at least the minimum
         * chunk size from the caller, and honor the caller's alignment.
         */
        ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
        ps.chunk_size = max(ps.chunk_size, job->min_chunk);
        ps.chunk_size = roundup(ps.chunk_size, job->align);
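
        /*
         * Worked example (illustrative numbers): job->size = 1 << 20,
         * min_chunk = 1024, align = 1 and nworks = 4 give chunk_size =
         * 1048576 / (4 * 4) = 65536, already above min_chunk, so the job
         * is carved into 16 chunks that helpers claim one at a time,
         * smoothing over helpers that finish early.
         */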

        list_for_each_entry(pw, &works, pw_list)
                queue_work(system_unbound_wq, &pw->pw_work);

        /* Use the current thread, which saves starting a workqueue worker. */
        padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
        padata_mt_helper(&my_work.pw_work);

        /* Wait for all the helpers to finish. */
        wait_for_completion(&ps.completion);

        destroy_work_on_stack(&my_work.pw_work);
        padata_works_free(&works);
}
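
/*
 * Illustrative boot-time caller (a sketch; init_range and my_arg are
 * hypothetical names).  The helpers split [start, start + size) among
 * themselves in chunk_size pieces:
 *
 *	static void __init init_range(unsigned long start, unsigned long end,
 *				      void *arg)
 *	{
 *		... initialize items in [start, end) ...
 *	}
 *
 *	struct padata_mt_job job = {
 *		.thread_fn   = init_range,
 *		.fn_arg      = my_arg,
 *		.start       = first_index,
 *		.size        = nr_items,
 *		.align       = 1,
 *		.min_chunk   = 1024,
 *		.max_threads = num_online_cpus(),
 *	};
 *
 *	padata_do_multithreaded(&job);
 */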

static void __padata_list_init(struct padata_list *pd_list)
{
        INIT_LIST_HEAD(&pd_list->list);
        spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
        int cpu;
        struct padata_serial_queue *squeue;

        for_each_cpu(cpu, pd->cpumask.cbcpu) {
                squeue = per_cpu_ptr(pd->squeue, cpu);
                squeue->pd = pd;
                __padata_list_init(&squeue->serial);
                INIT_WORK(&squeue->work, padata_serial_worker);
        }
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
        int cpu;
        struct padata_parallel_queue *pqueue;

        for_each_cpu(cpu, pd->cpumask.pcpu) {
                pqueue = per_cpu_ptr(pd->pqueue, cpu);

                __padata_list_init(&pqueue->reorder);
                atomic_set(&pqueue->num_obj, 0);
        }
}

/* Allocate and initialize the internal cpumask-dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
        struct padata_instance *pinst = ps->pinst;
        const struct cpumask *cbcpumask;
        const struct cpumask *pcpumask;
        struct parallel_data *pd;

        cbcpumask = pinst->rcpumask.cbcpu;
        pcpumask = pinst->rcpumask.pcpu;

        pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
        if (!pd)
                goto err;

        pd->pqueue = alloc_percpu(struct padata_parallel_queue);
        if (!pd->pqueue)
                goto err_free_pd;

        pd->squeue = alloc_percpu(struct padata_serial_queue);
        if (!pd->squeue)
                goto err_free_pqueue;

        pd->ps = ps;
        if (pd_setup_cpumasks(pd, pcpumask, cbcpumask))
                goto err_free_squeue;

        padata_init_pqueues(pd);
        padata_init_squeues(pd);
        pd->seq_nr = -1;
        atomic_set(&pd->refcnt, 1);
        spin_lock_init(&pd->lock);
        pd->cpu = cpumask_first(pd->cpumask.pcpu);
        INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

        return pd;

err_free_squeue:
        free_percpu(pd->squeue);
err_free_pqueue:
        free_percpu(pd->pqueue);
err_free_pd:
        kfree(pd);
err:
        return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
        free_cpumask_var(pd->cpumask.pcpu);
        free_cpumask_var(pd->cpumask.cbcpu);
        free_percpu(pd->pqueue);
        free_percpu(pd->squeue);
        kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
        pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
        if (!(pinst->flags & PADATA_INIT))
                return;

        pinst->flags &= ~PADATA_INIT;

        synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
        struct parallel_data *pd_new;

        pd_new = padata_alloc_pd(ps);
        if (!pd_new)
                return -ENOMEM;

        ps->opd = rcu_dereference_protected(ps->pd, 1);
        rcu_assign_pointer(ps->pd, pd_new);

        return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
        struct padata_shell *ps;
        int err = 0;

        pinst->flags |= PADATA_RESET;

        cpumask_and(pinst->rcpumask.pcpu, pinst->cpumask.pcpu,
                    cpu_online_mask);

        cpumask_and(pinst->rcpumask.cbcpu, pinst->cpumask.cbcpu,
                    cpu_online_mask);

        list_for_each_entry(ps, &pinst->pslist, list) {
                err = padata_replace_one(ps);
                if (err)
                        break;
        }

        synchronize_rcu();

        list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
                if (atomic_dec_and_test(&ps->opd->refcnt))
                        padata_free_pd(ps->opd);

        pinst->flags &= ~PADATA_RESET;

        return err;
}

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
                                    const struct cpumask *cpumask)
{
        if (!cpumask_intersects(cpumask, cpu_online_mask)) {
                pinst->flags |= PADATA_INVALID;
                return false;
        }

        pinst->flags &= ~PADATA_INVALID;
        return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
                                 cpumask_var_t pcpumask,
                                 cpumask_var_t cbcpumask)
{
        int valid;
        int err;

        valid = padata_validate_cpumask(pinst, pcpumask);
        if (!valid) {
                __padata_stop(pinst);
                goto out_replace;
        }

        valid = padata_validate_cpumask(pinst, cbcpumask);
        if (!valid)
                __padata_stop(pinst);

out_replace:
        cpumask_copy(pinst->cpumask.pcpu, pcpumask);
        cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

        err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

        if (valid)
                __padata_start(pinst);

        return err;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to the
 *                      value of @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, selecting the
 *                parallel or the serial cpumask respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
                       cpumask_var_t cpumask)
{
        struct cpumask *serial_mask, *parallel_mask;
        int err = -EINVAL;

        get_online_cpus();
        mutex_lock(&pinst->lock);

        switch (cpumask_type) {
        case PADATA_CPU_PARALLEL:
                serial_mask = pinst->cpumask.cbcpu;
                parallel_mask = cpumask;
                break;
        case PADATA_CPU_SERIAL:
                parallel_mask = pinst->cpumask.pcpu;
                serial_mask = cpumask;
                break;
        default:
                goto out;
        }

        err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
        mutex_unlock(&pinst->lock);
        put_online_cpus();

        return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
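
/*
 * Illustrative caller (a sketch, error handling elided; "pinst" is an
 * instance from padata_alloc_possible()), restricting parallel workers
 * to CPUs 0 and 1:
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	cpumask_set_cpu(0, mask);
 *	cpumask_set_cpu(1, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */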

/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 *
 * Return: 0 on success or negative error code
 */
int padata_start(struct padata_instance *pinst)
{
        int err = 0;

        mutex_lock(&pinst->lock);

        if (pinst->flags & PADATA_INVALID)
                err = -EINVAL;

        __padata_start(pinst);

        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
        mutex_lock(&pinst->lock);
        __padata_stop(pinst);
        mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
        int err = 0;

        if (cpumask_test_cpu(cpu, cpu_online_mask)) {
                err = padata_replace(pinst);

                if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
                    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
                        __padata_start(pinst);
        }

        return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
        int err = 0;

        if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
                if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
                    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
                        __padata_stop(pinst);

                err = padata_replace(pinst);
        }

        return err;
}

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
        return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
                cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
        struct padata_instance *pinst;
        int ret;

        pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
        if (!pinst_has_cpu(pinst, cpu))
                return 0;

        mutex_lock(&pinst->lock);
        ret = __padata_add_cpu(pinst, cpu);
        mutex_unlock(&pinst->lock);
        return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
        struct padata_instance *pinst;
        int ret;

        pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
        if (!pinst_has_cpu(pinst, cpu))
                return 0;

        mutex_lock(&pinst->lock);
        ret = __padata_remove_cpu(pinst, cpu);
        mutex_unlock(&pinst->lock);
        return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
        cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
                                            &pinst->cpu_dead_node);
        cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

        WARN_ON(!list_empty(&pinst->pslist));

        padata_stop(pinst);
        free_cpumask_var(pinst->rcpumask.cbcpu);
        free_cpumask_var(pinst->rcpumask.pcpu);
        free_cpumask_var(pinst->cpumask.pcpu);
        free_cpumask_var(pinst->cpumask.cbcpu);
        destroy_workqueue(pinst->serial_wq);
        destroy_workqueue(pinst->parallel_wq);
        kfree(pinst);
}

#define kobj2pinst(_kobj)                                       \
        container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)                                      \
        container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
        struct padata_instance *pinst = kobj2pinst(kobj);

        __padata_free(pinst);
}

struct padata_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
        ssize_t (*store)(struct padata_instance *, struct attribute *,
                         const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
                            struct attribute *attr, char *buf)
{
        struct cpumask *cpumask;
        ssize_t len;

        mutex_lock(&pinst->lock);
        if (!strcmp(attr->name, "serial_cpumask"))
                cpumask = pinst->cpumask.cbcpu;
        else
                cpumask = pinst->cpumask.pcpu;

        len = snprintf(buf, PAGE_SIZE, "%*pb\n",
                       nr_cpu_ids, cpumask_bits(cpumask));
        mutex_unlock(&pinst->lock);
        return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
                             struct attribute *attr,
                             const char *buf, size_t count)
{
        cpumask_var_t new_cpumask;
        ssize_t ret;
        int mask_type;

        if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
                return -ENOMEM;

        ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
                           nr_cpumask_bits);
        if (ret < 0)
                goto out;

        mask_type = !strcmp(attr->name, "serial_cpumask") ?
                PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
        ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
        if (!ret)
                ret = count;

out:
        free_cpumask_var(new_cpumask);
        return ret;
}
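
/*
 * The same masks are reachable from userspace through the instance's
 * kobject, e.g. for pcrypt's "pencrypt" instance (path illustrative,
 * see Documentation/core-api/padata.rst):
 *
 *	$ cat /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *	$ echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 */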

#define PADATA_ATTR_RW(_name, _show_name, _store_name)          \
        static struct padata_sysfs_entry _name##_attr =         \
                __ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)               \
        static struct padata_sysfs_entry _name##_attr = \
                __ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
        &serial_cpumask_attr.attr,
        &parallel_cpumask_attr.attr,
        NULL,
};

static ssize_t padata_sysfs_show(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        struct padata_instance *pinst;
        struct padata_sysfs_entry *pentry;
        ssize_t ret = -EIO;

        pinst = kobj2pinst(kobj);
        pentry = attr2pentry(attr);
        if (pentry->show)
                ret = pentry->show(pinst, attr, buf);

        return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *buf, size_t count)
{
        struct padata_instance *pinst;
        struct padata_sysfs_entry *pentry;
        ssize_t ret = -EIO;

        pinst = kobj2pinst(kobj);
        pentry = attr2pentry(attr);
        if (pentry->store)
                ret = pentry->store(pinst, attr, buf, count);

        return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
        .show = padata_sysfs_show,
        .store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
        .sysfs_ops = &padata_sysfs_ops,
        .default_attrs = padata_default_attrs,
        .release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @name: used to identify the instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 *
 * Return: new instance on success, NULL on error
 */
static struct padata_instance *padata_alloc(const char *name,
                                            const struct cpumask *pcpumask,
                                            const struct cpumask *cbcpumask)
{
        struct padata_instance *pinst;

        pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
        if (!pinst)
                goto err;

        pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
                                             name);
        if (!pinst->parallel_wq)
                goto err_free_inst;

        get_online_cpus();

        pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
                                           WQ_CPU_INTENSIVE, 1, name);
        if (!pinst->serial_wq)
                goto err_put_cpus;

        if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
                goto err_free_serial_wq;
        if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
                free_cpumask_var(pinst->cpumask.pcpu);
                goto err_free_serial_wq;
        }
        if (!padata_validate_cpumask(pinst, pcpumask) ||
            !padata_validate_cpumask(pinst, cbcpumask))
                goto err_free_masks;

        if (!alloc_cpumask_var(&pinst->rcpumask.pcpu, GFP_KERNEL))
                goto err_free_masks;
        if (!alloc_cpumask_var(&pinst->rcpumask.cbcpu, GFP_KERNEL))
                goto err_free_rcpumask_pcpu;

        INIT_LIST_HEAD(&pinst->pslist);

        cpumask_copy(pinst->cpumask.pcpu, pcpumask);
        cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
        cpumask_and(pinst->rcpumask.pcpu, pcpumask, cpu_online_mask);
        cpumask_and(pinst->rcpumask.cbcpu, cbcpumask, cpu_online_mask);

        if (padata_setup_cpumasks(pinst))
                goto err_free_rcpumask_cbcpu;

        pinst->flags = 0;

        kobject_init(&pinst->kobj, &padata_attr_type);
        mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
        cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
                                                    &pinst->cpu_online_node);
        cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
                                                    &pinst->cpu_dead_node);
#endif

        put_online_cpus();

        return pinst;

err_free_rcpumask_cbcpu:
        free_cpumask_var(pinst->rcpumask.cbcpu);
err_free_rcpumask_pcpu:
        free_cpumask_var(pinst->rcpumask.pcpu);
err_free_masks:
        free_cpumask_var(pinst->cpumask.pcpu);
        free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
        destroy_workqueue(pinst->serial_wq);
err_put_cpus:
        put_online_cpus();
        destroy_workqueue(pinst->parallel_wq);
err_free_inst:
        kfree(pinst);
err:
        return NULL;
}

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @name: used to identify the instance
 *
 * Return: new instance on success, NULL on error
 */
struct padata_instance *padata_alloc_possible(const char *name)
{
        return padata_alloc(name, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
        kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 *
 * Return: new shell on success, NULL on error
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
        struct parallel_data *pd;
        struct padata_shell *ps;

        ps = kzalloc(sizeof(*ps), GFP_KERNEL);
        if (!ps)
                goto out;

        ps->pinst = pinst;

        get_online_cpus();
        pd = padata_alloc_pd(ps);
        put_online_cpus();

        if (!pd)
                goto out_free_ps;

        mutex_lock(&pinst->lock);
        RCU_INIT_POINTER(ps->pd, pd);
        list_add(&ps->list, &pinst->pslist);
        mutex_unlock(&pinst->lock);

        return ps;

out_free_ps:
        kfree(ps);
out:
        return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);
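
/*
 * Typical instance lifecycle (a sketch; error handling elided, "myjob"
 * is a hypothetical instance name):
 *
 *	struct padata_instance *pinst = padata_alloc_possible("myjob");
 *	struct padata_shell *ps = padata_alloc_shell(pinst);
 *
 *	padata_start(pinst);
 *	... submit objects with padata_do_parallel(ps, ...) ...
 *	padata_stop(pinst);
 *
 *	padata_free_shell(ps);
 *	padata_free(pinst);
 */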

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
        struct padata_instance *pinst = ps->pinst;

        mutex_lock(&pinst->lock);
        list_del(&ps->list);
        padata_free_pd(rcu_dereference_protected(ps->pd, 1));
        mutex_unlock(&pinst->lock);

        kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);

void __init padata_init(void)
{
        unsigned int i, possible_cpus;
#ifdef CONFIG_HOTPLUG_CPU
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
                                      padata_cpu_online, NULL);
        if (ret < 0)
                goto err;
        hp_online = ret;

        ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
                                      NULL, padata_cpu_dead);
        if (ret < 0)
                goto remove_online_state;
#endif

        possible_cpus = num_possible_cpus();
        padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
                                     GFP_KERNEL);
        if (!padata_works)
                goto remove_dead_state;

        for (i = 0; i < possible_cpus; ++i)
                list_add(&padata_works[i].pw_list, &padata_free_works);

        return;

remove_dead_state:
#ifdef CONFIG_HOTPLUG_CPU
        cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
        cpuhp_remove_multi_state(hp_online);
err:
#endif
        pr_warn("padata: initialization failed\n");
}