/* linux/kernel/bpf/percpu_freelist.c */
   1/* Copyright (c) 2016 Facebook
   2 *
   3 * This program is free software; you can redistribute it and/or
   4 * modify it under the terms of version 2 of the GNU General Public
   5 * License as published by the Free Software Foundation.
   6 */
   7#include "percpu_freelist.h"
   8
   9int pcpu_freelist_init(struct pcpu_freelist *s)
  10{
  11        int cpu;
  12
  13        s->freelist = alloc_percpu(struct pcpu_freelist_head);
  14        if (!s->freelist)
  15                return -ENOMEM;
  16
  17        for_each_possible_cpu(cpu) {
  18                struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);
  19
  20                raw_spin_lock_init(&head->lock);
  21                head->first = NULL;
  22        }
  23        return 0;
  24}
  25
  26void pcpu_freelist_destroy(struct pcpu_freelist *s)
  27{
  28        free_percpu(s->freelist);
  29}
  30
/* Link @node at the front of @head's singly-linked list, under
 * head->lock.
 *
 * Uses the non-irqsave lock variant.
 * NOTE(review): that assumes irqs are already off, or that no irq/NMI
 * context can contend for the same head on this cpu — confirm against
 * all callers (pcpu_freelist_push runs without disabling irqs itself).
 */
static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
					struct pcpu_freelist_node *node)
{
	raw_spin_lock(&head->lock);
	node->next = head->first;
	head->first = node;
	raw_spin_unlock(&head->lock);
}
  39
  40void pcpu_freelist_push(struct pcpu_freelist *s,
  41                        struct pcpu_freelist_node *node)
  42{
  43        struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
  44
  45        __pcpu_freelist_push(head, node);
  46}
  47
  48void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
  49                            u32 nr_elems)
  50{
  51        struct pcpu_freelist_head *head;
  52        unsigned long flags;
  53        int i, cpu, pcpu_entries;
  54
  55        pcpu_entries = nr_elems / num_possible_cpus() + 1;
  56        i = 0;
  57
  58        /* disable irq to workaround lockdep false positive
  59         * in bpf usage pcpu_freelist_populate() will never race
  60         * with pcpu_freelist_push()
  61         */
  62        local_irq_save(flags);
  63        for_each_possible_cpu(cpu) {
  64again:
  65                head = per_cpu_ptr(s->freelist, cpu);
  66                __pcpu_freelist_push(head, buf);
  67                i++;
  68                buf += elem_size;
  69                if (i == nr_elems)
  70                        break;
  71                if (i % pcpu_entries)
  72                        goto again;
  73        }
  74        local_irq_restore(flags);
  75}
  76
/* Pop one node, preferring the current cpu's list and then scanning
 * the remaining possible cpus in a wrap-around sweep.  Returns NULL
 * only after every possible cpu's list was found empty.
 *
 * irqs are disabled up front so the per-head raw spinlocks can be
 * taken with the plain (non-irqsave) variants inside the loop.
 */
struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	unsigned long flags;
	int orig_cpu, cpu;

	local_irq_save(flags);
	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		head = per_cpu_ptr(s->freelist, cpu);
		raw_spin_lock(&head->lock);
		node = head->first;
		if (node) {
			head->first = node->next;
			/* success path: drop the lock and restore irqs in
			 * one call, using the flags saved at entry
			 */
			raw_spin_unlock_irqrestore(&head->lock, flags);
			return node;
		}
		/* empty list: plain unlock, irqs stay off for the next cpu */
		raw_spin_unlock(&head->lock);
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;
		/* back at the starting cpu: every list was empty */
		if (cpu == orig_cpu) {
			local_irq_restore(flags);
			return NULL;
		}
	}
}
 105