linux/fs/xfs/support/ktrace.c
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <xfs.h>

static kmem_zone_t *ktrace_hdr_zone;
static kmem_zone_t *ktrace_ent_zone;
static int          ktrace_zentries;

void __init
ktrace_init(int zentries)
{
        ktrace_zentries = roundup_pow_of_two(zentries);

        ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
                                        "ktrace_hdr");
        ASSERT(ktrace_hdr_zone);

        ktrace_ent_zone = kmem_zone_init(ktrace_zentries
                                        * sizeof(ktrace_entry_t),
                                        "ktrace_ent");
        ASSERT(ktrace_ent_zone);
}

void __exit
ktrace_uninit(void)
{
        kmem_zone_destroy(ktrace_hdr_zone);
        kmem_zone_destroy(ktrace_ent_zone);
}

/*
 * ktrace_alloc()
 *
 * Allocate a ktrace header and enough buffering for the given
 * number of entries. Round the number of entries up to a
 * power of 2 so we can do fast masking to get the index from
 * the atomic index counter.
 */
ktrace_t *
ktrace_alloc(int nentries, unsigned int __nocast sleep)
{
        ktrace_t        *ktp;
        ktrace_entry_t  *ktep;
        int             entries;

        ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);

        if (ktp == (ktrace_t*)NULL) {
                /*
                 * KM_SLEEP callers don't expect failure.
                 */
                if (sleep & KM_SLEEP)
                        panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

                return NULL;
        }

        /*
         * Special treatment for buffers with the default (ktrace_zentries)
         * number of entries: they come from their own zone, anything else
         * is allocated with kmem_zalloc().
         */
        entries = roundup_pow_of_two(nentries);
        if (entries == ktrace_zentries) {
                ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
                                                            sleep);
        } else {
                ktep = (ktrace_entry_t*)kmem_zalloc((entries * sizeof(*ktep)),
                                                            sleep | KM_LARGE);
        }

        if (ktep == NULL) {
                /*
                 * KM_SLEEP callers don't expect failure.
                 */
                if (sleep & KM_SLEEP)
                        panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

                kmem_free(ktp);

                return NULL;
        }

        ktp->kt_entries  = ktep;
        ktp->kt_nentries = entries;
        ASSERT(is_power_of_2(entries));
        ktp->kt_index_mask = entries - 1;
        atomic_set(&ktp->kt_index, 0);
        ktp->kt_rollover = 0;
        return ktp;
}
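
/*
 * Usage sketch (illustrative, not taken from any caller): the entry count
 * is rounded up to a power of two, so a request for 1000 entries produces
 * a 1024-entry buffer with kt_index_mask == 0x3ff, and ktrace_enter() can
 * pick a slot by masking the atomic counter.  The "examp_ktp" name below
 * is made up for the example:
 *
 *        ktrace_t        *examp_ktp;
 *
 *        examp_ktp = ktrace_alloc(1000, KM_SLEEP);
 *        ...
 *        ktrace_free(examp_ktp);
 */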

/*
 * ktrace_free()
 *
 * Free up the ktrace header and buffer.  It is up to the caller
 * to ensure that no-one is referencing it.
 */
void
ktrace_free(ktrace_t *ktp)
{
        if (ktp == (ktrace_t *)NULL)
                return;

        /*
         * Buffers with the default (ktrace_zentries) number of entries
         * came from the entry zone; any other size was kmem_zalloc()ed.
         */
        if (ktp->kt_nentries == ktrace_zentries)
                kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
        else
                kmem_free(ktp->kt_entries);

        kmem_zone_free(ktrace_hdr_zone, ktp);
}

/*
 * Enter the given values into the "next" entry in the trace buffer.
 * kt_index is always the index of the next entry to be filled.
 */
void
ktrace_enter(
        ktrace_t        *ktp,
        void            *val0,
        void            *val1,
        void            *val2,
        void            *val3,
        void            *val4,
        void            *val5,
        void            *val6,
        void            *val7,
        void            *val8,
        void            *val9,
        void            *val10,
        void            *val11,
        void            *val12,
        void            *val13,
        void            *val14,
        void            *val15)
{
        int             index;
        ktrace_entry_t  *ktep;

        ASSERT(ktp != NULL);

        /*
         * Grab an entry by pushing the index up to the next one.
         */
        index = atomic_add_return(1, &ktp->kt_index);
        index = (index - 1) & ktp->kt_index_mask;
        if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
                ktp->kt_rollover = 1;

        ASSERT((index >= 0) && (index < ktp->kt_nentries));

        ktep = &(ktp->kt_entries[index]);

        ktep->val[0]  = val0;
        ktep->val[1]  = val1;
        ktep->val[2]  = val2;
        ktep->val[3]  = val3;
        ktep->val[4]  = val4;
        ktep->val[5]  = val5;
        ktep->val[6]  = val6;
        ktep->val[7]  = val7;
        ktep->val[8]  = val8;
        ktep->val[9]  = val9;
        ktep->val[10] = val10;
        ktep->val[11] = val11;
        ktep->val[12] = val12;
        ktep->val[13] = val13;
        ktep->val[14] = val14;
        ktep->val[15] = val15;
}
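
/*
 * Usage sketch (illustrative): callers typically hide the sixteen value
 * slots behind a macro and pad the unused ones with NULL.  The event tag
 * and inode pointer names below are made up for the example:
 *
 *        ktrace_enter(examp_ktp,
 *                     (void *)(__psint_t)EXAMP_EVENT_TAG,
 *                     (void *)examp_ip,
 *                     NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 *                     NULL, NULL, NULL, NULL, NULL, NULL, NULL);
 */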

/*
 * Return the number of entries in the trace buffer.
 */
int
ktrace_nentries(
        ktrace_t        *ktp)
{
        int     index;
        if (ktp == NULL)
                return 0;

        index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
        return (ktp->kt_rollover ? ktp->kt_nentries : index);
}

/*
 * ktrace_first()
 *
 * This is used to find the start of the trace buffer.
 * In conjunction with ktrace_next() it can be used to
 * iterate through the entire trace buffer.  This code does
 * not do any locking because it is assumed that it is called
 * from the debugger.
 *
 * The caller must pass in a pointer to a ktrace_snap
 * structure in which we will keep some state used to
 * iterate through the buffer.  This state must not be touched
 * by any code outside of this module.
 */
ktrace_entry_t *
ktrace_first(ktrace_t   *ktp, ktrace_snap_t     *ktsp)
{
        ktrace_entry_t  *ktep;
        int             index;
        int             nentries;

        if (ktp->kt_rollover)
                index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
        else
                index = 0;

        ktsp->ks_start = index;
        ktep = &(ktp->kt_entries[index]);

        nentries = ktrace_nentries(ktp);
        index++;
        if (index < nentries) {
                ktsp->ks_index = index;
        } else {
                ktsp->ks_index = 0;
                if (index > nentries)
                        ktep = NULL;
        }
        return ktep;
}

/*
 * ktrace_next()
 *
 * This is used to iterate through the entries of the given
 * trace buffer.  The caller must pass in the ktrace_snap_t
 * structure initialized by ktrace_first().  The return value
 * will be either a pointer to the next ktrace_entry or NULL
 * if all of the entries have been traversed.
 */
ktrace_entry_t *
ktrace_next(
        ktrace_t        *ktp,
        ktrace_snap_t   *ktsp)
{
        int             index;
        ktrace_entry_t  *ktep;

        index = ktsp->ks_index;
        if (index == ktsp->ks_start) {
                ktep = NULL;
        } else {
                ktep = &ktp->kt_entries[index];
        }

        index++;
        if (index == ktrace_nentries(ktp)) {
                ktsp->ks_index = 0;
        } else {
                ktsp->ks_index = index;
        }

        return ktep;
}
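
/*
 * Usage sketch (illustrative): walking a whole buffer oldest-to-newest
 * with ktrace_first()/ktrace_next().  "examp_ktp" is made up; the
 * snapshot lives on the caller's stack:
 *
 *        ktrace_snap_t   snap;
 *        ktrace_entry_t  *ktep;
 *
 *        for (ktep = ktrace_first(examp_ktp, &snap);
 *             ktep != NULL;
 *             ktep = ktrace_next(examp_ktp, &snap)) {
 *                printk("%p %p\n", ktep->val[0], ktep->val[1]);
 *        }
 */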

/*
 * ktrace_skip()
 *
 * Skip the next "count" entries and return the entry after that.
 * Return NULL if this causes us to iterate past the beginning again.
 */
ktrace_entry_t *
ktrace_skip(
        ktrace_t        *ktp,
        int             count,
        ktrace_snap_t   *ktsp)
{
        int             index;
        int             new_index;
        ktrace_entry_t  *ktep;
        int             nentries = ktrace_nentries(ktp);

        index = ktsp->ks_index;
        new_index = index + count;
        while (new_index >= nentries) {
                new_index -= nentries;
        }
        if (index == ktsp->ks_start) {
                /*
                 * We've iterated around to the start, so we're done.
                 */
                ktep = NULL;
        } else if ((new_index < index) && (index < ktsp->ks_index)) {
                /*
                 * We've skipped past the start again, so we're done.
                 */
                ktep = NULL;
                ktsp->ks_index = ktsp->ks_start;
        } else {
                ktep = &(ktp->kt_entries[new_index]);
                new_index++;
                if (new_index == nentries) {
                        ktsp->ks_index = 0;
                } else {
                        ktsp->ks_index = new_index;
                }
        }
        return ktep;
}
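
/*
 * Usage sketch (illustrative): skipping over entries that are not of
 * interest while walking the buffer, e.g. jumping ten entries past the
 * oldest one.  "examp_ktp" is made up for the example:
 *
 *        ktrace_snap_t   snap;
 *        ktrace_entry_t  *ktep;
 *
 *        ktep = ktrace_first(examp_ktp, &snap);
 *        if (ktep != NULL)
 *                ktep = ktrace_skip(examp_ktp, 10, &snap);
 */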