/* isa-skeleton.c: A network driver outline for linux.
 *
 *	Written 1993-94 by Donald Becker.
 *
 *	Copyright 1993 United States Government as represented by the
 *	Director, National Security Agency.
 *
 *	This software may be used and distributed according to the terms
 *	of the GNU General Public License, incorporated herein by reference.
 *
 *	The author may be reached as becker@scyld.com, or C/O
 *	Scyld Computing Corporation
 *	410 Severn Ave., Suite 210
 *	Annapolis MD 21403
 *
 *	This file is an outline for writing a network device driver for the
 *	Linux operating system.
 *
 *	To write (or understand) a driver, have a look at the "loopback.c" file
 *	to get a feel for what is going on, and then use the code below as a
 *	skeleton for the new driver.
 *
 */

static const char *version =
	"isa-skeleton.c:v1.51 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";

/*
 *  Sources:
 *	List your sources of programming information to document that
 *	the driver is your own creation, and give due credit to others
 *	that contributed to the work. Remember that GNU project code
 *	cannot use proprietary or trade secret information. Interface
 *	definitions are generally considered non-copyrightable to the
 *	extent that the same names and structures must be used to be
 *	compatible.
 *
 *	Finally, keep in mind that the Linux kernel has an API, not an
 *	ABI. Proprietary object-code-only distributions are not permitted
 *	under the GPL.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/dma.h>

/*
 * The name of the card. It is used for messages and in the requests for
 * I/O regions, IRQs and DMA channels.
 */
static const char* cardname = "netcard";

/* First, a few definitions that the brave might change. */

/* A zero-terminated list of I/O addresses to be probed. */
static unsigned int netcard_portlist[] __initdata =
   { 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0};

/* Use 0 for production, 1 for verification, >= 2 for debug. */
#ifndef NET_DEBUG
#define NET_DEBUG 2
#endif
static unsigned int net_debug = NET_DEBUG;

/* The number of low I/O ports used by the ethercard. */
#define NETCARD_IO_EXTENT	32

#define MY_TX_TIMEOUT	((400*HZ)/1000)

/* Information that needs to be kept for each board. */
struct net_local {
	struct net_device_stats stats;
	long open_time;			/* Useless example local info. */

	/* Tx control lock.  This protects the transmit buffer ring
	 * state along with the "tx full" state of the driver.  This
	 * means all netif_queue flow control actions are protected
	 * by this lock as well.
	 */
	spinlock_t lock;
};
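
/*
 * The TX_RING code paths further down keep per-slot state in net_local
 * (np->tx_old, np->skbs[] and so on) but this outline never declares it.
 * A minimal sketch of what such fields might look like -- the names, the
 * ring size and the layout are illustrative assumptions, not part of the
 * original outline:
 *
 *	#define TX_RING_SIZE 16			   ring slots, power of two
 *	struct sk_buff *skbs[TX_RING_SIZE];	   skb pending in each slot
 *	unsigned int tx_new, tx_old;		   producer / consumer indices
 *
 * Define TX_RING to 1 only when your hardware really has a transmit ring;
 * the default below keeps the single-buffer PIO path selected.
 */
#ifndef TX_RING
#define TX_RING 0
#endif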

/* The station (ethernet) address prefix, used for IDing the board. */
#define SA_ADDR0 0x00
#define SA_ADDR1 0x42
#define SA_ADDR2 0x65

/* Index to functions, as function prototypes. */

static int	netcard_probe1(struct net_device *dev, int ioaddr);
static int	net_open(struct net_device *dev);
static int	net_send_packet(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t net_interrupt(int irq, void *dev_id);
static void	net_rx(struct net_device *dev);
static int	net_close(struct net_device *dev);
static struct	net_device_stats *net_get_stats(struct net_device *dev);
static void	set_multicast_list(struct net_device *dev);
static void	net_tx_timeout(struct net_device *dev);


/* Example routines you must write ;->. */
#define tx_done(dev) 1
static void	hardware_send_packet(short ioaddr, char *buf, int length);
static void	chipset_init(struct net_device *dev, int startp);
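
/*
 * Not in the original outline: tx_full() is used below by net_tx_timeout()
 * and by the TX_RING transmit path but, like the routines above, it is
 * yours to write.  A placeholder so the references read consistently; a
 * real implementation would test the occupancy of your transmit ring:
 */
#define tx_full(dev)	0	/* placeholder: pretend there is always room */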

/*
 * Check for a network adaptor of this type, and return '0' iff one exists.
 * If dev->base_addr == 0, probe all likely locations.
 * If dev->base_addr == 1, always return failure.
 * If dev->base_addr == 2, allocate space for the device and return success
 * (detachable devices only).
 */
static int __init do_netcard_probe(struct net_device *dev)
{
	int i;
	int base_addr = dev->base_addr;
	int irq = dev->irq;

	if (base_addr > 0x1ff)    /* Check a single specified location. */
		return netcard_probe1(dev, base_addr);
	else if (base_addr != 0)  /* Don't probe at all. */
		return -ENXIO;

	for (i = 0; netcard_portlist[i]; i++) {
		int ioaddr = netcard_portlist[i];
		if (netcard_probe1(dev, ioaddr) == 0)
			return 0;
		dev->irq = irq;
	}

	return -ENODEV;
}

static void cleanup_card(struct net_device *dev)
{
#ifdef jumpered_dma
	free_dma(dev->dma);
#endif
#ifdef jumpered_interrupts
	free_irq(dev->irq, dev);
#endif
	release_region(dev->base_addr, NETCARD_IO_EXTENT);
}

#ifndef MODULE
struct net_device * __init netcard_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
	int err;

	if (!dev)
		return ERR_PTR(-ENOMEM);

	sprintf(dev->name, "eth%d", unit);
	netdev_boot_setup_check(dev);

	err = do_netcard_probe(dev);
	if (err)
		goto out;
	return dev;
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
#endif

/*
 * This is the real probe routine. Linux has a history of friendly device
 * probes on the ISA bus. A good device probe avoids doing writes, and
 * verifies that the correct device exists and functions.
 */
static int __init netcard_probe1(struct net_device *dev, int ioaddr)
{
	struct net_local *np;
	static unsigned version_printed;
	int i;
	int err = -ENODEV;
	DECLARE_MAC_BUF(mac);

	/* Grab the region so that no one else tries to probe our ioports. */
	if (!request_region(ioaddr, NETCARD_IO_EXTENT, cardname))
		return -EBUSY;

	/*
	 * For ethernet adaptors the first three octets of the station address
	 * contain the manufacturer's unique code. That might be a good probe
	 * method. Ideally you would add additional checks.
	 */
	if (inb(ioaddr + 0) != SA_ADDR0 ||
	    inb(ioaddr + 1) != SA_ADDR1 ||
	    inb(ioaddr + 2) != SA_ADDR2)
		goto out;

	if (net_debug && version_printed++ == 0)
		printk(KERN_DEBUG "%s", version);

	printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cardname, ioaddr);

	/* Fill in the 'dev' fields. */
	dev->base_addr = ioaddr;

	/* Retrieve and print the ethernet address. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = inb(ioaddr + i);

	printk("%s", print_mac(mac, dev->dev_addr));

	err = -EAGAIN;
#ifdef jumpered_interrupts
	/*
	 * If this board has jumpered interrupts, allocate the interrupt
	 * vector now. There is no point in waiting since no other device
	 * can use the interrupt, and this marks the irq as busy. Jumpered
	 * interrupts are typically not reported by the boards, and we must
	 * use autoIRQ to find them.
	 */

	if (dev->irq == -1)
		;	/* Do nothing: a user-level program will set it. */
	else if (dev->irq < 2) {	/* "Auto-IRQ" */
		unsigned long irq_mask = probe_irq_on();
		/* Trigger an interrupt here. */
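		/*
		 * Example only -- the register offset and value below are
		 * hypothetical; poke whatever makes *your* card assert its
		 * interrupt line, e.g.
		 *
		 *	outb(0x01, ioaddr + 4);		force a test interrupt
		 *	mdelay(10);			let the line settle
		 */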

		dev->irq = probe_irq_off(irq_mask);
		if (net_debug >= 2)
			printk(" autoirq is %d", dev->irq);
	} else if (dev->irq == 2)
		/*
		 * Fixup for users that don't know that IRQ 2 is really
		 * IRQ 9, or don't know which one to set.
		 */
		dev->irq = 9;

	{
		int irqval = request_irq(dev->irq, &net_interrupt, 0, cardname, dev);
		if (irqval) {
			printk("%s: unable to get IRQ %d (irqval=%d).\n",
				   dev->name, dev->irq, irqval);
			goto out;
		}
	}
#endif	/* jumpered interrupt */
#ifdef jumpered_dma
	/*
	 * If we use a jumpered DMA channel, that should be probed for and
	 * allocated here as well. See lance.c for an example.
	 */
	if (dev->dma) {
		if (request_dma(dev->dma, cardname)) {
			printk("DMA %d allocation failed.\n", dev->dma);
			goto out1;
		} else
			printk(", assigned DMA %d.\n", dev->dma);
	} else {
		unsigned long dma_status, new_dma_status;

		/* Read the DMA channel status registers. */
		dma_status = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
			(inb(DMA2_STAT_REG) & 0xf0);
		/* Trigger a DMA request, perhaps pause a bit. */
		outw(0x1234, ioaddr + 8);
		/* Re-read the DMA status registers. */
		new_dma_status = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
			(inb(DMA2_STAT_REG) & 0xf0);
		/*
		 * Eliminate the old and floating requests,
		 * and DMA4, the cascade channel.
		 */
		new_dma_status ^= dma_status;
		new_dma_status &= ~0x10;
		for (i = 7; i > 0; i--)
			if (test_bit(i, &new_dma_status)) {
				dev->dma = i;
				break;
			}
		if (i <= 0) {
			printk("DMA probe failed.\n");
			goto out1;
		}
		if (request_dma(dev->dma, cardname)) {
			printk("probed DMA %d allocation failed.\n", dev->dma);
			goto out1;
		}
	}
#endif	/* jumpered DMA */

	np = netdev_priv(dev);
	spin_lock_init(&np->lock);

	dev->open		= net_open;
	dev->stop		= net_close;
	dev->hard_start_xmit	= net_send_packet;
	dev->get_stats		= net_get_stats;
	dev->set_multicast_list	= set_multicast_list;

	dev->tx_timeout		= net_tx_timeout;
	dev->watchdog_timeo	= MY_TX_TIMEOUT;

	err = register_netdev(dev);
	if (err)
		goto out2;
	return 0;
out2:
#ifdef jumpered_dma
	free_dma(dev->dma);
#endif
out1:
#ifdef jumpered_interrupts
	free_irq(dev->irq, dev);
#endif
out:
	release_region(ioaddr, NETCARD_IO_EXTENT);
	return err;
}

static void net_tx_timeout(struct net_device *dev)
{
	struct net_local *np = netdev_priv(dev);

	printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
	       tx_done(dev) ? "IRQ conflict" : "network cable problem");

	/* Try to restart the adaptor. */
	chipset_init(dev, 1);

	np->stats.tx_errors++;

	/* If we have space available to accept new transmit
	 * requests, wake up the queueing layer.  This would
	 * be the case if the chipset_init() call above just
	 * flushes out the tx queue and empties it.
	 *
	 * If instead the tx queue is retained, then the
	 * netif_wake_queue() call should be placed in the
	 * TX completion interrupt handler of the driver instead
	 * of here.
	 */
	if (!tx_full(dev))
		netif_wake_queue(dev);
}

/*
 * Open/initialize the board. This is called (in the current kernel)
 * sometime after booting when the 'ifconfig' program is run.
 *
 * This routine should set everything up anew at each open, even
 * registers that "should" only need to be set once at boot, so that
 * there is a non-reboot way to recover if something goes wrong.
 */
static int
net_open(struct net_device *dev)
{
	struct net_local *np = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	/*
	 * This is used if the interrupt line can be turned off (shared).
	 * See 3c503.c for an example of selecting the IRQ at config-time.
	 */
	if (request_irq(dev->irq, &net_interrupt, 0, cardname, dev)) {
		return -EAGAIN;
	}
	/*
	 * Always allocate the DMA channel after the IRQ,
	 * and clean up on failure.
	 */
	if (request_dma(dev->dma, cardname)) {
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Reset the hardware here. Don't forget to set the station address. */
	chipset_init(dev, 1);
	outb(0x00, ioaddr);
	np->open_time = jiffies;

	/* We are now ready to accept transmit requests from
	 * the queueing layer of the networking.
	 */
	netif_start_queue(dev);

	return 0;
}

/* This will only be invoked if your driver is _not_ in XOFF state.
 * What this means is that you need not check it, and that this
 * invariant will hold if you make sure that the netif_*_queue()
 * calls are done at the proper times.
 */
static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *np = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
	unsigned char *buf = skb->data;

	/* If some error occurs while trying to transmit this
	 * packet, you should return '1' from this function.
	 * In such a case you _may not_ do anything to the
	 * SKB, it is still owned by the network queueing
	 * layer when an error is returned.  This means you
	 * may not modify any SKB fields, you may not free
	 * the SKB, etc.
	 */
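	/* For instance (sketch only; NETDEV_TX_BUSY is the kernel's name for
	 * that '1', and hw_tx_busy() is a hypothetical per-card check):
	 *
	 *	if (hw_tx_busy(ioaddr))
	 *		return NETDEV_TX_BUSY;	 the stack will retry the skb
	 */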

#if TX_RING
	/* This is the most common case for modern hardware.
	 * The spinlock protects this code from the TX complete
	 * hardware interrupt handler.  Queue flow control is
	 * thus managed under this lock as well.
	 */
	spin_lock_irq(&np->lock);

	add_to_tx_ring(np, skb, length);
	dev->trans_start = jiffies;

	/* If we just used up the very last entry in the
	 * TX ring on this device, tell the queueing
	 * layer to send no more.
	 */
	if (tx_full(dev))
		netif_stop_queue(dev);

	/* When the TX completion hw interrupt arrives, this
	 * is when the transmit statistics are updated.
	 */

	spin_unlock_irq(&np->lock);
#else
	/* This is the case for older hardware which takes
	 * a single transmit buffer at a time, and it is
	 * just written to the device via PIO.
	 *
	 * No spin locking is needed since there is no TX complete
	 * event.  If by chance your card does have a TX complete
	 * hardware IRQ then you may need to utilize np->lock here.
	 */
	hardware_send_packet(ioaddr, buf, length);
	np->stats.tx_bytes += skb->len;

	dev->trans_start = jiffies;

	/* You might need to clean up and record Tx statistics here. */
	if (inw(ioaddr) == /*RU*/81)
		np->stats.tx_aborted_errors++;
	dev_kfree_skb(skb);
#endif

	return 0;
}

#if TX_RING
/* This handles TX complete events posted by the device
 * via interrupts.
 */
void net_tx(struct net_device *dev)
{
	struct net_local *np = netdev_priv(dev);
	int entry;

	/* This protects us from concurrent execution of
	 * our dev->hard_start_xmit function above.
	 */
	spin_lock(&np->lock);

	entry = np->tx_old;
	while (tx_entry_is_sent(np, entry)) {
		struct sk_buff *skb = np->skbs[entry];

		np->stats.tx_bytes += skb->len;
		dev_kfree_skb_irq(skb);

		entry = next_tx_entry(np, entry);
	}
	np->tx_old = entry;

	/* If we had stopped the queue due to a "tx full"
	 * condition, and space has now been made available,
	 * wake up the queue.
	 */
	if (netif_queue_stopped(dev) && !tx_full(dev))
		netif_wake_queue(dev);

	spin_unlock(&np->lock);
}
#endif
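
/*
 * add_to_tx_ring(), tx_entry_is_sent() and next_tx_entry(), used above, are
 * also routines you must write; they depend entirely on how your hardware
 * describes its transmit ring.  A minimal sketch, assuming the illustrative
 * tx_new/tx_old/skbs[] fields mentioned next to struct net_local and a
 * power-of-two TX_RING_SIZE (all names here are assumptions, not kernel API):
 *
 *	static int next_tx_entry(struct net_local *np, int entry)
 *	{
 *		return (entry + 1) & (TX_RING_SIZE - 1);
 *	}
 *
 *	static void add_to_tx_ring(struct net_local *np,
 *				   struct sk_buff *skb, int length)
 *	{
 *		np->skbs[np->tx_new] = skb;
 *		// hand skb->data and length to the descriptor here
 *		np->tx_new = next_tx_entry(np, np->tx_new);
 *	}
 *
 *	static int tx_entry_is_sent(struct net_local *np, int entry)
 *	{
 *		// real code reads a "done" bit from the descriptor
 *		return entry != np->tx_new;
 *	}
 */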

/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 */
static irqreturn_t net_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *np;
	int ioaddr, status;
	int handled = 0;

	ioaddr = dev->base_addr;

	np = netdev_priv(dev);
	status = inw(ioaddr + 0);

	if (status == 0)
		goto out;
	handled = 1;

	if (status & RX_INTR) {
		/* Got a packet(s). */
		net_rx(dev);
	}
#if TX_RING
	if (status & TX_INTR) {
		/* Transmit complete. */
		net_tx(dev);
		np->stats.tx_packets++;
		netif_wake_queue(dev);
	}
#endif
	if (status & COUNTERS_INTR) {
		/* Increment the appropriate 'localstats' field. */
		np->stats.tx_window_errors++;
	}
out:
	return IRQ_RETVAL(handled);
}

/* We have a good packet(s), get it/them out of the buffers. */
static void
net_rx(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	int boguscount = 10;

	do {
		int status = inw(ioaddr);
		int pkt_len = inw(ioaddr);

		if (pkt_len == 0)		/* Read all the frames? */
			break;			/* Done for now */

		if (status & 0x40) {	/* There was an error. */
			lp->stats.rx_errors++;
			if (status & 0x20) lp->stats.rx_frame_errors++;
			if (status & 0x10) lp->stats.rx_over_errors++;
			if (status & 0x08) lp->stats.rx_crc_errors++;
			if (status & 0x04) lp->stats.rx_fifo_errors++;
		} else {
			/* Malloc up new buffer. */
			struct sk_buff *skb;

			skb = dev_alloc_skb(pkt_len);
			if (skb == NULL) {
				printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
					   dev->name);
				lp->stats.rx_dropped++;
				break;
			}
			skb->dev = dev;

			/* 'skb->data' points to the start of sk_buff data area.
			 * Copy the frame into it, either from shared memory ...
			 */
			memcpy(skb_put(skb, pkt_len), (void*)dev->rmem_start,
				   pkt_len);
			/* ... or, instead, via PIO: */
			insw(ioaddr, skb->data, (pkt_len + 1) >> 1);

			netif_rx(skb);
			dev->last_rx = jiffies;
			lp->stats.rx_packets++;
			lp->stats.rx_bytes += pkt_len;
		}
	} while (--boguscount);

	return;
}

/* The inverse routine to net_open(). */
static int
net_close(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	lp->open_time = 0;

	netif_stop_queue(dev);

	/* Flush the Tx and disable Rx here. */

	disable_dma(dev->dma);

	/* If not IRQ or DMA jumpered, free up the line. */
	outw(0x00, ioaddr+0);	/* Release the physical interrupt line. */

	free_irq(dev->irq, dev);
	free_dma(dev->dma);

	/* Update the statistics here. */

	return 0;
}

/*
 * Get the current statistics.
 * This may be called with the card open or closed.
 */
static struct net_device_stats *net_get_stats(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	short ioaddr = dev->base_addr;

	/* Update the statistics from the device registers. */
	lp->stats.rx_missed_errors = inw(ioaddr+1);
	return &lp->stats;
}

/*
 * Set or clear the multicast filter for this adaptor.
 * IFF_PROMISC set:	promiscuous mode, receive all packets.
 * IFF_ALLMULTI set (or more addresses than the hardware can filter):
 *			receive all multicast packets.
 * dev->mc_count > 0:	multicast mode, receive normal and multicast
 *			packets, and do best-effort filtering.
 * otherwise:		normal mode, clear the multicast filter.
 */
static void
set_multicast_list(struct net_device *dev)
{
	short ioaddr = dev->base_addr;
	if (dev->flags & IFF_PROMISC)
	{
		/* Enable promiscuous mode */
		outw(MULTICAST|PROMISC, ioaddr);
	}
	else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS)
	{
		/* Too many addresses to filter: accept all multicast packets. */
		hardware_set_filter(NULL);

		outw(MULTICAST, ioaddr);
	}
	else if (dev->mc_count)
	{
		/* Walk the address list, and load the filter */
		hardware_set_filter(dev->mc_list);

		outw(MULTICAST, ioaddr);
	}
	else
		outw(0, ioaddr);
}
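
/*
 * hardware_set_filter() is hardware specific and, like the other example
 * routines, is left for you to write.  Typically it walks the dev_mc_list
 * chain and loads each address (or a hash of it) into the chip's multicast
 * filter.  A sketch only -- load_filter_entry() is a hypothetical helper:
 *
 *	static void hardware_set_filter(struct dev_mc_list *mc_list)
 *	{
 *		struct dev_mc_list *mc;
 *
 *		for (mc = mc_list; mc; mc = mc->next)
 *			load_filter_entry(mc->dmi_addr);
 *	}
 */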

#ifdef MODULE

static struct net_device *this_device;
static int io = 0x300;
static int irq;
static int dma;
static int mem;
MODULE_LICENSE("GPL");
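
/*
 * The original outline never exposes io/irq/dma/mem to the module loader;
 * to let "insmod" or "modprobe" actually set them (e.g. io=0x280 irq=9) you
 * would typically add parameter declarations like these:
 */
module_param(io, int, 0);
module_param(irq, int, 0);
module_param(dma, int, 0);
module_param(mem, int, 0);
MODULE_PARM_DESC(io, "I/O base address");
MODULE_PARM_DESC(irq, "IRQ number");
MODULE_PARM_DESC(dma, "DMA channel");
MODULE_PARM_DESC(mem, "memory base address");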

int init_module(void)
{
	struct net_device *dev;

	if (io == 0)
		printk(KERN_WARNING "%s: You shouldn't use auto-probing with insmod!\n",
			   cardname);
	dev = alloc_etherdev(sizeof(struct net_local));
	if (!dev)
		return -ENOMEM;

	/* Copy the parameters from insmod into the device structure. */
	dev->base_addr = io;
	dev->irq       = irq;
	dev->dma       = dma;
	dev->mem_start = mem;
	if (do_netcard_probe(dev) == 0) {
		this_device = dev;
		return 0;
	}
	free_netdev(dev);
	return -ENXIO;
}

void
cleanup_module(void)
{
	unregister_netdev(this_device);
	cleanup_card(this_device);
	free_netdev(this_device);
}

#endif /* MODULE */

/*
 * Local variables:
 *  compile-command:
 *	gcc -D__KERNEL__ -Wall -Wstrict-prototypes -Wwrite-strings
 *	-Wredundant-decls -O2 -m486 -c skeleton.c
 *  version-control: t
 *  kept-new-versions: 5
 *  tab-width: 4
 *  c-indent-level: 4
 * End:
 */