Bug Summary

File: pfinet/./linux-src/net/core/dev.c
Location: line 739, column 8
Description: Access to field 'dev' results in a dereference of a null pointer (loaded from variable 'curr'). In dev_clear_backlog(), the for-loop advances with 'curr = curr->next' and compares 'curr' only against the queue head, so a NULL 'next' pointer flows into the 'curr->dev' dereference; the loop condition needs an explicit NULL check.

Annotated Source Code

1/*
2 * NET3 Protocol independent device support routines.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Derived from the non IP parts of dev.c 1.0.19
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 *
14 * Additional Authors:
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dhinds@allegro.stanford.edu>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 *
21 * Changes:
22 * Marcelo Tosatti <marcelo@conectiva.com.br> : dont accept mtu 0 or <
23 * Alan Cox : device private ioctl copies fields back.
24 * Alan Cox : Transmit queue code does relevant stunts to
25 * keep the queue safe.
26 * Alan Cox : Fixed double lock.
27 * Alan Cox : Fixed promisc NULL pointer trap
28 * ???????? : Support the full private ioctl range
29 * Alan Cox : Moved ioctl permission check into drivers
30 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
31 * Alan Cox : 100 backlog just doesn't cut it when
32 * you start doing multicast video 8)
33 * Alan Cox : Rewrote net_bh and list manager.
34 * Alan Cox : Fix ETH_P_ALL echoback lengths.
35 * Alan Cox : Took out transmit every packet pass
36 * Saved a few bytes in the ioctl handler
37 * Alan Cox : Network driver sets packet type before calling netif_rx. Saves
38 * a function call a packet.
39 * Alan Cox : Hashed net_bh()
40 * Richard Kooijman: Timestamp fixes.
41 * Alan Cox : Wrong field in SIOCGIFDSTADDR
42 * Alan Cox : Device lock protection.
43 * Alan Cox : Fixed nasty side effect of device close changes.
44 * Rudi Cilibrasi : Pass the right thing to set_mac_address()
45 * Dave Miller : 32bit quantity for the device lock to make it work out
46 * on a Sparc.
47 * Bjorn Ekwall : Added KERNELD hack.
48 * Alan Cox : Cleaned up the backlog initialise.
49 * Craig Metz : SIOCGIFCONF fix if space for under
50 * 1 device.
51 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
52 * is no device open function.
53 * Andi Kleen : Fix error reporting for SIOCGIFCONF
54 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
55 * Cyrus Durgin : Cleaned for KMOD
56 * Adam Sulmicki : Bug Fix : Network Device Unload
57 * A network device unload needs to purge
58 * the backlog queue.
59 * Paul Rusty Russel : SIOCSIFNAME
60 * Andrea Arcangeli : dev_clear_backlog() needs the
61 * skb_queue_lock held.
62 */
63
64#include <asm/uaccess.h>
65#include <asm/system.h>
66#include <asm/bitops.h>
67#include <linux/config.h>
68#include <linux/types.h>
69#include <linux/kernel.h>
70#include <linux/sched.h>
71#include <linux/string.h>
72#include <linux/mm.h>
73#include <linux/socket.h>
74#include <linux/sockios.h>
75#include <linux/errno.h>
76#include <linux/interrupt.h>
77#include <linux/if_ether.h>
78#include <linux/netdevice.h>
79#include <linux/etherdevice.h>
80#include <linux/notifier.h>
81#include <linux/skbuff.h>
82#include <net/sock.h>
83#include <linux/rtnetlink.h>
84#include <net/slhc.h>
85#include <linux/proc_fs.h>
86#include <linux/stat.h>
87#include <net/br.h>
88#include <net/dst.h>
89#include <net/pkt_sched.h>
90#include <net/profile.h>
91#include <linux/init.h>
92#include <linux/kmod.h>
93#ifdef CONFIG_NET_RADIO
94#include <linux/wireless.h>
95#endif /* CONFIG_NET_RADIO */
96#ifdef CONFIG_PLIP
97extern int plip_init(void);
98#endif
99
100NET_PROFILE_DEFINE(dev_queue_xmit)
101NET_PROFILE_DEFINE(net_bh)
102NET_PROFILE_DEFINE(net_bh_skb)
103
104
105const char *if_port_text[] = {
106 "unknown",
107 "BNC",
108 "10baseT",
109 "AUI",
110 "100baseT",
111 "100baseTX",
112 "100baseFX"
113};
114
115/*
116 * The list of packet types we will receive (as opposed to discard)
117 * and the routines to invoke.
118 *
119 * Why 16. Because with 16 the only overlap we get on a hash of the
120 * low nibble of the protocol value is RARP/SNAP/X.25.
121 *
122 * 0800 IP
123 * 0001 802.3
124 * 0002 AX.25
125 * 0004 802.2
126 * 8035 RARP
127 * 0005 SNAP
128 * 0805 X.25
129 * 0806 ARP
130 * 8137 IPX
131 * 0009 Localtalk
132 * 86DD IPv6
133 */
134
135struct packet_type *ptype_base[16]; /* 16 way hashed list */
136struct packet_type *ptype_all = NULL((void*)0); /* Taps */
137
138/*
139 * Device list lock. Setting it provides that interface
140 * will not disappear unexpectedly while kernel sleeps.
141 */
142
143atomic_t dev_lockct = ATOMIC_INIT(0){ (0) };
144
145/*
146 * Our notifier list
147 */
148
149#ifdef _HURD_1
150struct notifier_block *netdev_chain=NULL((void*)0);
151#else
152static struct notifier_block *netdev_chain=NULL((void*)0);
153#endif
154
155/*
156 * Device drivers call our routines to queue packets here. We empty the
157 * queue in the bottom half handler.
158 */
159
160static struct sk_buff_head backlog;
161
162#ifdef CONFIG_NET_FASTROUTE
163int netdev_fastroute;
164int netdev_fastroute_obstacles;
165struct net_fastroute_stats dev_fastroute_stat;
166#endif
167
168static void dev_clear_backlog(struct device *dev);
169
170
171/******************************************************************************************
172
173 Protocol management and registration routines
174
175*******************************************************************************************/
176
177/*
178 * For efficiency
179 */
180
181int netdev_nit=0;
182
183/*
184 * Add a protocol ID to the list. Now that the input handler is
185 * smarter we can dispense with all the messy stuff that used to be
186 * here.
187 *
188 * BEWARE!!! Protocol handlers, mangling input packets,
189 * MUST BE last in hash buckets and checking protocol handlers
190 * MUST start from promiscuous ptype_all chain in net_bh.
191 * It is true now, do not change it.
192 * Explantion follows: if protocol handler, mangling packet, will
193 * be the first on list, it is not able to sense, that packet
194 * is cloned and should be copied-on-write, so that it will
195 * change it and subsequent readers will get broken packet.
196 * --ANK (980803)
197 */
198
199void dev_add_pack(struct packet_type *pt)
200{
201 int hash;
202#ifdef CONFIG_NET_FASTROUTE
203 /* Hack to detect packet socket */
204 if (pt->data) {
205 netdev_fastroute_obstacles++;
206 dev_clear_fastroute(pt->dev);
207 }
208#endif
209 if(pt->type==htons(ETH_P_ALL0x0003))
210 {
211 netdev_nit++;
212 pt->next=ptype_all;
213 ptype_all=pt;
214 }
215 else
216 {
217 hash=ntohs(pt->type)&15;
218 pt->next = ptype_base[hash];
219 ptype_base[hash] = pt;
220 }
221}
222
223
224/*
225 * Remove a protocol ID from the list.
226 */
227
228void dev_remove_pack(struct packet_type *pt)
229{
230 struct packet_type **pt1;
231 if(pt->type==htons(ETH_P_ALL0x0003))
232 {
233 netdev_nit--;
234 pt1=&ptype_all;
235 }
236 else
237 pt1=&ptype_base[ntohs(pt->type)&15];
238 for(; (*pt1)!=NULL((void*)0); pt1=&((*pt1)->next))
239 {
240 if(pt==(*pt1))
241 {
242 *pt1=pt->next;
243 synchronize_bh()((void) 0);
244#ifdef CONFIG_NET_FASTROUTE
245 if (pt->data)
246 netdev_fastroute_obstacles--;
247#endif
248 return;
249 }
250 }
251 printkprintf(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
252}
253
254/*****************************************************************************************
255
256 Device Interface Subroutines
257
258******************************************************************************************/
259
260/*
261 * Find an interface by name.
262 */
263
264struct device *dev_get(const char *name)
265{
266 struct device *dev;
267
268 for (dev = dev_base; dev != NULL((void*)0); dev = dev->next)
269 {
270 if (strcmp(dev->name, name) == 0)
271 return(dev);
272 }
273 return NULL((void*)0);
274}
275
276struct device * dev_get_by_index(int ifindex)
277{
278 struct device *dev;
279
280 for (dev = dev_base; dev != NULL((void*)0); dev = dev->next)
281 {
282 if (dev->ifindex == ifindex)
283 return(dev);
284 }
285 return NULL((void*)0);
286}
287
288struct device *dev_getbyhwaddr(unsigned short type, char *ha)
289{
290 struct device *dev;
291
292 for (dev = dev_base; dev != NULL((void*)0); dev = dev->next)
293 {
294 if (dev->type == type &&
295 memcmp(dev->dev_addr, ha, dev->addr_len) == 0)
296 return(dev);
297 }
298 return(NULL((void*)0));
299}
300
301/*
302 * Passed a format string - eg "lt%d" it will try and find a suitable
303 * id. Not efficient for many devices, not called a lot..
304 */
305
306int dev_alloc_name(struct device *dev, const char *name)
307{
308 int i;
309 /*
310 * If you need over 100 please also fix the algorithm...
311 */
312 for(i=0;i<100;i++)
313 {
314 sprintf(dev->name,name,i);
315 if(dev_get(dev->name)==NULL((void*)0))
316 return i;
317 }
318 return -ENFILE((0x10 << 26) | ((23) & 0x3fff)); /* Over 100 of the things .. bail out! */
319}
320
321struct device *dev_alloc(const char *name, int *err)
322{
323 struct device *dev=kmalloc(sizeof(struct device)+16, GFP_KERNEL0);
324 if(dev==NULL((void*)0))
325 {
326 *err=-ENOBUFS((0x10 << 26) | ((55) & 0x3fff));
327 return NULL((void*)0);
328 }
329 dev->name=(char *)(dev+1); /* Name string space */
330 *err=dev_alloc_name(dev,name);
331 if(*err<0)
332 {
333 kfree(dev);
334 return NULL((void*)0);
335 }
336 return dev;
337}
338
339void netdev_state_change(struct device *dev)
340{
341 if (dev->flags&IFF_UPIFF_UP)
342 notifier_call_chain(&netdev_chain, NETDEV_CHANGE0x0004, dev);
343}
344
345
346/*
347 * Find and possibly load an interface.
348 */
349
350#ifdef CONFIG_KMOD
351
352void dev_load(const char *name)
353{
354 if(!dev_get(name) && capable(CAP_SYS_MODULE16))
355 request_module(name)do {} while(0);
356}
357
358#else
359
360extern inline void dev_load(const char *unused){;}
361
362#endif
363
364static int default_rebuild_header(struct sk_buff *skb)
365{
366 printkprintf(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n", skb->dev ? skb->dev->name : "NULL!!!");
367 kfree_skb(skb);
368 return 1;
369}
370
371/*
372 * Prepare an interface for use.
373 */
374
375int dev_open(struct device *dev)
376{
377 int ret = 0;
378
379 /*
380 * Is it already up?
381 */
382
383 if (dev->flags&IFF_UPIFF_UP)
384 return 0;
385
386 /*
387 * Call device private open method
388 */
389
390 if (dev->open)
391 ret = dev->open(dev);
392
393 /*
394 * If it went open OK then:
395 */
396
397 if (ret == 0)
398 {
399 /*
400 * nil rebuild_header routine,
401 * that should be never called and used as just bug trap.
402 */
403
404 if (dev->rebuild_header == NULL((void*)0))
405 dev->rebuild_header = default_rebuild_header;
406
407 /*
408 * Set the flags.
409 */
410 dev->flags |= (IFF_UPIFF_UP | IFF_RUNNINGIFF_RUNNING);
411
412 /*
413 * Initialize multicasting status
414 */
415 dev_mc_upload(dev);
416
417 /*
418 * Wakeup transmit queue engine
419 */
420 dev_activate(dev);
421
422 /*
423 * ... and announce new interface.
424 */
425 notifier_call_chain(&netdev_chain, NETDEV_UP0x0001, dev);
426
427 }
428 return(ret);
429}
430
431#ifdef CONFIG_NET_FASTROUTE
432
433static __inline__ void dev_do_clear_fastroute(struct device *dev)
434{
435 if (dev->accept_fastpath) {
436 int i;
437
438 for (i=0; i<=NETDEV_FASTROUTE_HMASK; i++)
439 dst_release_irqwait(xchg(dev->fastpath+i, NULL)({ __typeof__ (*(dev->fastpath+i)) *_ptr = (dev->fastpath
+i), _x = *_ptr; *_ptr = (((void*)0)); _x; })
);
440 }
441}
442
443void dev_clear_fastroute(struct device *dev)
444{
445 if (dev) {
446 dev_do_clear_fastroute(dev);
447 } else {
448 for (dev = dev_base; dev; dev = dev->next)
449 dev_do_clear_fastroute(dev);
450 }
451}
452#endif
453
454/*
455 * Completely shutdown an interface.
456 */
457
458int dev_close(struct device *dev)
459{
460 if (!(dev->flags&IFF_UPIFF_UP))
5
Taking false branch
461 return 0;
462
463 dev_deactivate(dev);
464
465 dev_lock_wait();
466
467 /*
468 * Call the device specific close. This cannot fail.
469 * Only if device is UP
470 */
471
472 if (dev->stop)
6
Taking false branch
473 dev->stop(dev);
474
475 if (dev->start)
7
Taking false branch
476 printkprintf("dev_close: bug %s still running\n", dev->name);
477
478 /*
479 * Device is now down.
480 */
481 dev_clear_backlog(dev);
8
Calling 'dev_clear_backlog'
482
483 dev->flags&=~(IFF_UPIFF_UP|IFF_RUNNINGIFF_RUNNING);
484#ifdef CONFIG_NET_FASTROUTE
485 dev_clear_fastroute(dev);
486#endif
487
488 /*
489 * Tell people we are going down
490 */
491 notifier_call_chain(&netdev_chain, NETDEV_DOWN0x0002, dev);
492
493 return(0);
494}
495
496
497/*
498 * Device change register/unregister. These are not inline or static
499 * as we export them to the world.
500 */
501
502int register_netdevice_notifier(struct notifier_block *nb)
503{
504 return notifier_chain_register(&netdev_chain, nb);
505}
506
507int unregister_netdevice_notifier(struct notifier_block *nb)
508{
509 return notifier_chain_unregister(&netdev_chain,nb);
510}
511
512/*
513 * Support routine. Sends outgoing frames to any network
514 * taps currently in use.
515 */
516
517void dev_queue_xmit_nit(struct sk_buff *skb, struct device *dev)
518{
519 struct packet_type *ptype;
520 get_fast_time(&skb->stamp)maptime_read (mapped_time, (&skb->stamp));
521
522 for (ptype = ptype_all; ptype!=NULL((void*)0); ptype = ptype->next)
523 {
524 /* Never send packets back to the socket
525 * they originated from - MvS (miquels@drinkel.ow.org)
526 */
527 if ((ptype->dev == dev || !ptype->dev) &&
528 ((struct sock *)ptype->data != skb->sk))
529 {
530 struct sk_buff *skb2;
531 if ((skb2 = skb_clone(skb, GFP_ATOMIC0)) == NULL((void*)0))
532 break;
533
534 /* Code, following below is wrong.
535
536 The only reason, why it does work is that
537 ONLY packet sockets receive outgoing
538 packets. If such a packet will be (occasionally)
539 received by normal packet handler, which expects
540 that mac header is pulled...
541 */
542
543 /* More sensible variant. skb->nh should be correctly
544 set by sender, so that the second statement is
545 just protection against buggy protocols.
546 */
547 skb2->mac.raw = skb2->data;
548
549 if (skb2->nh.raw < skb2->data || skb2->nh.raw >= skb2->tail) {
550 if (net_ratelimit())
551 printkprintf(KERN_DEBUG "protocol %04x is buggy, dev %s\n", skb2->protocol, dev->name);
552 skb2->nh.raw = skb2->data;
553 if (dev->hard_header)
554 skb2->nh.raw += dev->hard_header_len;
555 }
556
557 skb2->h.raw = skb2->nh.raw;
558 skb2->pkt_type = PACKET_OUTGOING4;
559 ptype->func(skb2, skb->dev, ptype);
560 }
561 }
562}
563
564/*
565 * Fast path for loopback frames.
566 */
567
568void dev_loopback_xmit(struct sk_buff *skb)
569{
570 struct sk_buff *newskb=skb_clone(skb, GFP_ATOMIC0);
571 if (newskb==NULL((void*)0))
572 return;
573
574 newskb->mac.raw = newskb->data;
575 skb_pull(newskb, newskb->nh.raw - newskb->data);
576 newskb->pkt_type = PACKET_LOOPBACK5;
577 newskb->ip_summed = CHECKSUM_UNNECESSARY2;
578 if (newskb->dst==NULL((void*)0))
579 printkprintf(KERN_DEBUG "BUG: packet without dst looped back 1\n");
580 netif_rx(newskb);
581}
582
583int dev_queue_xmit(struct sk_buff *skb)
584{
585 struct device *dev = skb->dev;
586 struct Qdisc *q;
587
588#ifdef CONFIG_NET_PROFILE
589 start_bh_atomic()((void) 0);
590 NET_PROFILE_ENTER(dev_queue_xmit)do { } while(0);
591#endif
592
593 start_bh_atomic()((void) 0);
594 q = dev->qdisc;
595 if (q->enqueue) {
596 q->enqueue(skb, q);
597 qdisc_wakeup(dev);
598 end_bh_atomic()((void) 0);
599
600#ifdef CONFIG_NET_PROFILE
601 NET_PROFILE_LEAVE(dev_queue_xmit)do { } while(0);
602 end_bh_atomic()((void) 0);
603#endif
604
605 return 0;
606 }
607
608 /* The device has no queue. Common case for software devices:
609 loopback, all the sorts of tunnels...
610
611 Really, it is unlikely that bh protection is necessary here:
612 virtual devices do not generate EOI events.
613 However, it is possible, that they rely on bh protection
614 made by us here.
615 */
616 if (dev->flags&IFF_UPIFF_UP) {
617 if (netdev_nit)
618 dev_queue_xmit_nit(skb,dev);
619 if (dev->hard_start_xmit(skb, dev) == 0) {
620 end_bh_atomic()((void) 0);
621
622#ifdef CONFIG_NET_PROFILE
623 NET_PROFILE_LEAVE(dev_queue_xmit)do { } while(0);
624 end_bh_atomic()((void) 0);
625#endif
626
627 return 0;
628 }
629 if (net_ratelimit())
630 printkprintf(KERN_DEBUG "Virtual device %s asks to queue packet!\n", dev->name);
631 }
632 end_bh_atomic()((void) 0);
633
634 kfree_skb(skb);
635
636#ifdef CONFIG_NET_PROFILE
637 NET_PROFILE_LEAVE(dev_queue_xmit)do { } while(0);
638 end_bh_atomic()((void) 0);
639#endif
640
641 return 0;
642}
643
644
645/*=======================================================================
646 Receiver rotutines
647 =======================================================================*/
648
649int netdev_dropping = 0;
650int netdev_max_backlog = 300;
651atomic_t netdev_rx_dropped;
652#ifdef CONFIG_CPU_IS_SLOW
653int net_cpu_congestion;
654#endif
655
656#ifdef CONFIG_NET_HW_FLOWCONTROL
657int netdev_throttle_events;
658static unsigned long netdev_fc_mask = 1;
659unsigned long netdev_fc_xoff = 0;
660
661static struct
662{
663 void (*stimul)(struct device *);
664 struct device *dev;
665} netdev_fc_slots[32];
666
667int netdev_register_fc(struct device *dev, void (*stimul)(struct device *dev))
668{
669 int bit = 0;
670 unsigned long flags;
671
672 save_flags(flags);
673 cli();
674 if (netdev_fc_mask != ~0UL) {
675 bit = ffz(netdev_fc_mask)(ffs (~(unsigned int) (netdev_fc_mask)) - 1);
676 netdev_fc_slots[bit].stimul = stimul;
677 netdev_fc_slots[bit].dev = dev;
678 set_bit(bit, &netdev_fc_mask);
679 clear_bit(bit, &netdev_fc_xoff);
680 }
681 restore_flags(flags);
682 return bit;
683}
684
685void netdev_unregister_fc(int bit)
686{
687 unsigned long flags;
688
689 save_flags(flags);
690 cli();
691 if (bit > 0) {
692 netdev_fc_slots[bit].stimul = NULL((void*)0);
693 netdev_fc_slots[bit].dev = NULL((void*)0);
694 clear_bit(bit, &netdev_fc_mask);
695 clear_bit(bit, &netdev_fc_xoff);
696 }
697 restore_flags(flags);
698}
699
700static void netdev_wakeup(void)
701{
702 unsigned long xoff;
703
704 cli();
705 xoff = netdev_fc_xoff;
706 netdev_fc_xoff = 0;
707 netdev_dropping = 0;
708 netdev_throttle_events++;
709 while (xoff) {
710 int i = ffz(~xoff)(ffs (~(unsigned int) (~xoff)) - 1);
711 xoff &= ~(1<<i);
712 netdev_fc_slots[i].stimul(netdev_fc_slots[i].dev);
713 }
714 sti();
715}
716#endif
717
718static void dev_clear_backlog(struct device *dev)
719{
720 struct sk_buff *curr;
721 unsigned long flags;
722
723 /*
724 *
725 * Let now clear backlog queue. -AS
726 *
727 * We are competing here both with netif_rx() and net_bh().
728 * We don't want either of those to mess with skb ptrs
729 * while we work on them, thus we must grab the
730 * skb_queue_lock.
731 */
732
733 if (backlog.qlen) {
9
Taking true branch
734 repeat:
735 spin_lock_irqsave(&skb_queue_lock, flags)((void) (&skb_queue_lock), (void) (flags));
736 for (curr = backlog.next;
10
Loop condition is true. Entering loop body
13
Loop condition is true. Entering loop body
16
Loop condition is true. Entering loop body
737 curr != (struct sk_buff *)(&backlog);
738 curr = curr->next)
15
Null pointer value stored to 'curr'
739 if (curr->dev == dev)
11
Taking true branch
14
Taking false branch
17
Access to field 'dev' results in a dereference of a null pointer (loaded from variable 'curr')
740 {
741 __skb_unlink(curr, &backlog);
742 spin_unlock_irqrestore(&skb_queue_lock, flags)((void) (&skb_queue_lock), (void) (flags));
743 kfree_skb(curr);
744 goto repeat;
12
Control jumps to line 735
745 }
746 spin_unlock_irqrestore(&skb_queue_lock, flags)((void) (&skb_queue_lock), (void) (flags));
747#ifdef CONFIG_NET_HW_FLOWCONTROL
748 if (netdev_dropping)
749 netdev_wakeup();
750#else
751 netdev_dropping = 0;
752#endif
753 }
754}
755
756/*
757 * Receive a packet from a device driver and queue it for the upper
758 * (protocol) levels. It always succeeds.
759 */
760
761void netif_rx(struct sk_buff *skb)
762{
763#ifndef CONFIG_CPU_IS_SLOW
764 if(skb->stamp.tv_sec==0)
765 get_fast_time(&skb->stamp)maptime_read (mapped_time, (&skb->stamp));
766#else
767 skb->stamp = xtime;
768#endif
769
770 /* The code is rearranged so that the path is the most
771 short when CPU is congested, but is still operating.
772 */
773
774 if (backlog.qlen <= netdev_max_backlog) {
775 if (backlog.qlen) {
776 if (netdev_dropping == 0) {
777 skb_queue_tail(&backlog,skb);
778 mark_bh(NET_BH0xb00bee51);
779 return;
780 }
781 atomic_inc(&netdev_rx_dropped);
782 kfree_skb(skb);
783 return;
784 }
785#ifdef CONFIG_NET_HW_FLOWCONTROL
786 if (netdev_dropping)
787 netdev_wakeup();
788#else
789 netdev_dropping = 0;
790#endif
791 skb_queue_tail(&backlog,skb);
792 mark_bh(NET_BH0xb00bee51);
793 return;
794 }
795 netdev_dropping = 1;
796 atomic_inc(&netdev_rx_dropped);
797 kfree_skb(skb);
798}
799
800#ifdef CONFIG_BRIDGE
801static inline void handle_bridge(struct sk_buff *skb, unsigned short type)
802{
803 /*
804 * The br_stats.flags is checked here to save the expense of a
805 * function call.
806 */
807 if ((br_stats.flags & BR_UP0x0001) && br_call_bridge(skb, type))
808 {
809 /*
810 * We pass the bridge a complete frame. This means
811 * recovering the MAC header first.
812 */
813
814 int offset;
815
816 skb=skb_clone(skb, GFP_ATOMIC0);
817 if(skb==NULL((void*)0))
818 return;
819
820 offset=skb->data-skb->mac.raw;
821 skb_push(skb,offset); /* Put header back on for bridge */
822
823 if(br_receive_frame(skb))
824 return;
825 kfree_skb(skb);
826 }
827 return;
828}
829#endif
830
831/*
832 * When we are called the queue is ready to grab, the interrupts are
833 * on and hardware can interrupt and queue to the receive queue as we
834 * run with no problems.
835 * This is run as a bottom half after an interrupt handler that does
836 * mark_bh(NET_BH);
837 */
838
839void net_bh(void)
840{
841 struct packet_type *ptype;
842 struct packet_type *pt_prev;
843 unsigned short type;
844#ifndef _HURD_1
845 unsigned long start_time = jiffies(fetch_jiffies ());
846#ifdef CONFIG_CPU_IS_SLOW
847 static unsigned long start_busy = 0;
848 static unsigned long ave_busy = 0;
849
850 if (start_busy == 0)
851 start_busy = start_time;
852 net_cpu_congestion = ave_busy>>8;
853#endif
854#endif
855
856 NET_PROFILE_ENTER(net_bh)do { } while(0);
857 /*
858 * Can we send anything now? We want to clear the
859 * decks for any more sends that get done as we
860 * process the input. This also minimises the
861 * latency on a transmit interrupt bh.
862 */
863
864 if (qdisc_head.forw != &qdisc_head)
865 qdisc_run_queues();
866
867 /*
868 * Any data left to process. This may occur because a
869 * mark_bh() is done after we empty the queue including
870 * that from the device which does a mark_bh() just after
871 */
872
873 /*
874 * While the queue is not empty..
875 *
876 * Note that the queue never shrinks due to
877 * an interrupt, so we can do this test without
878 * disabling interrupts.
879 */
880
881 while (!skb_queue_empty(&backlog))
882 {
883 struct sk_buff * skb;
884
885#ifndef _HURD_1
886 /* Give chance to other bottom halves to run */
887 if (jiffies(fetch_jiffies ()) - start_time > 1)
888 goto net_bh_break;
889#endif
890
891 /*
892 * We have a packet. Therefore the queue has shrunk
893 */
894 skb = skb_dequeue(&backlog);
895
896#ifndef _HURD_1
897#ifdef CONFIG_CPU_IS_SLOW
898 if (ave_busy > 128*16) {
899 kfree_skb(skb);
900 while ((skb = skb_dequeue(&backlog)) != NULL((void*)0))
901 kfree_skb(skb);
902 break;
903 }
904#endif
905#endif
906
907
908#if 0
909 NET_PROFILE_SKB_PASSED(skb, net_bh_skb)do { } while(0);
910#endif
911#ifdef CONFIG_NET_FASTROUTE
912 if (skb->pkt_type == PACKET_FASTROUTE6) {
913 dev_queue_xmit(skb);
914 continue;
915 }
916#endif
917
918 /*
919 * Bump the pointer to the next structure.
920 *
921 * On entry to the protocol layer. skb->data and
922 * skb->nh.raw point to the MAC and encapsulated data
923 */
924
925 /* XXX until we figure out every place to modify.. */
926 skb->h.raw = skb->nh.raw = skb->data;
927
928 if (skb->mac.raw < skb->head || skb->mac.raw > skb->data) {
929 printkprintf(KERN_CRIT "%s: wrong mac.raw ptr, proto=%04x\n", skb->dev->name, skb->protocol);
930 kfree_skb(skb);
931 continue;
932 }
933
934 /*
935 * Fetch the packet protocol ID.
936 */
937
938 type = skb->protocol;
939
940#ifdef CONFIG_BRIDGE
941 /*
942 * If we are bridging then pass the frame up to the
943 * bridging code (if this protocol is to be bridged).
944 * If it is bridged then move on
945 */
946 handle_bridge(skb, type);
947#endif
948
949 /*
950 * We got a packet ID. Now loop over the "known protocols"
951 * list. There are two lists. The ptype_all list of taps (normally empty)
952 * and the main protocol list which is hashed perfectly for normal protocols.
953 */
954
955 pt_prev = NULL((void*)0);
956 for (ptype = ptype_all; ptype!=NULL((void*)0); ptype=ptype->next)
957 {
958 if (!ptype->dev || ptype->dev == skb->dev) {
959 if(pt_prev)
960 {
961 struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC0);
962 if(skb2)
963 pt_prev->func(skb2,skb->dev, pt_prev);
964 }
965 pt_prev=ptype;
966 }
967 }
968
969 for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL((void*)0); ptype = ptype->next)
970 {
971 if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev))
972 {
973 /*
974 * We already have a match queued. Deliver
975 * to it and then remember the new match
976 */
977 if(pt_prev)
978 {
979 struct sk_buff *skb2;
980
981 skb2=skb_clone(skb, GFP_ATOMIC0);
982
983 /*
984 * Kick the protocol handler. This should be fast
985 * and efficient code.
986 */
987
988 if(skb2)
989 pt_prev->func(skb2, skb->dev, pt_prev);
990 }
991 /* Remember the current last to do */
992 pt_prev=ptype;
993 }
994 } /* End of protocol list loop */
995
996 /*
997 * Is there a last item to send to ?
998 */
999
1000 if(pt_prev)
1001 pt_prev->func(skb, skb->dev, pt_prev);
1002 /*
1003 * Has an unknown packet has been received ?
1004 */
1005
1006 else {
1007 kfree_skb(skb);
1008 }
1009 } /* End of queue loop */
1010
1011 /*
1012 * We have emptied the queue
1013 */
1014
1015 /*
1016 * One last output flush.
1017 */
1018
1019 if (qdisc_head.forw != &qdisc_head)
1020 qdisc_run_queues();
1021
1022#ifndef _HURD_1
1023#ifdef CONFIG_CPU_IS_SLOW
1024 if (1) {
1025 unsigned long start_idle = jiffies(fetch_jiffies ());
1026 ave_busy += ((start_idle - start_busy)<<3) - (ave_busy>>4);
1027 start_busy = 0;
1028 }
1029#endif
1030#endif
1031#ifdef CONFIG_NET_HW_FLOWCONTROL
1032 if (netdev_dropping)
1033 netdev_wakeup();
1034#else
1035 netdev_dropping = 0;
1036#endif
1037 NET_PROFILE_LEAVE(net_bh)do { } while(0);
1038 return;
1039
1040#ifndef _HURD_1
1041net_bh_break:
1042 mark_bh(NET_BH0xb00bee51);
1043 NET_PROFILE_LEAVE(net_bh)do { } while(0);
1044 return;
1045#endif
1046}
1047
1048/* Protocol dependent address dumping routines */
1049
1050static gifconf_func_t * gifconf_list [NPROTO32];
1051
1052int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
1053{
1054 if (family>=NPROTO32)
1055 return -EINVAL((0x10 << 26) | ((22) & 0x3fff));
1056 gifconf_list[family] = gifconf;
1057 return 0;
1058}
1059
1060
1061/*
1062 * Map an interface index to its name (SIOCGIFNAME)
1063 */
1064
1065/*
1066 * This call is useful, but I'd remove it too.
1067 *
1068 * The reason is purely aestetical, it is the only call
1069 * from SIOC* family using struct ifreq in reversed manner.
1070 * Besides that, it is pretty silly to put "drawing" facility
1071 * to kernel, it is useful only to print ifindices
1072 * in readable form, is not it? --ANK
1073 *
1074 * We need this ioctl for efficient implementation of the
1075 * if_indextoname() function required by the IPv6 API. Without
1076 * it, we would have to search all the interfaces to find a
1077 * match. --pb
1078 */
1079
1080static int dev_ifname(struct ifreq *arg)
1081{
1082 struct device *dev;
1083 struct ifreq ifr;
1084 int err;
1085
1086 /*
1087 * Fetch the caller's info block.
1088 */
1089
1090 err = copy_from_user(&ifr, arg, sizeof(struct ifreq))(memcpy ((&ifr), (arg), (sizeof(struct ifreq))), 0);
1091 if (err)
1092 return -EFAULT((0x10 << 26) | ((14) & 0x3fff));
1093
1094 dev = dev_get_by_index(ifr.ifr_ifindexifr_ifru.ifru_ivalue);
1095 if (!dev)
1096 return -ENODEV((0x10 << 26) | ((19) & 0x3fff));
1097
1098 strcpy(ifr.ifr_nameifr_ifrn.ifrn_name, dev->name);
1099
1100 err = copy_to_user(arg, &ifr, sizeof(struct ifreq))(memcpy ((arg), (&ifr), (sizeof(struct ifreq))), 0);
1101 return (err)?-EFAULT((0x10 << 26) | ((14) & 0x3fff)):0;
1102}
1103
1104/*
1105 * Perform a SIOCGIFCONF call. This structure will change
1106 * size eventually, and there is nothing I can do about it.
1107 * Thus we will need a 'compatibility mode'.
1108 */
1109
1110#ifdef _HURD_1
1111int dev_ifconf(char *arg)
1112#else
1113static int dev_ifconf(char *arg)
1114#endif
1115{
1116 struct ifconf ifc;
1117 struct device *dev;
1118 char *pos;
1119 int len;
1120 int total;
1121 int i;
1122
1123 /*
1124 * Fetch the caller's info block.
1125 */
1126
1127 if (copy_from_user(&ifc, arg, sizeof(struct ifconf))(memcpy ((&ifc), (arg), (sizeof(struct ifconf))), 0))
1128 return -EFAULT((0x10 << 26) | ((14) & 0x3fff));
1129
1130 pos = ifc.ifc_bufifc_ifcu.ifcu_buf;
1131 len = ifc.ifc_len;
1132
1133 /*
1134 * Loop over the interfaces, and write an info block for each.
1135 */
1136
1137 total = 0;
1138 for (dev = dev_base; dev != NULL((void*)0); dev = dev->next) {
1139 for (i=0; i<NPROTO32; i++) {
1140 if (gifconf_list[i]) {
1141 int done;
1142 if (pos==NULL((void*)0)) {
1143 done = gifconf_list[i](dev, NULL((void*)0), 0);
1144 } else {
1145 done = gifconf_list[i](dev, pos+total, len-total);
1146 }
1147 if (done<0)
1148 return -EFAULT((0x10 << 26) | ((14) & 0x3fff));
1149 total += done;
1150 }
1151 }
1152 }
1153
1154 /*
1155 * All done. Write the updated control block back to the caller.
1156 */
1157 ifc.ifc_len = total;
1158
1159 if (copy_to_user(arg, &ifc, sizeof(struct ifconf))(memcpy ((arg), (&ifc), (sizeof(struct ifconf))), 0))
1160 return -EFAULT((0x10 << 26) | ((14) & 0x3fff));
1161
1162 /*
1163 * Both BSD and Solaris return 0 here, so we do too.
1164 */
1165 return 0;
1166}
1167
1168/*
1169 * This is invoked by the /proc filesystem handler to display a device
1170 * in detail.
1171 */
1172
1173#ifdef CONFIG_PROC_FS
1174static int sprintf_stats(char *buffer, struct device *dev)
1175{
1176 struct net_device_stats *stats = (dev->get_stats ? dev->get_stats(dev): NULL((void*)0));
1177 int size;
1178
1179 if (stats)
1180 size = sprintf(buffer, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu %8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
1181 dev->name,
1182 stats->rx_bytes,
1183 stats->rx_packets, stats->rx_errors,
1184 stats->rx_dropped + stats->rx_missed_errors,
1185 stats->rx_fifo_errors,
1186 stats->rx_length_errors + stats->rx_over_errors
1187 + stats->rx_crc_errors + stats->rx_frame_errors,
1188 stats->rx_compressed, stats->multicast,
1189 stats->tx_bytes,
1190 stats->tx_packets, stats->tx_errors, stats->tx_dropped,
1191 stats->tx_fifo_errors, stats->collisions,
1192 stats->tx_carrier_errors + stats->tx_aborted_errors
1193 + stats->tx_window_errors + stats->tx_heartbeat_errors,
1194 stats->tx_compressed);
1195 else
1196 size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
1197
1198 return size;
1199}
1200
1201/*
1202 * Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface
1203 * to create /proc/net/dev
1204 */
1205
1206int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
1207{
1208 int len=0;
1209 off_t begin=0;
1210 off_t pos=0;
1211 int size;
1212
1213 struct device *dev;
1214
1215
1216 size = sprintf(buffer,
1217 "Inter-| Receive | Transmit\n"
1218 " face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed\n");
1219
1220 pos+=size;
1221 len+=size;
1222
1223
1224 for (dev = dev_base; dev != NULL((void*)0); dev = dev->next)
1225 {
1226 size = sprintf_stats(buffer+len, dev);
1227 len+=size;
1228 pos=begin+len;
1229
1230 if(pos<offset)
1231 {
1232 len=0;
1233 begin=pos;
1234 }
1235 if(pos>offset+length)
1236 break;
1237 }
1238
1239 *start=buffer+(offset-begin); /* Start of wanted data */
1240 len-=(offset-begin); /* Start slop */
1241 if(len>length)
1242 len=length; /* Ending slop */
1243 return len;
1244}
1245
1246static int dev_proc_stats(char *buffer, char **start, off_t offset,
1247 int length, int *eof, void *data)
1248{
1249 int len;
1250
1251 len = sprintf(buffer, "%08x %08x %08x %08x %08x\n",
1252 atomic_read(&netdev_rx_dropped)((&netdev_rx_dropped)->counter),
1253#ifdef CONFIG_NET_HW_FLOWCONTROL
1254 netdev_throttle_events,
1255#else
1256 0,
1257#endif
1258#ifdef CONFIG_NET_FASTROUTE
1259 dev_fastroute_stat.hits,
1260 dev_fastroute_stat.succeed,
1261 dev_fastroute_stat.deferred
1262#else
1263 0, 0, 0
1264#endif
1265 );
1266
1267 len -= offset;
1268
1269 if (len > length)
1270 len = length;
1271 if(len < 0)
1272 len = 0;
1273
1274 *start = buffer + offset;
1275 *eof = 1;
1276
1277 return len;
1278}
1279
1280#endif /* CONFIG_PROC_FS */
1281
1282
1283#ifdef CONFIG_NET_RADIO
1284#ifdef CONFIG_PROC_FS
1285
1286/*
1287 * Print one entry of /proc/net/wireless
1288 * This is a clone of /proc/net/dev (just above)
1289 */
1290static int sprintf_wireless_stats(char *buffer, struct device *dev)
1291{
1292 /* Get stats from the driver */
1293 struct iw_statistics *stats = (dev->get_wireless_stats ?
1294 dev->get_wireless_stats(dev) :
1295 (struct iw_statistics *) NULL((void*)0));
1296 int size;
1297
1298 if(stats != (struct iw_statistics *) NULL((void*)0))
1299 {
1300 size = sprintf(buffer,
1301 "%6s: %04x %3d%c %3d%c %3d%c %6d %6d %6d\n",
1302 dev->name,
1303 stats->status,
1304 stats->qual.qual,
1305 stats->qual.updated & 1 ? '.' : ' ',
1306 stats->qual.level,
1307 stats->qual.updated & 2 ? '.' : ' ',
1308 stats->qual.noise,
1309 stats->qual.updated & 4 ? '.' : ' ',
1310 stats->discard.nwid,
1311 stats->discard.code,
1312 stats->discard.misc);
1313 stats->qual.updated = 0;
1314 }
1315 else
1316 size = 0;
1317
1318 return size;
1319}
1320
1321/*
1322 * Print info for /proc/net/wireless (print all entries)
1323 * This is a clone of /proc/net/dev (just above)
1324 */
1325int dev_get_wireless_info(char * buffer, char **start, off_t offset,
1326 int length, int dummy)
1327{
1328 int len = 0;
1329 off_t begin = 0;
1330 off_t pos = 0;
1331 int size;
1332
1333 struct device * dev;
1334
1335 size = sprintf(buffer,
1336 "Inter-| sta-| Quality | Discarded packets\n"
1337 " face | tus | link level noise | nwid crypt misc\n"
1338 );
1339
1340 pos+=size;
1341 len+=size;
1342
1343 for(dev = dev_base; dev != NULL((void*)0); dev = dev->next)
1344 {
1345 size = sprintf_wireless_stats(buffer+len, dev);
1346 len+=size;
1347 pos=begin+len;
1348
1349 if(pos < offset)
1350 {
1351 len=0;
1352 begin=pos;
1353 }
1354 if(pos > offset + length)
1355 break;
1356 }
1357
1358 *start = buffer + (offset - begin); /* Start of wanted data */
1359 len -= (offset - begin); /* Start slop */
1360 if(len > length)
1361 len = length; /* Ending slop */
1362
1363 return len;
1364}
1365#endif /* CONFIG_PROC_FS */
1366#endif /* CONFIG_NET_RADIO */
1367
1368void dev_set_promiscuity(struct device *dev, int inc)
1369{
1370 unsigned short old_flags = dev->flags;
1371
1372 dev->flags |= IFF_PROMISCIFF_PROMISC;
1373 if ((dev->promiscuity += inc) == 0)
1374 dev->flags &= ~IFF_PROMISCIFF_PROMISC;
1375 if (dev->flags^old_flags) {
1376#ifdef CONFIG_NET_FASTROUTE
1377 if (dev->flags&IFF_PROMISCIFF_PROMISC) {
1378 netdev_fastroute_obstacles++;
1379 dev_clear_fastroute(dev);
1380 } else
1381 netdev_fastroute_obstacles--;
1382#endif
1383 dev_mc_upload(dev);
1384 printkprintf(KERN_INFO "device %s %s promiscuous mode\n",
1385 dev->name, (dev->flags&IFF_PROMISCIFF_PROMISC) ? "entered" : "left");
1386 }
1387}
1388
1389void dev_set_allmulti(struct device *dev, int inc)
1390{
1391 unsigned short old_flags = dev->flags;
1392
1393 dev->flags |= IFF_ALLMULTIIFF_ALLMULTI;
1394 if ((dev->allmulti += inc) == 0)
1395 dev->flags &= ~IFF_ALLMULTIIFF_ALLMULTI;
1396 if (dev->flags^old_flags)
1397 dev_mc_upload(dev);
1398}
1399
1400int dev_change_flags(struct device *dev, unsigned flags)
1401{
1402 int ret;
1403 int old_flags = dev->flags;
1404
1405 /*
1406 * Set the flags on our device.
1407 */
1408
1409 dev->flags = (flags & (IFF_DEBUGIFF_DEBUG|IFF_NOTRAILERSIFF_NOTRAILERS|IFF_RUNNINGIFF_RUNNING|IFF_NOARPIFF_NOARP|
1410 IFF_SLAVEIFF_SLAVE|IFF_MASTERIFF_MASTER|IFF_DYNAMICIFF_DYNAMIC|
1411 IFF_MULTICASTIFF_MULTICAST|IFF_PORTSELIFF_PORTSEL|IFF_AUTOMEDIAIFF_AUTOMEDIA)) |
1412 (dev->flags & (IFF_UPIFF_UP|IFF_VOLATILE(IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ALLMULTI)|IFF_PROMISCIFF_PROMISC|IFF_ALLMULTIIFF_ALLMULTI));
1413
1414 /*
1415 * Load in the correct multicast list now the flags have changed.
1416 */
1417
1418 dev_mc_upload(dev);
1419
1420 /*
1421 * Have we downed the interface. We handle IFF_UP ourselves
1422 * according to user attempts to set it, rather than blindly
1423 * setting it.
1424 */
1425
1426 ret = 0;
1427 if ((old_flags^flags)&IFF_UPIFF_UP) /* Bit is different ? */
1428 {
1429 ret = ((old_flags & IFF_UPIFF_UP) ? dev_close : dev_open)(dev);
1430
1431 if (ret == 0)
1432 dev_mc_upload(dev);
1433 }
1434
1435 if (dev->flags&IFF_UPIFF_UP &&
1436 ((old_flags^dev->flags)&~(IFF_UPIFF_UP|IFF_RUNNINGIFF_RUNNING|IFF_PROMISCIFF_PROMISC|IFF_ALLMULTIIFF_ALLMULTI|IFF_VOLATILE(IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ALLMULTI))))
1437 notifier_call_chain(&netdev_chain, NETDEV_CHANGE0x0004, dev);
1438
1439 if ((flags^dev->gflags)&IFF_PROMISCIFF_PROMISC) {
1440 int inc = (flags&IFF_PROMISCIFF_PROMISC) ? +1 : -1;
1441 dev->gflags ^= IFF_PROMISCIFF_PROMISC;
1442 dev_set_promiscuity(dev, inc);
1443 }
1444
1445 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
1446 is important. Some (broken) drivers set IFF_PROMISC, when
1447 IFF_ALLMULTI is requested not asking us and not reporting.
1448 */
1449 if ((flags^dev->gflags)&IFF_ALLMULTIIFF_ALLMULTI) {
1450 int inc = (flags&IFF_ALLMULTIIFF_ALLMULTI) ? +1 : -1;
1451 dev->gflags ^= IFF_ALLMULTIIFF_ALLMULTI;
1452 dev_set_allmulti(dev, inc);
1453 }
1454
1455 if (!ret && dev->change_flags)
1456 ret = dev->change_flags(dev, dev->flags);
1457
1458 return ret;
1459}
1460
1461#ifdef _HURD_1
1462
1463#define dev_ioctl0 0
1464
1465#else
1466
1467/*
1468 * Perform the SIOCxIFxxx calls.
1469 */
1470
1471static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
1472{
1473 struct device *dev;
1474 int err;
1475
1476 if ((dev = dev_get(ifr->ifr_nameifr_ifrn.ifrn_name)) == NULL((void*)0))
1477 return -ENODEV((0x10 << 26) | ((19) & 0x3fff));
1478
1479 switch(cmd)
1480 {
1481 case SIOCGIFFLAGS(((17)) | ((((('i')) - 'f') | ((((0) | (((1) | ((16) | ((0) |
(((sizeof (short) == 8 ? IOC_64 : (sizeof (short) >> 1
))) | ((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >>
1))) << 2) << 2) << 5) << 5) <<
3))) | (IOC_INOUT) << 19) << 4) << 7))
: /* Get interface flags */
1482 ifr->ifr_flagsifr_ifru.ifru_flags = (dev->flags&~(IFF_PROMISCIFF_PROMISC|IFF_ALLMULTIIFF_ALLMULTI))
1483 |(dev->gflags&(IFF_PROMISCIFF_PROMISC|IFF_ALLMULTIIFF_ALLMULTI));
1484 return 0;
1485
1486 case SIOCSIFFLAGS(((16)) | ((((('i')) - 'f') | ((((0) | (((1) | ((16) | ((0) |
(((sizeof (short) == 8 ? IOC_64 : (sizeof (short) >> 1
))) | ((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >>
1))) << 2) << 2) << 5) << 5) <<
3))) | (IOC_IN) << 19) << 4) << 7))
: /* Set interface flags */
1487 return dev_change_flags(dev, ifr->ifr_flagsifr_ifru.ifru_flags);
1488
1489 case SIOCGIFMETRIC(((23)) | ((((('i')) - 'f') | ((((0) | (((1) | ((16) | ((0) |
(((sizeof (int) == 8 ? IOC_64 : (sizeof (int) >> 1))) |
((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >> 1)))
<< 2) << 2) << 5) << 5) << 3))
) | (IOC_INOUT) << 19) << 4) << 7))
: /* Get the metric on the interface (currently unused) */
1490 ifr->ifr_metricifr_ifru.ifru_ivalue = 0;
1491 return 0;
1492
1493 case SIOCSIFMETRIC(((24)) | ((((('i')) - 'f') | ((((0) | (((1) | ((16) | ((0) |
(((sizeof (int) == 8 ? IOC_64 : (sizeof (int) >> 1))) |
((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >> 1)))
<< 2) << 2) << 5) << 5) << 3))
) | (IOC_IN) << 19) << 4) << 7))
: /* Set the metric on the interface (currently unused) */
1494 return -EOPNOTSUPP((0x10 << 26) | ((45) & 0x3fff));
1495
1496 case SIOCGIFMTU(((51)) | ((((('i')) - 'f') | ((((0) | (((1) | ((16) | ((0) |
(((sizeof (int) == 8 ? IOC_64 : (sizeof (int) >> 1))) |
((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >> 1)))
<< 2) << 2) << 5) << 5) << 3))
) | (IOC_INOUT) << 19) << 4) << 7))
: /* Get the MTU of a device */
1497 ifr->ifr_mtuifr_ifru.ifru_mtu = dev->mtu;
1498 return 0;
1499
1500 case SIOCSIFMTU(((52)) | ((((('i')) - 'f') | ((((0) | (((1) | ((16) | ((0) |
(((sizeof (int) == 8 ? IOC_64 : (sizeof (int) >> 1))) |
((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >> 1)))
<< 2) << 2) << 5) << 5) << 3))
) | (IOC_IN) << 19) << 4) << 7))
: /* Set the MTU of a device */
1501 if (ifr->ifr_mtuifr_ifru.ifru_mtu == dev->mtu)
1502 return 0;
1503
1504 /*
1505 * MTU must be positive.
1506 */
1507
1508 if (ifr->ifr_mtuifr_ifru.ifru_mtu<=0)
1509 return -EINVAL((0x10 << 26) | ((22) & 0x3fff));
1510
1511 if (dev->change_mtu)
1512 err = dev->change_mtu(dev, ifr->ifr_mtuifr_ifru.ifru_mtu);
1513 else {
1514 dev->mtu = ifr->ifr_mtuifr_ifru.ifru_mtu;
1515 err = 0;
1516 }
1517 if (!err && dev->flags&IFF_UPIFF_UP)
1518 notifier_call_chain(&netdev_chain, NETDEV_CHANGEMTU0x0007, dev);
1519 return err;
1520
1521 case SIOCGIFHWADDR(((39)) | ((((('i')) - 'f') | ((((0) | (((16) | ((16) | ((0) |
(((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >> 1))
) | ((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >> 1
))) << 2) << 2) << 5) << 5) << 3
))) | (IOC_INOUT) << 19) << 4) << 7))
:
1522 memcpy(ifr->ifr_hwaddrifr_ifru.ifru_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN7);
1523 ifr->ifr_hwaddrifr_ifru.ifru_hwaddr.sa_family=dev->type;
1524 return 0;
1525
1526 case SIOCSIFHWADDR:
1527 if(dev->set_mac_address==NULL((void*)0))
1528 return -EOPNOTSUPP((0x10 << 26) | ((45) & 0x3fff));
1529 if(ifr->ifr_hwaddrifr_ifru.ifru_hwaddr.sa_family!=dev->type)
1530 return -EINVAL((0x10 << 26) | ((22) & 0x3fff));
1531 err=dev->set_mac_address(dev,&ifr->ifr_hwaddrifr_ifru.ifru_hwaddr);
1532 if (!err)
1533 notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR0x0008, dev);
1534 return err;
1535
1536 case SIOCSIFHWBROADCAST:
1537 if(ifr->ifr_hwaddrifr_ifru.ifru_hwaddr.sa_family!=dev->type)
1538 return -EINVAL((0x10 << 26) | ((22) & 0x3fff));
1539 memcpy(dev->broadcast, ifr->ifr_hwaddrifr_ifru.ifru_hwaddr.sa_data, MAX_ADDR_LEN7);
1540 notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR0x0008, dev);
1541 return 0;
1542
1543 case SIOCGIFMAP:
1544 ifr->ifr_mapifr_ifru.ifru_map.mem_start=dev->mem_start;
1545 ifr->ifr_mapifr_ifru.ifru_map.mem_end=dev->mem_end;
1546 ifr->ifr_mapifr_ifru.ifru_map.base_addr=dev->base_addr;
1547 ifr->ifr_mapifr_ifru.ifru_map.irq=dev->irq;
1548 ifr->ifr_mapifr_ifru.ifru_map.dma=dev->dma;
1549 ifr->ifr_mapifr_ifru.ifru_map.port=dev->if_port;
1550 return 0;
1551
1552 case SIOCSIFMAP:
1553 if (dev->set_config)
1554 return dev->set_config(dev,&ifr->ifr_mapifr_ifru.ifru_map);
1555 return -EOPNOTSUPP((0x10 << 26) | ((45) & 0x3fff));
1556
1557 case SIOCADDMULTI:
1558 if(dev->set_multicast_list==NULL((void*)0) ||
1559 ifr->ifr_hwaddrifr_ifru.ifru_hwaddr.sa_family!=AF_UNSPEC0)
1560 return -EINVAL((0x10 << 26) | ((22) & 0x3fff));
1561 dev_mc_add(dev,ifr->ifr_hwaddrifr_ifru.ifru_hwaddr.sa_data, dev->addr_len, 1);
1562 return 0;
1563
1564 case SIOCDELMULTI:
1565 if(dev->set_multicast_list==NULL((void*)0) ||
1566 ifr->ifr_hwaddrifr_ifru.ifru_hwaddr.sa_family!=AF_UNSPEC0)
1567 return -EINVAL((0x10 << 26) | ((22) & 0x3fff));
1568 dev_mc_delete(dev,ifr->ifr_hwaddrifr_ifru.ifru_hwaddr.sa_data,dev->addr_len, 1);
1569 return 0;
1570
1571 case SIOCGIFINDEX(((90)) | ((((('i')) - 'f') | ((((0) | (((1) | ((16) | ((0) |
(((sizeof (int) == 8 ? IOC_64 : (sizeof (int) >> 1))) |
((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >> 1)))
<< 2) << 2) << 5) << 5) << 3))
) | (IOC_INOUT) << 19) << 4) << 7))
:
1572 ifr->ifr_ifindexifr_ifru.ifru_ivalue = dev->ifindex;
1573 return 0;
1574
1575 case SIOCGIFTXQLEN:
1576 ifr->ifr_qlenifr_ifru.ifru_ivalue = dev->tx_queue_len;
1577 return 0;
1578
1579 case SIOCSIFTXQLEN:
1580 if(ifr->ifr_qlenifr_ifru.ifru_ivalue<0)
1581 return -EINVAL((0x10 << 26) | ((22) & 0x3fff));
1582 dev->tx_queue_len = ifr->ifr_qlenifr_ifru.ifru_ivalue;
1583 return 0;
1584
1585 case SIOCSIFNAME:
1586 if (dev->flags&IFF_UPIFF_UP)
1587 return -EBUSY((0x10 << 26) | ((16) & 0x3fff));
1588 if (dev_get(ifr->ifr_newnameifr_ifru.ifru_newname))
1589 return -EEXIST((0x10 << 26) | ((17) & 0x3fff));
1590 memcpy(dev->name, ifr->ifr_newnameifr_ifru.ifru_newname, IFNAMSIZ16);
1591 dev->name[IFNAMSIZ16-1] = 0;
1592 notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME0x000A, dev);
1593 return 0;
1594
1595 /*
1596 * Unknown or private ioctl
1597 */
1598
1599 default:
1600 if(cmd >= SIOCDEVPRIVATE &&
1601 cmd <= SIOCDEVPRIVATE + 15) {
1602 if (dev->do_ioctl)
1603 return dev->do_ioctl(dev, ifr, cmd);
1604 return -EOPNOTSUPP((0x10 << 26) | ((45) & 0x3fff));
1605 }
1606
1607#ifdef CONFIG_NET_RADIO
1608 if(cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
1609 if (dev->do_ioctl)
1610 return dev->do_ioctl(dev, ifr, cmd);
1611 return -EOPNOTSUPP((0x10 << 26) | ((45) & 0x3fff));
1612 }
1613#endif /* CONFIG_NET_RADIO */
1614
1615 }
1616 return -EINVAL((0x10 << 26) | ((22) & 0x3fff));
1617}
1618
1619
1620/*
1621 * This function handles all "interface"-type I/O control requests. The actual
1622 * 'doing' part of this is dev_ifsioc above.
1623 */
1624
1625int dev_ioctl0(unsigned int cmd, void *arg)
1626{
1627 struct ifreq ifr;
1628 int ret;
1629 char *colon;
1630
1631 /* One special case: SIOCGIFCONF takes ifconf argument
1632 and requires shared lock, because it sleeps writing
1633 to user space.
1634 */
1635
1636 if (cmd == SIOCGIFCONF(((36)) | ((((('i')) - 'f') | ((((0) | (((0) | ((1) | ((0) | (
(0) | ((sizeof (struct ifconf) == 8 ? IOC_64 : (sizeof (struct
ifconf) >> 1))) << 2) << 2) << 5) <<
5) << 3))) | (IOC_INOUT) << 19) << 4) <<
7))
) {
1637 rtnl_shlock();
1638 ret = dev_ifconf((char *) arg);
1639 rtnl_shunlock();
1640 return ret;
1641 }
1642 if (cmd == SIOCGIFNAME(((91)) | ((((('i')) - 'f') | ((((0) | (((1) | ((16) | ((0) |
(((sizeof (int) == 8 ? IOC_64 : (sizeof (int) >> 1))) |
((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >> 1)))
<< 2) << 2) << 5) << 5) << 3))
) | (IOC_INOUT) << 19) << 4) << 7))
) {
1643 return dev_ifname((struct ifreq *)arg);
1644 }
1645
1646 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))(memcpy ((&ifr), (arg), (sizeof(struct ifreq))), 0))
1647 return -EFAULT((0x10 << 26) | ((14) & 0x3fff));
1648
1649 ifr.ifr_nameifr_ifrn.ifrn_name[IFNAMSIZ16-1] = 0;
1650
1651 colon = strchr(ifr.ifr_nameifr_ifrn.ifrn_name, ':');
1652 if (colon)
1653 *colon = 0;
1654
1655 /*
1656 * See which interface the caller is talking about.
1657 */
1658
1659 switch(cmd)
1660 {
1661 /*
1662 * These ioctl calls:
1663 * - can be done by all.
1664 * - atomic and do not require locking.
1665 * - return a value
1666 */
1667
1668 case SIOCGIFFLAGS(((17)) | ((((('i')) - 'f') | ((((0) | (((1) | ((16) | ((0) |
(((sizeof (short) == 8 ? IOC_64 : (sizeof (short) >> 1
))) | ((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >>
1))) << 2) << 2) << 5) << 5) <<
3))) | (IOC_INOUT) << 19) << 4) << 7))
:
1669 case SIOCGIFMETRIC(((23)) | ((((('i')) - 'f') | ((((0) | (((1) | ((16) | ((0) |
(((sizeof (int) == 8 ? IOC_64 : (sizeof (int) >> 1))) |
((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >> 1)))
<< 2) << 2) << 5) << 5) << 3))
) | (IOC_INOUT) << 19) << 4) << 7))
:
1670 case SIOCGIFMTU(((51)) | ((((('i')) - 'f') | ((((0) | (((1) | ((16) | ((0) |
(((sizeof (int) == 8 ? IOC_64 : (sizeof (int) >> 1))) |
((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >> 1)))
<< 2) << 2) << 5) << 5) << 3))
) | (IOC_INOUT) << 19) << 4) << 7))
:
1671 case SIOCGIFHWADDR(((39)) | ((((('i')) - 'f') | ((((0) | (((16) | ((16) | ((0) |
(((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >> 1))
) | ((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >> 1
))) << 2) << 2) << 5) << 5) << 3
))) | (IOC_INOUT) << 19) << 4) << 7))
:
1672 case SIOCGIFSLAVE:
1673 case SIOCGIFMAP:
1674 case SIOCGIFINDEX(((90)) | ((((('i')) - 'f') | ((((0) | (((1) | ((16) | ((0) |
(((sizeof (int) == 8 ? IOC_64 : (sizeof (int) >> 1))) |
((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >> 1)))
<< 2) << 2) << 5) << 5) << 3))
) | (IOC_INOUT) << 19) << 4) << 7))
:
1675 case SIOCGIFTXQLEN:
1676 dev_load(ifr.ifr_nameifr_ifrn.ifrn_name);
1677 ret = dev_ifsioc(&ifr, cmd);
1678 if (!ret) {
1679 if (colon)
1680 *colon = ':';
1681 if (copy_to_user(arg, &ifr, sizeof(struct ifreq))(memcpy ((arg), (&ifr), (sizeof(struct ifreq))), 0))
1682 return -EFAULT((0x10 << 26) | ((14) & 0x3fff));
1683 }
1684 return ret;
1685
1686 /*
1687 * These ioctl calls:
1688 * - require superuser power.
1689 * - require strict serialization.
1690 * - do not return a value
1691 */
1692
1693 case SIOCSIFFLAGS(((16)) | ((((('i')) - 'f') | ((((0) | (((1) | ((16) | ((0) |
(((sizeof (short) == 8 ? IOC_64 : (sizeof (short) >> 1
))) | ((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >>
1))) << 2) << 2) << 5) << 5) <<
3))) | (IOC_IN) << 19) << 4) << 7))
:
1694 case SIOCSIFMETRIC(((24)) | ((((('i')) - 'f') | ((((0) | (((1) | ((16) | ((0) |
(((sizeof (int) == 8 ? IOC_64 : (sizeof (int) >> 1))) |
((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >> 1)))
<< 2) << 2) << 5) << 5) << 3))
) | (IOC_IN) << 19) << 4) << 7))
:
1695 case SIOCSIFMTU(((52)) | ((((('i')) - 'f') | ((((0) | (((1) | ((16) | ((0) |
(((sizeof (int) == 8 ? IOC_64 : (sizeof (int) >> 1))) |
((sizeof (char) == 8 ? IOC_64 : (sizeof (char) >> 1)))
<< 2) << 2) << 5) << 5) << 3))
) | (IOC_IN) << 19) << 4) << 7))
:
1696 case SIOCSIFMAP:
1697 case SIOCSIFHWADDR:
1698 case SIOCSIFSLAVE:
1699 case SIOCADDMULTI:
1700 case SIOCDELMULTI:
1701 case SIOCSIFHWBROADCAST:
1702 case SIOCSIFTXQLEN:
1703 case SIOCSIFNAME:
1704 if (!capable(CAP_NET_ADMIN12))
1705 return -EPERM((0x10 << 26) | ((1) & 0x3fff));
1706 dev_load(ifr.ifr_nameifr_ifrn.ifrn_name);
1707 rtnl_lock();
1708 ret = dev_ifsioc(&ifr, cmd);
1709 rtnl_unlock();
1710 return ret;
1711
1712 case SIOCGIFMEM:
1713 /* Get the per device memory space. We can add this but currently
1714 do not support it */
1715 case SIOCSIFMEM:
1716 /* Set the per device memory buffer space. Not applicable in our case */
1717 case SIOCSIFLINK:
1718 return -EINVAL((0x10 << 26) | ((22) & 0x3fff));
1719
1720 /*
1721 * Unknown or private ioctl.
1722 */
1723
1724 default:
1725 if (cmd >= SIOCDEVPRIVATE &&
1726 cmd <= SIOCDEVPRIVATE + 15) {
1727 dev_load(ifr.ifr_nameifr_ifrn.ifrn_name);
1728 rtnl_lock();
1729 ret = dev_ifsioc(&ifr, cmd);
1730 rtnl_unlock();
1731 if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq))(memcpy ((arg), (&ifr), (sizeof(struct ifreq))), 0))
1732 return -EFAULT((0x10 << 26) | ((14) & 0x3fff));
1733 return ret;
1734 }
1735#ifdef CONFIG_NET_RADIO
1736 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
1737 dev_load(ifr.ifr_nameifr_ifrn.ifrn_name);
1738 if (IW_IS_SET(cmd)) {
1739 if (!suser())
1740 return -EPERM((0x10 << 26) | ((1) & 0x3fff));
1741 rtnl_lock();
1742 }
1743 ret = dev_ifsioc(&ifr, cmd);
1744 if (IW_IS_SET(cmd))
1745 rtnl_unlock();
1746 if (!ret && IW_IS_GET(cmd) &&
1747 copy_to_user(arg, &ifr, sizeof(struct ifreq))(memcpy ((arg), (&ifr), (sizeof(struct ifreq))), 0))
1748 return -EFAULT((0x10 << 26) | ((14) & 0x3fff));
1749 return ret;
1750 }
1751#endif /* CONFIG_NET_RADIO */
1752 return -EINVAL((0x10 << 26) | ((22) & 0x3fff));
1753 }
1754}
1755
1756#endif
1757
1758int dev_new_index(void)
1759{
1760 static int ifindex;
1761 for (;;) {
1762 if (++ifindex <= 0)
1763 ifindex=1;
1764 if (dev_get_by_index(ifindex) == NULL((void*)0))
1765 return ifindex;
1766 }
1767}
1768
/* Non-zero until net_dev_init() completes; while set,
   register_netdevice() defers driver init (devices are just chained)
   and unregister_netdevice() skips the teardown sequence. */
1769static int dev_boot_phase = 1;
1770
1771
1772int register_netdevice(struct device *dev)
1773{
1774 struct device *d, **dp;
1775
1776 if (dev_boot_phase) {
1777 /* This is NOT bug, but I am not sure, that all the
1778 devices, initialized before netdev module is started
1779 are sane.
1780
1781 Now they are chained to device boot list
1782 and probed later. If a module is initialized
1783 before netdev, but assumes that dev->init
1784 is really called by register_netdev(), it will fail.
1785
1786 So that this message should be printed for a while.
1787 */
1788 printkprintf(KERN_INFO "early initialization of device %s is deferred\n", dev->name);
1789
1790 /* Check for existence, and append to tail of chain */
1791 for (dp=&dev_base; (d=*dp) != NULL((void*)0); dp=&d->next) {
1792 if (d == dev || strcmp(d->name, dev->name) == 0)
1793 return -EEXIST((0x10 << 26) | ((17) & 0x3fff));
1794 }
1795 dev->next = NULL((void*)0);
1796 *dp = dev;
1797 return 0;
1798 }
1799
1800 dev->iflink = -1;
1801
1802 /* Init, if this function is available */
1803 if (dev->init && dev->init(dev) != 0)
1804 return -EIO((0x10 << 26) | ((5) & 0x3fff));
1805
1806 /* Check for existence, and append to tail of chain */
1807 for (dp=&dev_base; (d=*dp) != NULL((void*)0); dp=&d->next) {
1808 if (d == dev || strcmp(d->name, dev->name) == 0)
1809 return -EEXIST((0x10 << 26) | ((17) & 0x3fff));
1810 }
1811 dev->next = NULL((void*)0);
1812 dev_init_scheduler(dev);
1813 dev->ifindex = dev_new_index();
1814 if (dev->iflink == -1)
1815 dev->iflink = dev->ifindex;
1816 *dp = dev;
1817
1818 /* Notify protocols, that a new device appeared. */
1819 notifier_call_chain(&netdev_chain, NETDEV_REGISTER0x0005, dev);
1820
1821 return 0;
1822}
1823
1824int unregister_netdevice(struct device *dev)
1825{
1826 struct device *d, **dp;
1827
1828 if (dev_boot_phase == 0) {
1
Assuming 'dev_boot_phase' is equal to 0
2
Taking true branch
1829 /* If device is running, close it.
1830 It is very bad idea, really we should
1831 complain loudly here, but random hackery
1832 in linux/drivers/net likes it.
1833 */
1834 if (dev->flags & IFF_UPIFF_UP)
3
Taking true branch
1835 dev_close(dev);
4
Calling 'dev_close'
1836
1837#ifdef CONFIG_NET_FASTROUTE
1838 dev_clear_fastroute(dev);
1839#endif
1840
1841 /* Shutdown queueing discipline. */
1842 dev_shutdown(dev);
1843
1844 /* Notify protocols, that we are about to destroy
1845 this device. They should clean all the things.
1846 */
1847 notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER0x0006, dev);
1848
1849 /*
1850 * Flush the multicast chain
1851 */
1852 dev_mc_discard(dev);
1853
1854 /* To avoid pointers looking to nowhere,
1855 we wait for end of critical section */
1856 dev_lock_wait();
1857 }
1858
1859 /* And unlink it from device chain. */
1860 for (dp = &dev_base; (d=*dp) != NULL((void*)0); dp=&d->next) {
1861 if (d == dev) {
1862 *dp = d->next;
1863 synchronize_bh()((void) 0);
1864 d->next = NULL((void*)0);
1865
1866 if (dev->destructor)
1867 dev->destructor(dev);
1868 return 0;
1869 }
1870 }
1871 return -ENODEV((0x10 << 26) | ((19) & 0x3fff));
1872}
1873
1874
1875/*
1876 * Initialize the DEV module. At boot time this walks the device list and
1877 * unhooks any devices that fail to initialise (normally hardware not
1878 * present) and leaves us with a valid list of present and active devices.
1879 *
1880 */
1881extern int lance_init(void);
1882extern int bpq_init(void);
1883extern int scc_init(void);
1884extern void sdla_setup(void);
1885extern void sdla_c_setup(void);
1886extern void dlci_setup(void);
1887extern int dmascc_init(void);
1888extern int sm_init(void);
1889
1890extern int baycom_ser_fdx_init(void);
1891extern int baycom_ser_hdx_init(void);
1892extern int baycom_par_init(void);
1893
1894extern int lapbeth_init(void);
1895extern int comx_init(void);
1896extern void arcnet_init(void);
1897extern void ip_auto_config(void);
1898#ifdef CONFIG_8xx
1899extern int cpm_enet_init(void);
1900#endif /* CONFIG_8xx */
1901
1902#ifdef CONFIG_PROC_FS
/* /proc/net/dev entry, read via dev_get_info() above.
   Positional initializer — presumably inode number, name length,
   name, mode, nlink, uid, gid, size, inode ops, get_info handler
   (2.2-era proc_dir_entry layout); confirm against proc_fs.h. */
1903static struct proc_dir_entry proc_net_dev = {
1904 PROC_NET_DEV, 3, "dev",
1905 S_IFREG0100000 | S_IRUGO, 1, 0, 0,
1906 0, &proc_net_inode_operations,
1907 dev_get_info
1908};
1909#endif
1910
1911#ifdef CONFIG_NET_RADIO
1912#ifdef CONFIG_PROC_FS
/* /proc/net/wireless entry, read via dev_get_wireless_info();
   same presumed field layout as proc_net_dev above. */
1913static struct proc_dir_entry proc_net_wireless = {
1914 PROC_NET_WIRELESS, 8, "wireless",
1915 S_IFREG0100000 | S_IRUGO, 1, 0, 0,
1916 0, &proc_net_inode_operations,
1917 dev_get_wireless_info
1918};
1919#endif /* CONFIG_PROC_FS */
1920#endif /* CONFIG_NET_RADIO */
1921
1922__initfunc(int net_dev_init(void))int net_dev_init(void)
1923{
1924 struct device *dev, **dp;
1925
1926#ifdef CONFIG_NET_SCHED
1927 pktsched_init();
1928#endif
1929
1930 /*
1931 * Initialise the packet receive queue.
1932 */
1933
1934 skb_queue_head_init(&backlog);
1935
1936 /*
1937 * The bridge has to be up before the devices
1938 */
1939
1940#ifdef CONFIG_BRIDGE
1941 br_init();
1942#endif
1943
1944 /*
1945 * This is Very Ugly(tm).
1946 *
1947 * Some devices want to be initialized early..
1948 */
1949
1950#if defined(CONFIG_SCC)
1951 scc_init();
1952#endif
1953#if defined(CONFIG_DMASCC)
1954 dmascc_init();
1955#endif
1956#if defined(CONFIG_BPQETHER)
1957 bpq_init();
1958#endif
1959#if defined(CONFIG_DLCI)
1960 dlci_setup();
1961#endif
1962#if defined(CONFIG_SDLA)
1963 sdla_c_setup();
1964#endif
1965#if defined(CONFIG_BAYCOM_PAR)
1966 baycom_par_init();
1967#endif
1968#if defined(CONFIG_BAYCOM_SER_FDX)
1969 baycom_ser_fdx_init();
1970#endif
1971#if defined(CONFIG_BAYCOM_SER_HDX)
1972 baycom_ser_hdx_init();
1973#endif
1974#if defined(CONFIG_SOUNDMODEM)
1975 sm_init();
1976#endif
1977#if defined(CONFIG_LAPBETHER)
1978 lapbeth_init();
1979#endif
1980#if defined(CONFIG_PLIP)
1981 plip_init();
1982#endif
1983#if defined(CONFIG_ARCNET)
1984 arcnet_init();
1985#endif
1986#if defined(CONFIG_8xx)
1987 cpm_enet_init();
1988#endif
1989#if defined(CONFIG_COMX)
1990 comx_init();
1991#endif
1992 /*
1993 * SLHC if present needs attaching so other people see it
1994 * even if not opened.
1995 */
1996
1997#ifdef CONFIG_INET1
1998#if (defined(CONFIG_SLIP) && defined(CONFIG_SLIP_COMPRESSED)) \
1999 || defined(CONFIG_PPP) \
2000 || (defined(CONFIG_ISDN) && defined(CONFIG_ISDN_PPP))
2001 slhc_install();
2002#endif
2003#endif
2004
2005#ifdef CONFIG_NET_PROFILE
2006 net_profile_init();
2007 NET_PROFILE_REGISTER(dev_queue_xmit)do { } while(0);
2008 NET_PROFILE_REGISTER(net_bh)do { } while(0);
2009#if 0
2010 NET_PROFILE_REGISTER(net_bh_skb)do { } while(0);
2011#endif
2012#endif
2013 /*
2014 * Add the devices.
2015 * If the call to dev->init fails, the dev is removed
2016 * from the chain disconnecting the device until the
2017 * next reboot.
2018 */
2019
2020 dp = &dev_base;
2021 while ((dev = *dp) != NULL((void*)0))
2022 {
2023 dev->iflink = -1;
2024 if (dev->init && dev->init(dev))
2025 {
2026 /*
2027 * It failed to come up. Unhook it.
2028 */
2029 *dp = dev->next;
2030 synchronize_bh()((void) 0);
2031 }
2032 else
2033 {
2034 dp = &dev->next;
2035 dev->ifindex = dev_new_index();
2036 if (dev->iflink == -1)
2037 dev->iflink = dev->ifindex;
2038 dev_init_scheduler(dev);
2039 }
2040 }
2041
2042#ifdef CONFIG_PROC_FS
2043 proc_net_register(&proc_net_dev);
2044 {
2045 struct proc_dir_entry *ent = create_proc_entry("net/dev_stat", 0, 0);
2046 ent->read_proc = dev_proc_stats;
2047 }
2048#endif
2049
2050#ifdef CONFIG_NET_RADIO
2051#ifdef CONFIG_PROC_FS
2052 proc_net_register(&proc_net_wireless);
2053#endif /* CONFIG_PROC_FS */
2054#endif /* CONFIG_NET_RADIO */
2055
2056 init_bh(NET_BH0xb00bee51, net_bh);
2057
2058 dev_boot_phase = 0;
2059
2060 dev_mcast_init();
2061
2062#ifdef CONFIG_BRIDGE
2063 /*
2064 * Register any statically linked ethernet devices with the bridge
2065 */
2066 br_spacedevice_register();
2067#endif
2068
2069#ifdef CONFIG_IP_PNP
2070 ip_auto_config();
2071#endif
2072
2073 return 0;
2074}