Re: [patch] hashtable sizes for icache and dcache

David S. Miller (davem@redhat.com)
Wed, 22 Sep 1999 09:23:37 -0700


Date: Wed, 22 Sep 1999 17:08:46 +0200 (CEST)
From: Andrea Arcangeli <andrea@suse.de>

On Wed, 22 Sep 1999, Alan Cox wrote:

>Please look at DaveM's hash patches for 2.2.x

Where?

Right here. I didn't do the inode hashes in this patch, just haven't
gotten around to it yet. But it does do the dcache, page cache, buffer
cache and TCP hashes, with greatly improved hash functions for several
of those.

And it's dynamic, so it doesn't crap out on low-memory systems like your
patch does.
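
Every init path below follows the same allocate-and-back-off pattern:
pick a starting allocation order from the machine's memory size, try
__get_free_pages() at that order, and drop the order until the
allocation succeeds. A minimal standalone sketch of that pattern
(alloc_hash_table() is an illustrative name; the patch open-codes this
in each init function, and each table picks its own starting-order
heuristic):

/* Sketch only: the real patch repeats this inline for the buffer,
 * dentry, page-cache and TCP hash tables.
 */
static void * alloc_hash_table(unsigned long goal_bytes,
                               unsigned int entry_size,
                               unsigned int *nr_entries, int *order_ret)
{
        void *table;
        int order;

        /* Start with an order big enough to cover goal_bytes... */
        for (order = 0; (PAGE_SIZE << order) < goal_bytes; order++)
                ;

        /* ...then back off until __get_free_pages() succeeds, so a
         * low-memory box just gets a smaller table instead of a
         * failed boot.
         */
        do {
                *nr_entries = (PAGE_SIZE << order) / entry_size;
                table = (void *) __get_free_pages(GFP_ATOMIC, order);
        } while (table == NULL && --order >= 0);

        *order_ret = order;
        return table;   /* NULL only if even a single page failed */
}
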

--- ./fs/buffer.c.~1~ Fri Aug 6 16:29:08 1999
+++ ./fs/buffer.c Thu Aug 12 05:05:19 1999
@@ -58,11 +58,12 @@
/*
* Hash table mask..
*/
-static unsigned long bh_hash_mask = 0;
+static unsigned int bh_hash_mask = 0;
+static unsigned int bh_hash_shift = 0;
+static struct buffer_head ** hash_table = NULL;

static int grow_buffers(int size);

-static struct buffer_head ** hash_table;
static struct buffer_head * lru_list[NR_LIST] = {NULL, };
static struct buffer_head * free_list[NR_SIZES] = {NULL, };

@@ -420,8 +421,13 @@
}
}

-#define _hashfn(dev,block) (((unsigned)(HASHDEV(dev)^block)) & bh_hash_mask)
-#define hash(dev,block) hash_table[_hashfn(dev,block)]
+/* After several hours of tedious analysis, the following hash
+ * function won. Do not mess with it... -DaveM
+ */
+#define _hashfn(dev,block) \
+ ((((dev)<<(bh_hash_shift - 6)) ^ ((dev)<<(bh_hash_shift - 9))) ^ \
+ (((block)<<(bh_hash_shift - 6)) ^ ((block) >> 13) ^ ((block) << (bh_hash_shift - 12))))
+#define hash(dev,block) hash_table[_hashfn(dev,block) & bh_hash_mask]

static inline void remove_from_hash_queue(struct buffer_head * bh)
{
@@ -1512,16 +1518,26 @@
for something that is really too small */

do {
+ unsigned long tmp;
+
nr_hash = (1UL << order) * PAGE_SIZE /
sizeof(struct buffer_head *);
+ bh_hash_mask = (nr_hash - 1);
+
+ tmp = nr_hash;
+ bh_hash_shift = 0;
+ while((tmp >>= 1UL) != 0UL)
+ bh_hash_shift++;
+
hash_table = (struct buffer_head **)
__get_free_pages(GFP_ATOMIC, order);
- } while (hash_table == NULL && --order > 4);
+ } while (hash_table == NULL && --order >= 0);
+ printk("Buffer-cache hash table entries: %d (order: %d, %ld bytes)\n",
+ nr_hash, order, (1UL<<order) * PAGE_SIZE);

if (!hash_table)
panic("Failed to allocate buffer hash table\n");
memset(hash_table, 0, nr_hash * sizeof(struct buffer_head *));
- bh_hash_mask = nr_hash-1;

bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head),
--- ./fs/dcache.c.~1~ Tue Jun 15 08:48:37 1999
+++ ./fs/dcache.c Thu Aug 12 04:39:44 1999
@@ -22,6 +22,7 @@
#include <linux/init.h>

#include <asm/uaccess.h>
+#include <asm/cache.h>

#define DCACHE_PARANOIA 1
/* #define DCACHE_DEBUG 1 */
@@ -41,11 +42,12 @@
* This hash-function tries to avoid losing too many bits of hash
* information, yet avoid using a prime hash-size or similar.
*/
-#define D_HASHBITS 10
-#define D_HASHSIZE (1UL << D_HASHBITS)
-#define D_HASHMASK (D_HASHSIZE-1)
+#define D_HASHBITS d_hash_shift
+#define D_HASHMASK d_hash_mask

-static struct list_head dentry_hashtable[D_HASHSIZE];
+static unsigned int d_hash_mask = 0;
+static unsigned int d_hash_shift = 0;
+static struct list_head *dentry_hashtable = NULL;
static LIST_HEAD(dentry_unused);

struct {
@@ -563,7 +565,7 @@

static inline struct list_head * d_hash(struct dentry * parent, unsigned long hash)
{
- hash += (unsigned long) parent;
+ hash += (unsigned long) parent / L1_CACHE_BYTES;
hash = hash ^ (hash >> D_HASHBITS) ^ (hash >> D_HASHBITS*2);
return dentry_hashtable + (hash & D_HASHMASK);
}
@@ -902,8 +904,10 @@

void __init dcache_init(void)
{
- int i;
- struct list_head *d = dentry_hashtable;
+ int i, order;
+ struct list_head *d;
+ unsigned int nr_hash;
+ unsigned long memory_size;

/*
* A constructor could be added for stable state like the lists,
@@ -921,7 +925,33 @@
if (!dentry_cache)
panic("Cannot create dentry cache");

- i = D_HASHSIZE;
+ memory_size = num_physpages << PAGE_SHIFT;
+ for (order = 5; (1UL << order) < memory_size; order++)
+ ;
+
+ do {
+ unsigned long tmp;
+
+ nr_hash = (1UL << order) * PAGE_SIZE /
+ sizeof(struct list_head);
+ d_hash_mask = (nr_hash - 1);
+
+ tmp = nr_hash;
+ d_hash_shift = 0;
+ while((tmp >>= 1UL) != 0UL)
+ d_hash_shift++;
+
+ dentry_hashtable = (struct list_head *)
+ __get_free_pages(GFP_ATOMIC, order);
+ } while(dentry_hashtable == NULL && --order >= 0);
+ printk("DENTRY hash table entries: %d (order: %d, %ld bytes)\n",
+ nr_hash, order, (1UL<<order) * PAGE_SIZE);
+
+ if (!dentry_hashtable)
+ panic("Failed to allocate dcache hash table\n");
+
+ d = dentry_hashtable;
+ i = nr_hash;
do {
INIT_LIST_HEAD(d);
d++;
--- ./include/linux/pagemap.h.~1~ Wed Aug 11 04:43:05 1999
+++ ./include/linux/pagemap.h Fri Aug 13 05:39:50 1999
@@ -38,11 +38,15 @@
*/
#define page_cache_entry(x) (mem_map + MAP_NR(x))

-#define PAGE_HASH_BITS 12
-#define PAGE_HASH_SIZE (1 << PAGE_HASH_BITS)
+#define PAGE_HASH_BITS page_hash_bits
+#define PAGE_HASH_MASK page_hash_mask

extern unsigned long page_cache_size; /* # of pages currently in the hash table */
-extern struct page * page_hash_table[PAGE_HASH_SIZE];
+extern unsigned int page_hash_bits;
+extern unsigned int page_hash_mask;
+extern struct page **page_hash_table;
+
+extern void page_cache_init(unsigned long);

/*
* We use a power-of-two hash table to avoid a modulus,
@@ -53,12 +57,10 @@
static inline unsigned long _page_hashfn(struct inode * inode, unsigned long offset)
{
#define i (((unsigned long) inode)/(sizeof(struct inode) & ~ (sizeof(struct inode) - 1)))
-#define o (offset >> PAGE_SHIFT)
-#define s(x) ((x)+((x)>>PAGE_HASH_BITS))
- return s(i+o) & (PAGE_HASH_SIZE-1);
+#define o ((offset >> PAGE_SHIFT) + (offset & ~PAGE_MASK))
+ return ((i+o) & PAGE_HASH_MASK);
#undef i
#undef o
-#undef s
}

#define page_hash(inode,offset) (page_hash_table+_page_hashfn(inode,offset))
--- ./include/net/tcp.h.~1~ Wed Aug 11 04:48:56 1999
+++ ./include/net/tcp.h Fri Aug 13 05:47:26 1999
@@ -24,22 +24,18 @@
#include <net/checksum.h>

/* This is for all connections with a full identity, no wildcards.
- * New scheme, half the table is for TIME_WAIT, the other half is
- * for the rest. I'll experiment with dynamic table growth later.
+ * Half of the table is for TIME_WAIT, the other half is for the
+ * rest.
+ *
+ * This needs to be shared by v4 and v6 because the lookup and hashing
+ * code needs to work with different AF's yet the port space is
+ * shared.
*/
-#define TCP_HTABLE_SIZE 512
+extern unsigned int tcp_ehash_size;
+extern struct sock **tcp_ehash;

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define TCP_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */
-
-/* This is for all sockets, to keep track of the local port allocations. */
-#define TCP_BHTABLE_SIZE 512
-
-/* tcp_ipv4.c: These need to be shared by v4 and v6 because the lookup
- * and hashing code needs to work with different AF's yet
- * the port space is shared.
- */
-extern struct sock *tcp_established_hash[TCP_HTABLE_SIZE];
extern struct sock *tcp_listening_hash[TCP_LHTABLE_SIZE];

/* There are a few simple rules, which allow for local port reuse by
@@ -81,7 +77,8 @@
struct tcp_bind_bucket **pprev;
};

-extern struct tcp_bind_bucket *tcp_bound_hash[TCP_BHTABLE_SIZE];
+extern unsigned int tcp_bhash_size;
+extern struct tcp_bind_bucket **tcp_bhash;
extern kmem_cache_t *tcp_bucket_cachep;
extern struct tcp_bind_bucket *tcp_bucket_create(unsigned short snum);
extern void tcp_bucket_unlock(struct sock *sk);
@@ -109,7 +106,7 @@
/* These are AF independent. */
static __inline__ int tcp_bhashfn(__u16 lport)
{
- return (lport & (TCP_BHTABLE_SIZE - 1));
+ return (lport & (tcp_bhash_size - 1));
}

/* This is a TIME_WAIT bucket. It works around the memory consumption
--- ./init/main.c.~1~ Fri Aug 6 16:29:23 1999
+++ ./init/main.c Thu Aug 12 04:59:13 1999
@@ -1207,6 +1207,7 @@
dcache_init();
vma_init();
buffer_init(memory_end-memory_start);
+ page_cache_init(memory_end-memory_start);
signals_init();
inode_init();
file_table_init();
--- ./mm/filemap.c.~1~ Tue Jun 15 09:17:48 1999
+++ ./mm/filemap.c Thu Aug 12 05:16:02 1999
@@ -20,6 +20,7 @@
#include <linux/file.h>
#include <linux/swapctl.h>
#include <linux/slab.h>
+#include <linux/init.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -32,7 +33,8 @@
*/

unsigned long page_cache_size = 0;
-struct page * page_hash_table[PAGE_HASH_SIZE];
+unsigned int page_hash_bits, page_hash_mask;
+struct page **page_hash_table;

/*
* Define a request structure for outstanding page write requests
@@ -1725,4 +1727,35 @@
kmem_cache_free(pio_request_cache, p);
}
}
+}
+
+void __init page_cache_init(unsigned long memory_size)
+{
+ unsigned long htable_size;
+ long order;
+
+ htable_size = memory_size >> PAGE_SHIFT;
+ htable_size *= sizeof(struct page *);
+ for(order = 0; (PAGE_SIZE << order) < htable_size; order++)
+ ;
+
+ do {
+ unsigned long tmp = (PAGE_SIZE << order) / sizeof(struct page *);
+
+ page_hash_mask = (tmp - 1UL);
+
+ page_hash_bits = 0;
+ while((tmp >>= 1UL) != 0UL)
+ page_hash_bits++;
+
+ page_hash_table = (struct page **)
+ __get_free_pages(GFP_ATOMIC, order);
+ } while(page_hash_table == NULL && --order >= 0L);
+
+ printk("Page-cache hash table entries: %d (order: %ld, %ld bytes)\n",
+ (1 << page_hash_bits), order, (PAGE_SIZE << order));
+ if (!page_hash_table)
+ panic("Failed to allocate page hash table\n");
+ memset(page_hash_table, 0,
+ (PAGE_HASH_MASK + 1UL) * sizeof(struct page *));
}
--- ./net/ipv4/tcp_input.c.~1~ Sun Aug 8 01:44:24 1999
+++ ./net/ipv4/tcp_input.c Fri Aug 13 04:40:55 1999
@@ -1042,7 +1043,7 @@
sk->prot->inuse--;

/* Step 4: Hash TW into TIMEWAIT half of established hash table. */
- head = &tcp_established_hash[sk->hashent + (TCP_HTABLE_SIZE/2)];
+ head = &tcp_ehash[sk->hashent + (tcp_ehash_size/2)];
sktw = (struct sock *)tw;
if((sktw->next = *head) != NULL)
(*head)->pprev = &sktw->next;
--- ./net/ipv4/tcp_ipv4.c.~1~ Thu Aug 12 08:35:18 1999
+++ ./net/ipv4/tcp_ipv4.c Fri Aug 13 04:31:33 1999
@@ -90,12 +90,14 @@
* First half of the table is for sockets not in TIME_WAIT, second half
* is for TIME_WAIT sockets only.
*/
-struct sock *tcp_established_hash[TCP_HTABLE_SIZE];
+unsigned int tcp_ehash_size;
+struct sock **tcp_ehash;

/* Ok, let's try this, I give up, we do need a local binding
* TCP hash as well as the others for fast bind/connect.
*/
-struct tcp_bind_bucket *tcp_bound_hash[TCP_BHTABLE_SIZE];
+unsigned int tcp_bhash_size;
+struct tcp_bind_bucket **tcp_bhash;

/* All sockets in TCP_LISTEN state will be in here. This is the only table
* where wildcard'd TCP sockets can exist. Hash function here is just local
@@ -117,7 +119,7 @@
static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
__u32 faddr, __u16 fport)
{
- return ((laddr ^ lport) ^ (faddr ^ fport)) & ((TCP_HTABLE_SIZE/2) - 1);
+ return ((laddr ^ lport) ^ (faddr ^ fport)) & ((tcp_ehash_size/2) - 1);
}

static __inline__ int tcp_sk_hashfn(struct sock *sk)
@@ -140,7 +142,7 @@
tb = kmem_cache_alloc(tcp_bucket_cachep, SLAB_ATOMIC);
if(tb != NULL) {
struct tcp_bind_bucket **head =
- &tcp_bound_hash[tcp_bhashfn(snum)];
+ &tcp_bhash[tcp_bhashfn(snum)];
tb->port = snum;
tb->fastreuse = 0;
tb->owners = NULL;
@@ -162,7 +164,7 @@
{
struct tcp_bind_bucket *tb;

- tb = tcp_bound_hash[tcp_bhashfn(snum)];
+ tb = tcp_bhash[tcp_bhashfn(snum)];
for( ; (tb && (tb->port != snum)); tb = tb->next)
;
if (tb == NULL) {
@@ -181,7 +183,7 @@
#ifdef CONFIG_IP_TRANSPARENT_PROXY
if (child->num != sk->num) {
unsigned short snum = ntohs(child->num);
- for(tb = tcp_bound_hash[tcp_bhashfn(snum)];
+ for(tb = tcp_bhash[tcp_bhashfn(snum)];
tb && tb->port != snum;
tb = tb->next)
;
@@ -220,7 +222,7 @@
do { rover++;
if ((rover < low) || (rover > high))
rover = low;
- tb = tcp_bound_hash[tcp_bhashfn(rover)];
+ tb = tcp_bhash[tcp_bhashfn(rover)];
for ( ; tb; tb = tb->next)
if (tb->port == rover)
goto next;
@@ -237,7 +239,7 @@
snum = rover;
tb = NULL;
} else {
- for (tb = tcp_bound_hash[tcp_bhashfn(snum)];
+ for (tb = tcp_bhash[tcp_bhashfn(snum)];
tb != NULL;
tb = tb->next)
if (tb->port == snum)
@@ -328,7 +330,7 @@
if(sk->state == TCP_LISTEN)
skp = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
else
- skp = &tcp_established_hash[(sk->hashent = tcp_sk_hashfn(sk))];
+ skp = &tcp_ehash[(sk->hashent = tcp_sk_hashfn(sk))];

if((sk->next = *skp) != NULL)
(*skp)->pprev = &sk->next;
@@ -421,7 +423,7 @@
* have wildcards anyways.
*/
hash = tcp_hashfn(daddr, hnum, saddr, sport);
- for(sk = tcp_established_hash[hash]; sk; sk = sk->next) {
+ for(sk = tcp_ehash[hash]; sk; sk = sk->next) {
if(TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif)) {
if (sk->state == TCP_ESTABLISHED)
TCP_RHASH(sport) = sk;
@@ -429,7 +431,7 @@
}
}
/* Must check for a TIME_WAIT'er before going to listener hash. */
- for(sk = tcp_established_hash[hash+(TCP_HTABLE_SIZE/2)]; sk; sk = sk->next)
+ for(sk = tcp_ehash[hash+(tcp_ehash_size/2)]; sk; sk = sk->next)
if(TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
goto hit;
sk = tcp_v4_lookup_listener(daddr, hnum, dif);
@@ -469,7 +471,7 @@

/* This code must run only from NET_BH. */
{
- struct tcp_bind_bucket *tb = tcp_bound_hash[tcp_bhashfn(hnum)];
+ struct tcp_bind_bucket *tb = tcp_bhash[tcp_bhashfn(hnum)];
for( ; (tb && tb->port != hnum); tb = tb->next)
;
if(tb == NULL)
@@ -510,7 +512,7 @@
}
next:
if(firstpass--) {
- struct tcp_bind_bucket *tb = tcp_bound_hash[tcp_bhashfn(hpnum)];
+ struct tcp_bind_bucket *tb = tcp_bhash[tcp_bhashfn(hpnum)];
for( ; (tb && tb->port != hpnum); tb = tb->next)
;
if(tb) {
@@ -546,7 +548,7 @@

/* Freeze the hash while we snoop around. */
SOCKHASH_LOCK();
- tb = tcp_bound_hash[tcp_bhashfn(snum)];
+ tb = tcp_bhash[tcp_bhashfn(snum)];
for(; tb; tb = tb->next) {
if(tb->port == snum && tb->owners != NULL) {
/* Almost certainly the re-use port case, search the real hashes
@@ -1788,7 +1794,7 @@

static void __tcp_v4_rehash(struct sock *sk)
{
- struct sock **skp = &tcp_established_hash[(sk->hashent = tcp_sk_hashfn(sk))];
+ struct sock **skp = &tcp_ehash[(sk->hashent = tcp_sk_hashfn(sk))];

SOCKHASH_LOCK();
if(sk->pprev) {
--- ./net/ipv4/tcp_timer.c.~1~ Sun Jun 20 13:18:18 1999
+++ ./net/ipv4/tcp_timer.c Thu Aug 12 15:09:52 1999
@@ -363,8 +363,8 @@
int count = 0;
int i;

- for(i = chain_start; i < (chain_start + ((TCP_HTABLE_SIZE/2) >> 2)); i++) {
- struct sock *sk = tcp_established_hash[i];
+ for(i = chain_start; i < (chain_start + ((tcp_ehash_size/2) >> 2)); i++) {
+ struct sock *sk = tcp_ehash[i];
while(sk) {
if(!atomic_read(&sk->sock_readers) && sk->keepopen) {
count += tcp_keepopen_proc(sk);
@@ -375,8 +375,8 @@
}
}
out:
- chain_start = ((chain_start + ((TCP_HTABLE_SIZE/2)>>2)) &
- ((TCP_HTABLE_SIZE/2) - 1));
+ chain_start = ((chain_start + ((tcp_ehash_size/2)>>2)) &
+ ((tcp_ehash_size/2) - 1));
}

/*
--- ./net/ipv4/tcp.c.~1~ Sun Aug 8 20:14:15 1999
+++ ./net/ipv4/tcp.c Thu Aug 12 15:17:39 1999
@@ -1798,6 +1798,8 @@
void __init tcp_init(void)
{
struct sk_buff *skb = NULL;
+ unsigned long goal;
+ int order;

if(sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
@@ -1823,4 +1825,43 @@
NULL, NULL);
if(!tcp_timewait_cachep)
panic("tcp_init: Cannot alloc tcp_tw_bucket cache.");
+
+ /* Size and allocate TCP hash tables. */
+ goal = num_physpages >> (20 - PAGE_SHIFT);
+ for (order = 0; (1UL << order) < goal; order++)
+ ;
+ do {
+ tcp_ehash_size = (1UL << order) * PAGE_SIZE /
+ sizeof(struct sock *);
+ tcp_ehash = (struct sock **)
+ __get_free_pages(GFP_ATOMIC, order);
+ } while (tcp_ehash == NULL && --order >= 0);
+
+ if (!tcp_ehash)
+ panic("Failed to allocate TCP established hash table\n");
+ memset(tcp_ehash, 0, tcp_ehash_size * sizeof(struct sock *));
+
+ goal = (((1UL << order) * PAGE_SIZE) / sizeof(struct tcp_bind_bucket *));
+ if (goal > (64 * 1024)) {
+ /* Don't size the bind-hash larger than the port
+ * space, that is just silly.
+ */
+ goal = (((64 * 1024) * sizeof(struct tcp_bind_bucket *)) / PAGE_SIZE);
+ for (order = 0; (1UL << order) < goal; order++)
+ ;
+ }
+
+ do {
+ tcp_bhash_size = (1UL << order) * PAGE_SIZE /
+ sizeof(struct tcp_bind_bucket *);
+ tcp_bhash = (struct tcp_bind_bucket **)
+ __get_free_pages(GFP_ATOMIC, order);
+ } while (tcp_bhash == NULL && --order >= 0);
+
+ if (!tcp_bhash)
+ panic("Failed to allocate TCP bind hash table\n");
+ memset(tcp_bhash, 0, tcp_bhash_size * sizeof(struct tcp_bind_bucket *));
+
+ printk("TCP: Hash tables configured (ehash %d bhash %d)\n",
+ tcp_ehash_size, tcp_bhash_size);
}
--- ./net/ipv6/tcp_ipv6.c.~1~ Thu Aug 12 08:35:24 1999
+++ ./net/ipv6/tcp_ipv6.c Fri Aug 13 04:31:29 1999
@@ -67,7 +67,7 @@
int hashent = (lport ^ fport);

hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
- return (hashent & ((TCP_HTABLE_SIZE/2) - 1));
+ return (hashent & ((tcp_ehash_size/2) - 1));
}

static __inline__ int tcp_v6_sk_hashfn(struct sock *sk)
@@ -98,7 +98,7 @@
do { rover++;
if ((rover < low) || (rover > high))
rover = low;
- tb = tcp_bound_hash[tcp_bhashfn(rover)];
+ tb = tcp_bhash[tcp_bhashfn(rover)];
for ( ; tb; tb = tb->next)
if (tb->port == rover)
goto next;
@@ -115,7 +115,7 @@
snum = rover;
tb = NULL;
} else {
- for (tb = tcp_bound_hash[tcp_bhashfn(snum)];
+ for (tb = tcp_bhash[tcp_bhashfn(snum)];
tb != NULL;
tb = tb->next)
if (tb->port == snum)
@@ -191,7 +191,7 @@
if(sk->state == TCP_LISTEN)
skp = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
else
- skp = &tcp_established_hash[(sk->hashent = tcp_v6_sk_hashfn(sk))];
+ skp = &tcp_ehash[(sk->hashent = tcp_v6_sk_hashfn(sk))];

SOCKHASH_LOCK();
if((sk->next = *skp) != NULL)
@@ -273,7 +273,7 @@
* have wildcards anyways.
*/
hash = tcp_v6_hashfn(daddr, hnum, saddr, sport);
- for(sk = tcp_established_hash[hash]; sk; sk = sk->next) {
+ for(sk = tcp_ehash[hash]; sk; sk = sk->next) {
/* For IPV6 do the cheaper port and family tests first. */
if(TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif)) {
if (sk->state == TCP_ESTABLISHED)
@@ -282,7 +282,7 @@
}
}
/* Must check for a TIME_WAIT'er before going to listener hash. */
- for(sk = tcp_established_hash[hash+(TCP_HTABLE_SIZE/2)]; sk; sk = sk->next) {
+ for(sk = tcp_ehash[hash+(tcp_ehash_size/2)]; sk; sk = sk->next) {
if(*((__u32 *)&(sk->dport)) == ports &&
sk->family == PF_INET6) {
struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
@@ -333,7 +333,7 @@

/* Freeze the hash while we snoop around. */
SOCKHASH_LOCK();
- tb = tcp_bound_hash[tcp_bhashfn(snum)];
+ tb = tcp_bhash[tcp_bhashfn(snum)];
for(; tb; tb = tb->next) {
if(tb->port == snum && tb->owners != NULL) {
/* Almost certainly the re-use port case, search the real hashes
--- ./net/netsyms.c.~1~ Sat Aug 7 23:58:35 1999
+++ ./net/netsyms.c Thu Aug 12 15:19:33 1999
@@ -275,9 +275,11 @@
EXPORT_SYMBOL(inet_recvmsg);

/* Socket demultiplexing. */
-EXPORT_SYMBOL(tcp_established_hash);
+EXPORT_SYMBOL(tcp_ehash_size);
+EXPORT_SYMBOL(tcp_ehash);
EXPORT_SYMBOL(tcp_listening_hash);
-EXPORT_SYMBOL(tcp_bound_hash);
+EXPORT_SYMBOL(tcp_bhash_size);
+EXPORT_SYMBOL(tcp_bhash);
EXPORT_SYMBOL(udp_hash);

EXPORT_SYMBOL(destroy_sock);
