diff --git a/auto/modules b/auto/modules
index 5e9d4d5..f875afb 100644
--- a/auto/modules
+++ b/auto/modules
@@ -210,6 +210,12 @@ if [ $HTTP_LIMIT_ZONE = YES ]; then
     HTTP_SRCS="$HTTP_SRCS $HTTP_LIMIT_ZONE_SRCS"
 fi
 
+if [ $HTTP_LIMIT_VAR = YES ]; then
+    have=NGX_HTTP_LIMIT_VAR . auto/have
+    HTTP_MODULES="$HTTP_MODULES $HTTP_LIMIT_VAR_MODULE"
+    HTTP_SRCS="$HTTP_SRCS $HTTP_LIMIT_VAR_SRCS"
+fi
+
 if [ $HTTP_LIMIT_REQ = YES ]; then
     HTTP_MODULES="$HTTP_MODULES $HTTP_LIMIT_REQ_MODULE"
     HTTP_SRCS="$HTTP_SRCS $HTTP_LIMIT_REQ_SRCS"
diff --git a/auto/options b/auto/options
index 8ec692e..299574f 100644
--- a/auto/options
+++ b/auto/options
@@ -78,6 +78,7 @@ HTTP_FASTCGI=YES
 HTTP_PERL=NO
 HTTP_MEMCACHED=YES
 HTTP_LIMIT_ZONE=YES
+HTTP_LIMIT_VAR=YES
 HTTP_LIMIT_REQ=YES
 HTTP_EMPTY_GIF=YES
 HTTP_BROWSER=YES
@@ -200,6 +201,7 @@ do
         --without-http_fastcgi_module)   HTTP_FASTCGI=NO            ;;
         --without-http_memcached_module) HTTP_MEMCACHED=NO          ;;
         --without-http_limit_zone_module) HTTP_LIMIT_ZONE=NO        ;;
+        --without-http_limit_var_module) HTTP_LIMIT_VAR=NO          ;;
         --without-http_limit_req_module) HTTP_LIMIT_REQ=NO          ;;
         --without-http_empty_gif_module) HTTP_EMPTY_GIF=NO          ;;
         --without-http_browser_module)  HTTP_BROWSER=NO             ;;
@@ -322,6 +324,7 @@ cat << END
   --without-http_fastcgi_module      disable ngx_http_fastcgi_module
   --without-http_memcached_module    disable ngx_http_memcached_module
   --without-http_limit_zone_module   disable ngx_http_limit_zone_module
+  --without-http_limit_var_module    disable ngx_http_limit_var_module
   --without-http_limit_req_module    disable ngx_http_limit_req_module
   --without-http_empty_gif_module    disable ngx_http_empty_gif_module
   --without-http_browser_module      disable ngx_http_browser_module
diff --git a/auto/sources b/auto/sources
index 4c14634..4f65033 100644
--- a/auto/sources
+++ b/auto/sources
@@ -23,8 +23,11 @@ CORE_DEPS="src/core/nginx.h \
             src/core/ngx_crc.h \
             src/core/ngx_crc32.h \
             src/core/ngx_md5.h \
+            src/core/ngx_lookup3.h \
             src/core/ngx_sha1.h \
             src/core/ngx_rbtree.h \
+            src/core/ngx_rbtreehash.h \
+            src/core/ngx_flathash.h \
             src/core/ngx_radix_tree.h \
             src/core/ngx_slab.h \
             src/core/ngx_times.h \
@@ -50,7 +53,10 @@ CORE_SRCS="src/core/nginx.c \
            src/core/ngx_inet.c \
            src/core/ngx_file.c \
            src/core/ngx_crc32.c \
+           src/core/ngx_lookup3.c \
            src/core/ngx_rbtree.c \
+           src/core/ngx_rbtreehash.c \
+           src/core/ngx_flathash.c \
            src/core/ngx_radix_tree.c \
            src/core/ngx_slab.c \
            src/core/ngx_times.c \
@@ -412,6 +418,10 @@ HTTP_LIMIT_ZONE_MODULE=ngx_http_limit_zone_module
 HTTP_LIMIT_ZONE_SRCS=src/http/modules/ngx_http_limit_zone_module.c
 
 
+HTTP_LIMIT_VAR_MODULE=ngx_http_limit_var_module
+HTTP_LIMIT_VAR_SRCS=src/http/modules/ngx_http_limit_var_module.c
+
+
 HTTP_LIMIT_REQ_MODULE=ngx_http_limit_req_module
 HTTP_LIMIT_REQ_SRCS=src/http/modules/ngx_http_limit_req_module.c
diff --git a/src/core/ngx_core.h b/src/core/ngx_core.h
index d5f18b8..809bd71 100644
--- a/src/core/ngx_core.h
+++ b/src/core/ngx_core.h
@@ -75,6 +75,9 @@ typedef void (*ngx_connection_handler_pt)(ngx_connection_t *c);
 #include
 #include
 #include
+#include <ngx_lookup3.h>
+#include <ngx_rbtreehash.h>
+#include <ngx_flathash.h>
 
 
 #define LF     (u_char) 10
diff --git a/src/core/ngx_flathash.c b/src/core/ngx_flathash.c
new file mode 100644
index 0000000..206036e
--- /dev/null
+++ b/src/core/ngx_flathash.c
@@ -0,0 +1,83 @@
+
+/*
+ * Copyright (C) Kirill A. Korinskiy
+ */
+
+#include <ngx_config.h>
+#include <ngx_core.h>
+
+
+typedef struct {
+    u_char  data[1];
+} ngx_flathash_node_t;
+
+
+#define ngx_flathash_hashsize(n) ((uint32_t)1 << (n))
+#define ngx_flathash_hashmask(n) (ngx_flathash_hashsize(n) - 1)
+#define ngx_flathash_hashfunc(a, b) ngx_lookup3_hashlittle(a, b, 0x715517)
+
+
+/*
+ * Really simple index
+ */
+static inline ngx_flathash_node_t *
+ngx_flathash_index(ngx_flathash_t *hashtable, uint32_t hash) {
+    return (ngx_flathash_node_t *)(hashtable->data
+                                   + ((offsetof(ngx_flathash_node_t, data)
+                                       + hashtable->value_len)
+                                      * hash));
+};
+
+
+void *
+ngx_flathash_get(ngx_flathash_t *hashtable, ngx_str_t *key)
+{
+    uint32_t              hash;
+    ngx_flathash_node_t  *rn;
+
+    hash = ngx_flathash_hashfunc(key->data, key->len)
+        & ngx_flathash_hashmask(hashtable->bits);
+
+    rn = ngx_flathash_index(hashtable, hash);
+
+    return rn->data;
+}
+
+
+size_t
+ngx_flathash_need_memory(size_t length, size_t size)
+{
+    uint32_t  bits;
+    size_t    i;
+
+    for (bits = 0, i = size; i; i >>= 1, bits++);
+
+    bits += 2;
+
+    return offsetof(ngx_flathash_t, data)
+        + ((offsetof(ngx_flathash_node_t, data)
+            + length)
+           * 1<<bits);
+}
+
+
+ngx_int_t
+ngx_flathash_init(ngx_flathash_t *hashtable, size_t length, size_t size)
+{
+    size_t  i;
+
+    hashtable->value_len = length;
+
+    hashtable->bits = 0;
+
+    for (i = size; i; i >>= 1, hashtable->bits++);
+
+    hashtable->bits += 2;
+
+    hashtable->length = 1 << hashtable->bits;
+
+
+    ngx_memzero(hashtable->data, hashtable->length);
+
+    return NGX_OK;
+}
diff --git a/src/core/ngx_flathash.h b/src/core/ngx_flathash.h
new file mode 100644
index 0000000..990018e
--- /dev/null
+++ b/src/core/ngx_flathash.h
@@ -0,0 +1,26 @@
+
+/*
+ * Copyright (C) Kirill A. Korinskiy
+ */
+
+#ifndef _NGX_FLATHASH_H_INCLUDED_
+#define _NGX_FLATHASH_H_INCLUDED_
+
+#include <ngx_config.h>
+#include <ngx_core.h>
+
+typedef struct {
+    size_t  value_len;
+    size_t  length;
+    u_char  bits;
+    u_char  data[1];
+} ngx_flathash_t;
+
+void *ngx_flathash_get(ngx_flathash_t *hashtable, ngx_str_t *key);
+
+size_t ngx_flathash_need_memory(size_t length, size_t size);
+
+ngx_int_t ngx_flathash_init(ngx_flathash_t *hashtable, size_t length, size_t size);
+
+
+#endif /* _NGX_FLATHASH_H_INCLUDED_ */
diff --git a/src/core/ngx_lookup3.c b/src/core/ngx_lookup3.c
new file mode 100644
index 0000000..190b64b
--- /dev/null
+++ b/src/core/ngx_lookup3.c
@@ -0,0 +1,761 @@
+/*
+ * ngx_lookup3.c, by Kirill A. Korinskiy
+ */
+
+#include <ngx_config.h>
+#include <ngx_core.h>
+
+/*
+-------------------------------------------------------------------------------
+lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+
+These are functions for producing 32-bit hashes for hash table lookup.
+hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
+are externally useful functions.  Routines to test the hash are included
+if SELF_TEST is defined.  You can use this free for any purpose.  It's in
+the public domain.  It has no warranty.
+
+You probably want to use hashlittle().  hashlittle() and hashbig()
+hash byte arrays.  hashlittle() is faster than hashbig() on
+little-endian machines.  Intel and AMD are little-endian machines.
+On second thought, you probably want hashlittle2(), which is identical to
+hashlittle() except it returns two 32-bit hashes for the price of one.
+You could implement hashbig2() if you wanted but I haven't bothered here.
+
+If you want to find a hash of, say, exactly 7 integers, do
+  a = i1;  b = i2;  c = i3;
+  mix(a,b,c);
+  a += i4; b += i5; c += i6;
+  mix(a,b,c);
+  a += i7;
+  final(a,b,c);
+then use c as the hash value.  If you have a variable length array of
+4-byte integers to hash, use hashword().  If you have a byte array (like
+a character string), use hashlittle().
If you have several byte arrays, or +a mix of things, see the comments above hashlittle(). + +Why is this so big? I read 12 bytes at a time into 3 4-byte integers, +then mix those integers. This is fast (you can do a lot more thorough +mixing with 12*3 instructions on 3 integers than you can with 3 instructions +on 1 byte), but shoehorning those bytes into integers efficiently is messy. +------------------------------------------------------------------------------- +*/ + +/* + * My best guess at if you are big-endian or little-endian. This may + * need adjustment. + */ +#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \ + __BYTE_ORDER == __LITTLE_ENDIAN) || \ + (defined(i386) || defined(__i386__) || defined(__i486__) || \ + defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL)) +# define HASH_LITTLE_ENDIAN 1 +# define HASH_BIG_ENDIAN 0 +#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \ + __BYTE_ORDER == __BIG_ENDIAN) || \ + (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel)) +# define HASH_LITTLE_ENDIAN 0 +# define HASH_BIG_ENDIAN 1 +#else +# define HASH_LITTLE_ENDIAN 0 +# define HASH_BIG_ENDIAN 0 +#endif + +#define hashsize(n) ((uint32_t)1<<(n)) +#define hashmask(n) (hashsize(n)-1) +#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k)))) + +/* +------------------------------------------------------------------------------- +mix -- mix 3 32-bit values reversibly. + +This is reversible, so any information in (a,b,c) before mix() is +still in (a,b,c) after mix(). + +If four pairs of (a,b,c) inputs are run through mix(), or through +mix() in reverse, there are at least 32 bits of the output that +are sometimes the same for one pair and different for another pair. +This was tested for: +* pairs that differed by one bit, by two bits, in any combination + of top bits of (a,b,c), or in any combination of bottom bits of + (a,b,c). +* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed + the output delta to a Gray code (a^(a>>1)) so a string of 1's (as + is commonly produced by subtraction) look like a single 1-bit + difference. +* the base values were pseudorandom, all zero but one bit set, or + all zero plus a counter that starts at zero. + +Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that +satisfy this are + 4 6 8 16 19 4 + 9 15 3 18 27 15 + 14 9 3 7 17 3 +Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing +for "differ" defined as + with a one-bit base and a two-bit delta. I +used http://burtleburtle.net/bob/hash/avalanche.html to choose +the operations, constants, and arrangements of the variables. + +This does not achieve avalanche. There are input bits of (a,b,c) +that fail to affect some output bits of (a,b,c), especially of a. The +most thoroughly mixed value is c, but it doesn't really even achieve +avalanche in c. + +This allows some parallelism. Read-after-writes are good at doubling +the number of bits affected, so the goal of mixing pulls in the opposite +direction as the goal of parallelism. I did what I could. Rotates +seem to cost as much as shifts on every machine I could lay my hands +on, and rotates are much kinder to the top and bottom bits, so I used +rotates. 
+------------------------------------------------------------------------------- +*/ +#define mix(a,b,c) \ +{ \ + a -= c; a ^= rot(c, 4); c += b; \ + b -= a; b ^= rot(a, 6); a += c; \ + c -= b; c ^= rot(b, 8); b += a; \ + a -= c; a ^= rot(c,16); c += b; \ + b -= a; b ^= rot(a,19); a += c; \ + c -= b; c ^= rot(b, 4); b += a; \ +} + +/* +------------------------------------------------------------------------------- +final -- final mixing of 3 32-bit values (a,b,c) into c + +Pairs of (a,b,c) values differing in only a few bits will usually +produce values of c that look totally different. This was tested for +* pairs that differed by one bit, by two bits, in any combination + of top bits of (a,b,c), or in any combination of bottom bits of + (a,b,c). +* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed + the output delta to a Gray code (a^(a>>1)) so a string of 1's (as + is commonly produced by subtraction) look like a single 1-bit + difference. +* the base values were pseudorandom, all zero but one bit set, or + all zero plus a counter that starts at zero. + +These constants passed: + 14 11 25 16 4 14 24 + 12 14 25 16 4 14 24 +and these came close: + 4 8 15 26 3 22 24 + 10 8 15 26 3 22 24 + 11 8 15 26 3 22 24 +------------------------------------------------------------------------------- +*/ +#define final(a,b,c) \ +{ \ + c ^= b; c -= rot(b,14); \ + a ^= c; a -= rot(c,11); \ + b ^= a; b -= rot(a,25); \ + c ^= b; c -= rot(b,16); \ + a ^= c; a -= rot(c,4); \ + b ^= a; b -= rot(a,14); \ + c ^= b; c -= rot(b,24); \ +} + +/* +-------------------------------------------------------------------- + This works on all machines. To be useful, it requires + -- that the key be an array of uint32_t's, and + -- that the length be the number of uint32_t's in the key + + The function hashword() is identical to hashlittle() on little-endian + machines, and identical to hashbig() on big-endian machines, + except that the length has to be measured in uint32_ts rather than in + bytes. hashlittle() is more complicated than hashword() only because + hashlittle() has to dance around fitting the key bytes into registers. +-------------------------------------------------------------------- +*/ +uint32_t ngx_lookup3_hashword(const uint32_t *k, /* the key, an array of uint32_t values */ + size_t length, /* the length of the key, in uint32_ts */ + uint32_t initval) /* the previous hash, or an arbitrary value */ +{ + uint32_t a,b,c; + + /* Set up the internal state */ + a = b = c = 0xdeadbeef + (((uint32_t)length)<<2) + initval; + + /*------------------------------------------------- handle most of the key */ + while (length > 3) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 3; + k += 3; + } + + /*------------------------------------------- handle the last 3 uint32_t's */ + switch(length) /* all the case statements fall through */ + { + case 3 : c+=k[2]; + case 2 : b+=k[1]; + case 1 : a+=k[0]; + final(a,b,c); + case 0: /* case 0: nothing left to add */ + break; + } + /*------------------------------------------------------ report the result */ + return c; +} + + +/* +-------------------------------------------------------------------- +hashword2() -- same as hashword(), but take two seeds and return two +32-bit values. pc and pb must both be nonnull, and *pc and *pb must +both be initialized with seeds. If you pass in (*pb)==0, the output +(*pc) will be the same as the return value from hashword(). 
+-------------------------------------------------------------------- +*/ +void ngx_lookup3_hashword2 (const uint32_t *k, /* the key, an array of uint32_t values */ + size_t length, /* the length of the key, in uint32_ts */ + uint32_t *pc, /* IN: seed OUT: primary hash value */ + uint32_t *pb) /* IN: more seed OUT: secondary hash value */ +{ + uint32_t a,b,c; + + /* Set up the internal state */ + a = b = c = 0xdeadbeef + ((uint32_t)(length<<2)) + *pc; + c += *pb; + + /*------------------------------------------------- handle most of the key */ + while (length > 3) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 3; + k += 3; + } + + /*------------------------------------------- handle the last 3 uint32_t's */ + switch(length) /* all the case statements fall through */ + { + case 3 : c+=k[2]; + case 2 : b+=k[1]; + case 1 : a+=k[0]; + final(a,b,c); + case 0: /* case 0: nothing left to add */ + break; + } + /*------------------------------------------------------ report the result */ + *pc=c; *pb=b; +} + + +/* +------------------------------------------------------------------------------- +hashlittle() -- hash a variable-length key into a 32-bit value + k : the key (the unaligned variable-length array of bytes) + length : the length of the key, counting by bytes + initval : can be any 4-byte value +Returns a 32-bit value. Every bit of the key affects every bit of +the return value. Two keys differing by one or two bits will have +totally different hash values. + +The best hash table sizes are powers of 2. There is no need to do +mod a prime (mod is sooo slow!). If you need less than 32 bits, +use a bitmask. For example, if you need only 10 bits, do + h = (h & hashmask(10)); +In which case, the hash table should have hashsize(10) elements. + +If you are hashing n strings (uint8_t **)k, do it like this: + for (i=0, h=0; i 12) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 12; + k += 3; + } + + /*----------------------------- handle the last (probably partial) block */ + /* + * "k[2]&0xffffff" actually reads beyond the end of the string, but + * then masks off the part it's not allowed to read. Because the + * string is aligned, the masked-off tail is in the same word as the + * rest of the string. Every machine with memory protection I've seen + * does it on word boundaries, so is OK with this. But VALGRIND will + * still catch it and complain. The masking trick does make the hash + * noticably faster for short strings (like English words). 
+ */ +#ifndef VALGRIND + + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break; + case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break; + case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break; + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=k[1]&0xffffff; a+=k[0]; break; + case 6 : b+=k[1]&0xffff; a+=k[0]; break; + case 5 : b+=k[1]&0xff; a+=k[0]; break; + case 4 : a+=k[0]; break; + case 3 : a+=k[0]&0xffffff; break; + case 2 : a+=k[0]&0xffff; break; + case 1 : a+=k[0]&0xff; break; + case 0 : return c; /* zero length strings require no mixing */ + } + +#else /* make valgrind happy */ + const uint8_t *k8; + + k8 = (const uint8_t *)k; + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ + case 10: c+=((uint32_t)k8[9])<<8; /* fall through */ + case 9 : c+=k8[8]; /* fall through */ + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ + case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */ + case 5 : b+=k8[4]; /* fall through */ + case 4 : a+=k[0]; break; + case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ + case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */ + case 1 : a+=k8[0]; break; + case 0 : return c; + } + +#endif /* !valgrind */ + + } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) { + const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */ + const uint8_t *k8; + + /*--------------- all but last block: aligned reads and different mixing */ + while (length > 12) + { + a += k[0] + (((uint32_t)k[1])<<16); + b += k[2] + (((uint32_t)k[3])<<16); + c += k[4] + (((uint32_t)k[5])<<16); + mix(a,b,c); + length -= 12; + k += 6; + } + + /*----------------------------- handle the last (probably partial) block */ + k8 = (const uint8_t *)k; + switch(length) + { + case 12: c+=k[4]+(((uint32_t)k[5])<<16); + b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ + case 10: c+=k[4]; + b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 9 : c+=k8[8]; /* fall through */ + case 8 : b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ + case 6 : b+=k[2]; + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 5 : b+=k8[4]; /* fall through */ + case 4 : a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ + case 2 : a+=k[0]; + break; + case 1 : a+=k8[0]; + break; + case 0 : return c; /* zero length requires no mixing */ + } + + } else { /* need to read the key one byte at a time */ + const uint8_t *k = (const uint8_t *)key; + + /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ + while (length > 12) + { + a += k[0]; + a += ((uint32_t)k[1])<<8; + a += ((uint32_t)k[2])<<16; + a += ((uint32_t)k[3])<<24; + b += k[4]; + b += ((uint32_t)k[5])<<8; + b += ((uint32_t)k[6])<<16; + b += ((uint32_t)k[7])<<24; + c += k[8]; + c += ((uint32_t)k[9])<<8; + c += ((uint32_t)k[10])<<16; + c += ((uint32_t)k[11])<<24; + mix(a,b,c); + length -= 12; + k += 12; + } + + /*-------------------------------- last block: affect all 32 bits of (c) */ + switch(length) /* all the case statements fall through */ + { + case 12: c+=((uint32_t)k[11])<<24; + case 11: c+=((uint32_t)k[10])<<16; + case 10: c+=((uint32_t)k[9])<<8; + case 9 : c+=k[8]; + case 8 : b+=((uint32_t)k[7])<<24; + case 7 : b+=((uint32_t)k[6])<<16; + case 6 : 
b+=((uint32_t)k[5])<<8; + case 5 : b+=k[4]; + case 4 : a+=((uint32_t)k[3])<<24; + case 3 : a+=((uint32_t)k[2])<<16; + case 2 : a+=((uint32_t)k[1])<<8; + case 1 : a+=k[0]; + break; + case 0 : return c; + } + } + + final(a,b,c); + return c; +} + + +/* + * hashlittle2: return 2 32-bit hash values + * + * This is identical to hashlittle(), except it returns two 32-bit hash + * values instead of just one. This is good enough for hash table + * lookup with 2^^64 buckets, or if you want a second hash if you're not + * happy with the first, or if you want a probably-unique 64-bit ID for + * the key. *pc is better mixed than *pb, so use *pc first. If you want + * a 64-bit value do something like "*pc + (((uint64_t)*pb)<<32)". + */ +void ngx_lookup3_hashlittle2(const void *key, /* the key to hash */ + size_t length, /* length of the key */ + uint32_t *pc, /* IN: primary initval, OUT: primary hash */ + uint32_t *pb) /* IN: secondary initval, OUT: secondary hash */ +{ + uint32_t a,b,c; /* internal state */ + union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */ + + /* Set up the internal state */ + a = b = c = 0xdeadbeef + ((uint32_t)length) + *pc; + c += *pb; + + u.ptr = key; + if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) { + const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */ + + /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ + while (length > 12) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 12; + k += 3; + } + + /*----------------------------- handle the last (probably partial) block */ + /* + * "k[2]&0xffffff" actually reads beyond the end of the string, but + * then masks off the part it's not allowed to read. Because the + * string is aligned, the masked-off tail is in the same word as the + * rest of the string. Every machine with memory protection I've seen + * does it on word boundaries, so is OK with this. But VALGRIND will + * still catch it and complain. The masking trick does make the hash + * noticably faster for short strings (like English words). 
+ */ +#ifndef VALGRIND + + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break; + case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break; + case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break; + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=k[1]&0xffffff; a+=k[0]; break; + case 6 : b+=k[1]&0xffff; a+=k[0]; break; + case 5 : b+=k[1]&0xff; a+=k[0]; break; + case 4 : a+=k[0]; break; + case 3 : a+=k[0]&0xffffff; break; + case 2 : a+=k[0]&0xffff; break; + case 1 : a+=k[0]&0xff; break; + case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ + } + +#else /* make valgrind happy */ + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ + case 10: c+=((uint32_t)k8[9])<<8; /* fall through */ + case 9 : c+=k8[8]; /* fall through */ + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ + case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */ + case 5 : b+=k8[4]; /* fall through */ + case 4 : a+=k[0]; break; + case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ + case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */ + case 1 : a+=k8[0]; break; + case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ + } + +#endif /* !valgrind */ + + } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) { + const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */ + const uint8_t *k8; + + /*--------------- all but last block: aligned reads and different mixing */ + while (length > 12) + { + a += k[0] + (((uint32_t)k[1])<<16); + b += k[2] + (((uint32_t)k[3])<<16); + c += k[4] + (((uint32_t)k[5])<<16); + mix(a,b,c); + length -= 12; + k += 6; + } + + /*----------------------------- handle the last (probably partial) block */ + k8 = (const uint8_t *)k; + switch(length) + { + case 12: c+=k[4]+(((uint32_t)k[5])<<16); + b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ + case 10: c+=k[4]; + b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 9 : c+=k8[8]; /* fall through */ + case 8 : b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ + case 6 : b+=k[2]; + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 5 : b+=k8[4]; /* fall through */ + case 4 : a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ + case 2 : a+=k[0]; + break; + case 1 : a+=k8[0]; + break; + case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ + } + + } else { /* need to read the key one byte at a time */ + const uint8_t *k = (const uint8_t *)key; + + /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ + while (length > 12) + { + a += k[0]; + a += ((uint32_t)k[1])<<8; + a += ((uint32_t)k[2])<<16; + a += ((uint32_t)k[3])<<24; + b += k[4]; + b += ((uint32_t)k[5])<<8; + b += ((uint32_t)k[6])<<16; + b += ((uint32_t)k[7])<<24; + c += k[8]; + c += ((uint32_t)k[9])<<8; + c += ((uint32_t)k[10])<<16; + c += ((uint32_t)k[11])<<24; + mix(a,b,c); + length -= 12; + k += 12; + } + + /*-------------------------------- last block: affect all 32 bits of (c) */ + switch(length) /* all the case statements fall through */ + { + case 12: c+=((uint32_t)k[11])<<24; + case 11: c+=((uint32_t)k[10])<<16; + case 10: c+=((uint32_t)k[9])<<8; + case 9 : c+=k[8]; + case 8 : b+=((uint32_t)k[7])<<24; + case 7 
: b+=((uint32_t)k[6])<<16; + case 6 : b+=((uint32_t)k[5])<<8; + case 5 : b+=k[4]; + case 4 : a+=((uint32_t)k[3])<<24; + case 3 : a+=((uint32_t)k[2])<<16; + case 2 : a+=((uint32_t)k[1])<<8; + case 1 : a+=k[0]; + break; + case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ + } + } + + final(a,b,c); + *pc=c; *pb=b; +} + + + +/* + * hashbig(): + * This is the same as hashword() on big-endian machines. It is different + * from hashlittle() on all machines. hashbig() takes advantage of + * big-endian byte ordering. + */ +uint32_t ngx_lookup3_hashbig(const void *key, size_t length, uint32_t initval) +{ + uint32_t a,b,c; + union { const void *ptr; size_t i; } u; /* to cast key to (size_t) happily */ + + /* Set up the internal state */ + a = b = c = 0xdeadbeef + ((uint32_t)length) + initval; + + u.ptr = key; + if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) { + const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */ + + /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ + while (length > 12) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 12; + k += 3; + } + + /*----------------------------- handle the last (probably partial) block */ + /* + * "k[2]<<8" actually reads beyond the end of the string, but + * then shifts out the part it's not allowed to read. Because the + * string is aligned, the illegal read is in the same word as the + * rest of the string. Every machine with memory protection I've seen + * does it on word boundaries, so is OK with this. But VALGRIND will + * still catch it and complain. The masking trick does make the hash + * noticably faster for short strings (like English words). + */ +#ifndef VALGRIND + + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break; + case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break; + case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break; + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=k[1]&0xffffff00; a+=k[0]; break; + case 6 : b+=k[1]&0xffff0000; a+=k[0]; break; + case 5 : b+=k[1]&0xff000000; a+=k[0]; break; + case 4 : a+=k[0]; break; + case 3 : a+=k[0]&0xffffff00; break; + case 2 : a+=k[0]&0xffff0000; break; + case 1 : a+=k[0]&0xff000000; break; + case 0 : return c; /* zero length strings require no mixing */ + } + +#else /* make valgrind happy */ + + k8 = (const uint8_t *)k; + switch(length) /* all the case statements fall through */ + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=((uint32_t)k8[10])<<8; /* fall through */ + case 10: c+=((uint32_t)k8[9])<<16; /* fall through */ + case 9 : c+=((uint32_t)k8[8])<<24; /* fall through */ + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=((uint32_t)k8[6])<<8; /* fall through */ + case 6 : b+=((uint32_t)k8[5])<<16; /* fall through */ + case 5 : b+=((uint32_t)k8[4])<<24; /* fall through */ + case 4 : a+=k[0]; break; + case 3 : a+=((uint32_t)k8[2])<<8; /* fall through */ + case 2 : a+=((uint32_t)k8[1])<<16; /* fall through */ + case 1 : a+=((uint32_t)k8[0])<<24; break; + case 0 : return c; + } + +#endif /* !VALGRIND */ + + } else { /* need to read the key one byte at a time */ + const uint8_t *k = (const uint8_t *)key; + + /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ + while (length > 12) + { + a += ((uint32_t)k[0])<<24; + a += ((uint32_t)k[1])<<16; + a += ((uint32_t)k[2])<<8; + a += ((uint32_t)k[3]); + b += ((uint32_t)k[4])<<24; + b += ((uint32_t)k[5])<<16; + b += ((uint32_t)k[6])<<8; + b += ((uint32_t)k[7]); + c += 
((uint32_t)k[8])<<24; + c += ((uint32_t)k[9])<<16; + c += ((uint32_t)k[10])<<8; + c += ((uint32_t)k[11]); + mix(a,b,c); + length -= 12; + k += 12; + } + + /*-------------------------------- last block: affect all 32 bits of (c) */ + switch(length) /* all the case statements fall through */ + { + case 12: c+=k[11]; + case 11: c+=((uint32_t)k[10])<<8; + case 10: c+=((uint32_t)k[9])<<16; + case 9 : c+=((uint32_t)k[8])<<24; + case 8 : b+=k[7]; + case 7 : b+=((uint32_t)k[6])<<8; + case 6 : b+=((uint32_t)k[5])<<16; + case 5 : b+=((uint32_t)k[4])<<24; + case 4 : a+=k[3]; + case 3 : a+=((uint32_t)k[2])<<8; + case 2 : a+=((uint32_t)k[1])<<16; + case 1 : a+=((uint32_t)k[0])<<24; + break; + case 0 : return c; + } + } + + final(a,b,c); + return c; +} + +/* Local Variables: */ +/* mode: c */ +/* c-basic-offset: 4 */ +/* c-file-offsets: ((arglist-cont-nonempty . 4)) */ +/* End: */ diff --git a/src/core/ngx_lookup3.h b/src/core/ngx_lookup3.h new file mode 100644 index 0000000..3aa1579 --- /dev/null +++ b/src/core/ngx_lookup3.h @@ -0,0 +1,95 @@ +/* + * ngx_lookup3.h by Kirill A. Korinskiy + */ + +#ifndef _NGX_LOOKUP3_H_INCLUDED_ +#define _NGX_LOOKUP3_H_INCLUDED_ + +#include +#include + +/* +-------------------------------------------------------------------- + This works on all machines. To be useful, it requires + -- that the key be an array of uint32_t's, and + -- that the length be the number of uint32_t's in the key + + The function hashword() is identical to hashlittle() on little-endian + machines, and identical to hashbig() on big-endian machines, + except that the length has to be measured in uint32_ts rather than in + bytes. hashlittle() is more complicated than hashword() only because + hashlittle() has to dance around fitting the key bytes into registers. +-------------------------------------------------------------------- +*/ +uint32_t ngx_lookup3_hashword(const uint32_t *k, /* the key, an array of uint32_t values */ + size_t length, /* the length of the key, in uint32_ts */ + uint32_t initval); /* the previous hash, or an arbitrary value */ + +void ngx_lookup3_hashword2 (const uint32_t *k, /* the key, an array of uint32_t values */ + size_t length, /* the length of the key, in uint32_ts */ + uint32_t *pc, /* IN: seed OUT: primary hash value */ + uint32_t *pb); /* IN: more seed OUT: secondary hash value */ + + +/* +------------------------------------------------------------------------------- +hashlittle() -- hash a variable-length key into a 32-bit value + k : the key (the unaligned variable-length array of bytes) + length : the length of the key, counting by bytes + initval : can be any 4-byte value +Returns a 32-bit value. Every bit of the key affects every bit of +the return value. Two keys differing by one or two bits will have +totally different hash values. + +The best hash table sizes are powers of 2. There is no need to do +mod a prime (mod is sooo slow!). If you need less than 32 bits, +use a bitmask. For example, if you need only 10 bits, do + h = (h & hashmask(10)); +In which case, the hash table should have hashsize(10) elements. 
+ +If you are hashing n strings (uint8_t **)k, do it like this: + for (i=0, h=0; i +#include + +typedef struct { + u_char color; + uint32_t crc32; + size_t len; + u_char data[1]; +} ngx_rbtreehash_node_t; + +typedef struct { + ngx_str_t key; + ngx_str_t data; +} ngx_rbtreehash_key_t; + +typedef struct { + ngx_pool_t *pool; /* this pool need only for tempory using and must destroy afer create tree */ + ngx_array_t keys; /* keys for hash */ + ngx_rbtreehash_t *hash; +} ngx_rbtreehash_ctx_t; + +static ngx_command_t ngx_rbtreehash_commands[] = { + ngx_null_command +}; + +static ngx_core_module_t ngx_rbtreehash_module_ctx = { + ngx_string("rbtreehash"), + NULL, + NULL +}; + +ngx_module_t ngx_rbtreehash_module = { + NGX_MODULE_V1, + &ngx_rbtreehash_module_ctx, /* module context */ + ngx_rbtreehash_commands, /* module directives */ + NGX_CORE_MODULE, /* module type */ + NULL, /* init master */ + NULL, /* init module */ + NULL, /* init process */ + NULL, /* init thread */ + NULL, /* exit thread */ + NULL, /* exit process */ + NULL, /* exit master */ + NGX_MODULE_V1_PADDING +}; + +static void* +ngx_rbtreehash_alloc(ngx_rbtreehash_pool_t *pool, size_t size) +{ + if (pool->pool) { + /* use pool to allocated memory */ + return ngx_palloc(pool->pool, size); + } + + if (pool->shm_zone) { + /* or shm */ + return ngx_slab_alloc((ngx_slab_pool_t *) pool->shm_zone->shm.addr, + size); + } + + /* or system system memory */ + if (!pool->log) { + /* if not set log for pool use cycle log */ + return ngx_alloc(size, ngx_cycle->log); + } + + return ngx_alloc(size, pool->log); +} + +static void +ngx_rbtreehash_free(ngx_rbtreehash_pool_t *pool, void *p) +{ + if (pool->pool) { + /* can't have free in pool-based alloc */ + return; + } + + if (pool->shm_zone) { + ngx_slab_free((ngx_slab_pool_t *) pool->shm_zone->shm.addr, p); + return; + } + + ngx_free(p); +} + +static void +ngx_rbtreehash_rbtree_insert_value(ngx_rbtree_node_t *temp, + ngx_rbtree_node_t *node, ngx_rbtree_node_t *sentinel) +{ + ngx_rbtree_node_t **p; + ngx_rbtreehash_node_t *rn, *rnt; + + for ( ;; ) { + + if (node->key < temp->key) { + + p = &temp->left; + + } else if (node->key > temp->key) { + + p = &temp->right; + + } else { /* node->key == temp->key */ + + rn = (ngx_rbtreehash_node_t *) &node->color; + rnt = (ngx_rbtreehash_node_t *) &temp->color; + + p = rn->crc32 < rnt->crc32 + ? 
&temp->left : &temp->right; + } + + if (*p == sentinel) { + break; + } + + temp = *p; + } + + *p = node; + node->parent = temp; + node->left = sentinel; + node->right = sentinel; + ngx_rbt_red(node); +} + +ngx_rbtree_node_t* ngx_rbtreehash_insert(ngx_rbtreehash_t *hash, ngx_str_t *key, + void *value, size_t len) +{ + uint32_t n; + ngx_rbtree_node_t *node; + ngx_rbtreehash_node_t *rn; + + n = offsetof(ngx_rbtree_node_t, color) + + offsetof(ngx_rbtreehash_node_t, data) + + len; + + node = ngx_rbtreehash_alloc(&hash->pool, n); + if (node == NULL) { + return NULL; + } + hash->data->nodes++; + + rn = (ngx_rbtreehash_node_t*) &node->color; + + node->key = ngx_lookup3_hashlittle(key->data, key->len, 0); + rn->crc32 = ngx_crc32_short(key->data, key->len); + rn->len = len; + ngx_memcpy(rn->data, value, len); + + ngx_rbtree_insert(hash->data->tree, node); + + return node; +} + +ngx_int_t ngx_rbtreehash_delete(ngx_rbtreehash_t *hash, ngx_str_t *key) +{ + ngx_uint_t hash32; + ngx_uint_t crc32; + ngx_rbtree_node_t *node; + ngx_rbtreehash_node_t *rn; + + hash32 = ngx_lookup3_hashlittle(key->data, key->len, 0); + crc32 = ngx_crc32_short(key->data, key->len); + node = hash->data->tree->root; + + while (node != hash->data->tree->sentinel) { + if (hash32 < node->key) { + node = node->left; + continue; + } + + if (hash32 > node->key) { + node = node->right; + continue; + } + + do { + rn = (ngx_rbtreehash_node_t*) &node->color; + + if (crc32 == rn->crc32) { + break; + } + + if (crc32 == rn->crc32) { + break; + } + + node = crc32 < rn->crc32 ? node->left : node->right; + } while (node != hash->data->tree->sentinel && hash32 == node->key); + + break; + } + + if (node == hash->data->tree->sentinel) { + return NGX_OK; + } + + ngx_rbtree_delete(hash->data->tree, node); + + ngx_rbtreehash_free(&hash->pool, node); + + hash->data->nodes--; + + return NGX_OK; +} + +ngx_int_t ngx_rbtreehash_init(ngx_rbtreehash_t *hash) +{ + ngx_rbtree_node_t *sentinel; + + hash->data = ngx_rbtreehash_alloc(&hash->pool, sizeof(ngx_rbtreehash_hash_t)); + ngx_memzero(hash->data, sizeof(ngx_rbtreehash_hash_t)); + + hash->data->tree = ngx_rbtreehash_alloc(&hash->pool, sizeof(ngx_rbtree_t)); + if (hash->data->tree == NULL) { + return NGX_ERROR; + } + + sentinel = ngx_rbtreehash_alloc(&hash->pool, sizeof(ngx_rbtree_node_t)); + if (sentinel == NULL) { + return NGX_ERROR; + } + + ngx_rbtree_init(hash->data->tree, sentinel, + ngx_rbtreehash_rbtree_insert_value); + + return NGX_OK; +} + +ngx_int_t ngx_rbtreehash_destroy(ngx_rbtreehash_t *hash) +{ + ngx_rbtree_node_t *node; + + for (;;) { + if (hash->data->tree->root == hash->data->tree->sentinel) { + break; + } + + node = ngx_rbtree_min(hash->data->tree->root, + hash->data->tree->sentinel); + + ngx_rbtree_delete(hash->data->tree, node); + + ngx_rbtreehash_free(&hash->pool, node); + + } + + return NGX_OK; +} + +static ngx_int_t +ngx_rbtreehash_init_tree(ngx_shm_zone_t *shm_zone, void *data) +{ + uint32_t i; + ngx_rbtreehash_t *hash; + ngx_rbtreehash_ctx_t *octx = data; + ngx_rbtreehash_ctx_t *ctx; + ngx_rbtreehash_key_t *keys; + + ctx = shm_zone->data; + + if (octx && + ngx_strncmp(ctx->hash->pool.shm_key.data, + octx->hash->pool.shm_key.data, + ctx->hash->pool.shm_key.len) != 0) { + ngx_log_error(NGX_LOG_EMERG, shm_zone->shm.log, 0, + "rbhash use path \"%s\" with previously it used " + "the \"%s\"", ctx->hash->pool.shm_key.data, + octx->hash->pool.shm_key.data); + return NGX_ERROR; + } + + if (ngx_rbtreehash_init(ctx->hash) != NGX_OK) { + return NGX_ERROR; + } + + keys = ctx->keys.elts; + + for 
(i = 0; i < ctx->keys.nelts; i++) { + if (keys[i].key.len == 0) { + continue; + } + + if (ngx_rbtreehash_insert(ctx->hash, &keys[i].key, + keys[i].data.data, + keys[i].data.len) == NULL) { + return NGX_ERROR; + } + + } + + if (ctx->pool) { + ngx_destroy_pool(ctx->pool); + ctx->pool = NULL; + } + + /* setup hash->data to all linked conf */ + for (hash = ctx->hash; hash->next; hash = hash->next) { + hash->next->data = hash->data; + } + + for (; hash->prev; hash = hash->prev) { + hash->prev->data = hash->data; + } + + return NGX_OK; +} + +void* ngx_rbtreehash_find(ngx_rbtreehash_t *hash, ngx_str_t *key, size_t *len) +{ + ngx_uint_t hash32; + ngx_uint_t crc32; + ngx_rbtree_node_t *node; + ngx_rbtree_node_t *sentinel; + ngx_rbtreehash_node_t *rn; + + if (!hash->data) { + return NULL; + } + + if (hash->data->nodes == 0) { + return NULL; + } + + if (key->len == 0) { + return NULL; + } + + hash32 = ngx_lookup3_hashlittle(key->data, key->len, 0); + crc32 = ngx_crc32_short(key->data, key->len); + + node = hash->data->tree->root; + sentinel = hash->data->tree->sentinel; + + while (node != sentinel) { + if (hash32 < node->key) { + node = node->left; + continue; + } + + if (hash32 > node->key) { + node = node->right; + continue; + } + + do { + rn = (ngx_rbtreehash_node_t*) &node->color; + + if (crc32 == rn->crc32) { + *len = rn->len; + return rn->data; + } + + if (crc32 == rn->crc32) { + break; + } + + node = crc32 < rn->crc32 ? node->left : node->right; + + } while (node != sentinel && hash32 == node->key); + break; + } + + return NULL; +} + +char * +ngx_rbtreehash_crete_shared_by_size(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) +{ + ngx_str_t *value; + char *p = conf; + size_t size; + ngx_rbtreehash_t *hash; + + hash = (ngx_rbtreehash_t*) (p + cmd->offset); + + if (cf->args->nelts != 2) { + return "need two args"; + } + + value = cf->args->elts; + + if (hash->pool.shm_zone) { + return "is duplicate"; + } + + size = ngx_parse_offset(&value[1]); + + hash->pool.shm_key.len = value[0].len + NGX_INT_T_LEN + sizeof(" ") - 1 + NGX_INT_T_LEN; + hash->pool.shm_key.data = ngx_palloc(cf->pool, hash->pool.shm_key.len); + if (hash->pool.shm_key.data == NULL) { + return NGX_CONF_ERROR; + } + + ngx_sprintf(hash->pool.shm_key.data, "%V%d %d", &value[0], size, rand()); + + if (size < (size_t) (8 * ngx_pagesize)) { + size = (size_t) (8 * ngx_pagesize); + } else { + size = 8 * ngx_pagesize * (size / (8 * ngx_pagesize) + 1); + } + + hash->pool.shm_zone = ngx_shared_memory_add(cf, &hash->pool.shm_key, + size, + &ngx_rbtreehash_module); + + if (hash->pool.shm_zone == NULL) { + return NGX_CONF_ERROR; + } + + hash->pool.shm_zone->init = ngx_rbtreehash_init_tree; + hash->pool.shm_zone->data = hash; + hash->pool.pool = NULL; + hash->pool.log = cf->log; + + return NGX_CONF_OK; +} + +char * +ngx_rbtreehash_from_path(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) +{ + ngx_str_t *value; + char *p = conf; + ngx_str_t data; + ngx_str_t path; + ngx_int_t fd; + ngx_file_info_t fi; + ngx_rbtreehash_key_t *key; + size_t need_shmem = 0; + ngx_rbtreehash_ctx_t *ctx; + u_char *ptr; + u_char *ptr_last; + u_char *ptr_end; + size_t len; + + ctx = ngx_pcalloc(cf->pool, sizeof(ngx_rbtreehash_ctx_t)); + if (ctx == NULL) { + return NGX_CONF_ERROR; + } + + ctx->hash = (ngx_rbtreehash_t*) (p + cmd->offset); + + if (cf->args->nelts != 2) { + return "need two args"; + } + + value = cf->args->elts; + + if (ctx->hash->pool.shm_zone) { + return "is duplicate"; + } + + ctx->pool = ngx_create_pool(4096, cf->log); + if (ctx->pool == NULL) { + return 
NGX_CONF_ERROR; + } + + if (ngx_conf_full_name(cf->cycle, &value[1], 0) == NGX_ERROR) { + return NGX_CONF_ERROR; + } + + path = value[1]; /* shm_key is a path to file with data */ + + fd = ngx_open_file(path.data, NGX_FILE_RDONLY, NGX_FILE_OPEN, 0); + if (fd == NGX_INVALID_FILE) { + if (ngx_errno == NGX_ENOENT) { + return NGX_CONF_OK; + } + + ngx_log_error(NGX_LOG_CRIT, cf->log, ngx_errno, + ngx_open_file_n " \"%s\" failed", path.data); + return NGX_CONF_ERROR; + } + + if (ngx_fd_info(fd, &fi) == NGX_FILE_ERROR) { + ngx_log_error(NGX_LOG_CRIT, cf->log, ngx_errno, + ngx_fd_info_n " \"%s\" failed", path.data); + + return NGX_CONF_ERROR; + } + + data.len = ngx_file_size(&fi); + data.data = mmap(NULL, data.len, PROT_READ, MAP_PRIVATE, fd, 0); + if (data.data == MAP_FAILED) { + return NGX_CONF_ERROR; + } + + for (ptr = data.data, ptr_end = data.data + data.len, len = 0; + ptr <= ptr_end; ptr++) { + if (*ptr == '\n' || ptr == ptr_end) { + len++; + } + } + + if (ngx_array_init(&ctx->keys, ctx->pool, len, + sizeof(ngx_rbtreehash_key_t)) != NGX_OK) { + return NGX_CONF_ERROR; + } + + key = ngx_array_push(&ctx->keys); + ngx_memzero(key, ctx->keys.size); + for (ptr_last = ptr = data.data, ptr_end = data.data + data.len + ; ptr < ptr_end; ptr++) { + switch (*ptr) { + case ' ': + { + if (*ptr == *ptr_last) { + ptr++; + ptr_last = ptr; + } + break; + } + case ':': + { + if (key->key.data != NULL) { + continue; + } + key->key.len = ptr - ptr_last; + key->key.data = ngx_palloc(ctx->pool, key->key.len); + if (key->key.data == NULL) { + goto error; + } + + memcpy(key->key.data, ptr_last, key->key.len); + + ptr++; + ptr_last = ptr; + break; + } + case '\n': + { + if (key->key.data == NULL) { + key->key.len = ptr - ptr_last; + key->key.data = ngx_palloc(ctx->pool, key->key.len); + if (key->key.data == NULL) { + goto error; + } + + memcpy(key->key.data, ptr_last, key->key.len); + + key->data = key->key; + } else { + key->data.len = ptr - ptr_last; + key->data.data = ngx_palloc(ctx->pool, key->data.len); + if (key->data.data == NULL) { + goto error; + } + + memcpy(key->data.data, ptr_last, key->data.len); + } + + need_shmem += ngx_align(offsetof(ngx_rbtree_node_t, color) + + offsetof(ngx_rbtreehash_node_t, data) + + key->data.len, + ngx_pagesize); + + ptr++; + ptr_last = ptr; + key = ngx_array_push(&ctx->keys); + ngx_memzero(key, ctx->keys.size); + break; + } + } + } + + ctx->hash->pool.shm_key.len = path.len + NGX_INT_T_LEN; + ctx->hash->pool.shm_key.data = ngx_palloc(cf->pool, ctx->hash->pool.shm_key.len); + if (ctx->hash->pool.shm_key.data == NULL) { + goto error; + } + + /* shm_key is path to file and value hash function on the contents of file оп*/ + ngx_sprintf(ctx->hash->pool.shm_key.data, "%s %d", &path, + ngx_lookup3_hashlittle(data.data, data.len, 0)); + + + munmap(data.data, data.len); + + need_shmem += ngx_align(sizeof(ngx_rbtree_t), ngx_pagesize) + + ngx_align(sizeof(ngx_rbtree_node_t), ngx_pagesize) /* sentinel */ + + ngx_align(sizeof(ngx_rbtreehash_hash_t), ngx_pagesize); /* hash_data */ + + if (need_shmem < (size_t) (8 * ngx_pagesize)) { + need_shmem = (size_t) (8 * ngx_pagesize); + } else { + need_shmem = 8 * ngx_pagesize * (need_shmem / (8 * ngx_pagesize) + 1); + } + + ctx->hash->pool.shm_zone = ngx_shared_memory_add(cf, &ctx->hash->pool.shm_key, + need_shmem, + &ngx_rbtreehash_module); + + if (ctx->hash->pool.shm_zone == NULL) { + goto error_wo_data; + } + + ctx->hash->pool.shm_zone->init = ngx_rbtreehash_init_tree; + ctx->hash->pool.shm_zone->data = ctx; + ctx->hash->pool.pool = NULL; + + 
return NGX_CONF_OK; + + error: + munmap(data.data, data.len); + error_wo_data: + ngx_destroy_pool(ctx->pool); + return NGX_CONF_ERROR; +} + +ngx_int_t ngx_rbtreehash_merge_value(ngx_rbtreehash_t *conf, ngx_rbtreehash_t *prev) +{ + ngx_rbtreehash_t *hash; + + if (prev->pool.shm_zone || prev->pool.pool) { + conf->pool = prev->pool; + } + + for (hash = prev; hash->next; hash = hash->next); + hash->next = conf; + conf->prev = hash; + + return NGX_OK; +} + +/* Local Variables: */ +/* mode: c */ +/* c-basic-offset: 4 */ +/* c-file-offsets: ((arglist-cont-nonempty . 4)) */ +/* End: */ diff --git a/src/core/ngx_rbtreehash.h b/src/core/ngx_rbtreehash.h new file mode 100644 index 0000000..45a06ba --- /dev/null +++ b/src/core/ngx_rbtreehash.h @@ -0,0 +1,59 @@ + +/* + * Copyright (C) Kirill A. Korinskiy + */ + +#ifndef __NGX_RBTREEHASH +#define __NGX_RBTREEHASH + +#include +#include + +typedef struct { + /* in hash using shm_zone */ + ngx_shm_zone_t *shm_zone; + ngx_str_t shm_key; + + /* ... or pool */ + ngx_pool_t *pool; + + ngx_log_t *log; +} ngx_rbtreehash_pool_t; + +typedef struct { + ngx_rbtree_t *tree; + size_t nodes; +} ngx_rbtreehash_hash_t; + +typedef struct ngx_rbtreehash_s ngx_rbtreehash_t; + +struct ngx_rbtreehash_s { + ngx_rbtreehash_pool_t pool; + ngx_rbtreehash_hash_t *data; + + /* hack to pointer to next/prev config */ + ngx_rbtreehash_t *next; + ngx_rbtreehash_t *prev; +}; + +ngx_int_t ngx_rbtreehash_init(ngx_rbtreehash_t *hash); +ngx_int_t ngx_rbtreehash_destroy(ngx_rbtreehash_t *hash); +ngx_rbtree_node_t* ngx_rbtreehash_insert(ngx_rbtreehash_t *hash, ngx_str_t *key, + void *value, size_t len); +ngx_int_t ngx_rbtreehash_delete(ngx_rbtreehash_t *hash, ngx_str_t *key); +void *ngx_rbtreehash_find(ngx_rbtreehash_t *hash, ngx_str_t *key, size_t *len); + +char *ngx_rbtreehash_crete_shared_by_size(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); +char *ngx_rbtreehash_from_path(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); + +ngx_int_t ngx_rbtreehash_merge_value(ngx_rbtreehash_t *conf, ngx_rbtreehash_t *prev); + +extern ngx_module_t ngx_rbtreehash_module; + +#endif //__NGX_RBTREEHASH + +/* Local Variables: */ +/* mode: c */ +/* c-basic-offset: 4 */ +/* c-file-offsets: ((arglist-cont-nonempty . 4)) */ +/* End: */ diff --git a/src/http/modules/ngx_http_limit_var_module.c b/src/http/modules/ngx_http_limit_var_module.c new file mode 100644 index 0000000..a7b587d --- /dev/null +++ b/src/http/modules/ngx_http_limit_var_module.c @@ -0,0 +1,338 @@ + +/* + * Copyright (C) Kirill A. 
Korinskiy + */ + + +#include +#include +#include + + +typedef struct { + ngx_flathash_t *hash; + ngx_uint_t size; + ngx_uint_t rate; + ngx_shmtx_t mutex; + ngx_http_complex_value_t key; +} ngx_http_limit_var_ctx_t; + + +static char *ngx_http_limit_var(ngx_conf_t *cf, ngx_command_t *cmd, + void *conf); + + +static ngx_command_t ngx_http_limit_var_commands[] = { + + { ngx_string("limit_var"), + NGX_HTTP_MAIN_CONF|NGX_CONF_TAKE3, + ngx_http_limit_var, + 0, + 0, + NULL }, + + ngx_null_command +}; + + +static ngx_http_module_t ngx_http_limit_var_module_ctx = { + NULL, /* preconfiguration */ + NULL, /* postconfiguration */ + + NULL, /* create main configuration */ + NULL, /* init main configuration */ + + NULL, /* create server configuration */ + NULL, /* merge server configuration */ + + NULL, /* create location configration */ + NULL /* merge location configration */ +}; + + +ngx_module_t ngx_http_limit_var_module = { + NGX_MODULE_V1, + &ngx_http_limit_var_module_ctx, /* module context */ + ngx_http_limit_var_commands, /* module directives */ + NGX_HTTP_MODULE, /* module type */ + NULL, /* init master */ + NULL, /* init module */ + NULL, /* init process */ + NULL, /* init thread */ + NULL, /* exit thread */ + NULL, /* exit process */ + NULL, /* exit master */ + NGX_MODULE_V1_PADDING +}; + + +static ngx_int_t +ngx_http_limit_var_variable(ngx_http_request_t *r, ngx_http_variable_value_t *v, + uintptr_t data) +{ + ngx_http_limit_var_ctx_t *ctx = (ngx_http_limit_var_ctx_t *)data; + + + ngx_str_t key; + + time_t *value; + ngx_uint_t frequency = 0; + + if (ngx_http_complex_value(r, &ctx->key, &key) != NGX_OK) { + return NGX_ERROR; + } + + if (key.len == 0) { + v->not_found = 1; + return NGX_OK; + } + + r->main->limit_var_set = 1; + + value = ngx_flathash_get(ctx->hash, &key); + + ngx_shmtx_lock(&ctx->mutex); + + /* intreting only a less 16 bit of state */ + + /* have activity in this second */ + if ((*value & 0xFFFF) == (ngx_time() & 0xFFFF)) { + frequency = (ngx_uint_t)*value >> 16; + } + + if (r->main->limit_var_set) { + *value = (frequency + 1) << 16 | (ngx_time() & 0xFFFF); + } + + ngx_shmtx_unlock(&ctx->mutex); + + if (frequency > ctx->rate) { + goto outcast; + } + + v->not_found = 1; + return NGX_OK; + + outcast: + + ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, + "limiting requests, excess %d", + frequency); + + + v->data = ngx_pnalloc(r->pool, NGX_INT_T_LEN); + if (v->data == NULL) { + return NGX_ERROR; + } + + v->len = ngx_sprintf(v->data, "%d", frequency) - v->data; + v->valid = 1; + v->no_cacheable = 0; + v->not_found = 0; + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_limit_var_init_zone(ngx_shm_zone_t *shm_zone, void *data) +{ + ngx_http_limit_var_ctx_t *octx = data; + + ngx_http_limit_var_ctx_t *ctx; + + ctx = shm_zone->data; + + if (octx) { + if (ngx_strcmp(ctx->key.value.data, octx->key.value.data) != 0) { + ngx_log_error(NGX_LOG_EMERG, shm_zone->shm.log, 0, + "limit_req \"%V\" uses the \"%V\" key " + "while previously it used the \"%V\" key", + &shm_zone->name, &ctx->key.value, &octx->key.value); + return NGX_ERROR; + } + + ctx->hash = octx->hash; + + return NGX_OK; + } + + ctx->mutex.lock = (ngx_atomic_t *) shm_zone->shm.addr; + + ctx->hash = (ngx_flathash_t *) ((u_char *)shm_zone->shm.addr + sizeof(ngx_atomic_t)); + + /* value of hash is a time_t */ + + if (ngx_flathash_init(ctx->hash, sizeof(time_t), ctx->size) != NGX_OK) { + return NGX_ERROR; + } + + return NGX_OK; +} + + +static char * +ngx_http_limit_var(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) +{ + u_char 
*p; + size_t size, len; + ngx_str_t *value, name, s; + ngx_int_t rate; + ngx_uint_t i; + ngx_shm_zone_t *shm_zone; + ngx_http_variable_t *var; + ngx_http_limit_var_ctx_t *ctx; + + ngx_http_compile_complex_value_t ccv; + + if (sizeof(time_t) < 4) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "Sorry. ngx_http_limit_var_module required a system" + " with more or equal 32 bit for time_t"); + return NGX_CONF_ERROR; + } + + + + value = cf->args->elts; + + ctx = NULL; + size = 0; + rate = 1; + name.len = 0; + + for (i = 1; i < cf->args->nelts; i++) { + + if (ngx_strncmp(value[i].data, "zone=", 5) == 0) { + + name.data = value[i].data + 5; + + p = (u_char *) ngx_strchr(name.data, ':'); + + if (p) { + name.len = p - name.data; + + p++; + + s.len = value[i].data + value[i].len - p; + s.data = p; + + size = ngx_atoi(s.data, s.len); + if ((ngx_int_t)size != NGX_ERROR && size > 769) { + continue; + } + } + + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid zone size \"%V\"", &value[i]); + return NGX_CONF_ERROR; + } + + if (ngx_strncmp(value[i].data, "rate=", 5) == 0) { + + len = value[i].len; + p = value[i].data + len - 3; + + if (ngx_strncmp(p, "r/s", 3) == 0) { + len -= 3; + } + + rate = ngx_atoi(value[i].data + 5, len - 5); + if (rate <= NGX_ERROR) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid rate \"%V\"", &value[i]); + return NGX_CONF_ERROR; + } + + continue; + } + + if (value[i].data[0] == '$') { + + value[i].len--; + value[i].data++; + + ctx = ngx_pcalloc(cf->pool, sizeof(ngx_http_limit_var_ctx_t)); + if (ctx == NULL) { + return NGX_CONF_ERROR; + } + + ngx_memzero(&ccv, sizeof(ngx_http_compile_complex_value_t)); + + ccv.cf = cf; + ccv.value = &value[1]; + ccv.complex_value = &ctx->key; + ccv.zero = 1; + ccv.conf_prefix = 1; + + if (ngx_http_compile_complex_value(&ccv) != NGX_OK) { + return NGX_CONF_ERROR; + } + + continue; + } + + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid parameter \"%V\"", &value[i]); + return NGX_CONF_ERROR; + } + + if (name.len == 0 || size == 0) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "\"%V\" must have \"zone\" parameter", + &cmd->name); + return NGX_CONF_ERROR; + } + + if (ctx == NULL) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "no var is defined for limit_var_zone \"%V\"", + &cmd->name); + return NGX_CONF_ERROR; + } + + ctx->rate = rate; + + ctx->size = size; + + /* value of hash is a time_t */ + size = ngx_flathash_need_memory(sizeof(time_t), size); + + size += sizeof(ngx_atomic_t); + + shm_zone = ngx_shared_memory_add(cf, &name, size, + &ngx_http_limit_var_module); + if (shm_zone == NULL) { + return NGX_CONF_ERROR; + } + + if (shm_zone->data) { + ctx = shm_zone->data; + + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "limit_var_zone \"%V\" is already bound to var \"%V\"", + &value[1], &ctx->key.value); + return NGX_CONF_ERROR; + } + + shm_zone->init = ngx_http_limit_var_init_zone; + shm_zone->data = ctx; + + s.len = sizeof("limit_var_") - 1 + name.len; + s.data = ngx_palloc(cf->pool, s.len); + if (s.data == NULL) { + return NGX_CONF_ERROR; + } + + ngx_sprintf(s.data, "limit_var_%V", &name); + + var = ngx_http_add_variable(cf, &s, NGX_HTTP_VAR_NOCACHEABLE); + if (var == NULL) { + return NGX_CONF_ERROR; + } + + var->get_handler = ngx_http_limit_var_variable; + var->data = (uintptr_t) ctx; + + return NGX_CONF_OK; +} diff --git a/src/http/ngx_http_request.h b/src/http/ngx_http_request.h index ee87669..60af5af 100644 --- a/src/http/ngx_http_request.h +++ b/src/http/ngx_http_request.h @@ -460,6 +460,7 @@ struct ngx_http_request_s { * we use the 
single bits in the request structure */
     unsigned                          limit_zone_set:1;
+    unsigned                          limit_var_set:1;
     unsigned                          limit_req_set:1;
 
 #if 0
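
Note (not part of the patch): the patch carries no prose, so the standalone C sketch below models two of its core ideas — the slot sizing done by ngx_flathash_need_memory()/ngx_flathash_init() and the per-key counter packing done in ngx_http_limit_var_variable(). The FNV-1a routine and the names stand_in_hash, requested, stored are illustrative stand-ins only; the real code uses ngx_lookup3_hashlittle() and lives inside an nginx shared memory zone.

/*
 * Standalone model of ngx_flathash slot sizing and of the limit_var
 * counter packing; assumptions: FNV-1a stands in for lookup3, and the
 * table header is ignored when counting bytes.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

static uint32_t
stand_in_hash(const void *key, size_t len)
{
    const unsigned char  *p = key;
    uint32_t              h = 2166136261u;

    while (len--) {                        /* FNV-1a, illustration only */
        h ^= *p++;
        h *= 16777619u;
    }

    return h;
}

int
main(void)
{
    size_t    value_len = sizeof(time_t); /* each slot stores one time_t */
    size_t    requested = 1000;           /* "size" from zone=name:size  */
    uint32_t  bits = 0;
    size_t    i;

    /* same rule as ngx_flathash_need_memory(): bit length of size, plus 2 */
    for (i = requested; i; i >>= 1, bits++);
    bits += 2;

    size_t  slots = (size_t) 1 << bits;
    size_t  bytes = slots * value_len;    /* plus the small table header  */

    printf("%zu keys requested -> %zu fixed slots, ~%zu bytes of values\n",
           requested, slots, bytes);

    /* a lookup never fails and never chains: colliding keys share a slot */
    const char  *key = "10.0.0.1";
    uint32_t     slot = stand_in_hash(key, strlen(key))
                        & (uint32_t) (slots - 1);

    /*
     * counter packing from ngx_http_limit_var_variable(): the low 16 bits
     * keep the (truncated) current second, the upper bits count hits seen
     * in that second; a different second resets the count to zero.
     */
    time_t    now = time(NULL);
    time_t    stored = 0;                 /* slot content, initially zero */
    unsigned  frequency = 0;

    if ((stored & 0xFFFF) == (now & 0xFFFF)) {
        frequency = (unsigned) (stored >> 16);
    }
    stored = (time_t) (frequency + 1) << 16 | (now & 0xFFFF);

    printf("key %s -> slot %u, packed counter %#lx\n",
           key, (unsigned) slot, (unsigned long) stored);

    return 0;
}

The trade-off mirrored here is that the table is a fixed array of value slots: a lookup can never fail or allocate, and colliding keys simply share a counter; the "bits += 2" step over-allocates roughly four to eight times the requested key count to make such sharing rare. In configuration terms, "limit_var $var zone=name:size rate=Nr/s;" registers the variable $limit_var_name, which stays empty until a key exceeds the configured rate within the current second and then holds the excess count.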