author	forsyth <forsyth@vitanuova.com>	2010-02-03 20:50:07 +0000
committer	forsyth <forsyth@vitanuova.com>	2010-02-03 20:50:07 +0000
commit	cfa39d5adff9be3c5f695c2c28cffb5f93dd4598 (patch)
tree	dd722d340e4b81253a3c486d1ed2fcff7ea87942
parent	f1dcfd03b4648fd6c0221d14436b391cd368beac (diff)
20100203-2050
-rw-r--r--	libsec/port/mkfile	1
-rw-r--r--	libsec/port/sha2.c	281
-rw-r--r--	libsec/port/sha256block.c	81
-rw-r--r--	libsec/port/sha512block.c	114
4 files changed, 477 insertions, 0 deletions
diff --git a/libsec/port/mkfile b/libsec/port/mkfile
index 7837216f..4465c696 100644
--- a/libsec/port/mkfile
+++ b/libsec/port/mkfile
@@ -6,6 +6,7 @@ CFILES = des.c desmodes.c desECB.c desCBC.c des3ECB.c des3CBC.c\
aes.c blowfish.c \
idea.c \
hmac.c md5.c md5block.c md4.c sha1.c sha1block.c\
+ sha2.c sha256block.c sha512block.c\
sha1pickle.c md5pickle.c\
rc4.c\
genrandom.c prng.c fastrand.c nfastrand.c\
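
The mkfile change only adds the three new sources to CFILES so they are compiled into libsec. Once the library is rebuilt, the entry points added in sha2.c below follow the usual libsec one-shot convention. A minimal usage sketch, assuming a Plan 9-style host program (the <u.h>/<libc.h> includes, print and exits are assumptions about the surrounding environment, not part of this commit; under Inferno the wrapping differs):

	#include <u.h>
	#include <libc.h>
	#include <libsec.h>

	void
	main(void)
	{
		uchar digest[SHA256dlen];
		int i;

		/* one-shot: a non-nil digest pointer pads, finalises and returns at once */
		sha256((uchar*)"abc", 3, digest, nil);
		for(i = 0; i < SHA256dlen; i++)
			print("%2.2ux", digest[i]);
		print("\n");
		exits(nil);
	}
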
diff --git a/libsec/port/sha2.c b/libsec/port/sha2.c
new file mode 100644
index 00000000..581ecc5f
--- /dev/null
+++ b/libsec/port/sha2.c
@@ -0,0 +1,281 @@
+#include "os.h"
+#include <libsec.h>
+
+extern void _sha256block(SHA256state*, uchar*);
+extern void _sha512block(SHA512state*, uchar*);
+
+u32int sha224h0[] = {
+0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939,
+0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4,
+};
+u32int sha256h0[] = {
+0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
+0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
+};
+
+u64int sha384h0[] = {
+0xcbbb9d5dc1059ed8ULL, 0x629a292a367cd507ULL, 0x9159015a3070dd17ULL, 0x152fecd8f70e5939ULL,
+0x67332667ffc00b31ULL, 0x8eb44a8768581511ULL, 0xdb0c2e0d64f98fa7ULL, 0x47b5481dbefa4fa4ULL,
+};
+u64int sha512h0[] = {
+0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL, 0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL,
+0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL, 0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL,
+};
+
+
+static SHA256state *
+sha256init(void)
+{
+ SHA256state *s;
+
+ s = malloc(sizeof(*s));
+ if(s == nil)
+ return nil;
+ s->malloced = 1;
+ s->seeded = 0;
+ s->len = 0;
+ s->blen = 0;
+
+ return s;
+}
+
+static void
+p32(u32int v, uchar *p)
+{
+ p[0] = v>>24;
+ p[1] = v>>16;
+ p[2] = v>>8;
+ p[3] = v>>0;
+}
+
+static void
+p64(u64int v, uchar *p)
+{
+ p32(v>>32, p);
+ p32(v, p+4);
+}
+
+enum {
+ HI = 0,
+ LO = 1,
+};
+static void
+p128(u64int v[2], uchar *p)
+{
+ p64(v[HI], p);
+ p64(v[LO], p+8);
+}
+
+static void
+uvvadd(u64int v[2], int n)
+{
+ v[LO] += n;
+ if(v[LO] < n) /* overflow */
+ v[HI]++;
+}
+
+static void
+uvvmult8(u64int v[2])
+{
+ v[HI] = (v[HI]<<3) | (v[LO] >> (64-3));
+ v[LO] <<= 3;
+}
+
+
+static void
+_sha256(uchar *p, ulong len, SHA256state *s)
+{
+ u32int take;
+
+ /* complete possible partial block from last time */
+ if(s->blen > 0 && s->blen+len >= SHA256bsize) {
+ take = SHA256bsize-s->blen;
+ memmove(s->buf+s->blen, p, take);
+ p += take;
+ len -= take;
+ _sha256block(s, s->buf);
+ s->len += SHA256bsize;
+ s->blen = 0;
+ memset(s->buf, 0, SHA256bsize);
+ }
+ /* whole blocks */
+ while(len >= SHA256bsize) {
+ _sha256block(s, p);
+ s->len += SHA256bsize;
+ p += SHA256bsize;
+ len -= SHA256bsize;
+ }
+ /* keep possible leftover bytes */
+ if(len > 0) {
+ memmove(s->buf+s->blen, p, len);
+ s->blen += len;
+ }
+}
+
+static void
+sha256finish(SHA256state *s, uchar *digest, int smaller)
+{
+ int i;
+ uchar end[SHA256bsize+8];
+ u32int nzero, nb, nd;
+
+ nzero = (2*SHA256bsize - s->blen - 1 - 8) % SHA256bsize;
+ end[0] = 0x80;
+ memset(end+1, 0, nzero);
+ nb = 8*(s->len+s->blen);
+ p64(nb, end+1+nzero);
+ _sha256(end, 1+nzero+8, s);
+
+ nd = SHA256dlen/4;
+ if(smaller)
+ nd = SHA224dlen/4;
+ for(i = 0; i < nd; i++, digest += 4)
+ p32(s->h32[i], digest);
+}
+
+static SHA256state*
+sha256x(uchar *p, ulong len, uchar *digest, SHA256state *s, int smaller)
+{
+ if(s == nil) {
+ s = sha256init();
+ if(s == nil)
+ return nil;
+ }
+
+ if(s->seeded == 0){
+ memmove(s->h32, smaller? sha224h0: sha256h0, sizeof s->h32);
+ s->seeded = 1;
+ }
+
+ _sha256(p, len, s);
+
+ if(digest == 0)
+ return s;
+
+ sha256finish(s, digest, smaller);
+ if(s->malloced == 1)
+ free(s);
+ return nil;
+}
+
+SHA256state*
+sha224(uchar *p, ulong len, uchar *digest, SHA256state *s)
+{
+ return sha256x(p, len, digest, s, 1);
+}
+
+SHA256state*
+sha256(uchar *p, ulong len, uchar *digest, SHA256state *s)
+{
+ return sha256x(p, len, digest, s, 0);
+}
+
+
+static SHA512state *
+sha512init(void)
+{
+ SHA512state *s;
+
+ s = malloc(sizeof(*s));
+ if(s == nil)
+ return nil;
+ s->malloced = 1;
+ s->seeded = 0;
+ s->nb128[HI] = 0;
+ s->nb128[LO] = 0;
+ s->blen = 0;
+
+ return s;
+}
+
+static void
+_sha512(uchar *p, ulong len, SHA512state *s)
+{
+ u32int take;
+
+ /* complete possible partial block from last time */
+ if(s->blen > 0 && s->blen+len >= SHA512bsize) {
+ take = SHA512bsize-s->blen;
+ memmove(s->buf+s->blen, p, take);
+ p += take;
+ len -= take;
+ _sha512block(s, s->buf);
+ uvvadd(s->nb128, SHA512bsize);
+ s->blen = 0;
+ memset(s->buf, 0, SHA512bsize);
+ }
+ /* whole blocks */
+ while(len >= SHA512bsize) {
+ _sha512block(s, p);
+ uvvadd(s->nb128, SHA512bsize);
+ p += SHA512bsize;
+ len -= SHA512bsize;
+ }
+ /* keep possible leftover bytes */
+ if(len > 0) {
+ memmove(s->buf+s->blen, p, len);
+ s->blen += len;
+ }
+}
+
+void
+sha512finish(SHA512state *s, uchar *digest, int smaller)
+{
+ int i;
+ uchar end[SHA512bsize+16];
+ u32int nzero, n;
+ u64int nb[2];
+
+ nzero = (2*SHA512bsize - s->blen - 1 - 16) % SHA512bsize;
+ end[0] = 0x80;
+ memset(end+1, 0, nzero);
+ nb[0] = s->nb128[0];
+ nb[1] = s->nb128[1];
+ uvvadd(nb, s->blen);
+ uvvmult8(nb);
+ p128(nb, end+1+nzero);
+ _sha512(end, 1+nzero+16, s);
+
+ n = SHA512dlen/8;
+ if(smaller)
+ n = SHA384dlen/8;
+ for(i = 0; i < n; i++, digest += 8)
+ p64(s->h64[i], digest);
+}
+
+static SHA512state*
+sha512x(uchar *p, ulong len, uchar *digest, SHA512state *s, int smaller)
+{
+ if(s == nil) {
+ s = sha512init();
+ if(s == nil)
+ return nil;
+ }
+
+ if(s->seeded == 0){
+ memmove(s->h64, smaller? sha384h0: sha512h0, sizeof s->h64);
+ s->seeded = 1;
+ }
+
+ _sha512(p, len, s);
+
+ if(digest == 0)
+ return s;
+
+ sha512finish(s, digest, smaller);
+ if(s->malloced == 1)
+ free(s);
+ return nil;
+}
+
+SHA512state*
+sha384(uchar *p, ulong len, uchar *digest, SHA512state *s)
+{
+ return sha512x(p, len, digest, s, 1);
+}
+
+SHA512state*
+sha512(uchar *p, ulong len, uchar *digest, SHA512state *s)
+{
+ return sha512x(p, len, digest, s, 0);
+}
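
sha256x and sha512x follow the convention of the existing libsec hash routines: a nil digest pointer returns the (possibly malloced) state so more data can be absorbed later, while a non-nil digest pads, finalises, and frees a malloced state. A sketch of incremental use, under the same Plan 9-style host assumptions as the example above:

	#include <u.h>
	#include <libc.h>
	#include <libsec.h>

	void
	main(void)
	{
		SHA512state *s;
		uchar digest[SHA384dlen];
		int i;

		/* nil digest: allocate a state, absorb the first chunk, keep the state */
		s = sha384((uchar*)"hello, ", 7, nil, nil);
		/* non-nil digest: absorb the rest, finalise, free the malloced state */
		sha384((uchar*)"world", 5, digest, s);
		for(i = 0; i < SHA384dlen; i++)
			print("%2.2ux", digest[i]);
		print("\n");
		exits(nil);
	}
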
diff --git a/libsec/port/sha256block.c b/libsec/port/sha256block.c
new file mode 100644
index 00000000..f99f1b67
--- /dev/null
+++ b/libsec/port/sha256block.c
@@ -0,0 +1,81 @@
+#include "os.h"
+#include <libsec.h>
+
+enum {
+ SHA256rounds = 64,
+};
+
+u32int sha256const[] = {
+0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
+0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
+0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
+0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
+0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
+0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
+};
+
+
+#define CH(x,y,z) ((x&y) ^ (~x&z))
+#define MAJ(x,y,z) ((x&y) ^ (x&z) ^ (y&z))
+#define ROTR32(n, v) ((v>>n) | (v<<(32-n)))
+#define ROTR64(n, v) ((v>>n) | (v<<(64-n)))
+#define SHR(n, x) (x>>n)
+
+#define SIGMA0a(x) (ROTR32(2, x)^ROTR32(13, x)^ROTR32(22, x))
+#define SIGMA1a(x) (ROTR32(6, x)^ROTR32(11, x)^ROTR32(25, x))
+#define sigma0a(x) (ROTR32(7, x)^ROTR32(18, x)^SHR(3, x))
+#define sigma1a(x) (ROTR32(17, x)^ROTR32(19, x)^SHR(10, x))
+
+/* for use in _sha*() */
+#define A v[0]
+#define B v[1]
+#define C v[2]
+#define D v[3]
+#define E v[4]
+#define F v[5]
+#define G v[6]
+#define H v[7]
+
+static u32int
+g32(uchar *p)
+{
+ return p[0]<<24|p[1]<<16|p[2]<<8|p[3]<<0;
+}
+
+void
+_sha256block(SHA256state *s, uchar *buf)
+{
+ u32int w[2*SHA256bsize/4];
+ int i, t;
+ u32int t1, t2;
+ u32int v[8];
+
+ for(t = 0; t < nelem(w)/2; t++) {
+ if(t < 16) {
+ w[t] = g32(buf);
+ buf += 4;
+ }
+ }
+
+ memmove(v, s->h32, sizeof s->h32);
+
+ for(t = 0; t < SHA256rounds; t++) {
+ if(t >= 16)
+ w[t&31] = sigma1a(w[(t-2)&31]) + w[(t-7)&31] + sigma0a(w[(t-15)&31]) + w[(t-16)&31];
+ t1 = H + SIGMA1a(E) + CH(E,F,G) + sha256const[t] + w[t&31];
+ t2 = SIGMA0a(A) + MAJ(A,B,C);
+ H = G;
+ G = F;
+ F = E;
+ E = D+t1;
+ D = C;
+ C = B;
+ B = A;
+ A = t1+t2;
+ }
+
+ for(i = 0; i < nelem(v); i++)
+ s->h32[i] += v[i];
+}
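
_sha256block keeps only a 32-entry rolling window of the message schedule, indexed with t&31, rather than the full 64-word array in the FIPS 180 description; the values computed are identical (each word depends only on the previous 16), and the schedule storage is halved. For comparison, a sketch of the textbook full expansion; the expand name and the locally redefined macros are illustrative only, not part of the library:

	#include <u.h>

	#define ROTR32(n, v)	(((v)>>(n)) | ((v)<<(32-(n))))
	#define SHR(n, x)	((x)>>(n))
	#define sigma0a(x)	(ROTR32(7, x)^ROTR32(18, x)^SHR(3, x))
	#define sigma1a(x)	(ROTR32(17, x)^ROTR32(19, x)^SHR(10, x))

	/* expand one 64-byte block into the full 64-word schedule up front */
	static void
	expand(uchar *buf, u32int W[64])
	{
		int t;

		for(t = 0; t < 16; t++, buf += 4)
			W[t] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3];
		for(; t < 64; t++)
			W[t] = sigma1a(W[t-2]) + W[t-7] + sigma0a(W[t-15]) + W[t-16];
	}

With a full schedule the round loop would read W[t] directly where the committed code reads w[t&31].
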
diff --git a/libsec/port/sha512block.c b/libsec/port/sha512block.c
new file mode 100644
index 00000000..bc3f4cfd
--- /dev/null
+++ b/libsec/port/sha512block.c
@@ -0,0 +1,114 @@
+#include "os.h"
+#include <libsec.h>
+
+
+enum {
+ SHA512rounds = 80,
+};
+
+u64int sha512const[] = {
+0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
+0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
+0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
+0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
+0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
+0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
+0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
+0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
+0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
+0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
+0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
+0xd192e819d6ef5218ULL, 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
+0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
+0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
+0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
+0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
+0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
+0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
+0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
+0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL,
+};
+
+
+static u32int
+g32(uchar *p)
+{
+ return p[0]<<24|p[1]<<16|p[2]<<8|p[3]<<0;
+}
+
+static u64int
+g64(uchar *p)
+{
+ return ((u64int)g32(p)<<32)|g32(p+4);
+}
+
+
+#define CH(x,y,z) ((x&y) ^ (~x&z))
+#define MAJ(x,y,z) ((x&y) ^ (x&z) ^ (y&z))
+#define ROTR32(n, v) ((v>>n) | (v<<(32-n)))
+#define ROTR64(n, v) ((v>>n) | (v<<(64-n)))
+#define SHR(n, x) (x>>n)
+
+#define SIGMA0b(x) (ROTR64(28, x)^ROTR64(34, x)^ROTR64(39, x))
+#define SIGMA1b(x) (ROTR64(14, x)^ROTR64(18, x)^ROTR64(41, x))
+#define sigma0b(x) (ROTR64(1, x)^ROTR64(8, x)^SHR(7, x))
+#define sigma1b(x) (ROTR64(19, x)^ROTR64(61, x)^SHR(6, x))
+
+#define A v[0]
+#define B v[1]
+#define C v[2]
+#define D v[3]
+#define E v[4]
+#define F v[5]
+#define G v[6]
+#define H v[7]
+
+void
+_sha512block(SHA512state *s, uchar *buf)
+{
+ u64int w[2*SHA512bsize/8];
+ int i, t;
+ u64int t1, t2, t3, v[8];
+
+ for(t = 0; t < nelem(w)/2; t++) {
+ if(t < 16) {
+ w[t] = g64(buf);
+ buf += 8;
+ }
+ }
+
+ memmove(v, s->h64, sizeof s->h64);
+
+ for(t = 0; t < SHA512rounds; t++) {
+ if(t >= 16) {
+ /* w[t&31] = sigma1b(w[(t-2)&31]) + w[(t-7)&31] + sigma0b(w[(t-15)&31]) + w[(t-16)&31]; */
+ t2 = w[(t-2)&31];
+ t3 = w[(t-15)&31];
+ /* w[t&31] = sigma1b(t2) + w[(t-7)&31] + sigma0b(t3) + w[(t-16)&31]; */
+ t1 = sigma1b(t2);
+ t1 += w[(t-7)&31];
+ t1 += sigma0b(t3);
+ t1 += w[(t-16)&31];
+ w[t&31] = t1;
+ }
+ /* t1 = H + SIGMA1b(E) + CH(E,F,G) + sha512const[t] + w[t&31]; */
+ t1 = H;
+ t1 += SIGMA1b(E);
+ t1 += CH(E, F, G);
+ t1 += sha512const[t] + w[t&31];
+ /* t2 = SIGMA0b(A) + MAJ(A,B,C); */
+ t2 = SIGMA0b(A);
+ t2 += MAJ(A, B, C);
+ H = G;
+ G = F;
+ F = E;
+ E = D+t1;
+ D = C;
+ C = B;
+ B = A;
+ A = t1+t2;
+ }
+
+ for(i = 0; i < nelem(v); i++)
+ s->h64[i] += v[i];
+}
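
SHA-512 appends a 128-bit big-endian bit count to the padding, so sha2.c keeps the running byte count in the two-word nb128 and converts it to bits at finish time: uvvadd is an add with carry into the high word, and uvvmult8 shifts left by 3 across both words. A small stand-alone check of that arithmetic (the two helpers are copied from sha2.c above; the test values are illustrative):

	#include <u.h>
	#include <libc.h>

	enum { HI, LO };

	static void
	uvvadd(u64int v[2], int n)
	{
		v[LO] += n;
		if(v[LO] < n)		/* overflow: carry into the high word */
			v[HI]++;
	}

	static void
	uvvmult8(u64int v[2])
	{
		v[HI] = (v[HI]<<3) | (v[LO] >> (64-3));
		v[LO] <<= 3;
	}

	void
	main(void)
	{
		u64int v[2];

		v[HI] = 0;
		v[LO] = (1ULL<<61) - 4;	/* bytes hashed so far */
		uvvadd(v, 4);		/* reach exactly 2^61 bytes */
		uvvmult8(v);		/* convert to bits: 2^64, i.e. high word 1, low word 0 */
		print("bits = %llux %llux\n", v[HI], v[LO]);
		exits(nil);
	}
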