Handwritten Digit Recognition





   ##################
   ##################
            ######
            ######
            ######
            ######
            ######
            ######
            ###
            ###
         ######
         ######
    


         #########
         #########
      ###############
      ###############
      ###         ###
      ###         ###
   ######         ###
   ######         ###
   ######         ###
   ######         ###
      ######   ###
      ######   ###
         ###
         ###
    
The digits shown above are 8 x 8. They were derived from the MNIST handwritten digit dataset (http://yann.lecun.com/exdb/mnist/), which is itself based on NIST data.

Can you identify them as a 7 and a 0? I only got 87% accuracy myself. The Linear Machine method converges much more quickly than the Neural Network does.
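
For reference, each digit reaches the agent as one line of WIDTH x HEIGHT = 64 space-separated pixel values (0 or 1), written row by row; the parsing loop in src/lab3.c below simply peels them off in that order. A made-up fragment, purely for illustration:

       row 0:           0 0 0 0 0 0 0 0
       row 1:           0 1 1 1 1 1 1 0
       ...
       one input line:  0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 0 ...   (64 fields in all)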

src/lab3.c - Handwritten digit recognition agent

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #include "../inc/split_stdin.h"
    #include "../inc/linearMachine.h"
    #include "../inc/backProp.h"

    #define WIDTH 8
    #define HEIGHT 8

    #define DEBUG 0

    double **sample;    // 0 or 1
    double **blurred;   // 0.0 .. 1.0

    int num_fields;
    char **response;

    int main(int argc, char *argv[]) {
        int arg;
        int doBP = 0;
        int blur = 0;
        int trainAll = 0;
        int seq;
        int i, j;
        int correct;
        int guess;
        int actual;
        int maxIterations = 100;
        double eta = 0.3;
        double minConf = 0.0;   // accepted on the command line but not used yet
        double confidence;

        linearMachine *lm = NULL;
        backProp *bp = NULL;

        for (arg = 1; arg < argc; arg++) {
            // How do I know when to stop??
            if (arg < argc-1 && strcmp(argv[arg], "-iter") == 0) {
                maxIterations = atoi(argv[arg+1]);
                arg++;      // Yeah, yeah, I know
            }
            else if (arg < argc-1 && strcmp(argv[arg], "-minConf") == 0) {
                minConf = atof(argv[arg+1]);
                arg++;
            }
            else if (arg < argc-1 && strcmp(argv[arg], "-eta") == 0) {
                eta = atof(argv[arg+1]);
                arg++;
            }
            else if (strcmp(argv[arg], "-blur") == 0) {
                blur = 1;
            }
            else if (strcmp(argv[arg], "-trainAll") == 0) {
                trainAll = 1;
            }
            else if (strcmp(argv[arg], "-bp") == 0) {
                doBP = 1;
            }
            else {
                fprintf(stderr, "\nUsage: %s -bp -trainAll -blur -eta 0.3 -minConf 0.3 -iter n\n", argv[0]);
                exit(1);
            }
        }

        // Allocate space for character grid
        sample = (double **) calloc(WIDTH, sizeof(double *));
        for (i = 0; i < WIDTH; i++) {
            sample[i] = (double *) calloc(HEIGHT, sizeof(double));
        }

        if (blur) {
            blurred = (double **) calloc(WIDTH, sizeof(double *));
            for (i = 0; i < WIDTH; i++) {
                blurred[i] = (double *) calloc(HEIGHT, sizeof(double));
            }
        }

        // Initialize learner
        if (!doBP) lm = createLM(WIDTH, HEIGHT, 10);
        if (doBP) bp = createBP(WIDTH, HEIGHT, 10, eta);

        // Get a pattern and learn from it
        seq = 0;
        while (1) {
            seq++;
            response = split_stdin(" ", &num_fields);
            if (DEBUG) fprintf(stderr, "fields = %d\n", num_fields);
            if (num_fields != WIDTH * HEIGHT) {
                fprintf(stderr, "*** Incorrect size %d, should be %d x %d\n",
                        num_fields, WIDTH, HEIGHT);
                break;
            }

            // Extract pixels
            for (i = 0; i < WIDTH; i++) {
                for (j = 0; j < HEIGHT; j++) {
                    sample[i][j] = response[HEIGHT*i + j][0] - '0';
                    if (DEBUG) fprintf(stderr, "%.1f ", sample[i][j]);
                }
                if (DEBUG) fprintf(stderr, "\n");
            }
            free_split_stdin(response, num_fields);

            // Blur image?
            if (blur) {
                for (i = 0; i < WIDTH; i++) {
                    for (j = 0; j < HEIGHT; j++) {
                        blurred[i][j] = sample[i][j] / 2;
                        if (i > 0) blurred[i][j] += sample[i-1][j] / 8;
                        if (i < WIDTH-1) blurred[i][j] += sample[i+1][j] / 8;
                        if (j > 0) blurred[i][j] += sample[i][j-1] / 8;
                        if (j < HEIGHT-1) blurred[i][j] += sample[i][j+1] / 8;
                    }
                }
            }

            // Send prediction back to the trainer
            if (blur) {
                if (lm != NULL) guess = predictLM(lm, blurred, &confidence);
                if (bp != NULL) guess = predictBP(bp, blurred, &confidence);
            }
            else {
                if (lm != NULL) guess = predictLM(lm, sample, &confidence);
                if (bp != NULL) guess = predictBP(bp, sample, &confidence);
            }
            printf("%d\n", guess);
            fflush(stdout);
            if (DEBUG) fprintf(stderr, "Guess: %d\n", guess);

            // Well, did we get it right?
            response = split_stdin(" ", &num_fields);
            if (num_fields != 10) {
                fprintf(stderr, "*** Incorrect answer size %d, should be 10\n",
                        num_fields);
                break;
            }
            correct = strcmp(response[0], "correct") == 0;
            actual = response[3][0] - '0';      // fourth field of the verdict is the true label
            if (DEBUG) fprintf(stderr, "fields = %d\n", num_fields);
            fprintf(stderr, "Guess #%d is %d [%.2f]: ", seq, guess, confidence);
            for (i = 0; i < num_fields; i++) {
                fprintf(stderr, "%s ", response[i]);
            }
            fprintf(stderr, "\n");
            free_split_stdin(response, num_fields);

            // Adjust weights if wrong; train on the same representation we predicted from
            if (trainAll || !correct) {
                if (lm != NULL) adjustWeightsLM(lm, blur ? blurred : sample, actual);
                if (bp != NULL) adjustWeightsBP(bp, blur ? blurred : sample, actual);
            }

            if (seq >= maxIterations) break;
        }

        exit(0);
    }
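
The program above is an agent: an external trainer (not shown here) writes a digit on the agent's stdin, reads the guess from its stdout, then writes a one-line verdict back. One hypothetical round trip looks like this; the verdict is the 10-field line the loop parses, with the true label in the fourth field, exactly the lines echoed to stderr in lab3_brief.out below:

       trainer -> agent:  0 0 0 1 1 0 0 0 ... (64 pixel values on one line)
       agent -> trainer:  7
       trainer -> agent:  correct (label is 7, error rate = 3/17 = 17.6471%)

As for the flags: -bp selects the neural network instead of the linear machine, -blur smooths the pixels before predicting, -trainAll updates weights after every example instead of only after mistakes, -eta sets the learning rate, -iter caps the number of examples, and -minConf is parsed but not currently used.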

src/linearMachine.c - Linear Machine Solver

    #include <stdio.h>
    #include <stdlib.h>
    #include <float.h>

    #include "../inc/linearMachine.h"

    linearMachine *createLM(int w, int h, int c) {
        int i, j;

        linearMachine *lm = (linearMachine *) calloc(1, sizeof(linearMachine));
        lm->width = w;
        lm->height = h;
        lm->classes = c;

        lm->bias = (double *) calloc(c, sizeof(double));
        lm->h = (double *) calloc(c, sizeof(double));

        lm->weight = (double ***) calloc(w, sizeof(double **));
        for (i = 0; i < w; i++) {
            lm->weight[i] = (double **) calloc(h, sizeof(double *));
            for (j = 0; j < h; j++) {
                lm->weight[i][j] = (double *) calloc(c, sizeof(double));
            }
        }

        return lm;
    }

    int predictLM(linearMachine *lm, double **sample, double *confidence) {
        int maxClass;
        double maxSum;
        double nextMaxSum;
        double sum;
        int c;
        int i, j;

        for (c = 0; c < lm->classes; c++) {
            sum = lm->bias[c];
            for (i = 0; i < lm->width; i++) {
                for (j = 0; j < lm->height; j++) {
                    sum += lm->weight[i][j][c] * sample[i][j];
                }
            }
            lm->h[c] = sum;

            if (c == 0 || sum > maxSum) {
                maxSum = sum;
                maxClass = c;
            }
        }

        // Find second largest score (scores can be negative, so start from -DBL_MAX)
        nextMaxSum = -DBL_MAX;
        for (c = 0; c < lm->classes; c++) {
            if (c != maxClass && lm->h[c] > nextMaxSum) {
                nextMaxSum = lm->h[c];
            }
        }
        *confidence = maxSum - nextMaxSum;

        return maxClass;
    }

    void adjustWeightsLM(linearMachine *lm, double **sample, int actual) {
        int i, j;
        int c;
        double alpha;

        alpha = 1.0 / (lm->width * lm->height * 10.0);

        // Push the correct class's score up ...
        lm->bias[actual] += alpha;
        for (i = 0; i < lm->width; i++) {
            for (j = 0; j < lm->height; j++) {
                lm->weight[i][j][actual] += alpha * sample[i][j];
            }
        }

        // ... and push down any other class whose score came within 1 of it
        for (c = 0; c < lm->classes; c++) {
            if (c != actual && lm->h[actual] - lm->h[c] < 1) {
                lm->bias[c] -= alpha;
                for (i = 0; i < lm->width; i++) {
                    for (j = 0; j < lm->height; j++) {
                        lm->weight[i][j][c] -= alpha * sample[i][j];
                    }
                }
            }
        }
    }
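
The linear machine keeps one weight plane and one bias per class, predicts the class with the highest linear score, and reports the gap between the best and second-best score as its confidence. Below is a minimal stand-alone sketch of driving it directly, assuming the same 8 x 8 grids lab3.c uses; the column-of-pixels sample is made up purely for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    #include "../inc/linearMachine.h"

    int main(void) {
        int i, guess;
        double confidence;
        double **grid;

        // Build an 8 x 8 sample of zeros, then light up one column of pixels
        grid = (double **) calloc(8, sizeof(double *));
        for (i = 0; i < 8; i++) grid[i] = (double *) calloc(8, sizeof(double));
        for (i = 0; i < 8; i++) grid[i][3] = 1.0;

        linearMachine *lm = createLM(8, 8, 10);     // 8 x 8 inputs, 10 digit classes

        // Untrained machine: every score is zero, so it just returns class 0
        guess = predictLM(lm, grid, &confidence);
        printf("before training: guess %d, confidence %.2f\n", guess, confidence);

        // Tell it the pattern is really a "1": class 1's weights go up, and any
        // class that scored within 1 of it gets pushed down
        adjustWeightsLM(lm, grid, 1);

        guess = predictLM(lm, grid, &confidence);
        printf("after one update: guess %d, confidence %.2f\n", guess, confidence);
        return 0;
    }

After the single call to adjustWeightsLM, class 1 is the only class whose weights line up with the pattern, so the second predictLM call returns 1 with a positive confidence.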

src/backProp.c - Backpropagation Neural Net

    #include <stdio.h>
    #include <math.h>
    #include <stdlib.h>
    #include <string.h>
    #include <time.h>

    #include "../inc/backProp.h"

    #define DEBUG 0

    void init_rand() {
        srand((int)time(NULL));
    }

    // Random initial weight, from -1 to 1
    double randWeight() {
        return ((2.0 * rand()) / RAND_MAX - 1.0);
    }

    backProp *createBP(int w, int h, int c, double eta) {
        int i, j, k;

        // Strange place to do this ...
        init_rand();

        backProp *bp = (backProp *) calloc(1, sizeof(backProp));
        bp->width = w;
        bp->height = h;
        bp->inputs = w * h;
        bp->hiddens = w + h;    // I guess 16 hiddens is ok
        bp->classes = c;
        bp->eta = eta;

        bp->weightBottom = (double ***) calloc(bp->width, sizeof(double **));
        for (i = 0; i < bp->width; i++) {
            bp->weightBottom[i] = (double **) calloc(bp->height, sizeof(double *));
            for (j = 0; j < bp->height; j++) {
                bp->weightBottom[i][j] = (double *) calloc(bp->hiddens, sizeof(double));
                for (k = 0; k < bp->hiddens; k++) {
                    bp->weightBottom[i][j][k] = randWeight();
                }
            }
        }

        bp->biasBottom = (double **) calloc(bp->hiddens, sizeof(double *));
        bp->biasTop = (double *) calloc(bp->classes, sizeof(double));
        bp->weightTop = (double **) calloc(bp->hiddens, sizeof(double *));
        for (i = 0; i < bp->hiddens; i++) {
            bp->weightTop[i] = (double *) calloc(bp->classes, sizeof(double));
            for (k = 0; k < bp->classes; k++) {
                bp->weightTop[i][k] = randWeight();
            }

            // Only slot 0 of each biasBottom row is used below, as the unit's bias
            bp->biasBottom[i] = (double *) calloc(bp->inputs, sizeof(double));
            for (k = 0; k < bp->inputs; k++) {
                bp->biasBottom[i][k] = randWeight();
            }
        }
        for (k = 0; k < bp->classes; k++) {
            bp->biasTop[k] = randWeight();
            if (DEBUG) fprintf(stderr, "w[%d] = %.1f\n", k, bp->biasTop[k]);
        }

        bp->hidden = (double *) calloc(bp->hiddens, sizeof(double));
        bp->output = (double *) calloc(bp->classes, sizeof(double));

        return bp;
    }

    int predictBP(backProp *bp, double **sample, double *confidence) {
        int i, j, k;
        int best;
        double sum;
        double nextMaxSum;

        // Calculate hidden values
        if (DEBUG) fprintf(stderr, "Hiddens: ");
        for (k = 0; k < bp->hiddens; k++) {
            sum = bp->biasBottom[k][0];     // slot 0 holds this hidden unit's bias
            for (i = 0; i < bp->width; i++) {
                for (j = 0; j < bp->height; j++) {
                    sum += bp->weightBottom[i][j][k] * sample[i][j];
                }
            }
            bp->hidden[k] = 1.0 / (1.0 + exp(-sum));
            if (DEBUG) fprintf(stderr, " %.1f", bp->hidden[k]);
        }
        if (DEBUG) fprintf(stderr, "\n");

        // Calculate output values
        if (DEBUG) fprintf(stderr, "Outputs: ");
        for (k = 0; k < bp->classes; k++) {
            sum = 0.0;
            for (i = 0; i < bp->hiddens; i++) {
                sum += bp->weightTop[i][k] * bp->hidden[i];
            }
            sum += bp->biasTop[k];
            bp->output[k] = 1.0 / (1.0 + exp(-sum));
            if (DEBUG) fprintf(stderr, " %.1f", bp->output[k]);
        }
        if (DEBUG) fprintf(stderr, "\n");
        if (DEBUG) fflush(stderr);

        // Find highest output activation
        best = 0;
        for (k = 1; k < bp->classes; k++) {
            if (bp->output[k] > bp->output[best]) {
                best = k;
            }
        }

        // Find second largest output (outputs are sigmoids, so always positive)
        nextMaxSum = -1.0;
        for (k = 0; k < bp->classes; k++) {
            if (k != best && bp->output[k] > nextMaxSum) {
                nextMaxSum = bp->output[k];
            }
        }
        *confidence = bp->output[best] - nextMaxSum;

        return best;
    }


    void adjustWeightsBP(backProp *bp, double **sample, int actual) {
        int i, j, k;
        double sum;
        double delta;

        // Propagate the error backwards
        for (k = 0; k < bp->classes; k++) {
            sum = (k == actual) ? 1.0 : 0.0;    // 1 if correct, else 0
            sum -= bp->output[k];               // Predicted

            // Take derivative
            delta = sum * bp->output[k] * (1 - bp->output[k]);

            // Update weights from hiddens to outputs
            for (i = 0; i < bp->hiddens; i++) {
                bp->weightTop[i][k] += bp->eta * delta * bp->hidden[i];
            }

            // Update bias from hiddens to outputs
            bp->biasTop[k] += bp->eta * delta;

            // Update weights from inputs to hiddens
            // (this applies the output delta for class k directly to hidden slot k's
            // input weights, rather than backpropagating a separate hidden delta)
            for (i = 0; i < bp->width; i++) {
                for (j = 0; j < bp->height; j++) {
                    bp->weightBottom[i][j][k] += bp->eta * delta * sample[i][j];
                }
            }

            // Update the hidden-unit biases (slot 0) the same way
            for (i = 0; i < bp->hiddens; i++) {
                bp->biasBottom[i][0] += bp->eta * delta;
            }
        }
    }


    //            // Now propagate the error backwards
    //            for (node = num_nodes - dobias - 1; node >= num_input_nodes; node--) {
    //                output = nodes[node].value;
    //                if (nodes[node].high_output_arc == 0) {     // Means output unit
    //                    sum = train_out[training][node - num_lower_nodes] - output;
    //                }
    //                else {  // Must be a hidden unit
    //                    sum = 0.0;
    //                    for (arc = nodes[node].low_output_arc; arc < nodes[node].high_output_arc; arc++) {
    //                        if (arcs[arc].from_node == node) {
    //                            sum += arcs[arc].weight * delta[arcs[arc].to_node];
    //                        }
    //                    }
    //                }
    //                delta[node] = sum * output * (1.0 - output);     // Derivative
    //                for (arc = nodes[node].low_input_arc; arc < nodes[node].high_input_arc; arc++) {
    //                    if (arcs[arc].to_node == node) {
    //                        arcs[arc].weight += bp->eta * delta[node] * nodes[arcs[arc].from_node].value;
    //                    }
    //                }
    //            }
    //
    //            if (dobias) {
    //                for (arc = nodes[num_nodes-1].low_output_arc; arc < nodes[num_nodes-1].high_output_arc; arc++) {
    //                    arcs[arc].weight += bp->eta * delta[arcs[arc].to_node];
    //                }
    //            }
    //        }
    //    }
    //}
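
Note that adjustWeightsBP above pushes each output-layer delta straight onto the input-to-hidden weights of the matching hidden slot; full backpropagation would first turn the output deltas into per-hidden-unit deltas, roughly what the commented-out fragment at the end is gesturing at. Here is a textbook-style sketch of that step written against the same backProp fields; the function name adjustWeightsBPFull is made up, hidden biases are skipped for brevity, and this is not the update the results below were produced with:

    #include <stdlib.h>

    #include "../inc/backProp.h"

    // Hypothetical variant of adjustWeightsBP that backpropagates through the
    // hidden layer (standard two-layer backprop, not the method used above).
    void adjustWeightsBPFull(backProp *bp, double **sample, int actual) {
        int i, j, h, k;
        double *deltaOut = (double *) calloc(bp->classes, sizeof(double));
        double *deltaHid = (double *) calloc(bp->hiddens, sizeof(double));

        // Output deltas: (target - output) times the sigmoid derivative
        for (k = 0; k < bp->classes; k++) {
            double target = (k == actual) ? 1.0 : 0.0;
            deltaOut[k] = (target - bp->output[k]) * bp->output[k] * (1.0 - bp->output[k]);
        }

        // Hidden deltas: output error weighted back through weightTop
        for (h = 0; h < bp->hiddens; h++) {
            double sum = 0.0;
            for (k = 0; k < bp->classes; k++) {
                sum += bp->weightTop[h][k] * deltaOut[k];
            }
            deltaHid[h] = sum * bp->hidden[h] * (1.0 - bp->hidden[h]);
        }

        // Hidden-to-output weights and biases (same update the code above uses)
        for (k = 0; k < bp->classes; k++) {
            for (h = 0; h < bp->hiddens; h++) {
                bp->weightTop[h][k] += bp->eta * deltaOut[k] * bp->hidden[h];
            }
            bp->biasTop[k] += bp->eta * deltaOut[k];
        }

        // Input-to-hidden weights, driven by the hidden deltas
        for (i = 0; i < bp->width; i++) {
            for (j = 0; j < bp->height; j++) {
                for (h = 0; h < bp->hiddens; h++) {
                    bp->weightBottom[i][j][h] += bp->eta * deltaHid[h] * sample[i][j];
                }
            }
        }

        free(deltaOut);
        free(deltaHid);
    }

Like the original update, this assumes predictBP was just called on the same sample, so bp->hidden and bp->output still hold that sample's activations.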

src/split_stdin.c - Line reader

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #include "../inc/split_stdin.h"

    /* Splits a line from stdin into several strings.
     * A line is assumed to consist of several fields separated by
     * characters from separators.
     * num_fields is set to the number of strings.
     * num_fields is -1 if EOF is reached before any strings are found.
     */

    char **split_stdin(char *separators, int *num_fields) {
        int c, nfields, nchars, clen, flen;
        char *current;
        char **fields;

        current = (char *) calloc(10, sizeof(char));
        clen = 10;
        nchars = 0;
        fields = (char **) calloc(10, sizeof(char *));
        flen = 10;
        nfields = 0;
        do {
            c = getc(stdin);
            if (NULL == strchr(separators, c) && '\n' != c && EOF != c) {
                // Ordinary character: append it to the current field, growing as needed
                if (nchars == clen) {
                    clen += clen;
                    current = (char *) realloc(current, clen * sizeof(char));
                }
                current[nchars] = c;
                nchars++;
            }
            else if (nchars > 0) {
                // A separator, newline, or EOF ends the current field
                current = (char *) realloc(current, (nchars+1) * sizeof(char));
                current[nchars] = '\0';
                if (nfields == flen) {
                    flen += flen;
                    fields = (char **) realloc(fields, flen * sizeof(char *));
                }
                fields[nfields] = current;
                nfields++;
                current = (char *) calloc(10, sizeof(char));
                clen = 10;
                nchars = 0;
            }
        }
        while ('\n' != c && EOF != c);

        free(current);
        fields = (char **) realloc(fields, nfields * sizeof(char *));
        if (EOF == c && nfields == 0) nfields = -1;
        *num_fields = nfields;
        return fields;
    }

    /* Free up memory allocated by split_stdin.
     * response is the data returned by a call to split_stdin.
     * num_fields is the number of strings in response.
     */

    void free_split_stdin(char **response, int num_fields) {
        int i;
        for (i = 0; i < num_fields; i++) free(response[i]);
        free(response);
    }
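
split_stdin is the only I/O helper the agent needs: it reads one line, splits it on the separator characters, and returns an array of freshly allocated strings that the caller releases with free_split_stdin. A tiny stand-alone usage sketch (an illustration only, not part of the lab):

    #include <stdio.h>

    #include "../inc/split_stdin.h"

    int main(void) {
        int n, i;
        char **fields;

        // Read lines until EOF, echoing each field on its own line
        while (1) {
            fields = split_stdin(" ", &n);
            if (n < 0) break;               // -1 means EOF before any field was found
            printf("%d field(s):\n", n);
            for (i = 0; i < n; i++) {
                printf("  [%d] %s\n", i, fields[i]);
            }
            free_split_stdin(fields, n);
        }
        return 0;
    }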

lab3_brief.out - Training and testing performance

   1: Guess #1 is 0 [0.00]: incorrect (label is 7, error rate = 1/1 = 100%) 
   2: Guess #2 is 7 [0.02]: incorrect (label is 1, error rate = 2/2 = 100%) 
   3: Guess #3 is 7 [0.03]: incorrect (label is 5, error rate = 3/3 = 100%) 
   4: Guess #4 is 7 [0.03]: incorrect (label is 6, error rate = 4/4 = 100%) 
   5: Guess #5 is 6 [0.03]: incorrect (label is 4, error rate = 5/5 = 100%) 
   6: Guess #6 is 5 [0.03]: incorrect (label is 3, error rate = 6/6 = 100%) 
   7: Guess #7 is 3 [0.03]: incorrect (label is 5, error rate = 7/7 = 100%) 
   8: Guess #8 is 5 [0.04]: incorrect (label is 6, error rate = 8/8 = 100%) 
   9: Guess #9 is 6 [0.04]: incorrect (label is 3, error rate = 9/9 = 100%) 
  10: Guess #10 is 5 [0.04]: incorrect (label is 7, error rate = 10/10 = 100%) 
  11: Guess #11 is 6 [0.06]: incorrect (label is 8, error rate = 11/11 = 100%) 
  12: Guess #12 is 5 [0.05]: incorrect (label is 0, error rate = 12/12 = 100%) 
  13: Guess #13 is 5 [0.06]: incorrect (label is 6, error rate = 13/13 = 100%) 
  14: Guess #14 is 6 [0.07]: incorrect (label is 9, error rate = 14/14 = 100%) 
  15: Guess #15 is 6 [0.03]: incorrect (label is 4, error rate = 15/15 = 100%) 
  16: Guess #16 is 6 [0.06]: incorrect (label is 8, error rate = 16/16 = 100%) 
  17: Guess #17 is 4 [0.03]: correct (label is 4, error rate = 16/17 = 94.1176%) 
  18: Guess #18 is 6 [0.08]: incorrect (label is 2, error rate = 17/18 = 94.4444%) 
  19: Guess #19 is 6 [0.07]: incorrect (label is 2, error rate = 18/19 = 94.7368%) 
  20: Guess #20 is 6 [0.06]: incorrect (label is 3, error rate = 19/20 = 95%) 
  21: Guess #21 is 6 [0.05]: incorrect (label is 1, error rate = 20/21 = 95.2381%) 
  22: Guess #22 is 6 [0.04]: incorrect (label is 9, error rate = 21/22 = 95.4545%) 
  23: Guess #23 is 4 [0.02]: correct (label is 4, error rate = 21/23 = 91.3043%) 
  24: Guess #24 is 3 [0.02]: incorrect (label is 5, error rate = 22/24 = 91.6667%) 
  25: Guess #25 is 5 [0.04]: incorrect (label is 0, error rate = 23/25 = 92%) 
  26: Guess #26 is 6 [0.02]: incorrect (label is 9, error rate = 24/26 = 92.3077%) 
  27: Guess #27 is 6 [0.03]: correct (label is 6, error rate = 24/27 = 88.8889%) 
  28: Guess #28 is 9 [0.03]: incorrect (label is 0, error rate = 25/28 = 89.2857%) 
  29: Guess #29 is 0 [0.02]: incorrect (label is 7, error rate = 26/29 = 89.6552%) 
  30: Guess #30 is 9 [0.04]: incorrect (label is 4, error rate = 27/30 = 90%) 
  31: Guess #31 is 0 [0.05]: correct (label is 0, error rate = 27/31 = 87.0968%) 
  32: Guess #32 is 7 [0.02]: correct (label is 7, error rate = 27/32 = 84.375%) 
  33: Guess #33 is 7 [0.00]: incorrect (label is 9, error rate = 28/33 = 84.8485%) 
  34: Guess #34 is 6 [0.01]: incorrect (label is 0, error rate = 29/34 = 85.2941%) 
  35: Guess #35 is 0 [0.04]: incorrect (label is 6, error rate = 30/35 = 85.7143%) 
  36: Guess #36 is 9 [0.08]: correct (label is 9, error rate = 30/36 = 83.3333%) 
  37: Guess #37 is 7 [0.00]: correct (label is 7, error rate = 30/37 = 81.0811%) 
  38: Guess #38 is 6 [0.05]: incorrect (label is 0, error rate = 31/38 = 81.5789%) 
  39: Guess #39 is 0 [0.02]: incorrect (label is 8, error rate = 32/39 = 82.0513%) 
  40: Guess #40 is 6 [0.05]: correct (label is 6, error rate = 32/40 = 80%) 
  41: Guess #41 is 0 [0.02]: incorrect (label is 4, error rate = 33/41 = 80.4878%) 
  42: Guess #42 is 0 [0.08]: incorrect (label is 3, error rate = 34/42 = 80.9524%) 
  43: Guess #43 is 0 [0.08]: correct (label is 0, error rate = 34/43 = 79.0698%) 
  44: Guess #44 is 0 [0.02]: incorrect (label is 3, error rate = 35/44 = 79.5455%) 
  45: Guess #45 is 0 [0.05]: incorrect (label is 3, error rate = 36/45 = 80%) 
  46: Guess #46 is 3 [0.04]: incorrect (label is 4, error rate = 37/46 = 80.4348%) 
  47: Guess #47 is 4 [0.03]: correct (label is 4, error rate = 37/47 = 78.7234%) 
  48: Guess #48 is 3 [0.09]: incorrect (label is 5, error rate = 38/48 = 79.1667%) 
  49: Guess #49 is 0 [0.10]: correct (label is 0, error rate = 38/49 = 77.551%) 
  50: Guess #50 is 3 [0.07]: incorrect (label is 6, error rate = 39/50 = 78%) 
  51: 
  52: <59,900 lines deleted>
  53: 
  54: Guess #59951 is 0 [0.21]: correct (label is 0, error rate = 11986/59951 = 19.993%) 
  55: Guess #59952 is 8 [1.26]: correct (label is 8, error rate = 11986/59952 = 19.9927%) 
  56: Guess #59953 is 3 [1.53]: correct (label is 3, error rate = 11986/59953 = 19.9923%) 
  57: Guess #59954 is 7 [0.97]: correct (label is 7, error rate = 11986/59954 = 19.992%) 
  58: Guess #59955 is 3 [0.43]: correct (label is 3, error rate = 11986/59955 = 19.9917%) 
  59: Guess #59956 is 2 [1.10]: correct (label is 2, error rate = 11986/59956 = 19.9913%) 
  60: Guess #59957 is 4 [0.22]: correct (label is 4, error rate = 11986/59957 = 19.991%) 
  61: Guess #59958 is 9 [1.48]: incorrect (label is 4, error rate = 11987/59958 = 19.9923%) 
  62: Guess #59959 is 2 [2.67]: correct (label is 2, error rate = 11987/59959 = 19.992%) 
  63: Guess #59960 is 0 [1.57]: correct (label is 0, error rate = 11987/59960 = 19.9917%) 
  64: Guess #59961 is 4 [0.32]: correct (label is 4, error rate = 11987/59961 = 19.9913%) 
  65: Guess #59962 is 8 [1.55]: correct (label is 8, error rate = 11987/59962 = 19.991%) 
  66: Guess #59963 is 8 [0.64]: correct (label is 8, error rate = 11987/59963 = 19.9907%) 
  67: Guess #59964 is 1 [0.77]: incorrect (label is 5, error rate = 11988/59964 = 19.992%) 
  68: Guess #59965 is 7 [0.93]: correct (label is 7, error rate = 11988/59965 = 19.9917%) 
  69: Guess #59966 is 7 [0.83]: correct (label is 7, error rate = 11988/59966 = 19.9913%) 
  70: Guess #59967 is 3 [0.79]: correct (label is 3, error rate = 11988/59967 = 19.991%) 
  71: Guess #59968 is 2 [0.35]: incorrect (label is 5, error rate = 11989/59968 = 19.9923%) 
  72: Guess #59969 is 2 [1.55]: correct (label is 2, error rate = 11989/59969 = 19.992%) 
  73: Guess #59970 is 3 [0.94]: correct (label is 3, error rate = 11989/59970 = 19.9917%) 
  74: Guess #59971 is 3 [0.58]: correct (label is 3, error rate = 11989/59971 = 19.9913%) 
  75: Guess #59972 is 4 [0.59]: correct (label is 4, error rate = 11989/59972 = 19.991%) 
  76: Guess #59973 is 5 [0.57]: incorrect (label is 3, error rate = 11990/59973 = 19.9923%) 
  77: Guess #59974 is 6 [1.21]: correct (label is 6, error rate = 11990/59974 = 19.992%) 
  78: Guess #59975 is 9 [0.55]: correct (label is 9, error rate = 11990/59975 = 19.9917%) 
  79: Guess #59976 is 8 [1.01]: correct (label is 8, error rate = 11990/59976 = 19.9913%) 
  80: Guess #59977 is 7 [0.14]: incorrect (label is 9, error rate = 11991/59977 = 19.9927%) 
  81: Guess #59978 is 0 [1.52]: correct (label is 0, error rate = 11991/59978 = 19.9923%) 
  82: Guess #59979 is 0 [1.01]: correct (label is 0, error rate = 11991/59979 = 19.992%) 
  83: Guess #59980 is 5 [1.12]: correct (label is 5, error rate = 11991/59980 = 19.9917%) 
  84: Guess #59981 is 4 [0.66]: correct (label is 4, error rate = 11991/59981 = 19.9913%) 
  85: Guess #59982 is 4 [0.31]: correct (label is 4, error rate = 11991/59982 = 19.991%) 
  86: Guess #59983 is 2 [1.54]: correct (label is 2, error rate = 11991/59983 = 19.9907%) 
  87: Guess #59984 is 9 [0.56]: correct (label is 9, error rate = 11991/59984 = 19.9903%) 
  88: Guess #59985 is 9 [0.80]: correct (label is 9, error rate = 11991/59985 = 19.99%) 
  89: Guess #59986 is 9 [1.23]: correct (label is 9, error rate = 11991/59986 = 19.9897%) 
  90: Guess #59987 is 4 [0.43]: correct (label is 4, error rate = 11991/59987 = 19.9893%) 
  91: Guess #59988 is 1 [1.25]: correct (label is 1, error rate = 11991/59988 = 19.989%) 
  92: Guess #59989 is 1 [0.82]: correct (label is 1, error rate = 11991/59989 = 19.9887%) 
  93: Guess #59990 is 4 [0.58]: correct (label is 4, error rate = 11991/59990 = 19.9883%) 
  94: Guess #59991 is 2 [1.42]: correct (label is 2, error rate = 11991/59991 = 19.988%) 
  95: Guess #59992 is 2 [1.55]: correct (label is 2, error rate = 11991/59992 = 19.9877%) 
  96: Guess #59993 is 3 [1.60]: correct (label is 3, error rate = 11991/59993 = 19.9873%) 
  97: Guess #59994 is 2 [0.93]: correct (label is 2, error rate = 11991/59994 = 19.987%) 
  98: Guess #59995 is 6 [1.13]: correct (label is 6, error rate = 11991/59995 = 19.9867%) 
  99: Guess #59996 is 3 [1.26]: correct (label is 3, error rate = 11991/59996 = 19.9863%) 
 100: Guess #59997 is 5 [1.53]: correct (label is 5, error rate = 11991/59997 = 19.986%) 
 101: Guess #59998 is 1 [0.91]: correct (label is 1, error rate = 11991/59998 = 19.9857%) 
 102: Guess #59999 is 8 [0.61]: correct (label is 8, error rate = 11991/59999 = 19.9853%) 
 103: Guess #60000 is 6 [1.43]: correct (label is 6, error rate = 11991/60000 = 19.985%) 
 104: 
Email: steve@oharasteve.com