// Streams of Consciousness

// NOC Final Project from Colin Narver on Vimeo.

//Colin Narver //Streams of Consciousness //Nature of Code Final Project //Special thanks to Dan Shiffman and Lia Martinez //for all their help and wisdom along the way import jsyphon.*; import SimpleOpenNI.*; SimpleOpenNI context; boolean autoCalib=true; import codeanticode.syphon.*; PGraphics canvas; SyphonServer server; PImage sky; PImage grass; PImage ocean; PImage droplet; PImage finalImg; PImage kinect; int[] depthValues; // Using this variable to decide de whether to draw all the stuff boolean debug = true; // Flowfield object FlowField flowfield; // An ArrayList of vehicles ArrayList vehicles; PImage background; void setup() { //size(context.depthWidth(), context.depthHeight(),P3D); size(640, 480, P3D); context = new SimpleOpenNI(this); //background = loadImage("dog.jpg"); // Make a new flow field with "resolution" of 16 // enable depthMap generation if (context.enableDepth() == false) { println("Can't open the depthMap, maybe the camera is not connected!"); exit(); return; } canvas = createGraphics(640, 480, P3D); server = new SyphonServer(this, "Processing Syphon"); background(200, 0, 0); droplet = loadImage("blue_better_1.png"); grass = loadImage("grass.png"); ocean = loadImage("ocean.png"); sky = loadImage("sky.png"); finalImg = createImage(320, 240, RGB); vehicles = new ArrayList(); // Make a whole bunch of vehicles with random maxspeed and maxforce values for (int i = 0; i < 400; i++) { vehicles.add(new Vehicle(canvas, new PVector(random(100, width-100), random(100, height-100)), random(2, 5), random(0.1, 0.5))); } stroke(0, 0, 255); strokeWeight(3); smooth(); kinect = createImage(640, 480, RGB); flowfield = new FlowField(40); //adjust this to affect resolution grass.loadPixels(); ocean.loadPixels(); sky.loadPixels(); } void draw() { //restrict area that kinect can see to just sandbox kinect.copy(context.depthImage(), 100, 120, 300, 220, 0, 0, 640, 480); //kinect.copy(context.depthImage(), 140, 155, 375, 260, 0, 0, 640, 480); kinect.updatePixels(); 
println("MOUSE X: " + mouseX + " MOUSEY: " + mouseY); // Display the flowfield in "debug" mode //if (debug) flowfield.display(); //depthValues = context.depthMap(); //check every other pixel to save memory for (int px = 0; px < 640; px+=2) { for (int py = 0; py < 480; py+=2) { int depthIndex = px + py * 640; int imgIndex = (px/2) + (py/2) * 320; //for (int i = 0; i < depthValues.length; i++) { float b = brightness(kinect.pixels[depthIndex]); if (b > 0 && b < 135) { finalImg.pixels [imgIndex] = ocean.pixels [depthIndex]; } else if (b >135 && b < 255) { finalImg.pixels [imgIndex] = grass.pixels [depthIndex]; } // else if (b > 150 && b < 200) { // finalImg.pixels [imgIndex] = sky.pixels [depthIndex]; // } // else { // //make pink to test // finalImg.pixels [imgIndex] = color (200, 100, 100); // } } } finalImg.updatePixels(); //finalImg.filter(BLUR); // Tell all the vehicles to follow the flow field for (Vehicle v : vehicles) { v.follow(flowfield); v.run(); } // update the cam context.update(); //context.depthImage().filter(BLUR); flowfield.init(); // draw depthImageMap canvas.beginDraw(); canvas.background(0); //canvas.imageMode (CORNER); //canvas.image(kinect, 0, 0); canvas.image(finalImg, 0, 0,width,height); for (Vehicle v : vehicles) { v.display(); } //syphon canvas.endDraw(); server.sendImage(canvas); image (canvas, 0, 0); //vehicles.add(new Vehicle(canvas, new PVector(random(100, width-100), random(100, height-100)), random(2, 5), random(0.1, 0.5))); // if (vehicles.size() > 200) { // vehicles.remove(0); // } } //void mousePressed() { // // int clickPosition = mouseX + (mouseY * 640); // int clickedDepth = depthValues[clickPosition]; // println(clickedDepth); // // float inches = clickedDepth / 25.4; //} void keyPressed() { if (key == ' ') { debug = !debug; } } // Make a new flowfield //void mousePressed() { // // flowfield.init(); // // vehicles.add(new Vehicle(canvas, new PVector(mouseX, mouseY), random(2, 5), random(0.1, 0.5))); //} void mouseDragged() { // 
flowfield.init(); vehicles.add(new Vehicle(canvas, new PVector(mouseX, mouseY), random(2, 5), random(0.1, 0.5))); } // The Nature of Code // Daniel Shiffman // http://natureofcode.com // Flow Field Following //what is the vehicle's desired velocity?? //arrow below triangle in flow field indicates vehicle's desired velocity class Vehicle { PImage droplet = loadImage("blue_better_1.png"); // The usual stuff PVector location; PVector velocity; PVector acceleration; float r; float maxforce; // Maximum steering force float maxspeed; // Maximum speed PGraphics can; Vehicle(PGraphics canvas, PVector l, float ms, float mf) { location = l.get(); r = 3.0; maxspeed = ms; maxforce = mf; acceleration = new PVector(0,0); velocity = new PVector(0,0); can = canvas; } public void run() { update(); borders(); //display(); } // Implementing Reynolds' flow field following algorithm // http://www.red3d.com/cwr/steer/FlowFollow.html void follow(FlowField flow) { // What is the vector at that spot in the flow field? 
PVector desired = flow.lookup(location); // Scale it up by maxspeed desired.mult(maxspeed); // Steering is desired minus velocity PVector steer = PVector.sub(desired, velocity); steer.limit(maxforce); // Limit to maximum steering force applyForce(steer); } void applyForce(PVector force) { // We could add mass here if we want A = F / M acceleration.add(force); } // Method to update location void update() { // Update velocity velocity.add(acceleration); // Limit speed velocity.limit(maxspeed); location.add(velocity); // Reset accelertion to 0 each cycle acceleration.mult(0); } void display() { // Draw a triangle rotated in the direction of velocity float theta = velocity.heading2D() + radians(90); can.image(droplet, location.x, location.y, 40, 40); // fill (82,147,255,150); //// stroke(0); // pushMatrix(); // translate(location.x,location.y); // rotate(theta); // beginShape(TRIANGLES); // vertex(0, -r*3); // vertex(-r*3, r*3); // vertex(r*3, r*3); // endShape(); // popMatrix(); } // Wraparound void borders() { float buffer = 100; //constrain vehicles to area inside sandbox (avoiding noise) location.x = constrain(location.x,120, width-120); location.y = constrain(location.y, 120, height-120); // if (location.x < 100) location.x = width/2; // if (location.y < 100) location.y = height/2; // if (location.x > width-100) location.x = width/2; // if (location.y > height-100) location.y = height/2; } } // Flow Field Following class FlowField { // A flow field is a two dimensional array of PVectors PVector[][] field; int cols, rows; // Columns and Rows int resolution; // How large is each "cell" of the flow field FlowField(int r) { resolution = r; // Determine the number of columns and rows based on sketch's width and height cols = context.depthWidth()/resolution; rows = context.depthHeight()/resolution; println(cols + " " + rows); field = new PVector[cols][rows]; for (int x=0; x< cols; x++) { for (int y=0; y< rows; y++) { field[x][y] = new PVector (0, 0); } } init(); } 
//This is the section to get to understand best*** void init() { // lia martinez helped with this section kinect.loadPixels(); // we loop by columns and rows to preserve the resolution for (int x=0; x< cols; x++) { for (int y=0; y< rows; y++) { //make an array for just the surrounding pixels float[] areaPixels; areaPixels = new float[9]; //we don't want it to go below 0 or above the maximum or else it will crash if (x > 0 && x < cols -1 && y > 0 && y < rows-1) { // loop through the area pixels for (int i=-1; i<=1; i++) { for (int j=-1; j<=1; j++) { //the index for which pixel we are looking at int readPos = ((y*resolution + j) * kinect.width + (x*resolution + i)); //when we get our pixel, we get the color color c = kinect.pixels[readPos]; //from the color, we get just the brightness (gray) value float b = brightness(c); //writePos is another index for where in the array we write the brightness information int writePos = (j+1) * 3 + (i + 1); areaPixels[writePos] = b; } } //compare first the pixels on the left/right columns, then compare the upper/lower rows. the difference becomes our vector! 
float dX = (areaPixels[0] + areaPixels[3] + areaPixels[6])/3 - (areaPixels[2] + areaPixels[5] + areaPixels[8])/3; float dY = (areaPixels[0] + areaPixels[1] + areaPixels[2])/3 - (areaPixels[6] + areaPixels[7] + areaPixels[8])/3; //make a new vector based on the difference //field[x][y] = new PVector (dX, dY); field[x][y].x = lerp(field[x][y].x, dX, 0.01); field[x][y].y = lerp(field[x][y].y, dY, 0.01); //normalize, to just get the direction field[x][y].normalize(); } else { //this is to just make a do-nothing vector at the sides of the sketch, since we didn't want any pixel computing done there field[x][y] = new PVector(0, 0); } } } } // eventually you might need to look at a pixel and its neighbors //float theta = map(noise(xoff,yoff),0,1,0,TWO_PI); // Polar to cartesian coordinate transformation to get x and y components of the vector // Draw every vector void display() { for (int x=0; x< cols; x++) { for (int y=0; y< rows; y++) { drawVector(field[x][y], x*resolution, y*resolution, resolution-2); } } } // // // Renders a vector object 'v' as an arrow and a location 'x,y' void drawVector(PVector v, float x, float y, float scayl) { pushMatrix(); float arrowsize = 4; // // Translate to location to render vector translate(x, y); stroke(0, 100); // // Call vector heading function to get direction (note that pointing up is a heading of 0) and rotate rotate(v.heading2D()); // // Calculate length of vector & scale it to be bigger or smaller if necessary float len = v.mag()*scayl; // // Draw three lines to make an arrow (draw pointing up since we've rotate to the proper direction) line(0, 0, len, 0); line(len, 0, len-arrowsize, +arrowsize/2); line(len, 0, len-arrowsize, -arrowsize/2); popMatrix(); } PVector lookup(PVector lookup) { int column = int(constrain(lookup.x/resolution, 0, cols-1)); int row = int(constrain(lookup.y/resolution, 0, rows-1)); return field[column][row].get(); } }