kinect / processing / simple openni - point cloud data not being output properly

Posted by 空扰寡人 on 2019-12-30 05:29:13

Question


I've created a Processing sketch which saves each frame of point cloud data from the Kinect to a text file, where each line of the file is a point (or vertex) that the Kinect has registered. I plan to pull the data into a 3d program to visualize the animation in 3d space and apply various effects. The problem is that when I do this, the first frame seems proper, but the rest of the frames seem to be spitting out what looks like the first image plus a bunch of random noise. This is my code, in its entirety; it requires SimpleOpenNI to work properly. You can see the comments in the code below.

import SimpleOpenNI.*;
import java.util.*; // explicit import for Vector and Enumeration, in case they aren't auto-imported
import java.io.*;   // explicit import for File and PrintWriter, in case they aren't auto-imported
//import processing.opengl.*;

SimpleOpenNI context;
float        zoomF =0.5f;
float        rotX = radians(180);  // by default rotate the whole scene 180deg around the x-axis,
float        rotY = radians(0);    // because the data from OpenNI comes in upside down

int maxZ = 2000;
Vector <Object> recording = new Vector<Object>(); 
boolean isRecording = false;
boolean canDraw = true;
boolean mouseMode = false;
int currentFile = 0;
int depthWidth = 640; //MH - assuming this is static?
int depthHeight = 480;
int steps = 5;
int arrayLength = (depthWidth/steps) * (depthHeight/steps); //total lines in each output file


void setup()
{
  size(1024,768,P3D);  // strange: I get a drawing error in the cameraFrustum if I use P3D; with OPENGL there is no problem
  //size(1024,768,OPENGL); 

  context = new SimpleOpenNI(this);
  context.setMirror(true);
  depthWidth = context.depthWidth();
  depthHeight = context.depthHeight();

  // enable depthMap generation 
  if(context.enableDepth() == false)
  {
     println("Can't open the depthMap, maybe the camera is not connected!"); 
     exit();
     return;
  }

  stroke(255,255,255);
  smooth();

  perspective(radians(45),
  float(width)/float(height),
  10.0f,150000.0f);
 }

void draw()
{

  //println(isRecording);

  // update the cam
  context.update();

  background(0,0,0);

  // set the scene pos
  translate(width/2, height/2, 0);
  rotateX(rotX);
  rotateY(rotY);
  scale(zoomF);

  // draw the 3d point depth map
  int[]   depthMap = context.depthMap();
  int     index = 0;
  PVector realWorldPoint;
  PVector[] frame = new PVector[arrayLength];

  translate(0,0,-1000);  // set the rotation center of the scene 1000 in front of the camera
  stroke(200); 
  for(int y=0;y < context.depthHeight();y+=steps)
  {
    for(int x=0;x < context.depthWidth();x+=steps)
    {
      int offset = x + y * context.depthWidth();
      realWorldPoint = context.depthMapRealWorld()[offset];
      if (isRecording == true){
        if (realWorldPoint.z < maxZ){
          frame[index] = realWorldPoint;
        } else {
          frame[index] = new PVector(-0.0,-0.0,0.0); 
        }
        index++;
      } else {
        if (realWorldPoint.z < maxZ){
          if (canDraw == true){
            point(realWorldPoint.x,realWorldPoint.y,realWorldPoint.z);
          }
        }
      }
    } 
  }

  if (isRecording == true){
   recording.add(frame); 
  }

 if (mouseMode == true){
   float rotVal = map (mouseX,0,1024,-1,1); //comment these out to disable mouse orientation
   float rotValX = map (mouseY,0,768,2,4);
   rotY = rotVal;
   rotX = rotValX;
 } 

}

// -----------------------------------------------------------------
// Keyboard event
void keyPressed()
{
  switch(key)
  {
    case ' ':
      context.setMirror(!context.mirror());
      break;
    case 'm':
      mouseMode = !mouseMode;
      break;
    case 'r':
      isRecording = !isRecording;
      break;
    case 's':
      if (isRecording == true){
        isRecording = false;
        canDraw = false;
        println("Stopped Recording");
        Enumeration e = recording.elements();
        int i = 0;
        while (e.hasMoreElements()) {

          // Create one directory
          boolean success = (new File("out"+currentFile)).mkdir(); 
          PrintWriter output = createWriter("out"+currentFile+"/frame" + i++ +".txt");
          PVector [] frame = (PVector []) e.nextElement();

          for (int j = 0; j < frame.length; j++) {
           output.println(j + ", " + frame[j].x + ", " + frame[j].y + ", " + frame[j].z );
          }
          output.flush(); // Write the remaining data
          output.close();
          //exit();
        }
        canDraw = true;
        println("done recording");
      }
      currentFile++;
      break;
  }

  switch(keyCode)
  {
    case LEFT:
      if(keyEvent.isShiftDown())
        maxZ -= 100;
      else
        rotY += 0.1f;
      break;
    case RIGHT:
      if(keyEvent.isShiftDown())
        maxZ += 100;
      else
        rotY -= 0.1f;
      break;
    case UP:
      if(keyEvent.isShiftDown())
        zoomF += 0.01f;
      else
        rotX += 0.1f;
      break;
    case DOWN:
      if(keyEvent.isShiftDown())
      {
        zoomF -= 0.01f;
        if(zoomF < 0.01)
          zoomF = 0.01;
      }
      else
        rotX -= 0.1f;
      break;
  }
}

I imagine the problems begin in the nested loop: for(int y=0;y < context.depthHeight();y+=steps) { , etc., although it could just be a problem with the Python script I wrote for the 3d program. Anyway, this is a cool sketch, and I think it would be super useful for anyone wanting to apply 3d effects to point cloud data (or build models, etc.), but I'm stuck at the moment. Thanks for your help!
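To check whether the problem is already in the saved files (rather than in the Python importer), a minimal debugging sketch like the one below could reload one recorded frame and draw it. It assumes the "index, x, y, z" format written above; the path out0/frame1.txt is just a placeholder for one of the output files.

ArrayList<PVector> loadedFrame = new ArrayList<PVector>();

void setup() {
  size(1024, 768, P3D);
  // placeholder path - point this at one of the recorded frame files
  String[] lines = loadStrings("out0/frame1.txt");
  for (String line : lines) {
    if (trim(line).length() == 0) continue;   // skip any blank trailing line
    String[] parts = split(line, ", ");       // "index, x, y, z"
    loadedFrame.add(new PVector(float(parts[1]), float(parts[2]), float(parts[3])));
  }
  stroke(255);
}

void draw() {
  background(0);
  translate(width/2, height/2, -1000);
  rotateX(radians(180));                      // same orientation as the capture sketch
  for (PVector p : loadedFrame) {
    point(p.x, p.y, p.z);
  }
}

If each saved frame looks different when reloaded this way, the recording is fine and the issue is on the import side; if they all look like the first frame, the problem is in the recording sketch itself.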


Answer 1:


Unfortunately I can't explain a lot right now, but I've done something similar a few months back, saving to PLY and CSV:

import processing.opengl.*;
import SimpleOpenNI.*;


SimpleOpenNI context;
float        zoomF =0.5f;
float        rotX = radians(180);  
float        rotY = radians(0);

boolean recording = false;
ArrayList<PVector> pts = new ArrayList<PVector>();//points for one frame

float minZ = 100,maxZ = 150;

void setup()
{
  size(1024,768,OPENGL);  

  context = new SimpleOpenNI(this);
  context.setMirror(false);
  context.enableDepth();
  context.enableScene();

  stroke(255);
  smooth();  
  perspective(95,float(width)/float(height), 10,150000);
 }

void draw()
{
  context.update();
  background(0);

  translate(width/2, height/2, 0);
  rotateX(rotX);
  rotateY(rotY);
  scale(zoomF);

  int[]   depthMap = context.depthMap();
  int[]   sceneMap = context.sceneMap();
  int     steps   = 10;  
  int     index;
  PVector realWorldPoint;
  pts.clear();//reset points
  translate(0,0,-1000);  
  //*
  //stroke(100); 
  for(int y=0;y < context.depthHeight();y+=steps)
  {
    for(int x=0;x < context.depthWidth();x+=steps)
    {
      index = x + y * context.depthWidth();
      if(depthMap[index] > 0)
      { 
        realWorldPoint = context.depthMapRealWorld()[index];
        if(realWorldPoint.z > minZ && realWorldPoint.z < maxZ){//if within range
          stroke(0,255,0);
          point(realWorldPoint.x,realWorldPoint.y,realWorldPoint.z);
          pts.add(realWorldPoint.get());//store a copy of each point (get() returns a new PVector, not a reference into the depth map array)
        }
      }
    } 
  } 
  if(recording){
      savePLY(pts);//save to disk as PLY
      saveCSV(pts);//save to disk as CSV
  }
  //*/
}

// -----------------------------------------------------------------
// Keyboard events

void keyPressed()
{
  if(key == 'q') minZ += 10;
  if(key == 'w') minZ -= 10;
  if(key == 'a') maxZ += 10;
  if(key == 's') maxZ -= 10;

  switch(key)
  {
    case ' ':
      context.setMirror(!context.mirror());
    break;
    case 'r':
      recording = !recording;
    break;
  }

  switch(keyCode)
  {
    case LEFT:
      rotY += 0.1f;
      break;
    case RIGHT:
      // zoom out
      rotY -= 0.1f;
      break;
    case UP:
      if(keyEvent.isShiftDown())
        zoomF += 0.01f;
      else
        rotX += 0.1f;
      break;
    case DOWN:
      if(keyEvent.isShiftDown())
      {
        zoomF -= 0.01f;
        if(zoomF < 0.01)
          zoomF = 0.01;
      }
      else
        rotX -= 0.1f;
      break;
  }
}
void savePLY(ArrayList<PVector> pts){
  String ply = "ply\n";
  ply += "format ascii 1.0\n";
  ply += "element vertex " + pts.size() + "\n";
  ply += "property float x\n";
  ply += "property float y\n";
  ply += "property float z\n";
  ply += "end_header\n";
  for(PVector p : pts)ply += p.x + " " + p.y + " " + p.z + "\n";
  saveStrings("frame_"+frameCount+".ply",ply.split("\n"));
}
void saveCSV(ArrayList<PVector> pts){
  String csv = "x,y,z\n";
  for(PVector p : pts) csv += p.x + "," + p.y + "," + p.z + "\n";
  saveStrings("frame_"+frameCount+".csv",csv.split("\n"));
}

I'm using an if statement to save only the points within a certain Z threshold, but feel free to alter/use it as you see fit. The post-processing idea reminds me of the Moullinex video for Catalina. Check it out, it's well documented and includes source code as well.

Update: The posted code saves one file per frame. Even though the playback speed will be low, the sketch should still save a file for each frame. The code can be simplified a bit:

import processing.opengl.*;
import SimpleOpenNI.*;


SimpleOpenNI context;
float        zoomF =0.5f;
float        rotX = radians(180);  
float        rotY = radians(0);

boolean recording = false;
String csv;

void setup()
{
  size(1024,768,OPENGL);  

  context = new SimpleOpenNI(this);
  context.setMirror(false);
  context.enableDepth();

  stroke(255);
  smooth();  
  perspective(95,float(width)/float(height), 10,150000);
 }

void draw()
{
  csv = "x,y,z\n";//reset csv for this frame
  context.update();
  background(0);

  translate(width/2, height/2, 0);
  rotateX(rotX);
  rotateY(rotY);
  scale(zoomF);

  int[]   depthMap = context.depthMap();
  int[]   sceneMap = context.sceneMap();
  int     steps   = 10;  
  int     index;
  PVector realWorldPoint;
  translate(0,0,-1000);  
  //*
  beginShape(POINTS);
  for(int y=0;y < context.depthHeight();y+=steps)
  {
    for(int x=0;x < context.depthWidth();x+=steps)
    {
      index = x + y * context.depthWidth();
      if(depthMap[index] > 0)
      { 
        realWorldPoint = context.depthMapRealWorld()[index];
        vertex(realWorldPoint.x,realWorldPoint.y,realWorldPoint.z);
        if(recording) csv += realWorldPoint.x + "," + realWorldPoint.y + "," + realWorldPoint.z + "\n";
      }
    } 
  }
  endShape(); 
  if(recording) saveStrings("frame_"+frameCount+".csv",csv.split("\n"));
  frame.setTitle((int)frameRate + " fps"); // show the frame rate in the window title (in Processing 3+ this would be surface.setTitle())
  //*/
}

// -----------------------------------------------------------------
// Keyboard events

void keyPressed()
{

  switch(key)
  {
    case ' ':
      context.setMirror(!context.mirror());
    break;
    case 'r':
      recording = !recording;
    break;
  }

  switch(keyCode)
  {
    case LEFT:
      rotY += 0.1f;
      break;
    case RIGHT:
      // zoom out
      rotY -= 0.1f;
      break;
    case UP:
      if(keyEvent.isShiftDown())
        zoomF += 0.01f;
      else
        rotX += 0.1f;
      break;
    case DOWN:
      if(keyEvent.isShiftDown())
      {
        zoomF -= 0.01f;
        if(zoomF < 0.01)
          zoomF = 0.01;
      }
      else
        rotX -= 0.1f;
      break;
  }
}

The preview could be separated from the recording with two different loops: you could draw a low-res preview but still save more data, as sketched below. It would still be slow, though.
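A rough sketch of that idea (untested against your setup; previewStep and saveStep are made-up names): the preview loop walks a coarse grid, while a separate recording loop walks a finer one.

import SimpleOpenNI.*;

SimpleOpenNI context;
boolean recording = false;
int previewStep = 10;  // coarse grid: what gets drawn on screen
int saveStep    = 2;   // finer grid: what gets written while recording

void setup() {
  size(1024, 768, P3D);
  context = new SimpleOpenNI(this);
  context.enableDepth();
  stroke(255);
}

void draw() {
  background(0);
  context.update();
  translate(width/2, height/2, -1000);
  rotateX(radians(180));

  int[] depthMap = context.depthMap();
  PVector[] realWorld = context.depthMapRealWorld();
  int w = context.depthWidth();
  int h = context.depthHeight();

  // low-res preview: only draw every previewStep-th point
  for (int y = 0; y < h; y += previewStep) {
    for (int x = 0; x < w; x += previewStep) {
      int i = x + y * w;
      if (depthMap[i] > 0) {                 // skip pixels with no depth reading
        PVector p = realWorld[i];
        point(p.x, p.y, p.z);
      }
    }
  }

  // higher-res save: a separate loop with a finer step, only while recording
  if (recording) {
    String csv = "x,y,z\n";
    for (int y = 0; y < h; y += saveStep) {
      for (int x = 0; x < w; x += saveStep) {
        int i = x + y * w;
        if (depthMap[i] > 0) {
          PVector p = realWorld[i];
          csv += p.x + "," + p.y + "," + p.z + "\n";
        }
      }
    }
    saveStrings("frame_" + frameCount + ".csv", csv.split("\n"));
  }
}

void keyPressed() {
  if (key == 'r') recording = !recording;
}

Building the CSV by string concatenation keeps the example close to the code above, but for the finer grid a StringBuilder or a per-frame PrintWriter would be considerably faster.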

I've got another suggestion: record to the .oni format instead. If you've installed OpenNI, you can make use of a couple of samples like NiViewer and NiBackRecorder. SimpleOpenNI also exposes this functionality; have a look at the RecorderPlay sample.

I suggest trying something like this:

  1. Record your scene to an .oni file. It should be fast/responsive
  2. When you're happy with your .oni recording, process each frame (convert depth to x,y,z points, filter as needed, save to the desired format, etc.)

Here's another sketch to illustrate the idea:

import SimpleOpenNI.*;

SimpleOpenNI  context;
boolean       recordFlag = true;

int frames = 0;

void setup(){
  context = new SimpleOpenNI(this);

  if(! recordFlag){
    if(! context.openFileRecording("test.oni") ){
      println("can't find recording !!!!");
      exit();
    }
    context.enableDepth();
  }else{  
    // recording
    context.enableDepth();
    // setup the recording 
    context.enableRecorder(SimpleOpenNI.RECORD_MEDIUM_FILE,"test.oni");
    // select the recording channels
    context.addNodeToRecording(SimpleOpenNI.NODE_DEPTH,SimpleOpenNI.CODEC_16Z_EMB_TABLES);
  }
  // set window size 
  if((context.nodes() & SimpleOpenNI.NODE_DEPTH) != 0)
    size(context.depthWidth() , context.depthHeight());
  else 
    exit();
}
void draw()
{
  background(0);
  context.update();
  if((context.nodes() & SimpleOpenNI.NODE_DEPTH) != 0) image(context.depthImage(),0,0);
  if(recordFlag) frames++;
}
void keyPressed(){
  if(key == ' '){
    if(recordFlag){
      saveStrings(dataPath("frames.txt"),split(frames+" ",' '));
      exit();
    }else saveONIToPLY();
  }
}
void saveONIToPLY(){
  frames = int(loadStrings(dataPath("frames.txt"))[0]);
  println("recording " + frames + " frames");
  int w = context.depthWidth();
  int h = context.depthHeight();
  noLoop();
  for(int i = 0 ; i < frames; i++){
    PrintWriter output = createWriter(dataPath("frame_"+i+".ply"));
    output.println("ply");
    output.println("format ascii 1.0");
    output.println("element vertex " + (w*h));
    output.println("property float x");
    output.println("property float y");
    output.println("property float z");
    output.println("end_header"); // println already appends a newline; "end_header\n" would leave a blank line in the file
    context.update();
    int[]   depthMap = context.depthMap();
    int     index;
    PVector realWorldPoint;
    for(int y=0;y < h;y++){
      for(int x=0;x < w;x++){
        index = x + y * w;
        realWorldPoint = context.depthMapRealWorld()[index];
        output.println(realWorldPoint.x + " " + realWorldPoint.y + " " + realWorldPoint.z);
      }
    }
    output.flush();
    output.close();
    println("saved " + (i+1) + " of " + frames);
  }
  loop();
  println("recorded " + frames + " frames");
}

When the recordFlag is set to true, data will be saved to an .oni file. I haven't found anything in the docs on how to read how many frames an .oni file contains, so as a quick workaround I've added a frame counter. If you hit space, the recording stops, the number of frames is saved to a .txt file, and the app exits. This will be useful later.

When the recordFlag is set to false and there is already a recording, it will play back. If you hit space in this 'mode', drawing stops, the frame count is loaded from the .txt file, and for each frame:

  1. The context will be updated (moving to the next frame)
  2. Each pixel in the depth map will be converted to a point
  3. ALL the points will be written to a .ply file (which you can process with MeshLab)

After all frames are saved, the sketch resumes drawing. Since there's no 3D drawing and the sketch is fairly simple, performance should be better, but bear in mind that a large .oni file will require a lot of RAM. Feel free to modify the sketch to your needs (e.g. filter out the information you don't want saved, as sketched below).
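For example, filtering by depth at write time could look roughly like this (just a sketch of the per-frame PLY write, reusing the minZ/maxZ idea from the first sketch; saveFilteredPLY is a hypothetical helper name, meant to be dropped into the sketch above). The points are collected first so the vertex count in the header matches the filtered set:

// hypothetical helper: write one PLY frame, keeping only points within [minZ, maxZ]
void saveFilteredPLY(SimpleOpenNI context, int frameIndex, float minZ, float maxZ) {
  ArrayList<PVector> pts = new ArrayList<PVector>();
  PVector[] realWorld = context.depthMapRealWorld();
  for (int i = 0; i < realWorld.length; i++) {
    if (realWorld[i].z > minZ && realWorld[i].z < maxZ) {
      pts.add(realWorld[i]);                          // keep only points within range
    }
  }

  PrintWriter output = createWriter(dataPath("frame_" + frameIndex + ".ply"));
  output.println("ply");
  output.println("format ascii 1.0");
  output.println("element vertex " + pts.size());     // header count matches the filtered set
  output.println("property float x");
  output.println("property float y");
  output.println("property float z");
  output.println("end_header");
  for (PVector p : pts) {
    output.println(p.x + " " + p.y + " " + p.z);
  }
  output.flush();
  output.close();
}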

Also note that the sketch above, although it should save each frame to a separate PLY file, actually saves the same frame every time. It seems the context doesn't update() while noLoop() is in effect. Here's a modified, hacky version that uses a 3 s delay (hopefully the .ply file will be written to disk by then).

import SimpleOpenNI.*;

SimpleOpenNI  context;
boolean       recordFlag = false;
boolean       saving = false;
int frames = 0;
int savedFrames = 0;

void setup(){
  context = new SimpleOpenNI(this);

  if(! recordFlag){
    if(! context.openFileRecording("test.oni") ){
      println("can't find recording !!!!");
      exit();
    }
    context.enableDepth();
  }else{  
    // recording
    context.enableDepth();
    // setup the recording 
    context.enableRecorder(SimpleOpenNI.RECORD_MEDIUM_FILE,"test.oni");
    // select the recording channels
    context.addNodeToRecording(SimpleOpenNI.NODE_DEPTH,SimpleOpenNI.CODEC_16Z_EMB_TABLES);
  }
  // set window size 
  if((context.nodes() & SimpleOpenNI.NODE_DEPTH) != 0)
    size(context.depthWidth() , context.depthHeight());
  else 
    exit();
}
void draw()
{
  background(0);
  context.update();
  if((context.nodes() & SimpleOpenNI.NODE_DEPTH) != 0) image(context.depthImage(),0,0);
  if(recordFlag) frames++;
  if(saving && savedFrames < frames){
      delay(3000);//hack
      int i = savedFrames;
      int w = context.depthWidth();
      int h = context.depthHeight();
      PrintWriter output = createWriter(dataPath("frame_"+i+".ply"));
      output.println("ply");
      output.println("format ascii 1.0");
      output.println("element vertex " + (w*h));
      output.println("property float x");
      output.println("property float y");
      output.println("property float z");
      output.println("end_header"); // println already appends a newline
      rect(random(width),random(height),100,100);
      int[]   depthMap = context.depthMap();
      int     index;
      PVector realWorldPoint;
      for(int y=0;y < h;y++){
        for(int x=0;x < w;x++){
          index = x + y * w;
          realWorldPoint = context.depthMapRealWorld()[index];
          output.println(realWorldPoint.x + " " + realWorldPoint.y + " " + realWorldPoint.z);
        }
      }
      output.flush();
      output.close();
      println("saved " + (i+1) + " of " + frames);
      savedFrames++;
  }
}
void keyPressed(){
  if(key == ' '){
    if(recordFlag){
      saveStrings(dataPath("frames.txt"),split(frames+" ",' '));
      exit();
    }else saveONIToPLY();
  }
}
void saveONIToPLY(){
  frames = int(loadStrings(dataPath("frames.txt"))[0]);
  saving = true;
  println("recording " + frames + " frames");
}

I'm not sure the frames and files stay perfectly in sync, and the depth data is saved at medium quality, but I hope my answer provides some ideas.



Source: https://stackoverflow.com/questions/11570247/kinect-processing-simple-openni-point-cloud-data-not-being-output-properly
