PDA

Visualizza la versione completa : [C++] Inserire in un array i risultati di una funzione


iljakie
21-07-2017, 11:45
Salve ragazzi,

sto lavorando con una fotocamera 3d il cui codice mi permette di acquisire e salvare in successione un numero fissato di immagini. Siccome l'operazione di salvataggio richiede diversi secondi vorrei fare in modo che le immagini vengano dapprima tutte acquisite e poi salvate dopo le acquisizioni.

Nel codice seguente le informazioni sulle immagini da salvare si trovano all'interno del ciclo // Enter the grab loop ed in particolare all'interno della variabile grabResult che corrisponde all'immagine numero j. Così come è scritto, il codice ad ogni ciclo passa l'immagine alla funzione SavePointCloud, che si occupa di salvarla subito dopo l'acquisizione.

Quello che vorrei fare è costruire una funzione che ad ogni ciclo sistemi il valore grabResult all'interno di un vettore di n elementi alla posizione j. In questo modo, modificando opportunamente la funzione SavePointCloud, potrei salvare le immagini dopo averle acquisite tutte.

Qualcuno ha un'idea su come fare?


// Grab nImagesToGrab images from the ToF camera and save each one as a PCD file.
// Returns EXIT_SUCCESS when all requested images were grabbed, EXIT_FAILURE otherwise.
int Sample::run()
{
    const size_t nBuffers = 1;      // Number of buffers to be used for grabbing.
    const size_t nImagesToGrab = 4; // Number of images to grab.
    size_t nImagesGrabbed = 0;
    int j = 0; // Running image index, used for naming the output files.

    try
    {
        // Open and parameterize the camera.
        setupCamera();

        // Let the camera class use our allocator. Without one, a default allocator
        // that puts the buffers on the heap would be used automatically.
        // m_Camera takes ownership and will clean up the allocator.
        m_Camera.SetBufferAllocator( new CustomAllocator(), true );

        // Allocate the memory buffers and prepare image acquisition.
        m_Camera.PrepareAcquisition( nBuffers );

        // Enqueue every buffer so it can be filled with image data.
        for ( size_t i = 0; i < nBuffers; ++i )
        {
            m_Camera.QueueBuffer( i );
        }

        // Start the acquisition engine, then start streaming on the camera itself.
        m_Camera.StartAcquisition();
        m_Camera.IssueAcquisitionStartCommand(); // The camera continuously sends data now.

        // Grab loop: fetch a buffer, save it, and re-queue it until enough images
        // have been grabbed (or an error/timeout ends the loop early).
        while ( nImagesGrabbed < nImagesToGrab )
        {
            GrabResult grabResult;
            // Wait up to 1000 ms for the next grabbed buffer to become available
            // in the acquisition engine's output queue.
            m_Camera.GetGrabResult( grabResult, 1000 );

            if ( grabResult.status == GrabResult::Timeout )
            {
                cerr << "Timeout occurred." << endl;
                // The timeout might be caused by a removal of the camera.
                if ( ! m_Camera.IsConnected() )
                {
                    cerr << "Camera has been removed." << endl;
                }
                break; // exit loop
            }
            if ( grabResult.status != GrabResult::Ok )
            {
                cerr << "Failed to grab image." << endl;
                break; // exit loop
            }

            ++nImagesGrabbed;
            ++j;

            // Process the buffer now. It will not be overwritten with new data
            // until it is explicitly put back into the engine's input queue.
            SavePointCloud( grabResult, j );

            // Done with the data: return the buffer so it can receive a new image.
            m_Camera.QueueBuffer( grabResult.hBuffer );
        }

        // Stop the camera, release the grabbing resources, and close the connection.
        m_Camera.IssueAcquisitionStopCommand();
        m_Camera.FinishAcquisition();
        m_Camera.Close();
    }
    catch ( const GenICam::GenericException& e )
    {
        cerr << "Exception occurred: " << e.GetDescription() << endl;
        // After successfully opening the camera, IsConnected can be used to check
        // whether the device is still attached.
        if ( m_Camera.IsOpen() && ! m_Camera.IsConnected() )
        {
            cerr << "Camera has been removed." << endl;
        }
        return EXIT_FAILURE;
    }

    return nImagesGrabbed == nImagesToGrab ? EXIT_SUCCESS : EXIT_FAILURE;
}


void Sample::SavePointCloud( const GrabResult& grabResult, int j){
std::ostringstream oss;
oss << "points" << j << ".pcd";
const char* fileName = oss.str().c_str();

BufferParts parts;
m_Camera.GetBufferParts( grabResult, parts);



if ( parts.empty() )
{
cerr << "No valid image data." << endl;

}


// If the point cloud is enabled, the first part always contains the point cloud data.
if ( parts[0].dataFormat != PFNC_Coord3D_ABC32f )
{
cerr << "Unexpected data format for the first image part. Coord3D_ABC32f is expected." << endl;

}


const bool saveIntensity = parts.size() > 1;
if ( saveIntensity && parts[1].dataFormat != PFNC_Mono16 )
{
cerr << "Unexpected data format for the second image part. Mono 16 is expected." << endl;

}


ofstream o( oss.str().c_str() );
if ( ! o )
{
cerr << "Error:\tFailed to create file "<< oss.str().c_str() << endl;

}


cout << "Writing point cloud to file " << oss.str().c_str() << "...";
CToFCamera::Coord3D *pPoint = (CToFCamera::Coord3D*) parts[0].pData;
uint16_t *pIntensity = saveIntensity ? (uint16_t*) parts[1].pData : NULL;
const size_t nPixel = parts[0].width * parts[0].height;


WritePcdHeader( o, parts[0].width, parts[0].height, saveIntensity );



for ( size_t i = 0; i < nPixel; ++i )
{
// Check if there are valid 3D coordinates for that pixel.
if ( pPoint->IsValid() )
{
o.precision( 0 ); // Coordinates will be written as whole numbers.

// Write the coordinates of the next point. Note: Since the coordinate system
// used by the CloudCompare tool is different from the one used by the ToF camera,
// we apply a 180-degree rotation around the x-axis by writing the negative
// values of the y and z coordinates.
o << std::fixed << pPoint->x << ' ' << -pPoint->y << ' ' << -pPoint->z;

if ( saveIntensity )
{
// Save the intensity as an RGB value.
uint8_t gray = *pIntensity >> 8;
uint32_t rgb = (uint32_t) gray << 16 | (uint32_t) gray << 8 | (uint32_t) gray;
// The point cloud library data format represents RGB values as floats.
float fRgb = * (float*) &rgb;
o.unsetf(ios_base::floatfield); // Switch to default float formatting
o.precision(9); // Intensity information will be written with highest precision.
o << ' ' << fRgb << endl;
}
}
else
{
o << "nan nan nan 0" << endl;
}
pPoint++;
pIntensity++;
}
o.close();
cout << "done." << endl;



}


int main(int argc, char* argv[]){
int exitCode = EXIT_SUCCESS;

try
{
CToFCamera::InitProducer();


Sample processing;
exitCode = processing.run();
}
catch ( GenICam::GenericException& e )
{
cerr << "Exception occurred: " << endl << e.GetDescription() << endl;
exitCode = EXIT_FAILURE;
}


// Release the GenTL producer and all of its resources.
// Note: Don't call TerminateProducer() until the destructor of the CToFCamera
// class has been called. The destructor may require resources which may not
// be available anymore after TerminateProducer() has been called.
if ( CToFCamera::IsProducerInitialized() )
CToFCamera::TerminateProducer(); // Won't throw any exceptions


cout << endl << "Press Enter to exit." << endl;
while (cin.get() != '\n');


return exitCode;
}

Loading