Extend wide angle cameras to support L8 and L16 image formats #1097

Merged (2 commits) on Jan 10, 2025
8 changes: 4 additions & 4 deletions ogre/src/OgreWideAngleCamera.cc
@@ -798,11 +798,11 @@ void OgreWideAngleCamera::PostRender()
PixelFormat format = this->ImageFormat();
unsigned int channelCount = PixelUtil::ChannelCount(format);
unsigned int bytesPerChannel = PixelUtil::BytesPerChannel(format);

unsigned int bufferSize = len * channelCount * bytesPerChannel;
if (!this->dataPtr->wideAngleImage)
this->dataPtr->wideAngleImage = new unsigned char[len * channelCount];
this->dataPtr->wideAngleImage = new unsigned char[bufferSize];
if (!this->dataPtr->imageBuffer)
this->dataPtr->imageBuffer = new unsigned char[len * channelCount];
this->dataPtr->imageBuffer = new unsigned char[bufferSize];

// get image data
Ogre::RenderTarget *rt =
@@ -813,7 +813,7 @@ void OgreWideAngleCamera::PostRender()

// fill image data
memcpy(this->dataPtr->wideAngleImage, this->dataPtr->imageBuffer,
height*width*channelCount*bytesPerChannel);
bufferSize);

this->dataPtr->newImageFrame(
this->dataPtr->wideAngleImage, width, height, channelCount,
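
Reviewer aside: a minimal sketch (illustrative values, not part of this PR) of the arithmetic the hunk above fixes. L8 and L16 are single-channel luminance formats at 1 and 2 bytes per channel respectively, so once bytesPerChannel can be 2, the old allocation of len * channelCount bytes is half the size that is actually written.

// Standalone illustration of the corrected buffer-size computation.
#include <iostream>

int main()
{
  const unsigned int width = 640u, height = 480u;  // example dimensions
  const unsigned int len = width * height;
  const unsigned int channelCount = 1u;     // L8 / L16 are single channel
  const unsigned int bytesPerChannel = 2u;  // 2 for L16, 1 for L8

  const unsigned int oldSize = len * channelCount;                       // 307200
  const unsigned int bufferSize = len * channelCount * bytesPerChannel;  // 614400

  std::cout << "old allocation: " << oldSize
            << " bytes, required: " << bufferSize << " bytes" << std::endl;
  return 0;
}
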
84 changes: 59 additions & 25 deletions ogre2/src/Ogre2WideAngleCamera.cc
@@ -161,6 +161,10 @@ class gz::rendering::Ogre2WideAngleCamera::Implementation
/// changed
public: bool backgroundMaterialDirty = false;

/// \brief Destination image data - used if image format is not rgb
/// and needs to be converted to another format.
public: std::unique_ptr<unsigned char []> dstImgData;

explicit Implementation(gz::rendering::Ogre2WideAngleCamera &_owner) :
workspaceListener(_owner)
{
@@ -1261,40 +1265,70 @@ void Ogre2WideAngleCamera::PostRender()
if (this->dataPtr->newImageFrame.ConnectionCount() <= 0u)
return;

PixelFormat format = this->ImageFormat();
const unsigned int width = this->ImageWidth();
const unsigned int height = this->ImageHeight();
unsigned int channelCount = PixelUtil::ChannelCount(format);
unsigned int bytesPerChannel = PixelUtil::BytesPerChannel(format);

// blit data from gpu to cpu
Ogre::Image2 image;
image.convertFromTexture(this->dataPtr->ogreStitchTexture[kStichFinalTexture],
0u, 0u);
Ogre::TextureBox box = image.getData(0u);

// Convert in-place from RGBA32 to RGB24 reusing the same memory region.
// The data contained will no longer be meaningful to Image2, but that
// class will no longer manipulate that data. We also store it contiguously
// (which is what gazebo expects), instead of aligning rows to 4 bytes like
// Ogre does. This saves RAM and lots of bandwidth.
uint8_t *RESTRICT_ALIAS rgb24 =
reinterpret_cast<uint8_t * RESTRICT_ALIAS>(box.data);
for (size_t y = 0; y < box.height; ++y)
Ogre::TextureGpu *texture =
this->dataPtr->ogreStitchTexture[kStichFinalTexture];
void *rawData = nullptr;
Ogre::Image2 ogreImage;
if (format == PF_R8G8B8)
{
uint8_t *RESTRICT_ALIAS rgba32 =
reinterpret_cast<uint8_t * RESTRICT_ALIAS>(box.at(0u, y, 0u));
for (size_t x = 0; x < box.width; ++x)
ogreImage.convertFromTexture(texture, 0u, 0u);
Ogre::TextureBox box = ogreImage.getData(0u);

// Convert in-place from RGBA32 to RGB24 reusing the same memory region.
// The data contained will no longer be meaningful to Image2, but that
// class will no longer manipulate that data. We also store it contiguously
// (which is what gazebo expects), instead of aligning rows to 4 bytes like
// Ogre does. This saves RAM and lots of bandwidth.
uint8_t *RESTRICT_ALIAS rgb24 =
reinterpret_cast<uint8_t * RESTRICT_ALIAS>(box.data);
for (size_t y = 0; y < box.height; ++y)
{
*rgb24++ = *rgba32++;
*rgb24++ = *rgba32++;
*rgb24++ = *rgba32++;
++rgba32;
uint8_t *RESTRICT_ALIAS rgba32 =
reinterpret_cast<uint8_t * RESTRICT_ALIAS>(box.at(0u, y, 0u));
for (size_t x = 0; x < box.width; ++x)
{
*rgb24++ = *rgba32++;
*rgb24++ = *rgba32++;
*rgb24++ = *rgba32++;
++rgba32;
}
}
rawData = box.data;
}

PixelFormat format = this->ImageFormat();
unsigned int channelCount = PixelUtil::ChannelCount(format);
this->dataPtr->newImageFrame(reinterpret_cast<uint8_t *>(box.data), width,
else
{
// convert to destination format
Ogre::PixelFormatGpu dstOgrePf = Ogre2Conversions::Convert(format);
Ogre::TextureBox dstBox(
texture->getInternalWidth(), texture->getInternalHeight(),
texture->getDepth(), texture->getNumSlices(),
static_cast<uint32_t>(
Ogre::PixelFormatGpuUtils::getBytesPerPixel(dstOgrePf)),
static_cast<uint32_t>(Ogre::PixelFormatGpuUtils::getSizeBytes(
texture->getInternalWidth(), 1u, 1u, 1u, dstOgrePf, 1u)),
static_cast<uint32_t>(Ogre::PixelFormatGpuUtils::getSizeBytes(
texture->getInternalWidth(), texture->getInternalHeight(), 1u, 1u,
dstOgrePf, 1u)));
if (!this->dataPtr->dstImgData)
{
this->dataPtr->dstImgData = std::make_unique<unsigned char []>(
width * height * channelCount * bytesPerChannel);
}
dstBox.data = this->dataPtr->dstImgData.get();
Ogre::Image2::copyContentsToMemory(texture, texture->getEmptyBox(0u),
dstBox, dstOgrePf);
rawData = dstBox.data;
}
this->dataPtr->newImageFrame(reinterpret_cast<uint8_t *>(rawData), width,
height, channelCount,
PixelUtil::Name(this->ImageFormat()));
PixelUtil::Name(format));

// Uncomment to debug wide angle cameraoutput
// gzdbg << "wxh: " << width << " x " << height << std::endl;
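
Reviewer aside: the PF_R8G8B8 branch above keeps the existing in-place RGBA32 to RGB24 repack. The sketch below (standalone, not PR code) restates that repack without the Ogre types to make the row-pitch handling explicit: each padded RGBA row is rewritten into the same allocation as tightly packed RGB, which is why the result can be handed to newImageFrame as contiguous data.

#include <cstddef>
#include <cstdint>

// Pack a row-padded RGBA32 image into tightly packed RGB24, reusing the
// same buffer. Safe in place because the write cursor never overtakes the
// read cursor (3 bytes written per 4 bytes read).
void PackRgbaToRgbInPlace(uint8_t *_data, std::size_t _width,
    std::size_t _height, std::size_t _rowPitchBytes)
{
  uint8_t *dst = _data;
  for (std::size_t y = 0; y < _height; ++y)
  {
    const uint8_t *src = _data + y * _rowPitchBytes;
    for (std::size_t x = 0; x < _width; ++x)
    {
      *dst++ = *src++;  // R
      *dst++ = *src++;  // G
      *dst++ = *src++;  // B
      ++src;            // drop A
    }
  }
}

The non-RGB branch instead sizes the lazily allocated dstImgData buffer as width * height * channelCount * bytesPerChannel and lets Ogre convert into it via Image2::copyContentsToMemory, as shown in the diff.
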
146 changes: 146 additions & 0 deletions test/integration/wide_angle_camera.cc
@@ -42,6 +42,8 @@ std::mutex g_mutex;

/// \brief WideAngle buffer
unsigned char *g_buffer = nullptr;
unsigned char *g_bufferL8 = nullptr;
unsigned char *g_bufferL16 = nullptr;

/// \brief counter of received wideAngle msgs
int g_counter = 0;
@@ -68,6 +70,41 @@ void OnNewWideAngleFrame(const unsigned char *_data,
g_mutex.unlock();
}

//////////////////////////////////////////////////
/// \brief callback to get the wide angle camera image data
void OnNewWideAngleFrameMono(const unsigned char *_data,
unsigned int _width, unsigned int _height,
unsigned int _channels,
const std::string &_format)
{
g_mutex.lock();

unsigned int bytesPerChannel = 0u;
unsigned int bufferSize = 0u;
if (_format == "L8")
{
bytesPerChannel = 1u;
bufferSize = _width * _height * _channels * bytesPerChannel;
if (!g_bufferL8)
g_bufferL8 = new unsigned char[bufferSize];
memcpy(g_bufferL8, _data, bufferSize);
}
else if (_format == "L16")
{
bytesPerChannel = 2u;
bufferSize = _width * _height * _channels * bytesPerChannel;
if (!g_bufferL16)
g_bufferL16 = new unsigned char[bufferSize];
memcpy(g_bufferL16, _data, bufferSize);
}

ASSERT_NE(0u, bytesPerChannel);
ASSERT_NE(0u, bufferSize);

g_counter++;
g_mutex.unlock();
}

//////////////////////////////////////////////////
TEST_F(WideAngleCameraTest, GZ_UTILS_TEST_DISABLED_ON_WIN32(WideAngleCamera))
{
@@ -339,6 +376,8 @@ TEST_F(WideAngleCameraTest, GZ_UTILS_TEST_DISABLED_ON_WIN32(WideAngleCamera))
EXPECT_EQ(bSumQ[1][0], bSumQ[0][1]);

// Clean up
delete [] g_buffer;
g_buffer = nullptr;
engine->DestroyScene(scene);
}

@@ -460,3 +499,110 @@ TEST_F(WideAngleCameraTest, GZ_UTILS_TEST_DISABLED_ON_WIN32(Projection))

ASSERT_EQ(1u, camera.use_count());
}

//////////////////////////////////////////////////
TEST_F(WideAngleCameraTest,
GZ_UTILS_TEST_DISABLED_ON_WIN32(WideAngleCameraMono))
{
CHECK_UNSUPPORTED_ENGINE("optix");

gz::rendering::ScenePtr scene = engine->CreateScene("scene");
ASSERT_NE(nullptr, scene);
scene->SetAmbientLight(1.0, 1.0, 1.0);
scene->SetBackgroundColor(0.2, 0.2, 0.2);

rendering::VisualPtr root = scene->RootVisual();

unsigned int width = 20u;
unsigned int height = 20u;

// Create Wide Angle camera
auto cameraL8 = scene->CreateWideAngleCamera("WideAngleCameraL8");
ASSERT_NE(cameraL8, nullptr);
cameraL8->SetImageFormat(PF_L8);

auto cameraL16 = scene->CreateWideAngleCamera("WideAngleCameraL16");
ASSERT_NE(cameraL16, nullptr);
cameraL16->SetImageFormat(PF_L16);

CameraLens lens;
lens.SetCustomMappingFunction(1.05, 4.0, AFT_TAN, 1.0, 0.0);
lens.SetType(MFT_CUSTOM);
lens.SetCutOffAngle(GZ_PI);

cameraL8->SetLens(lens);
cameraL8->SetHFOV(2.6);
cameraL8->SetImageWidth(width);
cameraL8->SetImageHeight(height);
scene->RootVisual()->AddChild(cameraL8);

cameraL16->SetLens(lens);
cameraL16->SetHFOV(2.6);
cameraL16->SetImageWidth(width);
cameraL16->SetImageHeight(height);
scene->RootVisual()->AddChild(cameraL16);

// create blue material
MaterialPtr blue = scene->CreateMaterial();
blue->SetAmbient(0.0, 0.0, 0.3);
blue->SetDiffuse(0.0, 0.0, 0.8);
blue->SetSpecular(0.5, 0.5, 0.5);

// create box visual in front of cameras
VisualPtr box = scene->CreateVisual();
box->AddGeometry(scene->CreateBox());
box->SetOrigin(0.0, 0.0, 0.0);
box->SetLocalPosition(2, 0, 0);
box->SetLocalScale(1, 1, 1);
box->SetMaterial(blue);
root->AddChild(box);

// Set a callback on the camera sensor to get a wide angle camera frame
gz::common::ConnectionPtr connection =
cameraL8->ConnectNewWideAngleFrame(
std::bind(OnNewWideAngleFrameMono,
std::placeholders::_1, std::placeholders::_2, std::placeholders::_3,
std::placeholders::_4, std::placeholders::_5));
ASSERT_NE(nullptr, connection);
gz::common::ConnectionPtr connection2 =
cameraL16->ConnectNewWideAngleFrame(
std::bind(OnNewWideAngleFrameMono,
std::placeholders::_1, std::placeholders::_2, std::placeholders::_3,
std::placeholders::_4, std::placeholders::_5));
ASSERT_NE(nullptr, connection2);

g_counter = 0;

// Update and verify
cameraL8->Update();
EXPECT_EQ(1, g_counter);

cameraL16->Update();
EXPECT_EQ(2, g_counter);

// Verify image format
EXPECT_EQ(PF_L8, cameraL8->ImageFormat());
EXPECT_EQ(PF_L16, cameraL16->ImageFormat());

// verify cameras can see the box in the middle and the pixel color value
// should be darker than the background (pixel at top center of image)
const unsigned int step = width;
unsigned int topMid = static_cast<unsigned int>(
step / 2.0);
unsigned int mid = static_cast<unsigned int>(
height / 2.0 * step + step / 2.0);
unsigned int topMidValue8 = g_bufferL8[topMid];
uint16_t topMidValue16 = reinterpret_cast<uint16_t *>(g_bufferL16)[topMid];
unsigned int midValue8 = g_bufferL8[mid];
uint16_t midValue16 = reinterpret_cast<uint16_t *>(g_bufferL16)[mid];

EXPECT_GT(topMidValue8, midValue8);
EXPECT_GT(topMidValue16, midValue16);

// Clean up
delete [] g_bufferL8;
g_bufferL8 = nullptr;
delete [] g_bufferL16;
g_bufferL16 = nullptr;
engine->DestroyScene(scene);
}
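
Reviewer aside: a small sketch (helper names are illustrative, not from the test) of the index arithmetic behind the topMid/mid assertions. With a tightly packed width x height single-channel image, pixel (x, y) sits at index y * width + x, and L16 samples are read as 16-bit words, two bytes per pixel.

#include <cstddef>
#include <cstdint>

// Index of pixel (x, y) in a tightly packed single-channel image.
std::size_t PixelIndex(std::size_t _x, std::size_t _y, std::size_t _width)
{
  return _y * _width + _x;
}

// 8-bit luminance sample.
uint8_t SampleL8(const unsigned char *_buf, std::size_t _x, std::size_t _y,
    std::size_t _width)
{
  return _buf[PixelIndex(_x, _y, _width)];
}

// 16-bit luminance sample; each pixel is two bytes, read the same way the
// test does (assumes host byte order matches the buffer).
uint16_t SampleL16(const unsigned char *_buf, std::size_t _x, std::size_t _y,
    std::size_t _width)
{
  return reinterpret_cast<const uint16_t *>(_buf)[PixelIndex(_x, _y, _width)];
}

With width == height == 20, topMid is index 10 (row 0, centre column) and mid is index 210 (centre row, centre column), the pixel that lands on the blue box, which is why the test expects the mid value to be darker than the top-mid background value.
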