[libcamera-devel] [PATCH 1/1] Add fake pipeline handler
Harvey Yang
chenghaoyang at chromium.org
Wed Oct 12 09:59:25 CEST 2022
---
meson_options.txt | 2 +-
src/android/camera_capabilities.cpp | 1 +
src/libcamera/pipeline/fake/fake.cpp | 518 ++++++++++++++++++++++++
src/libcamera/pipeline/fake/meson.build | 3 +
src/libcamera/pipeline_handler.cpp | 2 +-
test/camera/camera_reconfigure.cpp | 2 +-
6 files changed, 525 insertions(+), 3 deletions(-)
create mode 100644 src/libcamera/pipeline/fake/fake.cpp
create mode 100644 src/libcamera/pipeline/fake/meson.build
diff --git a/meson_options.txt b/meson_options.txt
index f1d67808..f08dfc5f 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -37,7 +37,7 @@ option('lc-compliance',
option('pipelines',
type : 'array',
- choices : ['ipu3', 'raspberrypi', 'rkisp1', 'simple', 'uvcvideo', 'vimc'],
+ choices : ['ipu3', 'raspberrypi', 'rkisp1', 'simple', 'uvcvideo', 'vimc', 'fake'],
description : 'Select which pipeline handlers to include')
option('qcam',
diff --git a/src/android/camera_capabilities.cpp b/src/android/camera_capabilities.cpp
index 64bd8dde..730ceafc 100644
--- a/src/android/camera_capabilities.cpp
+++ b/src/android/camera_capabilities.cpp
@@ -1077,6 +1077,7 @@ int CameraCapabilities::initializeStaticMetadata()
{
const Span<const Rectangle> &rects =
properties.get(properties::PixelArrayActiveAreas).value_or(Span<const Rectangle>{});
+ // TODO: initialize at least one Rectangle as default.
std::vector<int32_t> data{
static_cast<int32_t>(rects[0].x),
static_cast<int32_t>(rects[0].y),
diff --git a/src/libcamera/pipeline/fake/fake.cpp b/src/libcamera/pipeline/fake/fake.cpp
new file mode 100644
index 00000000..518de6aa
--- /dev/null
+++ b/src/libcamera/pipeline/fake/fake.cpp
@@ -0,0 +1,518 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * fake.cpp - Pipeline handler for fake cameras
+ */
+
+#include <algorithm>
+#include <ctime>
+#include <iomanip>
+#include <map>
+#include <memory>
+#include <queue>
+#include <set>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/formats.h>
+#include <libcamera/property_ids.h>
+#include <libcamera/request.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/delayed_controls.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Fake)
+
+/*
+ * \brief Return the current monotonic clock time in nanoseconds
+ *
+ * Used to synthesize the SensorTimestamp metadata of completed requests.
+ *
+ * \return The CLOCK_MONOTONIC time in nanoseconds, or 0 on failure
+ */
+static uint64_t CurrentTimestamp()
+{
+	struct timespec ts;
+	if (clock_gettime(CLOCK_MONOTONIC, &ts) < 0) {
+		LOG(Fake, Error) << "Get clock time fails";
+		return 0;
+	}
+
+	/* Compute in the unsigned return type to avoid signed overflow. */
+	return static_cast<uint64_t>(ts.tv_sec) * 1'000'000'000ULL + ts.tv_nsec;
+}
+
+/* Static controls advertised for fake cameras. */
+// NOTE(review): this map is not referenced anywhere else in this file -
+// confirm whether it should be merged into the controls built in
+// registerCameras() or dropped.
+static const ControlInfoMap::Map FakeControls = {
+	{ &controls::draft::PipelineDepth, ControlInfo(2, 3) },
+};
+
+/*
+ * \brief Per-camera private data for the fake pipeline handler
+ *
+ * Holds the hard-coded set of supported sensor modes, the three streams
+ * exposed by the camera, and the streaming state flag.
+ */
+class FakeCameraData : public Camera::Private
+{
+public:
+	/* Static description of one supported sensor mode. */
+	struct Resolution {
+		Size size;
+		std::vector<int> frame_rates;
+		std::vector<std::string> formats;
+	};
+
+	FakeCameraData(PipelineHandler *pipe)
+		: Camera::Private(pipe), supportsFlips_(false)
+	{
+	}
+
+	/* Modes advertised to applications; filled in registerCameras(). */
+	std::vector<Resolution> supported_resolutions_;
+
+	/* Main output, viewfinder and raw streams of the camera. */
+	Stream outStream_;
+	Stream vfStream_;
+	Stream rawStream_;
+
+	// TODO: Check if we should support.
+	bool supportsFlips_;
+	Transform rotationTransform_;
+
+	// TODO: remove - leftovers from the IPU3 handler this file was derived
+	// from; neither queue is used by the fake pipeline.
+	std::queue<Request *> pendingRequests_;
+	std::queue<Request *> processingRequests_;
+
+	// ControlInfoMap ipaControls_;
+	/* Set by start(), cleared by stopDevice(). */
+	bool started_ = false;
+};
+
+/*
+ * \brief Camera configuration for a fake camera
+ *
+ * Validates and adjusts requested stream configurations against the three
+ * fixed streams (main, viewfinder, raw) of the fake camera.
+ */
+class FakeCameraConfiguration : public CameraConfiguration
+{
+public:
+	/* Number of buffers allocated for each stream. */
+	static constexpr unsigned int kBufferCount = 4; // 4~6
+	/* Maximum concurrent streams: main output, viewfinder and raw. */
+	static constexpr unsigned int kMaxStreams = 3;
+
+	FakeCameraConfiguration(FakeCameraData *data);
+
+	Status validate() override;
+
+private:
+	/*
+	 * The FakeCameraData instance is guaranteed to be valid as long as the
+	 * corresponding Camera instance is valid. In order to borrow a
+	 * reference to the camera data, store a new reference to the camera.
+	 */
+	const FakeCameraData *data_;
+};
+
+/*
+ * \brief Pipeline handler exposing fake cameras with no hardware backend
+ *
+ * Registers a single fake camera on first match() and completes every
+ * queued request immediately without touching any device.
+ */
+class PipelineHandlerFake : public PipelineHandler
+{
+public:
+	// NOTE(review): this control ID and the pipe modes appear to be
+	// inherited from the IPU3 handler and are unused here - confirm
+	// whether they can be dropped.
+	static constexpr unsigned int V4L2_CID_Fake_PIPE_MODE = 0x009819c1;
+	static constexpr Size kViewfinderSize{ 1280, 720 };
+
+	enum FakePipeModes {
+		FakePipeModeVideo = 0,
+		FakePipeModeStillCapture = 1,
+	};
+
+	PipelineHandlerFake(CameraManager *manager);
+
+	CameraConfiguration *generateConfiguration(Camera *camera,
+						   const StreamRoles &roles) override;
+	int configure(Camera *camera, CameraConfiguration *config) override;
+
+	int exportFrameBuffers(Camera *camera, Stream *stream,
+			       std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+	int start(Camera *camera, const ControlList *controls) override;
+	void stopDevice(Camera *camera) override;
+
+	int queueRequestDevice(Camera *camera, Request *request) override;
+
+	bool match(DeviceEnumerator *enumerator) override;
+
+private:
+	/* Downcast helper to the fake camera's private data. */
+	FakeCameraData *cameraData(Camera *camera)
+	{
+		return static_cast<FakeCameraData *>(camera->_d());
+	}
+
+	int registerCameras();
+
+	/* Guard so the fake camera set is registered only once per process. */
+	static bool registered_;
+};
+
+bool PipelineHandlerFake::registered_ = false;
+
+/*
+ * \brief Construct a configuration tied to a fake camera
+ * \param[in] data The private data of the camera the configuration refers to
+ */
+FakeCameraConfiguration::FakeCameraConfiguration(FakeCameraData *data)
+	: CameraConfiguration(), data_(data)
+{
+}
+
+/*
+ * \brief Validate the configuration and adjust unsupported entries
+ *
+ * Cap the number of entries to kMaxStreams, assign each entry to the raw,
+ * main or viewfinder stream, and force the supported pixel format, stride,
+ * frame size and buffer count on every entry.
+ *
+ * \return Valid if nothing was changed, Adjusted if entries were modified,
+ * or Invalid if the configuration cannot be supported
+ */
+CameraConfiguration::Status FakeCameraConfiguration::validate()
+{
+	Status status = Valid;
+
+	if (config_.empty())
+		return Invalid;
+
+	// Transform combined = transform * data_->rotationTransform_;
+
+	// /*
+	// * We combine the platform and user transform, but must "adjust away"
+	// * any combined result that includes a transposition, as we can't do
+	// * those. In this case, flipping only the transpose bit is helpful to
+	// * applications - they either get the transform they requested, or have
+	// * to do a simple transpose themselves (they don't have to worry about
+	// * the other possible cases).
+	// */
+	// if (!!(combined & Transform::Transpose)) {
+	// /*
+	// * Flipping the transpose bit in "transform" flips it in the
+	// * combined result too (as it's the last thing that happens),
+	// * which is of course clearing it.
+	// */
+	// transform ^= Transform::Transpose;
+	// combined &= ~Transform::Transpose;
+	// status = Adjusted;
+	// }
+
+	// /*
+	// * We also check if the sensor doesn't do h/vflips at all, in which
+	// * case we clear them, and the application will have to do everything.
+	// */
+	// if (!data_->supportsFlips_ && !!combined) {
+	// /*
+	// * If the sensor can do no transforms, then combined must be
+	// * changed to the identity. The only user transform that gives
+	// * rise to this is the inverse of the rotation. (Recall that
+	// * combined = transform * rotationTransform.)
+	// */
+	// transform = -data_->rotationTransform_;
+	// combined = Transform::Identity;
+	// status = Adjusted;
+	// }
+
+	/*
+	 * Store the final combined transform that configure() will need to
+	 * apply to the sensor to save us working it out again.
+	 */
+	// combinedTransform_ = combined;
+
+	/* Cap the number of entries to the available streams. */
+	if (config_.size() > kMaxStreams) {
+		config_.resize(kMaxStreams);
+		status = Adjusted;
+	}
+
+	/*
+	 * Validate the requested stream configuration by collecting the
+	 * maximum RAW stream width and height and picking the closest larger
+	 * match. If no RAW stream is requested, derive a size from the
+	 * largest YUV stream instead.
+	 *
+	 * NOTE(review): logic inherited from the IPU3 handler; the fake
+	 * pipeline applies no IF/BDS margin constraints.
+	 */
+	unsigned int rawCount = 0;
+	unsigned int yuvCount = 0;
+	Size rawRequirement;
+	Size maxYuvSize;
+	Size rawSize;
+
+	for (const StreamConfiguration &cfg : config_) {
+		const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+
+		if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
+			rawCount++;
+			rawSize = std::max(rawSize, cfg.size);
+		} else {
+			yuvCount++;
+			maxYuvSize = std::max(maxYuvSize, cfg.size);
+			rawRequirement.expandTo(cfg.size);
+		}
+	}
+
+	// TODO: Base on number of cameras?
+	if (rawCount > 1 || yuvCount > 2) {
+		LOG(Fake, Debug) << "Camera configuration not supported";
+		return Invalid;
+	}
+
+	/*
+	 * Generate the RAW stream size.
+	 *
+	 * The output YUV streams will be limited in size to the maximum frame
+	 * size requested for the RAW stream, if present. If no raw stream is
+	 * requested, generate a size from the largest YUV stream, bound by
+	 * the sensor's maximum resolution.
+	 */
+	// TODO
+	if (rawSize.isNull())
+		rawSize = rawRequirement;
+
+	/*
+	 * Adjust the configurations if needed and assign streams while
+	 * iterating them.
+	 */
+	bool mainOutputAvailable = true;
+	for (unsigned int i = 0; i < config_.size(); ++i) {
+		const PixelFormatInfo &info = PixelFormatInfo::info(config_[i].pixelFormat);
+		const StreamConfiguration originalCfg = config_[i];
+		StreamConfiguration *cfg = &config_[i];
+
+		LOG(Fake, Debug) << "Validating stream: " << config_[i].toString();
+
+		if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
+			/* Initialize the RAW stream. */
+			cfg->size = rawSize;
+			// TODO: check
+			cfg->pixelFormat = formats::SBGGR10_IPU3;
+			cfg->bufferCount = FakeCameraConfiguration::kBufferCount;
+			cfg->stride = info.stride(cfg->size.width, 0, 64);
+			cfg->frameSize = info.frameSize(cfg->size, 64);
+			/* data_ is held const; the streams themselves are mutable. */
+			cfg->setStream(const_cast<Stream *>(&data_->rawStream_));
+
+			LOG(Fake, Debug) << "Assigned " << cfg->toString()
+					 << " to the raw stream";
+		} else {
+			/* Assign and configure the main and viewfinder outputs. */
+
+			cfg->pixelFormat = formats::NV12;
+			cfg->bufferCount = kBufferCount;
+			cfg->stride = info.stride(cfg->size.width, 0, 1);
+			cfg->frameSize = info.frameSize(cfg->size, 1);
+
+			/*
+			 * Use the main output stream in case only one stream is
+			 * requested or if the current configuration is the one
+			 * with the maximum YUV output size.
+			 */
+			if (mainOutputAvailable &&
+			    (originalCfg.size == maxYuvSize || yuvCount == 1)) {
+				cfg->setStream(const_cast<Stream *>(&data_->outStream_));
+				mainOutputAvailable = false;
+
+				LOG(Fake, Debug) << "Assigned " << cfg->toString()
+						 << " to the main output";
+			} else {
+				cfg->setStream(const_cast<Stream *>(&data_->vfStream_));
+
+				LOG(Fake, Debug) << "Assigned " << cfg->toString()
+						 << " to the viewfinder output";
+			}
+		}
+
+		if (cfg->pixelFormat != originalCfg.pixelFormat ||
+		    cfg->size != originalCfg.size) {
+			LOG(Fake, Debug)
+				<< "Stream " << i << " configuration adjusted to "
+				<< cfg->toString();
+			status = Adjusted;
+		}
+	}
+
+	return status;
+}
+
+/* Construct the fake pipeline handler; configuration loading is a TODO. */
+PipelineHandlerFake::PipelineHandlerFake(CameraManager *manager)
+	: PipelineHandler(manager)
+{
+	// TODO: read the fake hal spec file.
+}
+
+/*
+ * \brief Generate a default configuration for the requested roles
+ * \param[in] camera The camera the configuration is generated for
+ * \param[in] roles The stream roles the configuration shall satisfy
+ *
+ * Build one StreamConfiguration per requested role from the camera's
+ * supported resolutions, then validate the result.
+ *
+ * \return A validated configuration owned by the caller, or nullptr if a
+ * role is not supported or the configuration fails validation
+ */
+CameraConfiguration *PipelineHandlerFake::generateConfiguration(Camera *camera,
+								const StreamRoles &roles)
+{
+	FakeCameraData *data = cameraData(camera);
+	/* unique_ptr releases the configuration on every failure path. */
+	std::unique_ptr<FakeCameraConfiguration> config =
+		std::make_unique<FakeCameraConfiguration>(data);
+
+	if (roles.empty())
+		return config.release();
+
+	/* Derive the minimum and maximum sizes from the supported modes. */
+	Size minSize, sensorResolution;
+	for (const auto &resolution : data->supported_resolutions_) {
+		if (minSize.isNull() || minSize > resolution.size)
+			minSize = resolution.size;
+
+		sensorResolution = std::max(sensorResolution, resolution.size);
+	}
+
+	for (const StreamRole role : roles) {
+		std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+		unsigned int bufferCount;
+		PixelFormat pixelFormat;
+		Size size;
+
+		switch (role) {
+		case StreamRole::StillCapture:
+			size = sensorResolution;
+			pixelFormat = formats::NV12;
+			bufferCount = FakeCameraConfiguration::kBufferCount;
+			streamFormats[pixelFormat] = { { minSize, size } };
+
+			break;
+
+		case StreamRole::Raw: {
+			// TODO: check
+			pixelFormat = formats::SBGGR10_IPU3;
+			size = sensorResolution;
+			bufferCount = FakeCameraConfiguration::kBufferCount;
+			streamFormats[pixelFormat] = { { minSize, size } };
+
+			break;
+		}
+
+		case StreamRole::Viewfinder:
+		case StreamRole::VideoRecording: {
+			/*
+			 * Default viewfinder and videorecording to the
+			 * maximum sensor resolution.
+			 */
+			size = sensorResolution;
+			pixelFormat = formats::NV12;
+			bufferCount = FakeCameraConfiguration::kBufferCount;
+			streamFormats[pixelFormat] = { { minSize, size } };
+
+			break;
+		}
+
+		default:
+			LOG(Fake, Error)
+				<< "Requested stream role not supported: " << role;
+			return nullptr;
+		}
+
+		StreamFormats formats(streamFormats);
+		StreamConfiguration cfg(formats);
+		cfg.size = size;
+		cfg.pixelFormat = pixelFormat;
+		cfg.bufferCount = bufferCount;
+		config->addConfiguration(cfg);
+	}
+
+	/* Previously leaked |config| here by returning without deleting it. */
+	if (config->validate() == CameraConfiguration::Invalid)
+		return nullptr;
+
+	return config.release();
+}
+
+/* The fake pipeline has no hardware to configure; always succeed. */
+int PipelineHandlerFake::configure([[maybe_unused]] Camera *camera,
+				   [[maybe_unused]] CameraConfiguration *config)
+{
+	return 0;
+}
+
+/*
+ * Buffer export is not supported by the fake pipeline; this entry point is
+ * not expected to be reached (LOG Fatal), and reports -EINVAL otherwise.
+ */
+int PipelineHandlerFake::exportFrameBuffers([[maybe_unused]] Camera *camera,
+					    [[maybe_unused]] Stream *stream,
+					    [[maybe_unused]] std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+	LOG(Fake, Fatal) << "exportFrameBuffers should never be called";
+	return -EINVAL;
+}
+
+/* No hardware to start: just mark the camera as streaming. */
+int PipelineHandlerFake::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
+{
+	cameraData(camera)->started_ = true;
+
+	return 0;
+}
+
+/* No hardware to stop: just clear the streaming flag. */
+void PipelineHandlerFake::stopDevice(Camera *camera)
+{
+	cameraData(camera)->started_ = false;
+}
+
+/*
+ * \brief Queue a request to the fake device
+ * \param[in] camera The camera the request belongs to
+ * \param[in] request The request to queue
+ *
+ * There is no hardware to fill the buffers: every buffer is completed
+ * immediately and the request finishes with only the sensor timestamp as
+ * metadata.
+ *
+ * \return 0 on success or -EINVAL if \a camera is null
+ */
+int PipelineHandlerFake::queueRequestDevice(Camera *camera, Request *request)
+{
+	if (!camera)
+		return -EINVAL;
+
+	/* Iterate by reference to avoid copying each map entry. */
+	for (const auto &it : request->buffers())
+		completeBuffer(request, it.second);
+
+	// TODO: request.metadata()
+	request->metadata().set(controls::SensorTimestamp, CurrentTimestamp());
+	completeRequest(request);
+
+	return 0;
+}
+
+/*
+ * \brief Match the fake pipeline handler
+ *
+ * The fake handler matches no real media device: it registers its camera
+ * set exactly once per process, on the first call, regardless of the
+ * enumerator contents.
+ *
+ * \return True on the first call if registration succeeded, false otherwise
+ */
+bool PipelineHandlerFake::match(DeviceEnumerator *enumerator)
+{
+	// TODO: exhaust all devices in |enumerator|.
+	if (!enumerator)
+		LOG(Fake, Info) << "Invalid enumerator";
+
+	/* Subsequent match() calls must not register a duplicate camera. */
+	if (registered_)
+		return false;
+
+	registered_ = true;
+	return registerCameras() == 0;
+}
+
+/**
+ * \brief Create and register the fake camera
+ *
+ * Create a fake camera with hard-coded supported resolutions, properties
+ * and controls, and register it with the camera manager.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int PipelineHandlerFake::registerCameras()
+{
+	std::unique_ptr<FakeCameraData> data =
+		std::make_unique<FakeCameraData>(this);
+	/* Capture the stream pointers before |data| is moved below. */
+	std::set<Stream *> streams = {
+		&data->outStream_,
+		&data->vfStream_,
+		&data->rawStream_,
+	};
+
+	// TODO: Read from config or from IPC.
+	// TODO: Check with Han-lin: Can this function be called more than once?
+	data->supported_resolutions_.resize(2);
+	data->supported_resolutions_[0].size = Size(1920, 1080);
+	data->supported_resolutions_[0].frame_rates.push_back(30);
+	data->supported_resolutions_[0].formats.push_back("YCbCr_420_888");
+	data->supported_resolutions_[0].formats.push_back("BLOB");
+	data->supported_resolutions_[1].size = Size(1280, 720);
+	data->supported_resolutions_[1].frame_rates.push_back(30);
+	data->supported_resolutions_[1].frame_rates.push_back(60);
+	data->supported_resolutions_[1].formats.push_back("YCbCr_420_888");
+	data->supported_resolutions_[1].formats.push_back("BLOB");
+
+	// TODO: Assign different locations for different cameras based on config.
+	data->properties_.set(properties::Location, properties::CameraLocationFront);
+	data->properties_.set(properties::PixelArrayActiveAreas, { Rectangle(Size(1920, 1080)) });
+
+	// TODO: Set FrameDurationLimits based on config.
+	// NOTE(review): FrameDurationLimits is a duration control; the values
+	// 30/60 look like frame rates rather than durations - confirm the
+	// intended units before relying on this.
+	ControlInfoMap::Map controls{};
+	int64_t min_frame_duration = 30, max_frame_duration = 60;
+	controls[&controls::FrameDurationLimits] = ControlInfo(min_frame_duration, max_frame_duration);
+	data->controlInfo_ = ControlInfoMap(std::move(controls), controls::controls);
+
+	/* The camera ID mimics an ACPI device path. */
+	std::shared_ptr<Camera> camera =
+		Camera::create(std::move(data), "\\_SB_.PCI0.I2C4.CAM1" /* cameraId */, streams);
+
+	registerCamera(std::move(camera));
+
+	return 0;
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerFake)
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/fake/meson.build b/src/libcamera/pipeline/fake/meson.build
new file mode 100644
index 00000000..13980b4a
--- /dev/null
+++ b/src/libcamera/pipeline/fake/meson.build
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_sources += files([ 'fake.cpp' ])
diff --git a/src/libcamera/pipeline_handler.cpp b/src/libcamera/pipeline_handler.cpp
index e5cb751c..4261154d 100644
--- a/src/libcamera/pipeline_handler.cpp
+++ b/src/libcamera/pipeline_handler.cpp
@@ -536,7 +536,7 @@ void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera)
cameras_.push_back(camera);
if (mediaDevices_.empty())
- LOG(Pipeline, Fatal)
+ LOG(Pipeline, Error)
<< "Registering camera with no media devices!";
/*
diff --git a/test/camera/camera_reconfigure.cpp b/test/camera/camera_reconfigure.cpp
index d12e2413..06c87730 100644
--- a/test/camera/camera_reconfigure.cpp
+++ b/test/camera/camera_reconfigure.cpp
@@ -98,7 +98,7 @@ private:
return TestFail;
}
- requests_.push_back(move(request));
+ requests_.push_back(std::move(request));
}
camera_->requestCompleted.connect(this, &CameraReconfigure::requestComplete);
--
2.38.0.rc1.362.ged0d419d3c-goog
More information about the libcamera-devel
mailing list