[libcamera-devel] [PATCH 06/14] libcamera: converter: Add v4l2 m2m converter implementation

Jacopo Mondi jacopo at jmondi.org
Thu Sep 15 11:46:21 CEST 2022


Hi Xavier

On Thu, Sep 08, 2022 at 08:48:42PM +0200, Xavier Roumegue via libcamera-devel wrote:
> Introduce a converter implementation relying on a v4l2 m2m device, mostly
> based on the current simple pipeline converter implementation.
>
> The main change is the introduction of a Mapping object, which can be
> loaded from a configuration file that defines vertex remapping
> coordinates. These mappings can be applied by any class derived from
> this base class that overrides the applyMapping() method.
>

I'll skip questions on the parts of the converter implementation that are
not clear to me, as I understand this basically comes from the simple
pipeline handler's converter implementation [*]

I would rather focus on two general questions:
1) Do we want to create a dedicated directory for converters/post-processors
instead of placing them in src/libcamera ?

2) Is the format expected in the configuration file documented
anywhere ? (I have sketched my reading of it below the footnote)

[*] Not true, I will ask a question anyway, see below :)
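
For the record, here is what I *think* the parser in loadConfiguration()
expects, reconstructed only from the code below, so please treat it as my
guess rather than as documentation (the resolutions and table values are
made up):

    mappings:
      - input-resolution: [ 1920, 1080 ]
        output-resolution: [ 1280, 720 ]
        mapping: [ 0, 1, 2, 3 ]
      - input-resolution: [ 1280, 720 ]
        output-resolution: [ 640, 480 ]
        mapping: [ 0, 1, 2, 3 ]

What the "mapping" entries actually encode (vertex coordinates ? packed
x/y pairs ?) is exactly what I would like to see documented somewhere.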

> Signed-off-by: Xavier Roumegue <xavier.roumegue at oss.nxp.com>
> ---
>  .../libcamera/internal/converter_v4l2_m2m.h   | 120 +++++
>  include/libcamera/internal/meson.build        |   1 +
>  src/libcamera/converter_v4l2_m2m.cpp          | 504 ++++++++++++++++++
>  src/libcamera/meson.build                     |   1 +
>  4 files changed, 626 insertions(+)
>  create mode 100644 include/libcamera/internal/converter_v4l2_m2m.h
>  create mode 100644 src/libcamera/converter_v4l2_m2m.cpp
>
> diff --git a/include/libcamera/internal/converter_v4l2_m2m.h b/include/libcamera/internal/converter_v4l2_m2m.h
> new file mode 100644
> index 00000000..3667b128
> --- /dev/null
> +++ b/include/libcamera/internal/converter_v4l2_m2m.h
> @@ -0,0 +1,120 @@
> +/* SPDX-License-Identifier: LGPL-2.1-or-later */
> +/*
> + * Copyright 2022 NXP
> + *
> + * converter_v4l2_m2m.h - V4L2 M2M Format converter interface
> + */
> +
> +#pragma once
> +
> +#include <functional>
> +#include <map>
> +#include <memory>
> +#include <string>
> +#include <tuple>
> +#include <vector>
> +
> +#include <libcamera/base/log.h>
> +#include <libcamera/base/signal.h>
> +
> +#include <libcamera/geometry.h>
> +#include <libcamera/pixel_format.h>
> +
> +#include "libcamera/internal/converter.h"
> +
> +namespace libcamera {
> +
> +class FrameBuffer;
> +class MediaDevice;
> +class Size;
> +class SizeRange;
> +struct StreamConfiguration;
> +class V4L2M2MDevice;
> +class V4L2M2MConverter;
> +class Converter;
> +
> +class V4L2M2MConverter : public Converter
> +{
> +protected:
> +	class Mapping
> +	{
> +	public:
> +		Mapping(const Size &input, const Size &output, const std::vector<uint32_t> &map)
> +			: input_(input), output_(output), map_(map) {}
> +		Size getInputSize() const { return input_; }
> +		Size getOutputSize() const { return output_; }
> +		std::size_t getLength() const { return map_.size(); }
> +		const uint32_t *getMapping() const { return map_.data(); }
> +
> +	private:
> +		Size input_;
> +		Size output_;
> +		std::vector<uint32_t> map_;
> +	};
> +
> +	class Stream : protected Loggable
> +	{
> +	public:
> +		Stream(V4L2M2MConverter *converter, unsigned int index);
> +
> +		bool isValid() const { return m2m_ != nullptr; }
> +
> +		int configure(const StreamConfiguration &inputCfg,
> +			      const StreamConfiguration &outputCfg);
> +		int exportBuffers(unsigned int count,
> +				  std::vector<std::unique_ptr<FrameBuffer>> *buffers);
> +
> +		int start();
> +		void stop();
> +
> +		int queueBuffers(FrameBuffer *input, FrameBuffer *output);
> +		std::unique_ptr<V4L2M2MDevice> m2m_;
> +
> +	protected:
> +		std::string logPrefix() const override;
> +
> +	private:
> +		void captureBufferReady(FrameBuffer *buffer);
> +		void outputBufferReady(FrameBuffer *buffer);
> +
> +		V4L2M2MConverter *converter_;
> +		unsigned int index_;
> +
> +		unsigned int inputBufferCount_;
> +		unsigned int outputBufferCount_;
> +	};
> +
> +	std::unique_ptr<V4L2M2MDevice> m2m_;
> +
> +	std::vector<Stream> streams_;
> +	std::vector<Mapping> mappings_;
> +	std::map<FrameBuffer *, unsigned int> queue_;
> +
> +public:
> +	V4L2M2MConverter(MediaDevice *media);
> +
> +	int loadConfiguration(const std::string &filename) override;
> +
> +	bool isValid() const { return m2m_ != nullptr; }
> +
> +	std::vector<PixelFormat> formats(PixelFormat input);
> +	SizeRange sizes(const Size &input);
> +
> +	std::tuple<unsigned int, unsigned int>
> +	strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size);
> +
> +	int configure(const StreamConfiguration &inputCfg,
> +		      const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfg);
> +	int exportBuffers(unsigned int output, unsigned int count,
> +			  std::vector<std::unique_ptr<FrameBuffer>> *buffers);
> +
> +	int start();
> +	void stop();
> +
> +	int queueBuffers(FrameBuffer *input,
> +			 const std::map<unsigned int, FrameBuffer *> &outputs);
> +
> +	virtual int applyMapping([[maybe_unused]] Stream *stream,
> +				 [[maybe_unused]] Mapping &mapping)
> +	{
> +		return 0;
> +	}
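
If I read the Mapping / applyMapping() plumbing correctly, a
hardware-specific converter (the dewarper later in this series, I assume)
is expected to derive from this class and override applyMapping(),
something along these lines (my own sketch, class name and body made up):

	class DewarpConverter : public V4L2M2MConverter
	{
	public:
		using V4L2M2MConverter::V4L2M2MConverter;

		int applyMapping(Stream *stream, Mapping &mapping) override
		{
			/*
			 * A real implementation would program the table
			 * (mapping.getMapping(), mapping.getLength() entries)
			 * into the hardware, e.g. through a driver-specific
			 * V4L2 control set on stream->m2m_.
			 */
			if (!stream->isValid() || mapping.getLength() == 0)
				return -EINVAL;

			return 0;
		}
	};

Is that the intended usage ? If so, it may be worth capturing it in the
class documentation.
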
> +};
> +
> +} /* namespace libcamera */
> diff --git a/include/libcamera/internal/meson.build b/include/libcamera/internal/meson.build
> index 8f50d755..132de5ef 100644
> --- a/include/libcamera/internal/meson.build
> +++ b/include/libcamera/internal/meson.build
> @@ -20,6 +20,7 @@ libcamera_internal_headers = files([
>      'control_serializer.h',
>      'control_validator.h',
>      'converter.h',
> +    'converter_v4l2_m2m.h',
>      'delayed_controls.h',
>      'device_enumerator.h',
>      'device_enumerator_sysfs.h',
> diff --git a/src/libcamera/converter_v4l2_m2m.cpp b/src/libcamera/converter_v4l2_m2m.cpp
> new file mode 100644
> index 00000000..942e6e6f
> --- /dev/null
> +++ b/src/libcamera/converter_v4l2_m2m.cpp
> @@ -0,0 +1,504 @@
> +/* SPDX-License-Identifier: LGPL-2.1-or-later */
> +/*
> + * Copyright (C) 2020, Laurent Pinchart
> + * Copyright 2022 NXP
> + *
> + * converter_v4l2_m2m.cpp - V4L2 M2M Format converter
> + */
> +
> +#include <algorithm>
> +#include <limits.h>
> +
> +#include <libcamera/base/file.h>
> +#include <libcamera/base/log.h>
> +#include <libcamera/base/signal.h>
> +#include <libcamera/base/utils.h>
> +
> +#include <libcamera/framebuffer.h>
> +#include <libcamera/geometry.h>
> +#include <libcamera/stream.h>
> +
> +#include "libcamera/internal/converter_v4l2_m2m.h"
> +#include "libcamera/internal/media_device.h"
> +#include "libcamera/internal/v4l2_videodevice.h"
> +#include "libcamera/internal/yaml_parser.h"
> +
> +namespace libcamera {
> +
> +LOG_DECLARE_CATEGORY(Converter)
> +
> +/* -----------------------------------------------------------------------------
> + * V4L2M2MConverter::Stream
> + */
> +
> +V4L2M2MConverter::Stream::Stream(V4L2M2MConverter *converter, unsigned int index)
> +	: converter_(converter), index_(index)
> +{
> +	m2m_ = std::make_unique<V4L2M2MDevice>(converter->deviceNode_);
> +
> +	m2m_->output()->bufferReady.connect(this, &Stream::outputBufferReady);
> +	m2m_->capture()->bufferReady.connect(this, &Stream::captureBufferReady);
> +
> +	int ret = m2m_->open();
> +	if (ret < 0)
> +		m2m_.reset();

Why are we re-creating and opening the m2m_ device every time we create
a stream ? Isn't it the same device that was created and opened when the
V4L2M2MConverter was constructed ?

Thanks
  j

> +}
> +
> +int V4L2M2MConverter::Stream::configure(const StreamConfiguration &inputCfg,
> +					const StreamConfiguration &outputCfg)
> +{
> +	V4L2PixelFormat videoFormat =
> +		m2m_->output()->toV4L2PixelFormat(inputCfg.pixelFormat);
> +
> +	V4L2DeviceFormat format;
> +	format.fourcc = videoFormat;
> +	format.size = inputCfg.size;
> +	format.planesCount = 1;
> +	format.planes[0].bpl = inputCfg.stride;
> +
> +	int ret = m2m_->output()->setFormat(&format);
> +	if (ret < 0) {
> +		LOG(Converter, Error)
> +			<< "Failed to set input format: " << strerror(-ret);
> +		return ret;
> +	}
> +
> +	if (format.fourcc != videoFormat || format.size != inputCfg.size ||
> +	    format.planes[0].bpl != inputCfg.stride) {
> +		LOG(Converter, Error)
> +			<< "Input format not supported (requested "
> +			<< inputCfg.size << "-" << videoFormat
> +			<< ", got " << format << ")";
> +		return -EINVAL;
> +	}
> +
> +	/* Set the pixel format and size on the output. */
> +	videoFormat = m2m_->capture()->toV4L2PixelFormat(outputCfg.pixelFormat);
> +	format = {};
> +	format.fourcc = videoFormat;
> +	format.size = outputCfg.size;
> +
> +	ret = m2m_->capture()->setFormat(&format);
> +	if (ret < 0) {
> +		LOG(Converter, Error)
> +			<< "Failed to set output format: " << strerror(-ret);
> +		return ret;
> +	}
> +
> +	if (format.fourcc != videoFormat || format.size != outputCfg.size) {
> +		LOG(Converter, Error)
> +			<< "Output format not supported";
> +		return -EINVAL;
> +	}
> +
> +	inputBufferCount_ = inputCfg.bufferCount;
> +	outputBufferCount_ = outputCfg.bufferCount;
> +
> +	for (Mapping &mapping : converter_->mappings_) {
> +		if (mapping.getInputSize() == inputCfg.size &&
> +		    mapping.getOutputSize() == outputCfg.size) {
> +			LOG(Converter, Debug)
> +				<< "Got a configuration match "
> +				<< inputCfg.size << " --> " << outputCfg.size;
> +			converter_->applyMapping(this, mapping);
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +int V4L2M2MConverter::Stream::exportBuffers(unsigned int count,
> +					    std::vector<std::unique_ptr<FrameBuffer>> *buffers)
> +{
> +	return m2m_->capture()->exportBuffers(count, buffers);
> +}
> +
> +int V4L2M2MConverter::Stream::start()
> +{
> +	int ret = m2m_->output()->importBuffers(inputBufferCount_);
> +	if (ret < 0)
> +		return ret;
> +
> +	ret = m2m_->capture()->importBuffers(outputBufferCount_);
> +	if (ret < 0) {
> +		stop();
> +		return ret;
> +	}
> +
> +	ret = m2m_->output()->streamOn();
> +	if (ret < 0) {
> +		stop();
> +		return ret;
> +	}
> +
> +	ret = m2m_->capture()->streamOn();
> +	if (ret < 0) {
> +		stop();
> +		return ret;
> +	}
> +
> +	return 0;
> +}
> +
> +void V4L2M2MConverter::Stream::stop()
> +{
> +	m2m_->capture()->streamOff();
> +	m2m_->output()->streamOff();
> +	m2m_->capture()->releaseBuffers();
> +	m2m_->output()->releaseBuffers();
> +}
> +
> +int V4L2M2MConverter::Stream::queueBuffers(FrameBuffer *input, FrameBuffer *output)
> +{
> +	int ret = m2m_->output()->queueBuffer(input);
> +	if (ret < 0)
> +		return ret;
> +
> +	ret = m2m_->capture()->queueBuffer(output);
> +	if (ret < 0)
> +		return ret;
> +
> +	return 0;
> +}
> +
> +std::string V4L2M2MConverter::Stream::logPrefix() const
> +{
> +	return "stream" + std::to_string(index_);
> +}
> +
> +void V4L2M2MConverter::Stream::outputBufferReady(FrameBuffer *buffer)
> +{
> +	auto it = converter_->queue_.find(buffer);
> +	if (it == converter_->queue_.end())
> +		return;
> +
> +	if (!--it->second) {
> +		converter_->inputBufferReady.emit(buffer);
> +		converter_->queue_.erase(it);
> +	}
> +}
> +
> +void V4L2M2MConverter::Stream::captureBufferReady(FrameBuffer *buffer)
> +{
> +	converter_->outputBufferReady.emit(buffer);
> +}
> +
> +/* -----------------------------------------------------------------------------
> + * V4L2M2MConverter
> + */
> +
> +V4L2M2MConverter::V4L2M2MConverter(MediaDevice *media)
> +	: Converter(media)
> +{
> +	if (deviceNode_.empty())
> +		return;
> +
> +	m2m_ = std::make_unique<V4L2M2MDevice>(deviceNode_);
> +	int ret = m2m_->open();
> +	if (ret < 0) {
> +		m2m_.reset();
> +		return;
> +	}
> +}
> +
> +int V4L2M2MConverter::loadConfiguration(const std::string &filename)
> +{
> +	LOG(Converter, Debug)
> +		<< "Parsing configuration file " << filename;
> +
> +	File file(filename);
> +
> +	if (!file.open(File::OpenModeFlag::ReadOnly)) {
> +		int ret = file.error();
> +		LOG(Converter, Error)
> +			<< "Failed to open configuration file "
> +			<< filename << ": " << strerror(-ret);
> +		return ret;
> +	}
> +
> +	std::unique_ptr<libcamera::YamlObject> data = YamlParser::parse(file);
> +	if (!data)
> +		return -EINVAL;
> +
> +	if (!data->contains("mappings")) {
> +		LOG(Converter, Error)
> +			<< "Vertex mapping key missing";
> +		return -EINVAL;
> +	}
> +
> +	const YamlObject &mappings = (*data)["mappings"];
> +	if (!mappings.isList() || mappings.size() == 0) {
> +		LOG(Converter, Error)
> +			<< "Invalid mappings entry";
> +		return -EINVAL;
> +	}
> +
> +	LOG(Converter, Debug)
> +		<< "Parsing " << mappings.size() << " mappings";
> +	mappings_.clear();
> +	mappings_.reserve(mappings.size());
> +
> +	for (std::size_t i = 0; i < mappings.size(); i++) {
> +		const YamlObject &mapping = mappings[i];
> +		if (!mapping.isDictionary()) {
> +			LOG(Converter, Error)
> +				<< "Mapping is not a dictionary";
> +			return -EINVAL;
> +		}
> +
> +		if (!mapping.contains("input-resolution")) {
> +			LOG(Converter, Error)
> +				<< "Input resolution missing";
> +			return -EINVAL;
> +		}
> +
> +		if (!mapping.contains("output-resolution")) {
> +			LOG(Converter, Error)
> +				<< "Output resolution missing";
> +			return -EINVAL;
> +		}
> +
> +		if (!mapping.contains("mapping")) {
> +			LOG(Converter, Error)
> +				<< "Mapping table missing";
> +			return -EINVAL;
> +		}
> +
> +		const YamlObject &input_res = mapping["input-resolution"];
> +		if (!input_res.isList() || input_res.size() != 2) {
> +			LOG(Converter, Error)
> +				<< "Incorrect input resolution";
> +			return -EINVAL;
> +		}
> +
> +		const YamlObject &output_res = mapping["output-resolution"];
> +		if (!output_res.isList() || output_res.size() != 2) {
> +			LOG(Converter, Error)
> +				<< "Incorrect output resolution";
> +			return -EINVAL;
> +		}
> +
> +		const YamlObject &map = mapping["mapping"];
> +		if (!map.isList() || map.size() == 0) {
> +			LOG(Converter, Error)
> +				<< "Incorrect mapping entries";
> +			return -EINVAL;
> +		}
> +
> +		Size input(input_res[0].get<uint32_t>(0), input_res[1].get<uint32_t>(0));
> +		Size output(output_res[0].get<uint32_t>(0), output_res[1].get<uint32_t>(0));
> +		const auto &mapVector = map.getList<uint32_t>().value_or(utils::defopt);
> +
> +		LOG(Converter, Debug)
> +			<< "Input/Output mapping resolution " << input << " ---> " << output;
> +		mappings_.emplace_back(Mapping(input, output, mapVector));
> +	}
> +
> +	return mappings.size();
> +}
> +
> +std::vector<PixelFormat> V4L2M2MConverter::formats(PixelFormat input)
> +{
> +	if (!m2m_)
> +		return {};
> +
> +	/*
> +	 * Set the format on the input side (V4L2 output) of the converter to
> +	 * enumerate the conversion capabilities on its output (V4L2 capture).
> +	 */
> +	V4L2DeviceFormat v4l2Format;
> +	v4l2Format.fourcc = m2m_->output()->toV4L2PixelFormat(input);
> +	v4l2Format.size = { 1, 1 };
> +
> +	int ret = m2m_->output()->setFormat(&v4l2Format);
> +	if (ret < 0) {
> +		LOG(Converter, Error)
> +			<< "Failed to set format: " << strerror(-ret);
> +		return {};
> +	}
> +
> +	if (v4l2Format.fourcc != m2m_->output()->toV4L2PixelFormat(input)) {
> +		LOG(Converter, Debug)
> +			<< "Input format " << input << " not supported.";
> +		return {};
> +	}
> +
> +	std::vector<PixelFormat> pixelFormats;
> +
> +	for (const auto &format : m2m_->capture()->formats()) {
> +		PixelFormat pixelFormat = format.first.toPixelFormat();
> +		if (pixelFormat)
> +			pixelFormats.push_back(pixelFormat);
> +	}
> +
> +	return pixelFormats;
> +}
> +
> +SizeRange V4L2M2MConverter::sizes(const Size &input)
> +{
> +	if (!m2m_)
> +		return {};
> +
> +	/*
> +	 * Set the size on the input side (V4L2 output) of the converter to
> +	 * enumerate the scaling capabilities on its output (V4L2 capture).
> +	 */
> +	V4L2DeviceFormat format;
> +	format.fourcc = V4L2PixelFormat();
> +	format.size = input;
> +
> +	int ret = m2m_->output()->setFormat(&format);
> +	if (ret < 0) {
> +		LOG(Converter, Error)
> +			<< "Failed to set format: " << strerror(-ret);
> +		return {};
> +	}
> +
> +	SizeRange sizes;
> +
> +	format.size = { 1, 1 };
> +	ret = m2m_->capture()->setFormat(&format);
> +	if (ret < 0) {
> +		LOG(Converter, Error)
> +			<< "Failed to set format: " << strerror(-ret);
> +		return {};
> +	}
> +
> +	sizes.min = format.size;
> +
> +	format.size = { UINT_MAX, UINT_MAX };
> +	ret = m2m_->capture()->setFormat(&format);
> +	if (ret < 0) {
> +		LOG(Converter, Error)
> +			<< "Failed to set format: " << strerror(-ret);
> +		return {};
> +	}
> +
> +	sizes.max = format.size;
> +
> +	return sizes;
> +}
> +
> +std::tuple<unsigned int, unsigned int>
> +V4L2M2MConverter::strideAndFrameSize(const PixelFormat &pixelFormat,
> +				     const Size &size)
> +{
> +	V4L2DeviceFormat format;
> +	format.fourcc = m2m_->capture()->toV4L2PixelFormat(pixelFormat);
> +	format.size = size;
> +
> +	int ret = m2m_->capture()->tryFormat(&format);
> +	if (ret < 0)
> +		return std::make_tuple(0, 0);
> +
> +	return std::make_tuple(format.planes[0].bpl, format.planes[0].size);
> +}
> +
> +int V4L2M2MConverter::configure(const StreamConfiguration &inputCfg,
> +				const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
> +{
> +	int ret = 0;
> +
> +	streams_.clear();
> +	streams_.reserve(outputCfgs.size());
> +
> +	for (unsigned int i = 0; i < outputCfgs.size(); ++i) {
> +		Stream &stream = streams_.emplace_back(this, i);
> +
> +		if (!stream.isValid()) {
> +			LOG(Converter, Error)
> +				<< "Failed to create stream " << i;
> +			ret = -EINVAL;
> +			break;
> +		}
> +
> +		ret = stream.configure(inputCfg, outputCfgs[i]);
> +		if (ret < 0)
> +			break;
> +	}
> +
> +	if (ret < 0) {
> +		streams_.clear();
> +		return ret;
> +	}
> +
> +	return 0;
> +}
> +
> +int V4L2M2MConverter::exportBuffers(unsigned int output, unsigned int count,
> +				    std::vector<std::unique_ptr<FrameBuffer>> *buffers)
> +{
> +	if (output >= streams_.size())
> +		return -EINVAL;
> +
> +	return streams_[output].exportBuffers(count, buffers);
> +}
> +
> +int V4L2M2MConverter::start()
> +{
> +	int ret;
> +
> +	for (Stream &stream : streams_) {
> +		ret = stream.start();
> +		if (ret < 0) {
> +			stop();
> +			return ret;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +void V4L2M2MConverter::stop()
> +{
> +	for (Stream &stream : utils::reverse(streams_))
> +		stream.stop();
> +}
> +
> +int V4L2M2MConverter::queueBuffers(FrameBuffer *input,
> +				   const std::map<unsigned int, FrameBuffer *> &outputs)
> +{
> +	unsigned int mask = 0;
> +	int ret;
> +
> +	/*
> +	 * Validate the outputs as a sanity check: at least one output is
> +	 * required, all outputs must reference a valid stream and no two
> +	 * outputs can reference the same stream.
> +	 */
> +	if (outputs.empty())
> +		return -EINVAL;
> +
> +	for (auto [index, buffer] : outputs) {
> +		if (!buffer)
> +			return -EINVAL;
> +		if (index >= streams_.size())
> +			return -EINVAL;
> +		if (mask & (1 << index))
> +			return -EINVAL;
> +
> +		mask |= 1 << index;
> +	}
> +
> +	/* Queue the input and output buffers to all the streams. */
> +	for (auto [index, buffer] : outputs) {
> +		ret = streams_[index].queueBuffers(input, buffer);
> +		if (ret < 0)
> +			return ret;
> +	}
> +
> +	/*
> +	 * Add the input buffer to the queue, with the number of streams as a
> +	 * reference count. Completion of the input buffer will be signalled by
> +	 * the stream that releases the last reference.
> +	 */
> +	queue_.emplace(std::piecewise_construct,
> +		       std::forward_as_tuple(input),
> +		       std::forward_as_tuple(outputs.size()));
> +
> +	return 0;
> +}
> +
> +REGISTER_CONVERTER("v4l2_m2m", V4L2M2MConverter, "pxp")
> +
> +} /* namespace libcamera */
> diff --git a/src/libcamera/meson.build b/src/libcamera/meson.build
> index a261d4b4..b12c8401 100644
> --- a/src/libcamera/meson.build
> +++ b/src/libcamera/meson.build
> @@ -14,6 +14,7 @@ libcamera_sources = files([
>      'control_serializer.cpp',
>      'control_validator.cpp',
>      'converter.cpp',
> +    'converter_v4l2_m2m.cpp',
>      'delayed_controls.cpp',
>      'device_enumerator.cpp',
>      'device_enumerator_sysfs.cpp',
> --
> 2.37.3
>

