[libcamera-devel] [PATCH v5] android: libyuv: Introduce PostProcessorYuv

Laurent Pinchart laurent.pinchart at ideasonboard.com
Thu Feb 18 08:20:00 CET 2021


Hi Hiro,

Thank you for the patch.

On Thu, Feb 04, 2021 at 10:04:20PM +0000, Hirokazu Honda wrote:
> This adds PostProcessorYuv. It supports NV12 buffer scaling
> using libyuv.
> 
> Signed-off-by: Hirokazu Honda <hiroh at chromium.org>
> Reviewed-by: Jacopo Mondi <jacopo at jmondi.org>
> 
> ---
>  src/android/meson.build                |   1 +
>  src/android/yuv/post_processor_yuv.cpp | 145 +++++++++++++++++++++++++
>  src/android/yuv/post_processor_yuv.h   |  42 +++++++
>  3 files changed, 188 insertions(+)
>  create mode 100644 src/android/yuv/post_processor_yuv.cpp
>  create mode 100644 src/android/yuv/post_processor_yuv.h
> 
> diff --git a/src/android/meson.build b/src/android/meson.build
> index 95d0f420..50481eeb 100644
> --- a/src/android/meson.build
> +++ b/src/android/meson.build
> @@ -49,6 +49,7 @@ android_hal_sources = files([
>      'jpeg/exif.cpp',
>      'jpeg/post_processor_jpeg.cpp',
>      'jpeg/thumbnailer.cpp',
> +    'yuv/post_processor_yuv.cpp'
>  ])
> 
>  android_camera_metadata_sources = files([
> diff --git a/src/android/yuv/post_processor_yuv.cpp b/src/android/yuv/post_processor_yuv.cpp
> new file mode 100644
> index 00000000..aecb921f
> --- /dev/null
> +++ b/src/android/yuv/post_processor_yuv.cpp
> @@ -0,0 +1,145 @@
> +/* SPDX-License-Identifier: LGPL-2.1-or-later */
> +/*
> + * Copyright (C) 2021, Google Inc.
> + *
> + * post_processor_yuv.cpp - Post Processor using libyuv
> + */
> +
> +#include "post_processor_yuv.h"
> +
> +#include <libyuv/scale.h>
> +
> +#include <libcamera/formats.h>
> +#include <libcamera/geometry.h>
> +#include <libcamera/internal/formats.h>
> +#include <libcamera/internal/log.h>

Internal headers should use "", not <>, and be included after public
headers.

Bonus points to anyone who adds a check for this to checkstyle.py ;-)

> +#include <libcamera/pixel_format.h>
> +
> +using namespace libcamera;
> +
> +LOG_DEFINE_CATEGORY(YUV)
> +
> +int PostProcessorYuv::configure(const StreamConfiguration &inCfg,
> +				const StreamConfiguration &outCfg)
> +{
> +	if (inCfg.pixelFormat != outCfg.pixelFormat) {
> +		LOG(YUV, Error) << "Pixel format conversion is not supported"
> +				<< " (from " << inCfg.toString()
> +				<< " to " << outCfg.toString() << ")";

Would it make sense to print inCfg.pixelFormat and outCfg.pixelFormat
instead of the full inCfg and outCfg ?

> +		return -EINVAL;
> +	}
> +
> +	if (inCfg.size < outCfg.size) {
> +		LOG(YUV, Error) << "Up-scaling is not supported"
> +				<< " (from " << inCfg.toString()
> +				<< " to " << outCfg.toString() << ")";

Same here with .size ?

> +		return -EINVAL;
> +	}
> +
> +	if (inCfg.pixelFormat != formats::NV12) {
> +		LOG(YUV, Error) << "Unsupported format " << inCfg.pixelFormat
> +				<< " (only NV12 is supported)";
> +		return -EINVAL;
> +	}
> +
> +	calculateLengths(inCfg, outCfg);
> +	return 0;
> +}
> +
> +int PostProcessorYuv::process(const FrameBuffer &source,
> +			      libcamera::MappedBuffer *destination,
> +			      [[maybe_unused]] const CameraMetadata &requestMetadata,
> +			      [[maybe_unused]] CameraMetadata *metadata)
> +{
> +	if (!isValidBuffers(source, *destination))
> +		return -EINVAL;
> +
> +	const MappedFrameBuffer sourceMapped(&source, PROT_READ);
> +	if (!sourceMapped.isValid()) {
> +		LOG(YUV, Error) << "Failed to mmap camera frame buffer";
> +		return -EINVAL;
> +	}
> +
> +	int ret = libyuv::NV12Scale(sourceMapped.maps()[0].data(),
> +				    sourceStride_[0],
> +				    sourceMapped.maps()[1].data(),
> +				    sourceStride_[1],
> +				    sourceSize_.width, sourceSize_.height,
> +				    destination->maps()[0].data(),
> +				    destinationStride_[0],
> +				    destination->maps()[1].data(),
> +				    destinationStride_[1],
> +				    destinationSize_.width,
> +				    destinationSize_.height,
> +				    libyuv::FilterMode::kFilterBilinear);
> +	if (ret) {
> +		LOG(YUV, Error) << "Failed NV12 scaling: " << ret;
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +bool PostProcessorYuv::isValidBuffers(const FrameBuffer &source,
> +				      const libcamera::MappedBuffer &destination) const
> +{
> +	if (source.planes().size() != 2u) {

Is the u suffix needed ? Same below.

> +		LOG(YUV, Error) << "Invalid number of source planes: "
> +				<< source.planes().size();
> +		return false;
> +	}
> +	if (destination.maps().size() != 2u) {
> +		LOG(YUV, Error) << "Invalid number of destination planes: "
> +				<< destination.maps().size();
> +		return false;
> +	}
> +
> +	if (source.planes()[0].length < sourceLength_[0] ||
> +	    source.planes()[1].length < sourceLength_[1]) {
> +		LOG(YUV, Error) << "The source planes lengths are too small"
> +				<< ", actual size: {"
> +				<< source.planes()[0].length << ", "
> +				<< source.planes()[1].length << "}"
> +				<< ", expected size: {"

Tiny optimization, you can merge the "}" from the previous line with
this string. Same below.

> +				<< sourceLength_[0] << ", "
> +				<< sourceLength_[1] << "}";
> +		return false;
> +	}
> +	if (destination.maps()[0].size() < destinationLength_[0] ||
> +	    destination.maps()[1].size() < destinationLength_[1]) {
> +		LOG(YUV, Error)
> +			<< "The destination planes lengths are too small"
> +			<< ", actual size: {" << destination.maps()[0].size()
> +			<< ", " << destination.maps()[1].size() << "}"
> +			<< ", expected size: {" << sourceLength_[0] << ", "
> +			<< sourceLength_[1] << "}";
> +		return false;
> +	}
> +
> +	return true;
> +}
> +
> +void PostProcessorYuv::calculateLengths(const StreamConfiguration &inCfg,
> +					const StreamConfiguration &outCfg)
> +{
> +	ASSERT(inCfg.pixelFormat == formats::NV12);
> +	ASSERT(outCfg.pixelFormat == formats::NV12);

Do we need this, given that the caller checks that both formats are NV12
?

> +
> +	sourceSize_ = inCfg.size;
> +	destinationSize_ = outCfg.size;
> +
> +	const PixelFormatInfo &nv12Info = PixelFormatInfo::info(formats::NV12);
> +	for (unsigned int i = 0; i < 2; i++) {
> +		sourceStride_[i] = nv12Info.stride(sourceSize_.width, i, 1);

Shouldn't this use inCfg.stride instead of calculating it ? We can't
control the stride of the input; it may depend on hardware constraints.

> +		destinationStride_[i] = nv12Info.stride(destinationSize_.width, i, 1);
> +
> +		const unsigned int vertSubSample =
> +			nv12Info.planes[i].verticalSubSampling;
> +		sourceLength_[i] =
> +			nv12Info.stride(sourceSize_.width, i, 1) *

This should use sourceStride_[i] instead of recalculating it.

> +			((sourceSize_.height + vertSubSample - 1) / vertSubSample);
> +		destinationLength_[i] =
> +			nv12Info.stride(destinationSize_.width, i, 1) *

Same here, destinationStride_[i].

> +			((destinationSize_.height + vertSubSample - 1) / vertSubSample);

It feels like a few more helper functions in the PixelFormatInfo could
be useful, as well as maybe a utils::div_round_up() (or similar). Not a
requirement for this patch, of course.

With these small issues addressed,

Reviewed-by: Laurent Pinchart <laurent.pinchart at ideasonboard.com>

I can apply those changes when pushing the patch, there's no need to
resubmit. Could you let me know if you disagree with some of the
comments ?

> +	}
> +}
> diff --git a/src/android/yuv/post_processor_yuv.h b/src/android/yuv/post_processor_yuv.h
> new file mode 100644
> index 00000000..c58b4cf7
> --- /dev/null
> +++ b/src/android/yuv/post_processor_yuv.h
> @@ -0,0 +1,42 @@
> +/* SPDX-License-Identifier: LGPL-2.1-or-later */
> +/*
> + * Copyright (C) 2021, Google Inc.
> + *
> + * post_processor_yuv.h - Post Processor using libyuv
> + */
> +#ifndef __ANDROID_POST_PROCESSOR_YUV_H__
> +#define __ANDROID_POST_PROCESSOR_YUV_H__
> +
> +#include "../post_processor.h"
> +
> +#include <libcamera/geometry.h>
> +
> +class CameraDevice;
> +
> +class PostProcessorYuv : public PostProcessor
> +{
> +public:
> +	PostProcessorYuv() = default;
> +
> +	int configure(const libcamera::StreamConfiguration &incfg,
> +		      const libcamera::StreamConfiguration &outcfg) override;
> +	int process(const libcamera::FrameBuffer &source,
> +		    libcamera::MappedBuffer *destination,
> +		    const CameraMetadata &requestMetadata,
> +		    CameraMetadata *metadata) override;
> +
> +private:
> +	bool isValidBuffers(const libcamera::FrameBuffer &source,
> +			    const libcamera::MappedBuffer &destination) const;
> +	void calculateLengths(const libcamera::StreamConfiguration &inCfg,
> +			      const libcamera::StreamConfiguration &outCfg);
> +
> +	libcamera::Size sourceSize_;
> +	libcamera::Size destinationSize_;
> +	unsigned int sourceLength_[2] = {};
> +	unsigned int destinationLength_[2] = {};
> +	unsigned int sourceStride_[2] = {};
> +	unsigned int destinationStride_[2] = {};
> +};
> +
> +#endif /* __ANDROID_POST_PROCESSOR_YUV_H__ */

-- 
Regards,

Laurent Pinchart


More information about the libcamera-devel mailing list