[libcamera-devel] [PATCH v4] android: libyuv: Introduce PostProcessorYuv

Jacopo Mondi jacopo at jmondi.org
Thu Feb 4 10:43:32 CET 2021


Hi Hiro,

On Thu, Feb 04, 2021 at 05:55:38PM +0900, Hirokazu Honda wrote:
> On Thu, Feb 4, 2021 at 4:58 PM Hirokazu Honda <hiroh at chromium.org> wrote:
> >
> > This adds PostProcessorYuv. It supports NV12 buffer scaling
> > using libyuv.
> >
> > Signed-off-by: Hirokazu Honda <hiroh at chromium.org>
> > ---
> >  src/android/meson.build                |   1 +
> >  src/android/yuv/post_processor_yuv.cpp | 136 +++++++++++++++++++++++++
> >  src/android/yuv/post_processor_yuv.h   |  42 ++++++++
> >  3 files changed, 179 insertions(+)
> >  create mode 100644 src/android/yuv/post_processor_yuv.cpp
> >  create mode 100644 src/android/yuv/post_processor_yuv.h
> >
> > diff --git a/src/android/meson.build b/src/android/meson.build
> > index 95d0f420..50481eeb 100644
> > --- a/src/android/meson.build
> > +++ b/src/android/meson.build
> > @@ -49,6 +49,7 @@ android_hal_sources = files([
> >      'jpeg/exif.cpp',
> >      'jpeg/post_processor_jpeg.cpp',
> >      'jpeg/thumbnailer.cpp',
> > +    'yuv/post_processor_yuv.cpp'
> >  ])
> >
> >  android_camera_metadata_sources = files([
> > diff --git a/src/android/yuv/post_processor_yuv.cpp b/src/android/yuv/post_processor_yuv.cpp
> > new file mode 100644
> > index 00000000..fc31c0ca
> > --- /dev/null
> > +++ b/src/android/yuv/post_processor_yuv.cpp
> > @@ -0,0 +1,136 @@
> > +/* SPDX-License-Identifier: LGPL-2.1-or-later */
> > +/*
> > + * Copyright (C) 2021, Google Inc.
> > + *
> > + * post_processor_yuv.cpp - Post Processor using libyuv
> > + */
> > +
> > +#include "post_processor_yuv.h"
> > +
> > +#include <libyuv/scale.h>
> > +
> > +#include <libcamera/formats.h>
> > +#include <libcamera/geometry.h>
> > +#include <libcamera/internal/formats.h>
> > +#include <libcamera/internal/log.h>
> > +#include <libcamera/pixel_format.h>
> > +
> > +using namespace libcamera;
> > +
> > +LOG_DEFINE_CATEGORY(YUV)
> > +
> > +int PostProcessorYuv::configure(const StreamConfiguration &inCfg,
> > +                               const StreamConfiguration &outCfg)
> > +{
> > +       if (inCfg.pixelFormat != outCfg.pixelFormat) {
> > +               LOG(YUV, Error) << "Pixel format conversion is not supported"
> > +                               << " (from " << inCfg.toString()
> > +                               << " to " << outCfg.toString() << ")";
> > +               return -EINVAL;
> > +       }
> > +
> > +       if (inCfg.size < outCfg.size) {
> > +               LOG(YUV, Error) << "Up-scaling is not supported"
> > +                               << " (from " << inCfg.toString()
> > +                               << " to " << outCfg.toString() << ")";
> > +               return -EINVAL;
> > +       }
> > +
> > +       if (inCfg.pixelFormat != formats::NV12) {
> > +               LOG(YUV, Error) << "Unsupported format " << inCfg.pixelFormat
> > +                               << " (only NV12 is supported)";
> > +               return -EINVAL;
> > +       }
> > +
> > +       calculateLengths(inCfg, outCfg);
> > +       return 0;
> > +}
> > +
> > +int PostProcessorYuv::process(const FrameBuffer &source,
> > +                             libcamera::MappedBuffer *destination,
> > +                             [[maybe_unused]] const CameraMetadata &requestMetadata,
> > +                             [[maybe_unused]] CameraMetadata *metadata)
> > +{
> > +       if (!isValidBuffers(source, *destination))
> > +               return -EINVAL;
> > +
> > +       const MappedFrameBuffer sourceMapped(&source, PROT_READ);
> > +       if (!sourceMapped.isValid()) {
> > +               LOG(YUV, Error) << "Failed to mmap camera frame buffer";
> > +               return -EINVAL;
> > +       }
> > +
> > +       return 0 == libyuv::NV12Scale(sourceMapped.maps()[0].data(),
> > +                                     sourceStride_[0],
> > +                                     sourceMapped.maps()[1].data(),
> > +                                     sourceStride_[1],
> > +                                     sourceSize_.width, sourceSize_.height,
> > +                                     destination->maps()[0].data(),
> > +                                     destinationStride_[0],
> > +                                     destination->maps()[1].data(),
> > +                                     destinationStride_[1],
> > +                                     destinationSize_.width,
> > +                                     destinationSize_.height,
> > +                                     libyuv::FilterMode::kFilterBilinear);
> > +}
>
> Self-review: We have to modify this to return 0 on success.
>
>        int ret = libyuv::NV12Scale(sourceMapped.maps()[0].data(),
>                                    sourceStride_[0],
>                                    sourceMapped.maps()[1].data(),
>                                    sourceStride_[1],
>                                    sourceSize_.width, sourceSize_.height,
>                                    destination->maps()[0].data(),
>                                    destinationStride_[0],
>                                    destination->maps()[1].data(),
>                                    destinationStride_[1],
>                                    destinationSize_.width,
>                                    destinationSize_.height,
>                                    libyuv::FilterMode::kFilterBilinear);
>        if (ret) {
>                LOG(YUV, Error) << "Failed scaling";

Might be useful to print the error code as well?

>                return -EINVAL;
>        }
>        return 0;
>
> I will apply this change in the next version.
>
> -Hiro
>
> > +
> > +bool PostProcessorYuv::isValidBuffers(const FrameBuffer &source,
> > +                                     const libcamera::MappedBuffer &destination) const
> > +{
> > +       if (source.planes().size() != 2u) {
> > +               LOG(YUV, Error) << "Invalid number of source planes: "
> > +                               << source.planes().size();
> > +               return false;
> > +       }
> > +       if (destination.maps().size() != 2u) {
> > +               LOG(YUV, Error) << "Invalid number of destination planes: "
> > +                               << destination.maps().size();
> > +               return false;
> > +       }
> > +
> > +       if (source.planes()[0].length < sourceLength_[0] ||
> > +           source.planes()[1].length < sourceLength_[1]) {
> > +               LOG(YUV, Error) << "The source planes lengths are too small"
> > +                               << ", actual size: {"
> > +                               << source.planes()[0].length << ", "
> > +                               << source.planes()[1].length << "}"
> > +                               << ", expected size: {"
> > +                               << sourceLength_[0] << ", "
> > +                               << sourceLength_[1] << "}";
> > +               return false;
> > +       }
> > +       if (destination.maps()[0].size() < destinationLength_[0] ||
> > +           destination.maps()[1].size() < destinationLength_[1]) {
> > +               LOG(YUV, Error)
> > +                       << "The destination planes lengths are too small"
> > +                       << ", actual size: {" << destination.maps()[0].size()
> > +                       << ", " << destination.maps()[1].size() << "}"
> > +                       << ", expected size: {" << sourceLength_[0] << ", "
> > +                       << sourceLength_[1] << "}";

These are very long lines. It might be just me but I would break this
in three lines, to make it easier to consume. Up to you :)

> > +               return false;
> > +       }
> > +
> > +       return true;
> > +}
> > +
> > +void PostProcessorYuv::calculateLengths(const StreamConfiguration &inCfg,
> > +                                       const StreamConfiguration &outCfg)
> > +{
> > +       sourceSize_ = inCfg.size;
> > +       destinationSize_ = outCfg.size;
> > +
> > +       const PixelFormatInfo &nv12Info = PixelFormatInfo::info(formats::NV12);

Do we have to assume NV12 or can we use the inCfg.pixelFormat (or outCfg's one
as format has to be the same in input and output buffers).

> > +       for (unsigned int i = 0; i < 2; i++) {
> > +               const unsigned int vertSubSample =
> > +                       nv12Info.planes[i].verticalSubSampling;
> > +               sourceLength_[i] =
> > +                       nv12Info.stride(sourceSize_.width, i, 1) *
> > +                       ((sourceSize_.height + vertSubSample - 1) / vertSubSample);
> > +               destinationLength_[i] =
> > +                       nv12Info.stride(destinationSize_.width, i, 1) *
> > +                       ((destinationSize_.height + vertSubSample - 1) / vertSubSample);
> > +
> > +               sourceStride_[i] = nv12Info.stride(sourceSize_.width, i, 1);
> > +               destinationStride_[i] = nv12Info.stride(destinationSize_.width, i, 1);

I would assign strides first, then use them for the lengths
calculation. A minor thing though

> > +       }
> > +}
> > diff --git a/src/android/yuv/post_processor_yuv.h b/src/android/yuv/post_processor_yuv.h
> > new file mode 100644
> > index 00000000..9406e274
> > --- /dev/null
> > +++ b/src/android/yuv/post_processor_yuv.h
> > @@ -0,0 +1,42 @@
> > +/* SPDX-License-Identifier: LGPL-2.1-or-later */
> > +/*
> > + * Copyright (C) 2021, Google Inc.
> > + *
> > + * post_processor_yuv.h - Post Processor using libyuv
> > + */
> > +#ifndef __ANDROID_POST_PROCESSOR_YUV_H__
> > +#define __ANDROID_POST_PROCESSOR_YUV_H__
> > +
> > +#include <libcamera/geometry.h>
> > +
> > +#include "../post_processor.h"

I would move this to be the first include.

All minors, the patch looks good
Reviewed-by: Jacopo Mondi <jacopo at jmondi.org>

Thanks
  j

> > +
> > +class CameraDevice;
> > +
> > +class PostProcessorYuv : public PostProcessor
> > +{
> > +public:
> > +       PostProcessorYuv() = default;
> > +
> > +       int configure(const libcamera::StreamConfiguration &incfg,
> > +                     const libcamera::StreamConfiguration &outcfg) override;
> > +       int process(const libcamera::FrameBuffer &source,
> > +                   libcamera::MappedBuffer *destination,
> > +                   const CameraMetadata &requestMetadata,
> > +                   CameraMetadata *metadata) override;
> > +
> > +private:
> > +       bool isValidBuffers(const libcamera::FrameBuffer &source,
> > +                           const libcamera::MappedBuffer &destination) const;
> > +       void calculateLengths(const libcamera::StreamConfiguration &inCfg,
> > +                             const libcamera::StreamConfiguration &outCfg);
> > +
> > +       libcamera::Size sourceSize_;
> > +       libcamera::Size destinationSize_;
> > +       unsigned int sourceLength_[2] = {};
> > +       unsigned int destinationLength_[2] = {};
> > +       unsigned int sourceStride_[2] = {};
> > +       unsigned int destinationStride_[2] = {};
> > +};
> > +
> > +#endif /* __ANDROID_POST_PROCESSOR_YUV_H__ */
> > --
> > 2.30.0.365.g02bc693789-goog
> _______________________________________________
> libcamera-devel mailing list
> libcamera-devel at lists.libcamera.org
> https://lists.libcamera.org/listinfo/libcamera-devel


More information about the libcamera-devel mailing list