[libcamera-devel] [PATCH v5 3/4] qcam: add viewfinderGL class to accelerate the format convert
Laurent Pinchart
laurent.pinchart at ideasonboard.com
Sun Sep 6 18:18:07 CEST 2020
Hi Show,
Thank you for the patch.
On Fri, Sep 04, 2020 at 04:43:15PM +0800, Show Liu wrote:
> the viewfinderGL accelerates the format conversion by
> using OpenGL ES shader
>
> Signed-off-by: Show Liu <show.liu at linaro.org>
> ---
> src/qcam/meson.build | 2 +
> src/qcam/viewfinder_gl.cpp | 441 +++++++++++++++++++++++++++++++++++++
> src/qcam/viewfinder_gl.h | 97 ++++++++
> 3 files changed, 540 insertions(+)
> create mode 100644 src/qcam/viewfinder_gl.cpp
> create mode 100644 src/qcam/viewfinder_gl.h
>
> diff --git a/src/qcam/meson.build b/src/qcam/meson.build
> index a4bad0a..32c0fc3 100644
> --- a/src/qcam/meson.build
> +++ b/src/qcam/meson.build
> @@ -7,11 +7,13 @@ qcam_sources = files([
> 'main.cpp',
> 'main_window.cpp',
> 'viewfinder_qt.cpp',
> + 'viewfinder_gl.cpp',
Let's keep files alphabetically sorted.
> ])
>
> qcam_moc_headers = files([
> 'main_window.h',
> 'viewfinder_qt.h',
> + 'viewfinder_gl.h',
Here too.
> ])
>
> qcam_resources = files([
You need to set the minimum Qt version to 5.4, as QOpenGLWidget wasn't
available before that.
diff --git a/src/qcam/meson.build b/src/qcam/meson.build
index 32c0fc3e0f6b..9d3f189a896b 100644
--- a/src/qcam/meson.build
+++ b/src/qcam/meson.build
@@ -25,7 +25,8 @@ qt5 = import('qt5')
qt5_dep = dependency('qt5',
method : 'pkg-config',
modules : ['Core', 'Gui', 'Widgets'],
- required : get_option('qcam'))
+ required : get_option('qcam'),
+ version : '>=5.4')
if qt5_dep.found()
qcam_deps = [
Furthermore, Qt can be compiled without OpenGL support, in which case
this patch will fail to compile. The following change should address it.
diff --git a/meson.build b/meson.build
index b6c99ba8e0eb..5f7d619a79d0 100644
--- a/meson.build
+++ b/meson.build
@@ -26,6 +26,7 @@ libcamera_version = libcamera_git_version.split('+')[0]
# Configure the build environment.
cc = meson.get_compiler('c')
+cxx = meson.get_compiler('cpp')
config_h = configuration_data()
if cc.has_header_symbol('execinfo.h', 'backtrace')
diff --git a/src/qcam/meson.build b/src/qcam/meson.build
index 32c0fc3e0f6b..9bb48c0d06c5 100644
--- a/src/qcam/meson.build
+++ b/src/qcam/meson.build
@@ -7,18 +7,15 @@ qcam_sources = files([
'main.cpp',
'main_window.cpp',
'viewfinder_qt.cpp',
- 'viewfinder_gl.cpp',
])
qcam_moc_headers = files([
'main_window.h',
'viewfinder_qt.h',
- 'viewfinder_gl.h',
])
qcam_resources = files([
'assets/feathericons/feathericons.qrc',
- 'assets/shader/shaders.qrc'
])
qt5 = import('qt5')
@@ -44,6 +42,19 @@ if qt5_dep.found()
])
endif
+ if cxx.has_header_symbol('QOpenGLWidget', 'QOpenGLWidget',
+ dependencies : qt5_dep, args : '-fPIC')
+ qcam_sources += files([
+ 'viewfinder_gl.cpp',
+ ])
+ qcam_moc_headers += files([
+ 'viewfinder_gl.h',
+ ])
+ qcam_resources += files([
+ 'assets/shader/shaders.qrc'
+ ])
+ endif
+
# gcc 9 introduced a deprecated-copy warning that is triggered by Qt until
# Qt 5.13. clang 10 introduced the same warning, but detects more issues
# that are not fixed in Qt yet. Disable the warning manually in both cases.
Patch 4/4 will need to be updated with conditional compilation on
QT_NO_OPENGL.
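Something along these lines perhaps (a rough sketch only, the exact
location, the viewfinder_ member and the ViewFinderQt fallback are
assumptions about how patch 4/4 selects the viewfinder):

	/*
	 * Sketch only: fall back to the software-rendered viewfinder when
	 * Qt is built without OpenGL support.
	 */
#ifndef QT_NO_OPENGL
	viewfinder_ = new ViewFinderGL(this);
#else
	viewfinder_ = new ViewFinderQt(this);
#endif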
> diff --git a/src/qcam/viewfinder_gl.cpp b/src/qcam/viewfinder_gl.cpp
> new file mode 100644
> index 0000000..5591916
> --- /dev/null
> +++ b/src/qcam/viewfinder_gl.cpp
> @@ -0,0 +1,441 @@
> +/* SPDX-License-Identifier: GPL-2.0-or-later */
> +/*
> + * Copyright (C) 2020, Linaro
> + *
> + * viewfinderGL.cpp - Render YUV format frame by OpenGL shader
> + */
> +
> +#include "viewfinder_gl.h"
> +
> +#include <QImage>
> +
> +#include <libcamera/formats.h>
> +
> +#define ATTRIB_VERTEX 0
> +#define ATTRIB_TEXTURE 1
Is there a guarantee that attribute locations match the declaration
order in the shader program ? Wouldn't it be better to use
shaderProgram.bindAttributeLocation() to set the attribute locations
explicitly by name (before linking), or shaderProgram.attributeLocation()
to retrieve them (after linking) ?
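For instance (a sketch only, the attribute names "vertexIn" and
"textureIn" are assumptions, use whatever NV_vertex_shader.glsl actually
declares):

	/*
	 * Sketch: bind the attribute locations explicitly before linking
	 * so the hard-coded indices are guaranteed to match the shader.
	 */
	shaderProgram_.bindAttributeLocation("vertexIn", ATTRIB_VERTEX);
	shaderProgram_.bindAttributeLocation("textureIn", ATTRIB_TEXTURE);
	shaderProgram_.link();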
> +
> +static const QList<libcamera::PixelFormat> supportFormats{
s/supportFormats/supportedFormats/
> + libcamera::formats::NV12,
> + libcamera::formats::NV21,
> + libcamera::formats::NV16,
> + libcamera::formats::NV61,
> + libcamera::formats::NV24,
> + libcamera::formats::NV42,
> + libcamera::formats::YUV420,
> + libcamera::formats::YVU420
> +};
> +
> +ViewFinderGL::ViewFinderGL(QWidget *parent)
> + : QOpenGLWidget(parent),
> + buffer_(nullptr),
> + pFShader_(nullptr),
> + pVShader_(nullptr),
> + vbuf_(QOpenGLBuffer::VertexBuffer),
> + yuvDataPtr_(nullptr),
> + textureU_(QOpenGLTexture::Target2D),
> + textureV_(QOpenGLTexture::Target2D),
> + textureY_(QOpenGLTexture::Target2D)
Feel free to have multiple members per line if desired.
> +{
> +}
> +
> +ViewFinderGL::~ViewFinderGL()
> +{
> + removeShader();
> +
> + if (textureY_.isCreated())
> + textureY_.destroy();
> +
> + if (textureU_.isCreated())
> + textureU_.destroy();
> +
> + if (textureV_.isCreated())
> + textureV_.destroy();
Are these needed, or does the QOpenGLTexture destructor destroy the
textures ?
> +
> + vbuf_.destroy();
Same question for vbuf_.
> +}
> +
> +const QList<libcamera::PixelFormat> &ViewFinderGL::nativeFormats() const
> +{
> + return (::supportFormats);
No need for parentheses or an explicit namespace.
return supportedFormats;
> +}
> +
> +int ViewFinderGL::setFormat(const libcamera::PixelFormat &format,
> + const QSize &size)
> +{
> + int ret = 0;
> +
> + if (isFormatSupport(format)) {
> + format_ = format;
> + size_ = size;
> + } else {
> + ret = -1;
> + }
> + updateGeometry();
When the format changes, don't you need to recreate the shaders ?
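For instance (a rough sketch only), setFormat() could drop the fragment
shader compiled for the previous format, so that paintGL() recreates it
from the newly selected fsrc_ on the next paint:

	/*
	 * Sketch: discard the fragment shader compiled for the previous
	 * format; paintGL() will compile the new fsrc_ next time around.
	 */
	if (pFShader_) {
		shaderProgram_.removeShader(pFShader_);
		delete pFShader_;
		pFShader_ = nullptr;
	}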
> + return ret;
> +}
> +
> +void ViewFinderGL::stop()
> +{
> + if (buffer_) {
> + renderComplete(buffer_);
> + buffer_ = nullptr;
> + }
> +}
> +
> +QImage ViewFinderGL::getCurrentImage()
> +{
> + QMutexLocker locker(&mutex_);
> +
> + return (grabFramebuffer());
return grabFramebuffer();
> +}
> +
> +void ViewFinderGL::render(libcamera::FrameBuffer *buffer, MappedBuffer *map)
> +{
> + if (buffer->planes().size() != 1) {
> + qWarning() << "Multi-planar buffers are not supported";
> + return;
> + }
> +
> + if (buffer_)
> + renderComplete(buffer_);
> +
> + unsigned char *memory = static_cast<unsigned char *>(map->memory);
> + if (memory) {
Can memory be null ?
> + yuvDataPtr_ = memory;
> + update();
> + buffer_ = buffer;
> + }
> +}
> +
> +bool ViewFinderGL::isFormatSupport(const libcamera::PixelFormat &format)
> +{
As this function sets internal members based on the format, I would call
it selectFormat(), as it does more than just check whether the format is
supported.
> + bool ret = true;
> + switch (format) {
> + case libcamera::formats::NV12:
> + horzSubSample_ = 2;
> + vertSubSample_ = 2;
> + vsrc_ = ":NV_vertex_shader.glsl";
> + fsrc_ = ":NV_2_planes_UV_f.glsl";
> + break;
> + case libcamera::formats::NV21:
> + horzSubSample_ = 2;
> + vertSubSample_ = 2;
> + vsrc_ = ":NV_vertex_shader.glsl";
> + fsrc_ = ":NV_2_planes_VU_f.glsl";
> + break;
> + case libcamera::formats::NV16:
> + horzSubSample_ = 2;
> + vertSubSample_ = 1;
> + vsrc_ = ":NV_vertex_shader.glsl";
> + fsrc_ = ":NV_2_planes_UV_f.glsl";
> + break;
> + case libcamera::formats::NV61:
> + horzSubSample_ = 2;
> + vertSubSample_ = 1;
> + vsrc_ = ":NV_vertex_shader.glsl";
> + fsrc_ = ":NV_2_planes_VU_f.glsl";
> + break;
> + case libcamera::formats::NV24:
> + horzSubSample_ = 1;
> + vertSubSample_ = 1;
> + vsrc_ = ":NV_vertex_shader.glsl";
> + fsrc_ = ":NV_2_planes_UV_f.glsl";
> + break;
> + case libcamera::formats::NV42:
> + horzSubSample_ = 1;
> + vertSubSample_ = 1;
> + vsrc_ = ":NV_vertex_shader.glsl";
> + fsrc_ = ":NV_2_planes_VU_f.glsl";
> + break;
> + case libcamera::formats::YUV420:
> + horzSubSample_ = 2;
> + vertSubSample_ = 2;
> + vsrc_ = ":NV_vertex_shader.glsl";
> + fsrc_ = ":NV_3_planes_UV_f.glsl";
> + break;
> + case libcamera::formats::YVU420:
> + horzSubSample_ = 2;
> + vertSubSample_ = 2;
> + vsrc_ = ":NV_vertex_shader.glsl";
> + fsrc_ = ":NV_3_planes_VU_f.glsl";
> + break;
> + default:
> + ret = false;
> + qWarning() << "[ViewFinderGL]:"
> + << "format not support yet.";
s/support yet./supported/
> + break;
> + };
> +
> + return ret;
> +}
> +
> +void ViewFinderGL::createVertexShader()
> +{
> + bool bCompile;
No need to prefix variables with the type name.
> + /* Create Vertex Shader */
> + pVShader_ = new QOpenGLShader(QOpenGLShader::Vertex, this);
> +
> + bCompile = pVShader_->compileSourceFile(vsrc_);
> + if (!bCompile) {
> + qWarning() << "[ViewFinderGL]:" << pVShader_->log();
> + }
This can simply be written
if (!pVShader_->compileSourceFile(vsrc_))
qWarning() << "[ViewFinderGL]:" << pVShader_->log();
> +
> + shaderProgram_.addShader(pVShader_);
Won't this crash if shader compilation failed ? I think
createVertexShader() should return a status as a bool.
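Something along these lines (sketch):

	bool ViewFinderGL::createVertexShader()
	{
		/* Create the vertex shader and bail out if compilation fails. */
		pVShader_ = new QOpenGLShader(QOpenGLShader::Vertex, this);
		if (!pVShader_->compileSourceFile(vsrc_)) {
			qWarning() << "[ViewFinderGL]:" << pVShader_->log();
			return false;
		}

		shaderProgram_.addShader(pVShader_);
		return true;
	}

The caller in initializeGL() would then have to check the return value.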
> +}
> +
> +bool ViewFinderGL::createFragmentShader()
> +{
> + bool bCompile;
> +
> + /* Create Fragment Shader */
> + pFShader_ = new QOpenGLShader(QOpenGLShader::Fragment, this);
> +
> + bCompile = pFShader_->compileSourceFile(fsrc_);
> + if (!bCompile) {
if (!pFShader_->compileSourceFile(fsrc_)) {
> + qWarning() << "[ViewFinderGL]:" << pFShader_->log();
> + return bCompile;
return false;
> + }
> +
> + shaderProgram_.addShader(pFShader_);
> +
> + /* Link shader pipeline */
> + if (!shaderProgram_.link()) {
> + qWarning() << "[ViewFinderGL]:" << shaderProgram_.log();
> + close();
> + }
> +
> + /* Bind shader pipeline for use */
> + if (!shaderProgram_.bind()) {
> + qWarning() << "[ViewFinderGL]:" << shaderProgram_.log();
> + close();
> + }
> +
> + shaderProgram_.enableAttributeArray(ATTRIB_VERTEX);
> + shaderProgram_.enableAttributeArray(ATTRIB_TEXTURE);
> +
> + shaderProgram_.setAttributeBuffer(ATTRIB_VERTEX,
> + GL_FLOAT,
> + 0,
> + 2,
> + 2 * sizeof(GLfloat));
> + shaderProgram_.setAttributeBuffer(ATTRIB_TEXTURE,
> + GL_FLOAT,
> + 8 * sizeof(GLfloat),
> + 2,
> + 2 * sizeof(GLfloat));
> +
> + textureUniformY_ = shaderProgram_.uniformLocation("tex_y");
> + textureUniformU_ = shaderProgram_.uniformLocation("tex_u");
> + textureUniformV_ = shaderProgram_.uniformLocation("tex_v");
> +
> + if (!textureY_.isCreated())
> + textureY_.create();
> +
> + if (!textureU_.isCreated())
> + textureU_.create();
> +
> + if (!textureV_.isCreated())
> + textureV_.create();
> +
> + id_y_ = textureY_.textureId();
> + id_u_ = textureU_.textureId();
> + id_v_ = textureV_.textureId();
> + return true;
> +}
> +
> +void ViewFinderGL::configureTexture(unsigned int id)
> +{
> + glBindTexture(GL_TEXTURE_2D, id);
> + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
> + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
> + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
> + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
> +}
> +
> +void ViewFinderGL::removeShader()
> +{
> + if (shaderProgram_.isLinked()) {
> + shaderProgram_.release();
> + shaderProgram_.removeAllShaders();
> + }
> +
> + if (pFShader_)
> + delete pFShader_;
> +
> + if (pVShader_)
> + delete pVShader_;
> +}
> +
> +void ViewFinderGL::initializeGL()
> +{
> + initializeOpenGLFunctions();
> + glEnable(GL_TEXTURE_2D);
> + glDisable(GL_DEPTH_TEST);
> +
> + static const GLfloat vertices[]{
> + -1.0f, -1.0f, -1.0f, +1.0f,
> + +1.0f, +1.0f, +1.0f, -1.0f,
> + 0.0f, 1.0f, 0.0f, 0.0f,
> + 1.0f, 0.0f, 1.0f, 1.0f
> + };
This is vertex and texture coordinates, not just vertices. How about
writing it as follows ?
static const GLfloat coordinates[2][4][2] {
{
/* Vertex coordinates */
{ -1.0f, -1.0f },
{ -1.0f, +1.0f },
{ +1.0f, +1.0f },
{ +1.0f, -1.0f },
}, {
/* Texture coordinates */
{ 0.0f, 1.0f },
{ 0.0f, 0.0f },
{ 1.0f, 0.0f },
{ 1.0f, 1.0f },
},
};
There's something I don't get though, maybe you can help me understand
it. The vertex coordinates are copied directly to gl_Position in the
vertex shader, so they're essentially expressed in clip space, which I
understand has X pointing towards the right and Y pointing towards the
top. The texture coordinates, if my understanding is correct again, have
their origin at the bottom-left corner too. The first vertex, (-1.0,
-1.0), which is at the bottom-left, then maps to texture coordinate
(0.0, 1.0), which is the top-left pixel of the texture. The image should
thus be flipped vertically. Why isn't it ? I'm sure I'm missing
something simple.
> +
> + vbuf_.create();
> + vbuf_.bind();
> + vbuf_.allocate(vertices, sizeof(vertices));
> +
> + /* Create Vertex Shader */
> + createVertexShader();
> +
> + glClearColor(1.0f, 1.0f, 1.0f, 0.0f);
> +}
> +
> +void ViewFinderGL::doRender()
> +{
> + switch (format_) {
> + case libcamera::formats::NV12:
> + case libcamera::formats::NV21:
> + case libcamera::formats::NV16:
> + case libcamera::formats::NV61:
> + case libcamera::formats::NV24:
> + case libcamera::formats::NV42:
> + /* activate texture 0 */
> + glActiveTexture(GL_TEXTURE0);
> + configureTexture(id_y_);
> + glTexImage2D(GL_TEXTURE_2D,
> + 0,
> + GL_RED,
> + size_.width(),
> + size_.height(),
> + 0,
> + GL_RED,
> + GL_UNSIGNED_BYTE,
> + yuvDataPtr_);
> + glUniform1i(textureUniformY_, 0);
Would it make sense to use
shaderProgram_.setUniformValue(textureUniformY_, 0);
(and similarly below) ?
> +
> + /* activate texture 1 */
> + glActiveTexture(GL_TEXTURE1);
> + configureTexture(id_u_);
> + glTexImage2D(GL_TEXTURE_2D,
> + 0,
> + GL_RG,
> + size_.width() / horzSubSample_,
> + size_.height() / vertSubSample_,
> + 0,
> + GL_RG,
> + GL_UNSIGNED_BYTE,
> + (char *)yuvDataPtr_ + size_.width() * size_.height());
> + glUniform1i(textureUniformU_, 1);
> + break;
A blank line here would increase readability. Same below.
> + case libcamera::formats::YUV420:
> + /* activate texture 0 */
> + glActiveTexture(GL_TEXTURE0);
> + configureTexture(id_y_);
> + glTexImage2D(GL_TEXTURE_2D,
> + 0,
> + GL_RED,
> + size_.width(),
> + size_.height(),
> + 0,
> + GL_RED,
> + GL_UNSIGNED_BYTE,
> + yuvDataPtr_);
> + glUniform1i(textureUniformY_, 0);
> +
> + /* activate texture 1 */
> + glActiveTexture(GL_TEXTURE1);
> + configureTexture(id_u_);
> + glTexImage2D(GL_TEXTURE_2D,
> + 0,
> + GL_RED,
> + size_.width() / horzSubSample_,
> + size_.height() / vertSubSample_,
> + 0,
> + GL_RED,
> + GL_UNSIGNED_BYTE,
> + (char *)yuvDataPtr_ + size_.width() * size_.height());
> + glUniform1i(textureUniformU_, 1);
> +
> + /* activate texture 2 */
> + glActiveTexture(GL_TEXTURE2);
> + configureTexture(id_v_);
> + glTexImage2D(GL_TEXTURE_2D,
> + 0,
> + GL_RED,
> + size_.width() / horzSubSample_,
> + size_.height() / vertSubSample_,
> + 0,
> + GL_RED,
> + GL_UNSIGNED_BYTE,
> + (char *)yuvDataPtr_ + size_.width() * size_.height() * 5 / 4);
> + glUniform1i(textureUniformV_, 2);
> + break;
> + case libcamera::formats::YVU420:
> + /* activate texture 0 */
> + glActiveTexture(GL_TEXTURE0);
> + configureTexture(id_y_);
> + glTexImage2D(GL_TEXTURE_2D,
> + 0,
> + GL_RED,
> + size_.width(),
> + size_.height(),
> + 0,
> + GL_RED,
> + GL_UNSIGNED_BYTE,
> + yuvDataPtr_);
> + glUniform1i(textureUniformY_, 0);
> +
> + /* activate texture 1 */
> + glActiveTexture(GL_TEXTURE2);
> + configureTexture(id_v_);
> + glTexImage2D(GL_TEXTURE_2D,
> + 0,
> + GL_RED,
> + size_.width() / horzSubSample_,
> + size_.height() / vertSubSample_,
> + 0,
> + GL_RED,
> + GL_UNSIGNED_BYTE,
> + (char *)yuvDataPtr_ + size_.width() * size_.height());
> + glUniform1i(textureUniformV_, 1);
OK, now I understand why the NV_3_planes_UV_f.glsl and
NV_3_planes_VU_f.glsl shaders are identical: you switch the U and V
planes here. I think we should then merge the two files into
NV_3_planes_f.glsl. The above line should become
glUniform1i(textureUniformU_, 2);
as you deal with texture 2 here (and a similar change is needed below),
and the two blocks should be swapped as the comments are incorrect (the
comment above refers to texture 1 while the code deals with texture 2).
> +
> + /* activate texture 2 */
> + glActiveTexture(GL_TEXTURE1);
> + configureTexture(id_u_);
> + glTexImage2D(GL_TEXTURE_2D,
> + 0,
> + GL_RED,
> + size_.width() / horzSubSample_,
> + size_.height() / vertSubSample_,
> + 0,
> + GL_RED,
> + GL_UNSIGNED_BYTE,
> + (char *)yuvDataPtr_ + size_.width() * size_.height() * 5 / 4);
> + glUniform1i(textureUniformU_, 2);
Please add a break here, let's not rely on implicit fall-through.
> + default:
> + break;
> + };
> +}
> +
> +void ViewFinderGL::paintGL()
> +{
> + if (pFShader_ == nullptr)
if (!pFShader_)
> + createFragmentShader();
> +
> + if (yuvDataPtr_) {
> + glClearColor(0.0, 0.0, 0.0, 1.0);
> + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
> +
> + doRender();
> + glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
> + }
> +}
> +
> +void ViewFinderGL::resizeGL(int w, int h)
> +{
> + glViewport(0, 0, w, h);
> +}
> +
> +QSize ViewFinderGL::sizeHint() const
> +{
> + return size_.isValid() ? size_ : QSize(640, 480);
> +}
> diff --git a/src/qcam/viewfinder_gl.h b/src/qcam/viewfinder_gl.h
> new file mode 100644
> index 0000000..e708c32
> --- /dev/null
> +++ b/src/qcam/viewfinder_gl.h
> @@ -0,0 +1,97 @@
> +/* SPDX-License-Identifier: GPL-2.0-or-later */
> +/*
> + * Copyright (C) 2020, Linaro
> + *
> + * viewfinder_GL.h - OpenGL Viewfinder for rendering by OpenGL shader
> + *
> + */
> +#ifndef __VIEWFINDER_GL_H__
> +#define __VIEWFINDER_GL_H__
> +
> +#include <QImage>
> +#include <QMutex>
> +#include <QObject>
This header shouldn't be needed.
> +#include <QOpenGLBuffer>
> +#include <QOpenGLFunctions>
> +#include <QOpenGLShader>
> +#include <QOpenGLShaderProgram>
> +#include <QOpenGLTexture>
> +#include <QOpenGLWidget>
> +#include <QSize>
> +
> +#include <libcamera/buffer.h>
> +#include <libcamera/formats.h>
Missing blank line.
> +#include "viewfinder.h"
> +
> +class ViewFinderGL : public QOpenGLWidget,
> + public ViewFinder,
> + protected QOpenGLFunctions
> +{
> + Q_OBJECT
> +
> +public:
> + ViewFinderGL(QWidget *parent = 0);
= nullptr
> + ~ViewFinderGL();
> +
> + const QList<libcamera::PixelFormat> &nativeFormats() const override;
> +
> + int setFormat(const libcamera::PixelFormat &format, const QSize &size) override;
> + void render(libcamera::FrameBuffer *buffer, MappedBuffer *map) override;
> + void stop() override;
> +
> + QImage getCurrentImage() override;
> +
> +Q_SIGNALS:
> + void renderComplete(libcamera::FrameBuffer *buffer);
> +
> +protected:
> + void initializeGL() override;
> + void paintGL() override;
> + void resizeGL(int w, int h) override;
> + QSize sizeHint() const override;
> +
> +private:
> + bool isFormatSupport(const libcamera::PixelFormat &format);
s/isFormatSupport/isFormatSupported/
> +
> + void configureTexture(unsigned int id);
> + bool createFragmentShader();
> + void createVertexShader();
> + void removeShader();
> + void doRender();
> +
> + /* Captured image size, format and buffer */
> + libcamera::FrameBuffer *buffer_;
> + libcamera::PixelFormat format_;
> + QSize size_;
> +
> + /* OpenGL components for render */
s/render/rendering/
> + QOpenGLShader *pFShader_;
No need to prefix pointers with 'p'. I'd name this fragmentShader_.
> + QOpenGLShader *pVShader_;
Same here, vertexShader_.
> + QOpenGLShaderProgram shaderProgram_;
Is there a specific reason why pFShader_ and pVShader_ are pointers,
while shaderProgram_ is embedded directly in ViewFinderGL ?
> +
> + /* Vertex buffer */
> + QOpenGLBuffer vbuf_;
> +
> + /* Fragment and Vertex shader file name */
> + QString fsrc_;
fragmentShaderSrc_ ?
> + QString vsrc_;
And vertexShaderSrc_.
> +
> + unsigned char *yuvDataPtr_;
And no need for a Ptr suffix either :-)
> +
> + /* YUV texture planars and parameters */
> + GLuint id_u_;
> + GLuint id_v_;
> + GLuint id_y_;
> + GLuint textureUniformU_;
> + GLuint textureUniformV_;
> + GLuint textureUniformY_;
> + QOpenGLTexture textureU_;
> + QOpenGLTexture textureV_;
> + QOpenGLTexture textureY_;
> + unsigned int horzSubSample_;
> + unsigned int vertSubSample_;
> +
> + QImage image_;
This is never used.
> + QMutex mutex_; /* Prevent concurrent access to image_ */
> +};
> +#endif /* __VIEWFINDER_GL_H__ */
--
Regards,
Laurent Pinchart