[PATCH v2 9/9] android: Support partial results

Jacopo Mondi jacopo.mondi at ideasonboard.com
Fri Nov 29 10:47:35 CET 2024


Hi Harvey

On Fri, Nov 29, 2024 at 05:29:27PM +0800, Cheng-Hao Yang wrote:
> Hi Jacopo,
>
> On Fri, Nov 29, 2024 at 12:14 AM Jacopo Mondi
> <jacopo.mondi at ideasonboard.com> wrote:
> >
> > Hi Harvey
> >
> > On Wed, Nov 27, 2024 at 09:25:59AM +0000, Harvey Yang wrote:
> > > With bufferCompleted and metadataAvailable signals, CameraDevice can
> > > support partial results. This allows applications to get results
> > > earlier, especially for buffers that would be blocked by other streams.
> > >
> > > Signed-off-by: Han-Lin Chen <hanlinchen at chromium.org>
> > > Co-developed-by: Harvey Yang <chenghaoyang at chromium.org>
> > > Signed-off-by: Harvey Yang <chenghaoyang at chromium.org>
> > > ---
> > >  src/android/camera_capabilities.cpp      |  11 +-
> > >  src/android/camera_capabilities.h        |   2 +
> > >  src/android/camera_device.cpp            | 750 ++++++++++++++++-------
> > >  src/android/camera_device.h              |  39 +-
> > >  src/android/camera_request.cpp           |  54 +-
> > >  src/android/camera_request.h             |  36 +-
> > >  src/android/camera_stream.cpp            |   4 +-
> > >  src/android/jpeg/post_processor_jpeg.cpp |   2 +-
> > >  8 files changed, 621 insertions(+), 277 deletions(-)
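For orientation while reading the hunks below, the wiring the commit
message refers to boils down to the HAL listening to two additional
Camera signals besides requestCompleted (condensed from the
camera_device.cpp hunk further down, not new code):

    /* Final result, completed and sent back in submission order. */
    camera_->requestCompleted.connect(this, &CameraDevice::requestComplete);
    /* Per-buffer completion, used to return buffers early. */
    camera_->bufferCompleted.connect(this, &CameraDevice::bufferComplete);
    /* Early metadata, used to build partial result metadata packs. */
    camera_->metadataAvailable.connect(this, &CameraDevice::metadataAvailable);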
> >
> > This patch looks like a series of its own
> >
> > If there's any way it could be made more easily consumable...
> >
> > >
> > > diff --git a/src/android/camera_capabilities.cpp b/src/android/camera_capabilities.cpp
> > > index b161bc6b3..bb0a3b755 100644
> > > --- a/src/android/camera_capabilities.cpp
> > > +++ b/src/android/camera_capabilities.cpp
> > > @@ -223,6 +223,14 @@ std::vector<U> setMetadata(CameraMetadata *metadata, uint32_t tag,
> > >
> > >  } /* namespace */
> > >
> > > +/**
> > > + * \var CameraCapabilities::kMaxMetadataPackIndex
> > > + *
> > > + * It defines how many sub-components a result will be composed of. This enables
> > > + * partial results. It's currently identical to
> > > + * ANDROID_REQUEST_PARTIAL_RESULT_COUNT.
> >
> > It's actually the other way around. It is used to populate
> > ANDROID_REQUEST_PARTIAL_RESULT_COUNT
>
> Thanks! Updated.
>
> >
> > > + */
> > > +
> > >  bool CameraCapabilities::validateManualSensorCapability()
> > >  {
> > >       const char *noMode = "Manual sensor capability unavailable: ";
> > > @@ -1416,9 +1424,8 @@ int CameraCapabilities::initializeStaticMetadata()
> > >       staticMetadata_->addEntry(ANDROID_SCALER_CROPPING_TYPE, croppingType);
> > >
> > >       /* Request static metadata. */
> > > -     int32_t partialResultCount = 1;
> > >       staticMetadata_->addEntry(ANDROID_REQUEST_PARTIAL_RESULT_COUNT,
> > > -                               partialResultCount);
> > > +                               kMaxMetadataPackIndex);
> > >
> > >       {
> > >               /* Default the value to 2 if not reported by the camera. */
> > > diff --git a/src/android/camera_capabilities.h b/src/android/camera_capabilities.h
> > > index 56ac1efeb..b11f93241 100644
> > > --- a/src/android/camera_capabilities.h
> > > +++ b/src/android/camera_capabilities.h
> > > @@ -23,6 +23,8 @@
> > >  class CameraCapabilities
> > >  {
> > >  public:
> > > +     static constexpr int32_t kMaxMetadataPackIndex = 64;
> >
> > How has 64 been computed? How does a device-specific HAL usually
> > initialize this parameter?
>
> I think we just use a large enough number as a workaround...
> Do you think we should introduce a control id that allows
> pipeline handlers to report it as a static metadata?
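To make the idea concrete, a rough sketch of what a pipeline-reported
value could look like on the HAL side. Note the property used here is
purely hypothetical; the v2 patch simply hard-codes kMaxMetadataPackIndex
when populating the static tag:

    /*
     * Hypothetical sketch only: properties::PartialResultCount does not
     * exist today, and v2 unconditionally reports kMaxMetadataPackIndex.
     */
    int32_t partialResultCount = CameraCapabilities::kMaxMetadataPackIndex;
    const auto &reported = camera_->properties().get(properties::PartialResultCount);
    if (reported)
            partialResultCount = *reported;
    staticMetadata_->addEntry(ANDROID_REQUEST_PARTIAL_RESULT_COUNT,
                              partialResultCount);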
>
> >
> > > +
> > >       CameraCapabilities() = default;
> > >
> > >       int initialize(std::shared_ptr<libcamera::Camera> camera,
> > > diff --git a/src/android/camera_device.cpp b/src/android/camera_device.cpp
> > > index e085e18b2..f03440b79 100644
> > > --- a/src/android/camera_device.cpp
> > > +++ b/src/android/camera_device.cpp
> > > @@ -252,6 +252,8 @@ CameraDevice::CameraDevice(unsigned int id, std::shared_ptr<Camera> camera)
> > >         facing_(CAMERA_FACING_FRONT), orientation_(0)
> > >  {
> > >       camera_->requestCompleted.connect(this, &CameraDevice::requestComplete);
> > > +     camera_->bufferCompleted.connect(this, &CameraDevice::bufferComplete);
> > > +     camera_->metadataAvailable.connect(this, &CameraDevice::metadataAvailable);
> > >
> > >       maker_ = "libcamera";
> > >       model_ = "cameraModel";
> > > @@ -438,8 +440,9 @@ void CameraDevice::stop()
> > >       camera_->stop();
> > >
> > >       {
> > > -             MutexLocker descriptorsLock(descriptorsMutex_);
> > > -             descriptors_ = {};
> > > +             MutexLocker descriptorsLock(pendingRequestMutex_);
> > > +             pendingRequests_ = {};
> > > +             pendingStreamBuffers_ = {};
> > >       }
> > >
> > >       streams_.clear();
> > > @@ -860,16 +863,39 @@ int CameraDevice::processControls(Camera3RequestDescriptor *descriptor)
> > >       return 0;
> > >  }
> > >
> > > +/*
> > > + * abortRequest() is only called before the request is queued into the device,
> > > + * i.e., there is no need to remove it from pendingRequests_ and
> > > + * pendingStreamBuffers_.
> > > + */
> > >  void CameraDevice::abortRequest(Camera3RequestDescriptor *descriptor) const
> > >  {
> > > -     notifyError(descriptor->frameNumber_, nullptr, CAMERA3_MSG_ERROR_REQUEST);
> > > +     /*
> > > +      * Since the failed buffers do not have to follow the strict ordering
> > > +      * valid buffers do, and could be out-of-order with respect to valid
> > > +      * buffers, it's safe to send the aborted result back to the framework
> > > +      * immediately.
> > > +      */
> > > +     descriptor->status_ = Camera3RequestDescriptor::Status::Error;
> > > +     descriptor->finalResult_ = std::make_unique<Camera3ResultDescriptor>(descriptor);
> > >
> > > -     for (auto &buffer : descriptor->buffers_)
> > > +     Camera3ResultDescriptor *result = descriptor->finalResult_.get();
> > > +
> > > +     result->metadataPackIndex_ = 0;
> > > +     for (auto &buffer : descriptor->buffers_) {
> > >               buffer.status = StreamBuffer::Status::Error;
> > > +             result->buffers_.emplace_back(&buffer);
> > > +     }
> > >
> > > -     descriptor->status_ = Camera3RequestDescriptor::Status::Error;
> > > +     /*
> > > +      * After CAMERA3_MSG_ERROR_REQUEST is notified, for a given frame,
> > > +      * only process_capture_results with buffers of the status
> > > +      * CAMERA3_BUFFER_STATUS_ERROR are allowed. No further notifies or
> > > +      * process_capture_result with non-null metadata is allowed.
> > > +      */
> > > +     notifyError(descriptor->frameNumber_, nullptr, CAMERA3_MSG_ERROR_REQUEST);
> > >
> > > -     sendCaptureResult(descriptor);
> > > +     sendCaptureResult(result);
> > >  }
> > >
> > >  bool CameraDevice::isValidRequest(camera3_capture_request_t *camera3Request) const
> > > @@ -1031,9 +1057,6 @@ int CameraDevice::processCaptureRequest(camera3_capture_request_t *camera3Reques
> > >                        * */
> > >                       descriptor->internalBuffers_[cameraStream] = frameBuffer;
> > >                       LOG(HAL, Debug) << ss.str() << " (internal)";
> > > -
> > > -                     descriptor->pendingStreamsToProcess_.insert(
> > > -                             { cameraStream, &buffer });
> > >                       break;
> > >               }
> > >
> > > @@ -1066,8 +1089,6 @@ int CameraDevice::processCaptureRequest(camera3_capture_request_t *camera3Reques
> > >                               << cameraStream->configuration().pixelFormat << "]"
> > >                               << " (mapped)";
> > >
> > > -             descriptor->pendingStreamsToProcess_.insert({ cameraStream, &buffer });
> > > -
> > >               /*
> > >                * Make sure the CameraStream this stream is mapped on has been
> > >                * added to the request.
> > > @@ -1154,8 +1175,10 @@ int CameraDevice::processCaptureRequest(camera3_capture_request_t *camera3Reques
> > >       Request *request = descriptor->request_.get();
> > >
> > >       {
> > > -             MutexLocker descriptorsLock(descriptorsMutex_);
> > > -             descriptors_.push(std::move(descriptor));
> > > +             MutexLocker descriptorsLock(pendingRequestMutex_);
> > > +             for (auto &buffer : descriptor->buffers_)
> > > +                     pendingStreamBuffers_[buffer.stream].push_back(&buffer);
> > > +             pendingRequests_.emplace(std::move(descriptor));
> > >       }
> > >
> > >       camera_->queueRequest(request);
> > > @@ -1163,132 +1186,279 @@ int CameraDevice::processCaptureRequest(camera3_capture_request_t *camera3Reques
> > >       return 0;
> > >  }
> > >
> > > -void CameraDevice::requestComplete(Request *request)
> > > +void CameraDevice::bufferComplete(libcamera::Request *request,
> > > +                               libcamera::FrameBuffer *frameBuffer)
> > >  {
> > >       Camera3RequestDescriptor *descriptor =
> > >               reinterpret_cast<Camera3RequestDescriptor *>(request->cookie());
> > >
> > > -     /*
> > > -      * Prepare the capture result for the Android camera stack.
> > > -      *
> > > -      * The buffer status is set to Success and later changed to Error if
> > > -      * post-processing/compression fails.
> > > -      */
> > > +     descriptor->partialResults_.emplace_back(new Camera3ResultDescriptor(descriptor));
> > > +     Camera3ResultDescriptor *camera3Result = descriptor->partialResults_.back().get();
> > > +
> > >       for (auto &buffer : descriptor->buffers_) {
> > > -             CameraStream *stream = buffer.stream;
> > > +             CameraStream *cameraStream = buffer.stream;
> > > +             if (buffer.srcBuffer != frameBuffer &&
> > > +                 buffer.frameBuffer.get() != frameBuffer)
> > > +                     continue;
> > >
> > > -             /*
> > > -              * Streams of type Direct have been queued to the
> > > -              * libcamera::Camera and their acquire fences have
> > > -              * already been waited on by the library.
> > > -              *
> > > -              * Acquire fences of streams of type Internal and Mapped
> > > -              * will be handled during post-processing.
> > > -              */
> > > -             if (stream->type() == CameraStream::Type::Direct) {
> > > -                     /* If handling of the fence has failed restore buffer.fence. */
> > > +             buffer.result = camera3Result;
> > > +             camera3Result->buffers_.emplace_back(&buffer);
> > > +
> > > +             StreamBuffer::Status status = StreamBuffer::Status::Success;
> > > +             if (frameBuffer->metadata().status != FrameMetadata::FrameSuccess) {
> > > +                     status = StreamBuffer::Status::Error;
> > > +             }
> >
> > no  {}
>
> Done
>
> >
> > > +             setBufferStatus(buffer, status);
> > > +
> > > +             switch (cameraStream->type()) {
> > > +             case CameraStream::Type::Direct: {
> > > +                     ASSERT(buffer.frameBuffer.get() == frameBuffer);
> > > +                     /*
> > > +                              * Streams of type Direct have been queued to the
> > > +                              * libcamera::Camera and their acquire fences have
> > > +                              * already been waited on by the library.
> > > +                              */
> >
> > weird indent
>
> Updated.
>
> >
> > >                       std::unique_ptr<Fence> fence = buffer.frameBuffer->releaseFence();
> > >                       if (fence)
> > >                               buffer.fence = fence->release();
> > > +                     break;
> > > +             }
> > > +             case CameraStream::Type::Mapped:
> > > +             case CameraStream::Type::Internal:
> > > +                     ASSERT(buffer.srcBuffer == frameBuffer);
> > > +                     if (status == StreamBuffer::Status::Error)
> > > +                             break;
> > > +
> > > +                     camera3Result->pendingBuffersToProcess_.emplace_back(&buffer);
> > > +
> > > +                     if (cameraStream->isJpegStream()) {
> > > +                             generateJpegExifMetadata(descriptor, &buffer);
> > > +
> > > +                             /*
> > > +                              * Allocate for post-processor to fill
> > > +                              * in JPEG related metadata.
> > > +                              */
> > > +                             camera3Result->resultMetadata_ = getJpegPartialResultMetadata();
> > > +                     }
> > > +
> > > +                     break;
> > > +             }
> > > +     }
> > > +
> > > +     for (auto iter = camera3Result->pendingBuffersToProcess_.begin();
> > > +          iter != camera3Result->pendingBuffersToProcess_.end();) {
> > > +             StreamBuffer *buffer = *iter;
> > > +             int ret = buffer->stream->process(buffer);
> > > +             if (ret) {
> > > +                     iter = camera3Result->pendingBuffersToProcess_.erase(iter);
> > > +                     setBufferStatus(*buffer, StreamBuffer::Status::Error);
> > > +                     LOG(HAL, Error) << "Failed to run post process of request "
> > > +                                     << descriptor->frameNumber_;
> > > +             } else {
> > > +                     iter++;
> > >               }
> > > -             buffer.status = StreamBuffer::Status::Success;
> > >       }
> > >
> > > +     if (camera3Result->pendingBuffersToProcess_.empty())
> > > +             checkAndCompleteReadyPartialResults(camera3Result);
> > > +}
> > > +
> > > +void CameraDevice::metadataAvailable(libcamera::Request *request,
> > > +                                  const libcamera::ControlList &metadata)
> > > +{
> > > +     ASSERT(!metadata.empty());
> > > +
> > > +     Camera3RequestDescriptor *descriptor =
> > > +             reinterpret_cast<Camera3RequestDescriptor *>(request->cookie());
> > > +
> > > +     descriptor->partialResults_.emplace_back(new Camera3ResultDescriptor(descriptor));
> > > +     Camera3ResultDescriptor *camera3Result = descriptor->partialResults_.back().get();
> > > +
> > >       /*
> > > -      * If the Request has failed, abort the request by notifying the error
> > > -      * and complete the request with all buffers in error state.
> > > +      * Notify shutter as soon as we have received SensorTimestamp.
> > >        */
> > > -     if (request->status() != Request::RequestComplete) {
> > > -             LOG(HAL, Error) << "Request " << request->cookie()
> > > -                             << " not successfully completed: "
> > > -                             << request->status();
> > > +     const auto &timestamp = metadata.get(controls::SensorTimestamp);
> > > +     if (timestamp) {
> > > +             notifyShutter(descriptor->frameNumber_, *timestamp);
> > > +             LOG(HAL, Debug) << "Request " << request->cookie() << " notifies shutter";
> > > +     }
> > > +
> > > +     camera3Result->resultMetadata_ = getPartialResultMetadata(metadata);
> > > +
> > > +     checkAndCompleteReadyPartialResults(camera3Result);
> > > +}
> > > +
> > > +void CameraDevice::requestComplete(Request *request)
> > > +{
> > > +     Camera3RequestDescriptor *camera3Request =
> > > +             reinterpret_cast<Camera3RequestDescriptor *>(request->cookie());
> > >
> > > -             descriptor->status_ = Camera3RequestDescriptor::Status::Error;
> > > +     switch (request->status()) {
> > > +     case Request::RequestComplete:
> > > +             camera3Request->status_ = Camera3RequestDescriptor::Status::Success;
> > > +             break;
> > > +     case Request::RequestCancelled:
> > > +             camera3Request->status_ = Camera3RequestDescriptor::Status::Error;
> > > +             break;
> > > +     case Request::RequestPending:
> > > +             LOG(HAL, Fatal) << "Try to complete an unfinished request";
> > > +             break;
> > >       }
> > >
> > > +     camera3Request->finalResult_ = std::make_unique<Camera3ResultDescriptor>(camera3Request);
> > > +     Camera3ResultDescriptor *result = camera3Request->finalResult_.get();
> > > +
> > >       /*
> > > -      * Notify shutter as soon as we have verified we have a valid request.
> > > -      *
> > > -      * \todo The shutter event notification should be sent to the framework
> > > -      * as soon as possible, earlier than request completion time.
> > > +      * On Android, the final result with metadata has to set the field to
> > > +      * CameraCapabilities::kMaxMetadataPackIndex, and should be returned in
> > > +      * the submission order of the requests. Create a result as the final
> > > +      * result, which is guaranteed to be sent in order by completeRequestDescriptor().
> > > +      */
> > > +     result->resultMetadata_ = getFinalResultMetadata(camera3Request->settings_);
> > > +     result->metadataPackIndex_ = CameraCapabilities::kMaxMetadataPackIndex;
> > > +
> > > +     /*
> > > +      * We need to check whether there are partial results pending for
> > > +      * post-processing, before we complete the request descriptor. Otherwise,
> > > +      * the callback of post-processing will complete the request instead.
> > >        */
> > > -     uint64_t sensorTimestamp = static_cast<uint64_t>(request->metadata()
> > > -                                                              .get(controls::SensorTimestamp)
> > > -                                                              .value_or(0));
> > > -     notifyShutter(descriptor->frameNumber_, sensorTimestamp);
> > > +     for (auto &r : camera3Request->partialResults_)
> > > +             if (!r->completed_)
> > > +                     return;
> > >
> > > -     LOG(HAL, Debug) << "Request " << request->cookie() << " completed with "
> > > -                     << descriptor->request_->buffers().size() << " streams";
> > > +     completeRequestDescriptor(camera3Request);
> > > +}
> > >
> > > +void CameraDevice::checkAndCompleteReadyPartialResults(Camera3ResultDescriptor *result)
> > > +{
> > >       /*
> > > -      * Generate the metadata associated with the captured buffers.
> > > +      * Android requires that buffers for a given stream be returned in FIFO
> > > +      * order. However, different streams are independent of each other, so
> > > +      * it is acceptable and expected that the buffer for request 5 for
> > > +      * stream A may be returned after the buffer for request 6 for stream
> > > +      * B is. And it is acceptable that the result metadata for request 6
> > > +      * for stream B is returned before the buffer for request 5 for stream
> > > +      * A is. As a result, if all buffers of a result are at the front of
> > > +      * the pending buffers of their streams, or the result contains no
> > > +      * buffers, the result is allowed to be sent. Collect ready results to
> > > +      * send in an order that follows the above rule.
> > >        *
> > > -      * Notify if the metadata generation has failed, but continue processing
> > > -      * buffers and return an empty metadata pack.
> > > +      * \todo The reprocessing result can be returned ahead of the pending
> > > +      * normal output results. But the FIFO ordering must be maintained for
> > > +      * all reprocessing results. Track the reprocessing buffer's order
> > > +      * independently when we have reprocessing API.
> > >        */
> > > -     descriptor->resultMetadata_ = getResultMetadata(*descriptor);
> > > -     if (!descriptor->resultMetadata_) {
> > > -             descriptor->status_ = Camera3RequestDescriptor::Status::Error;
> > > +     MutexLocker lock(pendingRequestMutex_);
> > >
> > > -             /*
> > > -              * The camera framework expects an empty metadata pack on error.
> > > -              *
> > > -              * \todo Check that the post-processor code handles this situation
> > > -              * correctly.
> > > -              */
> > > -             descriptor->resultMetadata_ = std::make_unique<CameraMetadata>(0, 0);
> > > -     }
> > > +     pendingPartialResults_.emplace_front(result);
> > > +     std::list<Camera3ResultDescriptor *> readyResults;
> > >
> > >       /*
> > > -      * Queue all the post-processing streams request at once. The completion
> > > -      * slot streamProcessingComplete() can only execute when we are out
> > > -      * this critical section. This helps to handle synchronous errors here
> > > -      * itself.
> > > +      * Error buffers do not have to follow the strict ordering as valid
> > > +      * buffers do. They're ready to be sent directly. Therefore, remove them
> > > +      * from the pending buffers so they won't block following valid buffers.
> > >        */
> > > -     auto iter = descriptor->pendingStreamsToProcess_.begin();
> > > -     while (iter != descriptor->pendingStreamsToProcess_.end()) {
> > > -             CameraStream *stream = iter->first;
> > > -             StreamBuffer *buffer = iter->second;
> > > +     for (auto &buffer : result->buffers_)
> > > +             if (buffer->status == StreamBuffer::Status::Error)
> > > +                     pendingStreamBuffers_[buffer->stream].remove(buffer);
> > >
> > > -             if (stream->isJpegStream()) {
> > > -                     generateJpegExifMetadata(descriptor, buffer);
> > > -             }
> > > +     /*
> > > +      * Exhaustively collect results which are ready to be sent.
> > > +      */
> > > +     bool keepChecking;
> > > +     do {
> > > +             keepChecking = false;
> > > +             auto iter = pendingPartialResults_.begin();
> > > +             while (iter != pendingPartialResults_.end()) {
> > > +                     /*
> > > +                      * A result is considered ready when all of the valid
> > > +                      * buffers of the result are at the front of the pending
> > > +                      * buffers associated with their streams.
> > > +                      */
> > > +                     bool ready = true;
> > > +                     for (auto &buffer : (*iter)->buffers_) {
> > > +                             if (buffer->status == StreamBuffer::Status::Error)
> > > +                                     continue;
> > >
> > > -             FrameBuffer *src = request->findBuffer(stream->stream());
> > > -             if (!src) {
> > > -                     LOG(HAL, Error) << "Failed to find a source stream buffer";
> > > -                     setBufferStatus(*buffer, StreamBuffer::Status::Error);
> > > -                     iter = descriptor->pendingStreamsToProcess_.erase(iter);
> > > -                     continue;
> > > -             }
> > > +                             auto &pendingBuffers = pendingStreamBuffers_[buffer->stream];
> > >
> > > -             ++iter;
> > > -             int ret = stream->process(buffer);
> > > -             if (ret) {
> > > -                     setBufferStatus(*buffer, StreamBuffer::Status::Error);
> > > -                     descriptor->pendingStreamsToProcess_.erase(stream);
> > > +                             ASSERT(!pendingBuffers.empty());
> > > +
> > > +                             if (pendingBuffers.front() != buffer) {
> > > +                                     ready = false;
> > > +                                     break;
> > > +                             }
> > > +                     }
> > > +
> > > +                     if (!ready) {
> > > +                             iter++;
> > > +                             continue;
> > > +                     }
> > > +
> > > +                     for (auto &buffer : (*iter)->buffers_)
> > > +                             if (buffer->status != StreamBuffer::Status::Error)
> > > +                                     pendingStreamBuffers_[buffer->stream].pop_front();
> > > +
> > > +                     /* Keep checking since pendingStreamBuffers_ has been updated. */
> > > +                     keepChecking = true;
> > > +
> > > +                     readyResults.emplace_back(*iter);
> > > +                     iter = pendingPartialResults_.erase(iter);
> > >               }
> > > +     } while (keepChecking);
> > > +
> > > +     lock.unlock();
> > > +
> > > +     for (auto &res : readyResults) {
> > > +             completePartialResultDescriptor(res);
> > >       }
> > > +}
> > > +
> > > +void CameraDevice::completePartialResultDescriptor(Camera3ResultDescriptor *result)
> >
> > There are really too many things going on here. I can't review this
> > patch in this form. Please provide a design rationale in the cover
> > letter and try to break this down into consumable pieces. It's hard to
> > review it otherwise.
>
> I understand it's overwhelming... As I mentioned in the cover
> letter, I'm running out of ideas on how to break this one down...
>
> Note that originally the 2nd, 3rd, 4th, 5th, 6th, 7th, and 9th patches
> belonged to one CL [1]. Just saying that I've already spent quite
> some time chopping it down...
>
> Originally we used one signal that could report both buffers and
> metadata tags, whereas now we have two separate signals for them.
> Let me check if I can split some changes based on that...
>
> [1]: https://chromium-review.googlesource.com/c/chromiumos/third_party/libcamera/+/5674663
>
> >
> > Only one question here: I see two functions
> > CameraDevice::getPartialResultMetadata and
> > CameraDevice::getFinalResultMetadata
> >
> > They handle different sets of metadata; what defines whether a metadata
> > entry is part of the partial results or rather of the final ones?
>
> The difference is that getFinalResultMetadata doesn't rely on
> any results from the pipeline handler. It sets metadata tags for
> which we only have constants for now, and ones for which we return
> the value requested in the capture settings.
>
> getPartialResultMetadata handles the conversion from
> libcamera::Request::metadata() to Android metadata tags,
> plus the tags filled in by the post-processors.

Thanks, but this is not what I'm after.

The two functions handle different sets of libcamera::controls, right?

How can the HAL, which is a generic component that has to work with
multiple pipeline handlers, assume which metadata are sent as partial
results and which ones are sent along with a completed request?
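For discussion, the split in v2 boils down to something like the two
snippets below (lifted from the hunks quoted in this mail, simplified):
the partial path translates whatever libcamera reported in
Request::metadata(), while the final path only emits constants and values
echoed back from the request settings. In other words, which controls end
up in which bucket is currently fixed in the HAL itself.

    /* getPartialResultMetadata(): translated from libcamera metadata. */
    const auto &timestamp = metadata.get(controls::SensorTimestamp);
    if (timestamp)
            resultMetadata->addEntry(ANDROID_SENSOR_TIMESTAMP, *timestamp);

    /* getFinalResultMetadata(): constants and echoes of the settings. */
    found = settings.getEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, &entry);
    value = found ? *entry.data.u8 : (uint8_t)ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
    resultMetadata->addEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, value);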

>
> BR,
> Harvey
>
> >
> > > +{
> > > +     Camera3RequestDescriptor *request = result->request_;
> > > +     result->completed_ = true;
> > > +
> > > +     /*
> > > +      * Android requires the metadataPackIndex of a partial result to be
> > > +      * set to 0 if the result contains only buffers, and otherwise to be
> > > +      * set incrementally from 1 to kMaxMetadataPackIndex - 1.
> > > +      */
> > > +     if (result->resultMetadata_)
> > > +             result->metadataPackIndex_ = request->nextPartialResultIndex_++;
> > > +     else
> > > +             result->metadataPackIndex_ = 0;
> > > +
> > > +     sendCaptureResult(result);
> > >
> > > -     if (descriptor->pendingStreamsToProcess_.empty())
> > > -             completeDescriptor(descriptor);
> > > +     /*
> > > +      * The status is changed from Pending to Success or Error only once
> > > +      * requestComplete() has been called. It's guaranteed then that no
> > > +      * more partial results will be added to the request and that the final
> > > +      * result is ready. In that case, if all partial results are completed,
> > > +      * we can complete the request.
> > > +      */
> > > +     if (request->status_ == Camera3RequestDescriptor::Status::Pending)
> > > +             return;
> > > +
> > > +     for (auto &r : request->partialResults_)
> > > +             if (!r->completed_)
> > > +                     return;
> > > +
> > > +     completeRequestDescriptor(request);
> > >  }
> > >
> > >  /**
> > >   * \brief Complete the Camera3RequestDescriptor
> > > - * \param[in] descriptor The Camera3RequestDescriptor that has completed
> > > + * \param[in] descriptor The Camera3RequestDescriptor
> > >   *
> > > - * The function marks the Camera3RequestDescriptor as 'complete'. It shall be
> > > - * called when all the streams in the Camera3RequestDescriptor have completed
> > > - * capture (or have been generated via post-processing) and the request is ready
> > > - * to be sent back to the framework.
> > > - *
> > > - * \context This function is \threadsafe.
> > > + * The function shall complete the descriptor only when all of the partial
> > > + * results have been sent back to the framework, and send the final result
> > > + * in the submission order of the requests.
> > >   */
> > > -void CameraDevice::completeDescriptor(Camera3RequestDescriptor *descriptor)
> > > +void CameraDevice::completeRequestDescriptor(Camera3RequestDescriptor *request)
> > >  {
> > > -     MutexLocker lock(descriptorsMutex_);
> > > -     descriptor->complete_ = true;
> > > +     request->complete_ = true;
> > >
> > >       sendCaptureResults();
> > >  }
> > > @@ -1304,15 +1474,23 @@ void CameraDevice::completeDescriptor(Camera3RequestDescriptor *descriptor)
> > >   * Stop iterating if the descriptor at the front of the queue is not complete.
> > >   *
> > >   * This function should never be called directly in the codebase. Use
> > > - * completeDescriptor() instead.
> > > + * completeRequestDescriptor() instead.
> > >   */
> > >  void CameraDevice::sendCaptureResults()
> > >  {
> > > -     while (!descriptors_.empty() && !descriptors_.front()->isPending()) {
> > > -             auto descriptor = std::move(descriptors_.front());
> > > -             descriptors_.pop();
> > > +     MutexLocker descriptorsLock(pendingRequestMutex_);
> > > +
> > > +     while (!pendingRequests_.empty()) {
> > > +             auto &descriptor = pendingRequests_.front();
> > > +             if (!descriptor->complete_)
> > > +                     break;
> > >
> > > -             sendCaptureResult(descriptor.get());
> > > +             /*
> > > +              * Android requires the final result of each request to be
> > > +              * returned in submission order.
> > > +              */
> > > +             ASSERT(descriptor->finalResult_);
> > > +             sendCaptureResult(descriptor->finalResult_.get());
> > >
> > >               /*
> > >                * Call notify with CAMERA3_MSG_ERROR_RESULT to indicate some
> > > @@ -1323,18 +1501,20 @@ void CameraDevice::sendCaptureResults()
> > >                */
> > >               if (descriptor->status_ == Camera3RequestDescriptor::Status::Error)
> > >                       notifyError(descriptor->frameNumber_, nullptr, CAMERA3_MSG_ERROR_RESULT);
> > > +
> > > +             pendingRequests_.pop();
> > >       }
> > >  }
> > >
> > > -void CameraDevice::sendCaptureResult(Camera3RequestDescriptor *request) const
> > > +void CameraDevice::sendCaptureResult(Camera3ResultDescriptor *result) const
> > >  {
> > >       std::vector<camera3_stream_buffer_t> resultBuffers;
> > > -     resultBuffers.reserve(request->buffers_.size());
> > > +     resultBuffers.reserve(result->buffers_.size());
> > >
> > > -     for (auto &buffer : request->buffers_) {
> > > +     for (auto &buffer : result->buffers_) {
> > >               camera3_buffer_status status = CAMERA3_BUFFER_STATUS_ERROR;
> > >
> > > -             if (buffer.status == StreamBuffer::Status::Success)
> > > +             if (buffer->status == StreamBuffer::Status::Success)
> > >                       status = CAMERA3_BUFFER_STATUS_OK;
> > >
> > >               /*
> > > @@ -1343,22 +1523,20 @@ void CameraDevice::sendCaptureResult(Camera3RequestDescriptor *request) const
> > >                * on the acquire fence in case we haven't done so
> > >                * ourselves for any reason.
> > >                */
> > > -             resultBuffers.push_back({ buffer.stream->camera3Stream(),
> > > -                                       buffer.camera3Buffer, status,
> > > -                                       -1, buffer.fence.release() });
> > > +             resultBuffers.push_back({ buffer->stream->camera3Stream(),
> > > +                                       buffer->camera3Buffer, status,
> > > +                                       -1, buffer->fence.release() });
> > >       }
> > >
> > >       camera3_capture_result_t captureResult = {};
> > >
> > > -     captureResult.frame_number = request->frameNumber_;
> > > +     captureResult.frame_number = result->request_->frameNumber_;
> > >       captureResult.num_output_buffers = resultBuffers.size();
> > >       captureResult.output_buffers = resultBuffers.data();
> > > +     captureResult.partial_result = result->metadataPackIndex_;
> > >
> > > -     if (request->status_ == Camera3RequestDescriptor::Status::Success)
> > > -             captureResult.partial_result = 1;
> > > -
> > > -     if (request->resultMetadata_)
> > > -             captureResult.result = request->resultMetadata_->getMetadata();
> > > +     if (result->resultMetadata_)
> > > +             captureResult.result = result->resultMetadata_->getMetadata();
> > >
> > >       callbacks_->process_capture_result(callbacks_, &captureResult);
> > >  }
> > > @@ -1371,10 +1549,6 @@ void CameraDevice::setBufferStatus(StreamBuffer &streamBuffer,
> > >               notifyError(streamBuffer.request->frameNumber_,
> > >                           streamBuffer.stream->camera3Stream(),
> > >                           CAMERA3_MSG_ERROR_BUFFER);
> > > -
> > > -             /* Also set error status on entire request descriptor. */
> > > -             streamBuffer.request->status_ =
> > > -                     Camera3RequestDescriptor::Status::Error;
> > >       }
> > >  }
> > >
> > > @@ -1396,26 +1570,25 @@ void CameraDevice::streamProcessingCompleteDelegate(StreamBuffer *streamBuffer,
> > >   * \param[in] streamBuffer The StreamBuffer for which processing is complete
> > >   * \param[in] status Stream post-processing status
> > >   *
> > > - * This function is called from the post-processor's thread whenever a camera
> > > + * This function is called from the camera's thread whenever a camera
> > >   * stream has finished post processing. The corresponding entry is dropped from
> > > - * the descriptor's pendingStreamsToProcess_ map.
> > > + * the result's pendingBuffersToProcess_ list.
> > >   *
> > > - * If the pendingStreamsToProcess_ map is then empty, all streams requiring to
> > > - * be generated from post-processing have been completed. Mark the descriptor as
> > > - * complete using completeDescriptor() in that case.
> > > + * If the pendingBuffersToProcess_ list is then empty, all streams requiring to
> > > + * be generated from post-processing have been completed.
> > >   */
> > >  void CameraDevice::streamProcessingComplete(StreamBuffer *streamBuffer,
> > >                                           StreamBuffer::Status status)
> > >  {
> > >       setBufferStatus(*streamBuffer, status);
> > >
> > > -     Camera3RequestDescriptor *request = streamBuffer->request;
> > > +     Camera3ResultDescriptor *result = streamBuffer->result;
> > > +     result->pendingBuffersToProcess_.remove(streamBuffer);
> > >
> > > -     request->pendingStreamsToProcess_.erase(streamBuffer->stream);
> > > -     if (!request->pendingStreamsToProcess_.empty())
> > > +     if (!result->pendingBuffersToProcess_.empty())
> > >               return;
> > >
> > > -     completeDescriptor(streamBuffer->request);
> > > +     checkAndCompleteReadyPartialResults(result);
> > >  }
> > >
> > >  std::string CameraDevice::logPrefix() const
> > > @@ -1469,23 +1642,12 @@ void CameraDevice::generateJpegExifMetadata(Camera3RequestDescriptor *request,
> > >       jpegExifMetadata->sensorSensitivityISO = 100;
> > >  }
> > >
> > > -/*
> > > - * Produce a set of fixed result metadata.
> > > - */
> > > -std::unique_ptr<CameraMetadata>
> > > -CameraDevice::getResultMetadata(const Camera3RequestDescriptor &descriptor) const
> > > +std::unique_ptr<CameraMetadata> CameraDevice::getJpegPartialResultMetadata() const
> > >  {
> > > -     const ControlList &metadata = descriptor.request_->metadata();
> > > -     const CameraMetadata &settings = descriptor.settings_;
> > > -     camera_metadata_ro_entry_t entry;
> > > -     bool found;
> > > -
> > >       /*
> > > -      * \todo Keep this in sync with the actual number of entries.
> > > -      * Currently: 40 entries, 156 bytes
> > > +      * Reserve more capacity for the JPEG metadata set by the post-processor.
> > > +      * Currently: 8 entries, 82 bytes of extra capacity.
> > >        *
> > > -      * Reserve more space for the JPEG metadata set by the post-processor.
> > > -      * Currently:
> > >        * ANDROID_JPEG_GPS_COORDINATES (double x 3) = 24 bytes
> > >        * ANDROID_JPEG_GPS_PROCESSING_METHOD (byte x 32) = 32 bytes
> > >        * ANDROID_JPEG_GPS_TIMESTAMP (int64) = 8 bytes
> > > @@ -1497,7 +1659,215 @@ CameraDevice::getResultMetadata(const Camera3RequestDescriptor &descriptor) cons
> > >        * Total bytes for JPEG metadata: 82
> > >        */
> > >       std::unique_ptr<CameraMetadata> resultMetadata =
> > > -             std::make_unique<CameraMetadata>(88, 166);
> > > +             std::make_unique<CameraMetadata>(8, 82);
> > > +     if (!resultMetadata->isValid()) {
> > > +             LOG(HAL, Error) << "Failed to allocate result metadata";
> > > +             return nullptr;
> > > +     }
> > > +
> > > +     return resultMetadata;
> > > +}
> > > +
> > > +std::unique_ptr<CameraMetadata>
> > > +CameraDevice::getPartialResultMetadata(const ControlList &metadata) const
> > > +{
> > > +     /*
> > > +      * \todo Keep this in sync with the actual number of entries.
> > > +      *
> > > +      * Reserve capacity for metadata values larger than 4 bytes, which
> > > +      * cannot be stored inline in entries.
> > > +      * Currently: 6 entries, 40 bytes of extra capacity.
> > > +      *
> > > +      * ANDROID_SENSOR_TIMESTAMP (int64) = 8 bytes
> > > +      * ANDROID_REQUEST_PIPELINE_DEPTH (byte) = 1 byte
> > > +      * ANDROID_SENSOR_EXPOSURE_TIME (int64) = 8 bytes
> > > +      * ANDROID_CONTROL_AE_STATE (enum) = 4 bytes
> > > +      * ANDROID_CONTROL_AF_STATE (enum) = 4 bytes
> > > +      * ANDROID_SENSOR_SENSITIVITY (int32) = 4 bytes
> > > +      * ANDROID_CONTROL_AWB_STATE (enum) = 4 bytes
> > > +      * ANDROID_SENSOR_FRAME_DURATION (int64) = 8 bytes
> > > +      * ANDROID_SCALER_CROP_REGION (int32 X 4) = 16 bytes
> > > +      * ANDROID_SENSOR_TEST_PATTERN_MODE (enum) = 4 bytes
> > > +      * Total bytes for capacity: 61
> > > +      *
> > > +      * ANDROID_STATISTICS_FACE_RECTANGLES (int32[]) = 4*4*n bytes
> > > +      * ANDROID_STATISTICS_FACE_SCORES (byte[]) = n bytes
> > > +      * ANDROID_STATISTICS_FACE_LANDMARKS (int32[]) = 4*2*n bytes
> > > +      * ANDROID_STATISTICS_FACE_IDS (int32[]) = 4*n bytes
> > > +      *
> > > +      * \todo Calculate the entries and capacity by the input ControlList.
> > > +      */
> > > +
> > > +     const auto &faceDetectRectangles =
> > > +             metadata.get(controls::draft::FaceDetectFaceRectangles);
> > > +
> > > +     size_t entryCapacity = 10;
> > > +     size_t dataCapacity = 61;
> > > +     if (faceDetectRectangles) {
> > > +             entryCapacity += 4;
> > > +             dataCapacity += faceDetectRectangles->size() * 29;
> > > +     }
> > > +
> > > +     std::unique_ptr<CameraMetadata> resultMetadata =
> > > +             std::make_unique<CameraMetadata>(entryCapacity, dataCapacity);
> > > +     if (!resultMetadata->isValid()) {
> > > +             LOG(HAL, Error) << "Failed to allocate result metadata";
> > > +             return nullptr;
> > > +     }
> > > +
> > > +     if (faceDetectRectangles) {
> > > +             std::vector<int32_t> flatRectangles;
> > > +             for (const Rectangle &rect : *faceDetectRectangles) {
> > > +                     flatRectangles.push_back(rect.x);
> > > +                     flatRectangles.push_back(rect.y);
> > > +                     flatRectangles.push_back(rect.x + rect.width);
> > > +                     flatRectangles.push_back(rect.y + rect.height);
> > > +             }
> > > +             resultMetadata->addEntry(
> > > +                     ANDROID_STATISTICS_FACE_RECTANGLES, flatRectangles);
> > > +     }
> > > +
> > > +     const auto &faceDetectFaceScores =
> > > +             metadata.get(controls::draft::FaceDetectFaceScores);
> > > +     if (faceDetectRectangles && faceDetectFaceScores) {
> > > +             if (faceDetectFaceScores->size() != faceDetectRectangles->size()) {
> > > +                     LOG(HAL, Error) << "Pipeline returned wrong number of face scores; "
> > > +                                     << "Expected: " << faceDetectRectangles->size()
> > > +                                     << ", got: " << faceDetectFaceScores->size();
> > > +             }
> > > +             resultMetadata->addEntry(ANDROID_STATISTICS_FACE_SCORES,
> > > +                                      *faceDetectFaceScores);
> > > +     }
> > > +
> > > +     const auto &faceDetectFaceLandmarks =
> > > +             metadata.get(controls::draft::FaceDetectFaceLandmarks);
> > > +     if (faceDetectRectangles && faceDetectFaceLandmarks) {
> > > +             size_t expectedLandmarks = faceDetectRectangles->size() * 3;
> > > +             if (faceDetectFaceLandmarks->size() != expectedLandmarks) {
> > > +                     LOG(HAL, Error) << "Pipeline returned wrong number of face landmarks; "
> > > +                                     << "Expected: " << expectedLandmarks
> > > +                                     << ", got: " << faceDetectFaceLandmarks->size();
> > > +             }
> > > +
> > > +             std::vector<int32_t> androidLandmarks;
> > > +             for (const Point &landmark : *faceDetectFaceLandmarks) {
> > > +                     androidLandmarks.push_back(landmark.x);
> > > +                     androidLandmarks.push_back(landmark.y);
> > > +             }
> > > +             resultMetadata->addEntry(
> > > +                     ANDROID_STATISTICS_FACE_LANDMARKS, androidLandmarks);
> > > +     }
> > > +
> > > +     const auto &faceDetectFaceIds = metadata.get(controls::draft::FaceDetectFaceIds);
> > > +     if (faceDetectRectangles && faceDetectFaceIds) {
> > > +             if (faceDetectFaceIds->size() != faceDetectRectangles->size()) {
> > > +                     LOG(HAL, Error) << "Pipeline returned wrong number of face ids; "
> > > +                                     << "Expected: " << faceDetectRectangles->size()
> > > +                                     << ", got: " << faceDetectFaceIds->size();
> > > +             }
> > > +             resultMetadata->addEntry(ANDROID_STATISTICS_FACE_IDS, *faceDetectFaceIds);
> > > +     }
> > > +
> > > +     /* Add metadata tags reported by libcamera. */
> > > +     const auto &timestamp = metadata.get(controls::SensorTimestamp);
> > > +     if (timestamp)
> > > +             resultMetadata->addEntry(ANDROID_SENSOR_TIMESTAMP, *timestamp);
> > > +
> > > +     const auto &pipelineDepth = metadata.get(controls::draft::PipelineDepth);
> > > +     if (pipelineDepth)
> > > +             resultMetadata->addEntry(ANDROID_REQUEST_PIPELINE_DEPTH,
> > > +                                      *pipelineDepth);
> > > +
> > > +     if (metadata.contains(controls::EXPOSURE_TIME)) {
> > > +             const auto &exposureTime = metadata.get(controls::ExposureTime);
> > > +             int64_t exposure_time = static_cast<int64_t>(exposureTime.value_or(33'333));
> > > +             resultMetadata->addEntry(ANDROID_SENSOR_EXPOSURE_TIME, exposure_time * 1000ULL);
> > > +     }
> > > +
> > > +     if (metadata.contains(controls::draft::AE_STATE)) {
> > > +             const auto &aeState = metadata.get(controls::draft::AeState);
> > > +             resultMetadata->addEntry(ANDROID_CONTROL_AE_STATE, aeState.value_or(0));
> > > +     }
> > > +
> > > +     if (metadata.contains(controls::AF_STATE)) {
> > > +             const auto &afState = metadata.get(controls::AfState);
> > > +             resultMetadata->addEntry(ANDROID_CONTROL_AF_STATE, afState.value_or(0));
> > > +     }
> > > +
> > > +     if (metadata.contains(controls::ANALOGUE_GAIN)) {
> > > +             const auto &sensorSensitivity = metadata.get(controls::AnalogueGain).value_or(100);
> > > +             resultMetadata->addEntry(ANDROID_SENSOR_SENSITIVITY, static_cast<int>(sensorSensitivity));
> > > +     }
> > > +
> > > +     const auto &awbState = metadata.get(controls::draft::AwbState);
> > > +     if (metadata.contains(controls::draft::AWB_STATE)) {
> > > +             resultMetadata->addEntry(ANDROID_CONTROL_AWB_STATE, awbState.value_or(0));
> > > +     }
> > > +
> > > +     const auto &frameDuration = metadata.get(controls::FrameDuration);
> > > +     if (metadata.contains(controls::FRAME_DURATION)) {
> > > +             resultMetadata->addEntry(ANDROID_SENSOR_FRAME_DURATION, frameDuration.value_or(33'333'333));
> > > +     }
> > > +
> > > +     const auto &scalerCrop = metadata.get(controls::ScalerCrop);
> > > +     if (scalerCrop) {
> > > +             const Rectangle &crop = *scalerCrop;
> > > +             int32_t cropRect[] = {
> > > +                     crop.x,
> > > +                     crop.y,
> > > +                     static_cast<int32_t>(crop.width),
> > > +                     static_cast<int32_t>(crop.height),
> > > +             };
> > > +             resultMetadata->addEntry(ANDROID_SCALER_CROP_REGION, cropRect);
> > > +     }
> > > +
> > > +     const auto &testPatternMode = metadata.get(controls::draft::TestPatternMode);
> > > +     if (testPatternMode)
> > > +             resultMetadata->addEntry(ANDROID_SENSOR_TEST_PATTERN_MODE,
> > > +                                      *testPatternMode);
> > > +
> > > +     /*
> > > +      * Return the result metadata pack even if it is not valid: get() will
> > > +      * return nullptr.
> > > +      */
> > > +     if (!resultMetadata->isValid()) {
> > > +             LOG(HAL, Error) << "Failed to construct result metadata";
> > > +     }
> > > +
> > > +     if (resultMetadata->resized()) {
> > > +             auto [entryCount, dataCount] = resultMetadata->usage();
> > > +             LOG(HAL, Info)
> > > +                     << "Result metadata resized: " << entryCount
> > > +                     << " entries and " << dataCount << " bytes used";
> > > +     }
> > > +
> > > +     return resultMetadata;
> > > +}
> > > +
> > > +/*
> > > + * Produce a set of fixed result metadata.
> > > + */
> > > +std::unique_ptr<CameraMetadata>
> > > +CameraDevice::getFinalResultMetadata(const CameraMetadata &settings) const
> > > +{
> > > +     camera_metadata_ro_entry_t entry;
> > > +     bool found;
> > > +
> > > +     /*
> > > +      * \todo Retrieve metadata from corresponding libcamera controls.
> > > +      * \todo Keep this in sync with the actual number of entries.
> > > +      *
> > > +      * Reserve capacity for metadata values larger than 4 bytes, which
> > > +      * cannot be stored inline in entries.
> > > +      * Currently: 31 entries, 16 bytes
> > > +      *
> > > +      * ANDROID_CONTROL_AE_TARGET_FPS_RANGE (int32 X 2) = 8 bytes
> > > +      * ANDROID_SENSOR_ROLLING_SHUTTER_SKEW (int64) = 8 bytes
> > > +      *
> > > +      * Total bytes: 16
> > > +      */
> > > +     std::unique_ptr<CameraMetadata> resultMetadata =
> > > +             std::make_unique<CameraMetadata>(31, 16);
> > >       if (!resultMetadata->isValid()) {
> > >               LOG(HAL, Error) << "Failed to allocate result metadata";
> > >               return nullptr;
> > > @@ -1536,8 +1906,7 @@ CameraDevice::getResultMetadata(const Camera3RequestDescriptor &descriptor) cons
> > >                                        entry.data.i32, 2);
> > >
> > >       found = settings.getEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, &entry);
> > > -     value = found ? *entry.data.u8 :
> > > -                     (uint8_t)ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
> > > +     value = found ? *entry.data.u8 : (uint8_t)ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
> > >       resultMetadata->addEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, value);
> > >
> > >       value = ANDROID_CONTROL_AE_STATE_CONVERGED;
> > > @@ -1620,95 +1989,6 @@ CameraDevice::getResultMetadata(const Camera3RequestDescriptor &descriptor) cons
> > >       resultMetadata->addEntry(ANDROID_SENSOR_ROLLING_SHUTTER_SKEW,
> > >                                rolling_shutter_skew);
> > >
> > > -     /* Add metadata tags reported by libcamera. */
> > > -     const int64_t timestamp = metadata.get(controls::SensorTimestamp).value_or(0);
> > > -     resultMetadata->addEntry(ANDROID_SENSOR_TIMESTAMP, timestamp);
> > > -
> > > -     const auto &pipelineDepth = metadata.get(controls::draft::PipelineDepth);
> > > -     if (pipelineDepth)
> > > -             resultMetadata->addEntry(ANDROID_REQUEST_PIPELINE_DEPTH,
> > > -                                      *pipelineDepth);
> > > -
> > > -     const auto &exposureTime = metadata.get(controls::ExposureTime);
> > > -     if (exposureTime)
> > > -             resultMetadata->addEntry(ANDROID_SENSOR_EXPOSURE_TIME,
> > > -                                      *exposureTime * 1000ULL);
> > > -
> > > -     const auto &frameDuration = metadata.get(controls::FrameDuration);
> > > -     if (frameDuration)
> > > -             resultMetadata->addEntry(ANDROID_SENSOR_FRAME_DURATION,
> > > -                                      *frameDuration * 1000);
> > > -
> > > -     const auto &faceDetectRectangles =
> > > -             metadata.get(controls::draft::FaceDetectFaceRectangles);
> > > -     if (faceDetectRectangles) {
> > > -             std::vector<int32_t> flatRectangles;
> > > -             for (const Rectangle &rect : *faceDetectRectangles) {
> > > -                     flatRectangles.push_back(rect.x);
> > > -                     flatRectangles.push_back(rect.y);
> > > -                     flatRectangles.push_back(rect.x + rect.width);
> > > -                     flatRectangles.push_back(rect.y + rect.height);
> > > -             }
> > > -             resultMetadata->addEntry(
> > > -                     ANDROID_STATISTICS_FACE_RECTANGLES, flatRectangles);
> > > -     }
> > > -
> > > -     const auto &faceDetectFaceScores =
> > > -             metadata.get(controls::draft::FaceDetectFaceScores);
> > > -     if (faceDetectRectangles && faceDetectFaceScores) {
> > > -             if (faceDetectFaceScores->size() != faceDetectRectangles->size()) {
> > > -                     LOG(HAL, Error) << "Pipeline returned wrong number of face scores; "
> > > -                                     << "Expected: " << faceDetectRectangles->size()
> > > -                                     << ", got: " << faceDetectFaceScores->size();
> > > -             }
> > > -             resultMetadata->addEntry(ANDROID_STATISTICS_FACE_SCORES,
> > > -                                      *faceDetectFaceScores);
> > > -     }
> > > -
> > > -     const auto &faceDetectFaceLandmarks =
> > > -             metadata.get(controls::draft::FaceDetectFaceLandmarks);
> > > -     if (faceDetectRectangles && faceDetectFaceLandmarks) {
> > > -             size_t expectedLandmarks = faceDetectRectangles->size() * 3;
> > > -             if (faceDetectFaceLandmarks->size() != expectedLandmarks) {
> > > -                     LOG(HAL, Error) << "Pipeline returned wrong number of face landmarks; "
> > > -                                     << "Expected: " << expectedLandmarks
> > > -                                     << ", got: " << faceDetectFaceLandmarks->size();
> > > -             }
> > > -
> > > -             std::vector<int32_t> androidLandmarks;
> > > -             for (const Point &landmark : *faceDetectFaceLandmarks) {
> > > -                     androidLandmarks.push_back(landmark.x);
> > > -                     androidLandmarks.push_back(landmark.y);
> > > -             }
> > > -             resultMetadata->addEntry(
> > > -                     ANDROID_STATISTICS_FACE_LANDMARKS, androidLandmarks);
> > > -     }
> > > -
> > > -     const auto &faceDetectFaceIds = metadata.get(controls::draft::FaceDetectFaceIds);
> > > -     if (faceDetectRectangles && faceDetectFaceIds) {
> > > -             if (faceDetectFaceIds->size() != faceDetectRectangles->size()) {
> > > -                     LOG(HAL, Error) << "Pipeline returned wrong number of face ids; "
> > > -                                     << "Expected: " << faceDetectRectangles->size()
> > > -                                     << ", got: " << faceDetectFaceIds->size();
> > > -             }
> > > -             resultMetadata->addEntry(ANDROID_STATISTICS_FACE_IDS, *faceDetectFaceIds);
> > > -     }
> > > -
> > > -     const auto &scalerCrop = metadata.get(controls::ScalerCrop);
> > > -     if (scalerCrop) {
> > > -             const Rectangle &crop = *scalerCrop;
> > > -             int32_t cropRect[] = {
> > > -                     crop.x, crop.y, static_cast<int32_t>(crop.width),
> > > -                     static_cast<int32_t>(crop.height),
> > > -             };
> > > -             resultMetadata->addEntry(ANDROID_SCALER_CROP_REGION, cropRect);
> > > -     }
> > > -
> > > -     const auto &testPatternMode = metadata.get(controls::draft::TestPatternMode);
> > > -     if (testPatternMode)
> > > -             resultMetadata->addEntry(ANDROID_SENSOR_TEST_PATTERN_MODE,
> > > -                                      *testPatternMode);
> > > -
> > >       /*
> > >        * Return the result metadata pack even is not valid: get() will return
> > >        * nullptr.
> > > diff --git a/src/android/camera_device.h b/src/android/camera_device.h
> > > index 3c46ff918..2aa6b2c09 100644
> > > --- a/src/android/camera_device.h
> > > +++ b/src/android/camera_device.h
> > > @@ -64,11 +64,13 @@ public:
> > >       const camera_metadata_t *constructDefaultRequestSettings(int type);
> > >       int configureStreams(camera3_stream_configuration_t *stream_list);
> > >       int processCaptureRequest(camera3_capture_request_t *request);
> > > +     void bufferComplete(libcamera::Request *request,
> > > +                         libcamera::FrameBuffer *buffer);
> > > +     void metadataAvailable(libcamera::Request *request,
> > > +                            const libcamera::ControlList &metadata);
> > >       void requestComplete(libcamera::Request *request);
> > >       void streamProcessingCompleteDelegate(StreamBuffer *bufferStream,
> > >                                             StreamBuffer::Status status);
> > > -     void streamProcessingComplete(StreamBuffer *bufferStream,
> > > -                                   StreamBuffer::Status status);
> > >
> > >  protected:
> > >       std::string logPrefix() const override;
> > > @@ -96,16 +98,26 @@ private:
> > >       void notifyError(uint32_t frameNumber, camera3_stream_t *stream,
> > >                        camera3_error_msg_code code) const;
> > >       int processControls(Camera3RequestDescriptor *descriptor);
> > > -     void completeDescriptor(Camera3RequestDescriptor *descriptor)
> > > -             LIBCAMERA_TSA_EXCLUDES(descriptorsMutex_);
> > > -     void sendCaptureResults() LIBCAMERA_TSA_REQUIRES(descriptorsMutex_);
> > > -     void sendCaptureResult(Camera3RequestDescriptor *request) const;
> > > +
> > > +     void checkAndCompleteReadyPartialResults(Camera3ResultDescriptor *result);
> > > +     void completePartialResultDescriptor(Camera3ResultDescriptor *result);
> > > +     void completeRequestDescriptor(Camera3RequestDescriptor *descriptor);
> > > +
> > > +     void streamProcessingComplete(StreamBuffer *bufferStream,
> > > +                                   StreamBuffer::Status status);
> > > +
> > > +     void sendCaptureResults();
> > > +     void sendCaptureResult(Camera3ResultDescriptor *result) const;
> > >       void setBufferStatus(StreamBuffer &buffer,
> > >                            StreamBuffer::Status status);
> > >       void generateJpegExifMetadata(Camera3RequestDescriptor *request,
> > >                                     StreamBuffer *buffer) const;
> > > -     std::unique_ptr<CameraMetadata> getResultMetadata(
> > > -             const Camera3RequestDescriptor &descriptor) const;
> > > +
> > > +     std::unique_ptr<CameraMetadata> getJpegPartialResultMetadata() const;
> > > +     std::unique_ptr<CameraMetadata> getPartialResultMetadata(
> > > +             const libcamera::ControlList &metadata) const;
> > > +     std::unique_ptr<CameraMetadata> getFinalResultMetadata(
> > > +             const CameraMetadata &settings) const;
> > >
> > >       unsigned int id_;
> > >       camera3_device_t camera3Device_;
> > > @@ -122,9 +134,14 @@ private:
> > >
> > >       std::vector<CameraStream> streams_;
> > >
> > > -     libcamera::Mutex descriptorsMutex_ LIBCAMERA_TSA_ACQUIRED_AFTER(stateMutex_);
> > > -     std::queue<std::unique_ptr<Camera3RequestDescriptor>> descriptors_
> > > -             LIBCAMERA_TSA_GUARDED_BY(descriptorsMutex_);
> > > +     /* Protects access to the pending requests and stream buffers. */
> > > +     libcamera::Mutex pendingRequestMutex_;
> > > +     std::queue<std::unique_ptr<Camera3RequestDescriptor>> pendingRequests_
> > > +             LIBCAMERA_TSA_GUARDED_BY(pendingRequestMutex_);
> > > +     std::map<CameraStream *, std::list<StreamBuffer *>> pendingStreamBuffers_
> > > +             LIBCAMERA_TSA_GUARDED_BY(pendingRequestMutex_);
> > > +
> > > +     std::list<Camera3ResultDescriptor *> pendingPartialResults_;
> > >
> > >       std::string maker_;
> > >       std::string model_;
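
The per-stream bookkeeping took me a moment. If I read the intent of
pendingStreamBuffers_ correctly, it is there so that buffers of the
same camera3 stream get returned to the framework in the order their
requests were queued, even when they complete out of order. As a
standalone model of that idea (stripped-down stand-ins for the real
types, not the patch's code; buffers are assumed to be appended to
their stream's list when the request is queued):

	#include <list>
	#include <map>
	#include <mutex>

	struct CameraStream;
	struct StreamBuffer { bool ready = false; };

	struct PendingStreamBuffers {
		std::mutex mutex;
		std::map<CameraStream *, std::list<StreamBuffer *>> perStream;

		/*
		 * Release buffers in per-stream FIFO order: a completed
		 * buffer is handed back only once everything queued before
		 * it on the same stream has been handed back too.
		 */
		void complete(CameraStream *stream, StreamBuffer *buffer)
		{
			std::lock_guard<std::mutex> lock(mutex);
			buffer->ready = true;

			auto &fifo = perStream[stream];
			while (!fifo.empty() && fifo.front()->ready) {
				/* Return fifo.front() to the framework. */
				fifo.pop_front();
			}
		}
	};

Is that the guarantee you're after here ?
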
> > > diff --git a/src/android/camera_request.cpp b/src/android/camera_request.cpp
> > > index a9240a83c..20e1b3e54 100644
> > > --- a/src/android/camera_request.cpp
> > > +++ b/src/android/camera_request.cpp
> > > @@ -43,25 +43,25 @@ using namespace libcamera;
> > >   * │  processCaptureRequest(camera3_capture_request_t request)   │
> > >   * │                                                             │
> > >   * │   - Create Camera3RequestDescriptor tracking this request   │
> > > - * │   - Streams requiring post-processing are stored in the     │
> > > - * │     pendingStreamsToProcess map                             │
> > > + * │   - Buffers requiring post-processing are marked by the     │
> > > + * │     CameraStream::Type as Mapped or Internal                │
> > >   * │   - Add this Camera3RequestDescriptor to descriptors' queue │
> > > - * │     CameraDevice::descriptors_                              │
> > > - * │                                                             │ ┌─────────────────────────┐
> > > - * │   - Queue the capture request to libcamera core ────────────┤►│libcamera core           │
> > > - * │                                                             │ ├─────────────────────────┤
> > > - * │                                                             │ │- Capture from Camera    │
> > > - * │                                                             │ │                         │
> > > - * │                                                             │ │- Emit                   │
> > > - * │                                                             │ │  Camera::requestComplete│
> > > - * │  requestCompleted(Request *request) ◄───────────────────────┼─┼────                     │
> > > - * │                                                             │ │                         │
> > > - * │   - Check request completion status                         │ └─────────────────────────┘
> > > + * │     CameraDevice::pendingRequests_                          │
> > > + * │                                                             │ ┌────────────────────────────────┐
> > > + * │   - Queue the capture request to libcamera core ────────────┤►│libcamera core                  │
> > > + * │                                                             │ ├────────────────────────────────┤
> > > + * │                                                             │ │- Capture from Camera           │
> > > + * │                                                             │ │                                │
> > > + * │                                                             │ │- Emit                          │
> > > + * │                                                             │ │  Camera::partialResultCompleted│
> > > + * │  partialResultComplete(Request *request, Result *result) ◄──┼─┼────                            │
> > > + * │                                                             │ │                                │
> > > + * │   - Check request completion status                         │ └────────────────────────────────┘
> > >   * │                                                             │
> > > - * │   - if (pendingStreamsToProcess > 0)                        │
> > > - * │      Queue all entries from pendingStreamsToProcess         │
> > > + * │   - if (pendingBuffersToProcess > 0)                        │
> > > + * │      Queue all entries from pendingBuffersToProcess         │
> > >   * │    else                                   │                 │
> > > - * │      completeDescriptor()                 │                 └──────────────────────┐
> > > + * │      completeResultDescriptor()           │                 └──────────────────────┐
> > >   * │                                           │                                        │
> > >   * │                ┌──────────────────────────┴───┬──────────────────┐                 │
> > >   * │                │                              │                  │                 │
> > > @@ -94,10 +94,10 @@ using namespace libcamera;
> > >   * │ |                                       |     |              |                     │
> > >   * │ | - Check and set buffer status         |     |     ....     |                     │
> > >   * │ |   - Remove post-processing entry      |     |              |                     │
> > > - * │ |   from pendingStreamsToProcess        |     |              |                     │
> > > + * │ |   from pendingBuffersToProcess        |     |              |                     │
> > >   * │ |                                       |     |              |                     │
> > > - * │ | - if (pendingStreamsToProcess.empty())|     |              |                     │
> > > - * │ |        completeDescriptor             |     |              |                     │
> > > + * │ | - if (pendingBuffersToProcess.empty())|     |              |                     │
> > > + * │ |        completeResultDescriptor       |     |              |                     │
> > >   * │ |                                       |     |              |                     │
> > >   * │ +---------------------------------------+     +--------------+                     │
> > >   * │                                                                                    │
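
The updated diagram reads well. To check my understanding of the
completion branch it describes, per partial result the decision boils
down to something like this (just a sketch of what the diagram says,
using the names declared in camera_device.h, not the actual code in the
patch):

	if (!result->pendingBuffersToProcess_.empty()) {
		/* Queue the remaining buffers to their post-processors. */
		return;
	}

	/* Nothing left to post-process: this partial result is complete. */
	completePartialResultDescriptor(result);

I assume completePartialResultDescriptor() then checks, together with
pendingPartialResults_, what can actually be sent out in order.
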
> > > @@ -148,6 +148,19 @@ Camera3RequestDescriptor::~Camera3RequestDescriptor()
> > >               sourceStream->putBuffer(frameBuffer);
> > >  }
> > >
> > > +/*
> > > + * \class Camera3ResultDescriptor
> > > + *
> > > + * A utility class that groups information about a capture result to be sent
> > > + * to the framework.
> > > + */
> > > +Camera3ResultDescriptor::Camera3ResultDescriptor(Camera3RequestDescriptor *request)
> > > +     : request_(request), metadataPackIndex_(1), completed_(false)
> > > +{
> > > +}
> > > +
> > > +Camera3ResultDescriptor::~Camera3ResultDescriptor() = default;
> > > +
> > >  /**
> > >   * \class StreamBuffer
> > >   * \brief Group information for per-stream buffer of Camera3RequestDescriptor
> > > @@ -182,6 +195,9 @@ Camera3RequestDescriptor::~Camera3RequestDescriptor()
> > >   *
> > >   * \var StreamBuffer::request
> > >   * \brief Back pointer to the Camera3RequestDescriptor to which the StreamBuffer belongs
> > > + *
> > > + * \var StreamBuffer::result
> > > + * \brief Back pointer to the Camera3ResultDescriptor to which the StreamBuffer belongs
> > >   */
> > >  StreamBuffer::StreamBuffer(
> > >       CameraStream *cameraStream, const camera3_stream_buffer_t &buffer,
> > > diff --git a/src/android/camera_request.h b/src/android/camera_request.h
> > > index bd87b36fd..18386a905 100644
> > > --- a/src/android/camera_request.h
> > > +++ b/src/android/camera_request.h
> > > @@ -26,6 +26,7 @@
> > >  class CameraBuffer;
> > >  class CameraStream;
> > >
> > > +class Camera3ResultDescriptor;
> > >  class Camera3RequestDescriptor;
> > >
> > >  class StreamBuffer
> > > @@ -57,41 +58,62 @@ public:
> > >       const libcamera::FrameBuffer *srcBuffer = nullptr;
> > >       std::unique_ptr<CameraBuffer> dstBuffer;
> > >       std::optional<JpegExifMetadata> jpegExifMetadata;
> > > +     Camera3ResultDescriptor *result;
> > >       Camera3RequestDescriptor *request;
> > >
> > >  private:
> > >       LIBCAMERA_DISABLE_COPY(StreamBuffer)
> > >  };
> > >
> > > +class Camera3ResultDescriptor
> > > +{
> > > +public:
> > > +     Camera3ResultDescriptor(Camera3RequestDescriptor *request);
> > > +     ~Camera3ResultDescriptor();
> > > +
> > > +     Camera3RequestDescriptor *request_;
> > > +     uint32_t metadataPackIndex_;
> > > +
> > > +     std::unique_ptr<CameraMetadata> resultMetadata_;
> > > +     std::vector<StreamBuffer *> buffers_;
> > > +
> > > +     /* Keeps track of buffers waiting for post-processing. */
> > > +     std::list<StreamBuffer *> pendingBuffersToProcess_;
> > > +
> > > +     bool completed_;
> > > +
> > > +private:
> > > +     LIBCAMERA_DISABLE_COPY(Camera3ResultDescriptor)
> > > +};
> > > +
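
For my own understanding of how this descriptor reaches the framework:
I assume sendCaptureResult() now fills one camera3_capture_result_t per
Camera3ResultDescriptor, with metadataPackIndex_ ending up in
partial_result. Roughly (a sketch on my side, buffer conversion left
out, not necessarily what the patch does):

	camera3_capture_result_t captureResult = {};
	captureResult.frame_number = result->request_->frameNumber_;
	captureResult.partial_result = result->metadataPackIndex_;
	captureResult.result = result->resultMetadata_
			     ? result->resultMetadata_->get()
			     : nullptr;

	/*
	 * buffers_ would be converted to camera3_stream_buffer_t and plugged
	 * into output_buffers / num_output_buffers here.
	 */

	callbacks_->process_capture_result(callbacks_, &captureResult);

Did I get the mapping right ?
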
> > >  class Camera3RequestDescriptor
> > >  {
> > >  public:
> > >       enum class Status {
> > > +             Pending,
> > >               Success,
> > >               Error,
> > >       };
> > >
> > > -     /* Keeps track of streams requiring post-processing. */
> > > -     std::map<CameraStream *, StreamBuffer *> pendingStreamsToProcess_;
> > > -
> > >       Camera3RequestDescriptor(libcamera::Camera *camera,
> > >                                const camera3_capture_request_t *camera3Request);
> > >       ~Camera3RequestDescriptor();
> > >
> > > -     bool isPending() const { return !complete_; }
> > > -
> > >       uint32_t frameNumber_ = 0;
> > >
> > >       std::vector<StreamBuffer> buffers_;
> > >
> > >       CameraMetadata settings_;
> > >       std::unique_ptr<libcamera::Request> request_;
> > > -     std::unique_ptr<CameraMetadata> resultMetadata_;
> > >
> > >       std::map<CameraStream *, libcamera::FrameBuffer *> internalBuffers_;
> > >
> > >       bool complete_ = false;
> > > -     Status status_ = Status::Success;
> > > +     Status status_ = Status::Pending;
> > > +
> > > +     uint32_t nextPartialResultIndex_ = 1;
> > > +     std::unique_ptr<Camera3ResultDescriptor> finalResult_;
> > > +     std::vector<std::unique_ptr<Camera3ResultDescriptor>> partialResults_;
> > >
> > >  private:
> > >       LIBCAMERA_DISABLE_COPY(Camera3RequestDescriptor)
> > > diff --git a/src/android/camera_stream.cpp b/src/android/camera_stream.cpp
> > > index 53f292d4b..7837fd7aa 100644
> > > --- a/src/android/camera_stream.cpp
> > > +++ b/src/android/camera_stream.cpp
> > > @@ -121,8 +121,8 @@ int CameraStream::configure()
> > >                               else
> > >                                       bufferStatus = StreamBuffer::Status::Error;
> > >
> > > -                             cameraDevice_->streamProcessingComplete(streamBuffer,
> > > -                                                                     bufferStatus);
> > > +                             cameraDevice_->streamProcessingCompleteDelegate(streamBuffer,
> > > +                                                                             bufferStatus);
> > >                       });
> > >
> > >               worker_->start();
> > > diff --git a/src/android/jpeg/post_processor_jpeg.cpp b/src/android/jpeg/post_processor_jpeg.cpp
> > > index 48782b574..671e560ec 100644
> > > --- a/src/android/jpeg/post_processor_jpeg.cpp
> > > +++ b/src/android/jpeg/post_processor_jpeg.cpp
> > > @@ -119,7 +119,7 @@ void PostProcessorJpeg::process(StreamBuffer *streamBuffer)
> > >       ASSERT(jpegExifMetadata.has_value());
> > >
> > >       const CameraMetadata &requestMetadata = streamBuffer->request->settings_;
> > > -     CameraMetadata *resultMetadata = streamBuffer->request->resultMetadata_.get();
> > > +     CameraMetadata *resultMetadata = streamBuffer->result->resultMetadata_.get();
> > >       camera_metadata_ro_entry_t entry;
> > >       int ret;
> > >
> > > --
> > > 2.47.0.338.g60cca15819-goog
> > >

