<div dir="ltr"><div dir="ltr">Hi Jacopo,<div><br></div><div>Thank you for your feedback.</div></div><br><div class="gmail_quote"><div dir="ltr" class="gmail_attr">On Fri, 29 Oct 2021 at 09:48, Jacopo Mondi <<a href="mailto:jacopo@jmondi.org">jacopo@jmondi.org</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">Hi Naush,<br>
thanks for the update!<br>
<br>
On Thu, Oct 28, 2021 at 09:46:44AM +0100, Naushir Patuck wrote:<br>
> Switch the pipeline handler to use the new Unicam media controller based driver.<br>
> With this change, we directly talk to the sensor device driver to set controls<br>
> and set/get formats in the pipeline handler.<br>
><br>
> This change requires the accompanying Raspberry Pi linux kernel change at<br>
> <a href="https://github.com/raspberrypi/linux/pull/4645" rel="noreferrer" target="_blank">https://github.com/raspberrypi/linux/pull/4645</a>. If this kernel change is not<br>
> present, the pipeline handler will fail to run with an error message informing<br>
> the user to update the kernel build.<br>
><br>
> Signed-off-by: Naushir Patuck <<a href="mailto:naush@raspberrypi.com" target="_blank">naush@raspberrypi.com</a>><br>
> ---<br>
> .../pipeline/raspberrypi/raspberrypi.cpp | 167 ++++++++++++------<br>
> 1 file changed, 112 insertions(+), 55 deletions(-)<br>
><br>
> diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp<br>
> index e078b8b9a811..8e3e96a4359f 100644<br>
> --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp<br>
> +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp<br>
> @@ -26,6 +26,7 @@<br>
> #include <libcamera/base/utils.h><br>
><br>
> #include <linux/bcm2835-isp.h><br>
> +#include <linux/media-bus-format.h><br>
> #include <linux/videodev2.h><br>
><br>
> #include "libcamera/internal/bayer_format.h"<br>
> @@ -48,6 +49,53 @@ LOG_DEFINE_CATEGORY(RPI)<br>
><br>
> namespace {<br>
><br>
> +/* Map of mbus codes to supported sizes reported by the sensor. */<br>
> +using SensorFormats = std::map<unsigned int, std::vector<Size>>;<br>
> +<br>
> +SensorFormats populateSensorFormats(std::unique_ptr<CameraSensor> &sensor)<br>
> +{<br>
> + SensorFormats formats;<br>
> +<br>
> + for (auto const mbusCode : sensor->mbusCodes())<br>
> + formats.emplace(mbusCode, sensor->sizes(mbusCode));<br>
> +<br>
> + return formats;<br>
> +}<br>
<br>
We have V4L2Subdevice::Formats defined as<br>
using Formats = std::map&lt;unsigned int, std::vector&lt;SizeRange&gt;&gt;;<br>
<br>
but we don't expose the v4l2 subdevice formats from CameraSensor, and<br>
I think it still makes sense as CameraSensor::sizes() filters and sorts<br>
sizes instead of returning the raw subdevice SizeRange.<br>
<br>
So I think defining this part makes sense here even if it's a pattern<br>
which I assume is repeated in many pipeline handlers.<br></blockquote><div><br></div><div>Indeed. I would have used the v4l2 subdevice formats from CameraSensor, but</div><div>it was not a public member. Not a problem though, as it is trivial for pipeline</div><div>handlers to do the equivalent of my code above to get the formats list.</div><div> </div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">
<br>
> +<br>
> +PixelFormat mbusCodeToPixelFormat(unsigned int mbus_code,<br>
> + BayerFormat::Packing packingReq)<br>
> +{<br>
> + BayerFormat bayer = BayerFormat::fromMbusCode(mbus_code);<br>
> +<br>
> + ASSERT(bayer.isValid());<br>
> +<br>
> + bayer.packing = packingReq;<br>
> + PixelFormat pix = bayer.toPixelFormat();<br>
> +<br>
> + /*<br>
> + * Not all formats (e.g. 8-bit or 16-bit Bayer formats) can have packed<br>
> + * variants. So if the PixelFormat returns as invalid, use the non-packed<br>
> + * conversion instead.<br>
> + */<br>
> + if (!pix.isValid()) {<br>
> + bayer.packing = BayerFormat::Packing::None;<br>
> + pix = bayer.toPixelFormat();<br>
> + }<br>
> +<br>
> + return pix;<br>
> +}<br>
> +<br>
> +V4L2DeviceFormat toV4L2DeviceFormat(const V4L2SubdeviceFormat &format,<br>
> + BayerFormat::Packing packingReq)<br>
> +{<br>
> + const PixelFormat pix = mbusCodeToPixelFormat(format.mbus_code, packingReq);<br>
> + V4L2DeviceFormat deviceFormat;<br>
> +<br>
> + deviceFormat.fourcc = V4L2PixelFormat::fromPixelFormat(pix);<br>
> + deviceFormat.size = format.size;<br>
> + return deviceFormat;<br>
> +}<br>
> +<br>
> bool isRaw(PixelFormat &pixFmt)<br>
> {<br>
> /*<br>
> @@ -74,11 +122,10 @@ double scoreFormat(double desired, double actual)<br>
> return score;<br>
> }<br>
><br>
> -V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,<br>
> - const Size &req)<br>
> +V4L2SubdeviceFormat findBestFormat(const SensorFormats &formatsMap, const Size &req)<br>
> {<br>
> double bestScore = std::numeric_limits<double>::max(), score;<br>
> - V4L2DeviceFormat bestMode;<br>
> + V4L2SubdeviceFormat bestFormat;<br>
><br>
> #define PENALTY_AR 1500.0<br>
> #define PENALTY_8BIT 2000.0<br>
> @@ -88,19 +135,19 @@ V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,<br>
><br>
> /* Calculate the closest/best mode from the user requested size. */<br>
> for (const auto &iter : formatsMap) {<br>
> - V4L2PixelFormat v4l2Format = iter.first;<br>
> - const PixelFormatInfo &info = PixelFormatInfo::info(v4l2Format);<br>
> + const unsigned int mbusCode = iter.first;<br>
> + const PixelFormat format = mbusCodeToPixelFormat(mbusCode,<br>
> + BayerFormat::Packing::None);<br>
> + const PixelFormatInfo &info = PixelFormatInfo::info(format);<br>
><br>
> - for (const SizeRange &sz : iter.second) {<br>
> - double modeWidth = sz.contains(req) ? req.width : sz.max.width;<br>
> - double modeHeight = sz.contains(req) ? req.height : sz.max.height;<br>
> + for (const Size &size : iter.second) {<br>
> double reqAr = static_cast<double>(req.width) / req.height;<br>
> - double modeAr = modeWidth / modeHeight;<br>
> + double fmtAr = size.width / size.height;<br>
><br>
> /* Score the dimensions for closeness. */<br>
> - score = scoreFormat(req.width, modeWidth);<br>
> - score += scoreFormat(req.height, modeHeight);<br>
> - score += PENALTY_AR * scoreFormat(reqAr, modeAr);<br>
> + score = scoreFormat(req.width, size.width);<br>
> + score += scoreFormat(req.height, size.height);<br>
> + score += PENALTY_AR * scoreFormat(reqAr, fmtAr);<br>
><br>
> /* Add any penalties... this is not an exact science! */<br>
> if (!info.packed)<br>
> @@ -115,18 +162,18 @@ V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,<br>
><br>
> if (score <= bestScore) {<br>
> bestScore = score;<br>
> - bestMode.fourcc = v4l2Format;<br>
> - bestMode.size = Size(modeWidth, modeHeight);<br>
> + bestFormat.mbus_code = mbusCode;<br>
> + bestFormat.size = size;<br>
> }<br>
><br>
> - LOG(RPI, Info) << "Mode: " << modeWidth << "x" << modeHeight<br>
> - << " fmt " << v4l2Format.toString()<br>
> + LOG(RPI, Info) << "Format: " << size.toString()<br>
> + << " fmt " << format.toString()<br>
> << " Score: " << score<br>
> << " (best " << bestScore << ")";<br>
> }<br>
> }<br>
><br>
> - return bestMode;<br>
> + return bestFormat;<br>
> }<br>
><br>
> enum class Unicam : unsigned int { Image, Embedded };<br>
> @@ -170,6 +217,7 @@ public:<br>
> std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;<br>
><br>
> std::unique_ptr<CameraSensor> sensor_;<br>
> + SensorFormats sensorFormats_;<br>
> /* Array of Unicam and ISP device streams and associated buffers/streams. */<br>
> RPi::Device<Unicam, 2> unicam_;<br>
> RPi::Device<Isp, 4> isp_;<br>
> @@ -352,9 +400,10 @@ CameraConfiguration::Status RPiCameraConfiguration::validate()<br>
> * Calculate the best sensor mode we can use based on<br>
> * the user request.<br>
> */<br>
> - V4L2VideoDevice::Formats fmts = data_->unicam_[Unicam::Image].dev()->formats();<br>
> - V4L2DeviceFormat sensorFormat = findBestMode(fmts, cfg.size);<br>
> - int ret = data_->unicam_[Unicam::Image].dev()->tryFormat(&sensorFormat);<br>
> + V4L2SubdeviceFormat sensorFormat = findBestFormat(data_->sensorFormats_, cfg.size);<br>
> + V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat,<br>
> + BayerFormat::Packing::CSI2);<br>
> + int ret = data_->unicam_[Unicam::Image].dev()->tryFormat(&unicamFormat);<br>
> if (ret)<br>
> return Invalid;<br>
><br>
> @@ -366,7 +415,7 @@ CameraConfiguration::Status RPiCameraConfiguration::validate()<br>
> * fetch the "native" (i.e. untransformed) Bayer order,<br>
> * because the sensor may currently be flipped!<br>
> */<br>
> - V4L2PixelFormat fourcc = sensorFormat.fourcc;<br>
> + V4L2PixelFormat fourcc = unicamFormat.fourcc;<br>
> if (data_->flipsAlterBayerOrder_) {<br>
> BayerFormat bayer = BayerFormat::fromV4L2PixelFormat(fourcc);<br>
> bayer.order = data_->nativeBayerOrder_;<br>
> @@ -375,15 +424,15 @@ CameraConfiguration::Status RPiCameraConfiguration::validate()<br>
> }<br>
><br>
> PixelFormat sensorPixFormat = fourcc.toPixelFormat();<br>
<br>
This is not the sensorPixFormat anymore, but rather the unicam one,<br>
right ?<br></blockquote><div><br></div><div>Yes, that's correct! I will rename this variable to unicamPixFormat to </div><div>avoid any confusion in the next version.</div><div> </div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">
<br>
The rest looks good to me! Thanks<br>
Reviewed-by: Jacopo Mondi <<a href="mailto:jacopo@jmondi.org" target="_blank">jacopo@jmondi.org</a>><br></blockquote><div><br></div><div>Thanks!</div><div>Naush </div><div> </div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">
<br>
> - if (cfg.size != sensorFormat.size ||<br>
> + if (cfg.size != unicamFormat.size ||<br>
> cfg.pixelFormat != sensorPixFormat) {<br>
> - cfg.size = sensorFormat.size;<br>
> + cfg.size = unicamFormat.size;<br>
> cfg.pixelFormat = sensorPixFormat;<br>
> status = Adjusted;<br>
> }<br>
><br>
> - cfg.stride = sensorFormat.planes[0].bpl;<br>
> - cfg.frameSize = sensorFormat.planes[0].size;<br>
> + cfg.stride = unicamFormat.planes[0].bpl;<br>
> + cfg.frameSize = unicamFormat.planes[0].size;<br>
><br>
> rawCount++;<br>
> } else {<br>
> @@ -472,7 +521,7 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,<br>
> {<br>
> RPiCameraData *data = cameraData(camera);<br>
> CameraConfiguration *config = new RPiCameraConfiguration(data);<br>
> - V4L2DeviceFormat sensorFormat;<br>
> + V4L2SubdeviceFormat sensorFormat;<br>
> unsigned int bufferCount;<br>
> PixelFormat pixelFormat;<br>
> V4L2VideoDevice::Formats fmts;<br>
> @@ -487,9 +536,9 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,<br>
> switch (role) {<br>
> case StreamRole::Raw:<br>
> size = data->sensor_->resolution();<br>
> - fmts = data->unicam_[Unicam::Image].dev()->formats();<br>
> - sensorFormat = findBestMode(fmts, size);<br>
> - pixelFormat = sensorFormat.fourcc.toPixelFormat();<br>
> + sensorFormat = findBestFormat(data->sensorFormats_, size);<br>
> + pixelFormat = mbusCodeToPixelFormat(sensorFormat.mbus_code,<br>
> + BayerFormat::Packing::CSI2);<br>
> ASSERT(pixelFormat.isValid());<br>
> bufferCount = 2;<br>
> rawCount++;<br>
> @@ -572,6 +621,7 @@ int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)<br>
> for (auto const stream : data->streams_)<br>
> stream->reset();<br>
><br>
> + BayerFormat::Packing packing = BayerFormat::Packing::CSI2;<br>
> Size maxSize, sensorSize;<br>
> unsigned int maxIndex = 0;<br>
> bool rawStream = false;<br>
> @@ -590,6 +640,8 @@ int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)<br>
> */<br>
> sensorSize = cfg.size;<br>
> rawStream = true;<br>
> + /* Check if the user has explicitly set an unpacked format. */<br>
> + packing = BayerFormat::fromPixelFormat(cfg.pixelFormat).packing;<br>
> } else {<br>
> if (cfg.size > maxSize) {<br>
> maxSize = config->at(i).size;<br>
> @@ -615,24 +667,21 @@ int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)<br>
> }<br>
><br>
> /* First calculate the best sensor mode we can use based on the user request. */<br>
> - V4L2VideoDevice::Formats fmts = data->unicam_[Unicam::Image].dev()->formats();<br>
> - V4L2DeviceFormat sensorFormat = findBestMode(fmts, rawStream ? sensorSize : maxSize);<br>
> -<br>
> - ret = data->unicam_[Unicam::Image].dev()->setFormat(&sensorFormat);<br>
> + V4L2SubdeviceFormat sensorFormat = findBestFormat(data->sensorFormats_, rawStream ? sensorSize : maxSize);<br>
> + ret = data->sensor_->setFormat(&sensorFormat);<br>
> if (ret)<br>
> return ret;<br>
><br>
> - /*<br>
> - * The control ranges associated with the sensor may need updating<br>
> - * after a format change.<br>
> - * \todo Use the CameraSensor::setFormat API instead.<br>
> - */<br>
> - data->sensor_->updateControlInfo();<br>
> + V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat, packing);<br>
> + ret = data->unicam_[Unicam::Image].dev()->setFormat(&unicamFormat);<br>
> + if (ret)<br>
> + return ret;<br>
><br>
> LOG(RPI, Info) << "Sensor: " << camera->id()<br>
> - << " - Selected mode: " << sensorFormat.toString();<br>
> + << " - Selected sensor format: " << sensorFormat.toString()<br>
> + << " - Selected unicam format: " << unicamFormat.toString();<br>
><br>
> - ret = data->isp_[Isp::Input].dev()->setFormat(&sensorFormat);<br>
> + ret = data->isp_[Isp::Input].dev()->setFormat(&unicamFormat);<br>
> if (ret)<br>
> return ret;<br>
><br>
> @@ -754,8 +803,8 @@ int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)<br>
> data->ispMinCropSize_ = testCrop.size();<br>
><br>
> /* Adjust aspect ratio by providing crops on the input image. */<br>
> - Size size = sensorFormat.size.boundedToAspectRatio(maxSize);<br>
> - Rectangle crop = size.centeredTo(Rectangle(sensorFormat.size).center());<br>
> + Size size = unicamFormat.size.boundedToAspectRatio(maxSize);<br>
> + Rectangle crop = size.centeredTo(Rectangle(unicamFormat.size).center());<br>
> data->ispCrop_ = crop;<br>
><br>
> data->isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &crop);<br>
> @@ -769,8 +818,11 @@ int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)<br>
> * supports it.<br>
> */<br>
> if (data->sensorMetadata_) {<br>
> - format = {};<br>
> + V4L2SubdeviceFormat embeddedFormat;<br>
> +<br>
> + data->sensor_->device()->getFormat(1, &embeddedFormat);<br>
> format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);<br>
> + format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;<br>
><br>
> LOG(RPI, Debug) << "Setting embedded data format.";<br>
> ret = data->unicam_[Unicam::Embedded].dev()->setFormat(&format);<br>
> @@ -1000,6 +1052,8 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)<br>
> if (data->sensor_->init())<br>
> return false;<br>
><br>
> + data->sensorFormats_ = populateSensorFormats(data->sensor_);<br>
> +<br>
> ipa::RPi::SensorConfig sensorConfig;<br>
> if (data->loadIPA(&sensorConfig)) {<br>
> LOG(RPI, Error) << "Failed to load a suitable IPA library";<br>
> @@ -1026,6 +1080,11 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)<br>
> return false;<br>
> }<br>
><br>
> + if (!data->unicam_[Unicam::Image].dev()->caps().hasMediaController()) {<br>
> + LOG(RPI, Error) << "Unicam driver does not use the MediaController, please update your kernel!";<br>
> + return false;<br>
> + }<br>
> +<br>
> /*<br>
> * Setup our delayed control writer with the sensor default<br>
> * gain and exposure delays. Mark VBLANK for priority write.<br>
> @@ -1035,7 +1094,7 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)<br>
> { V4L2_CID_EXPOSURE, { sensorConfig.exposureDelay, false } },<br>
> { V4L2_CID_VBLANK, { sensorConfig.vblankDelay, true } }<br>
> };<br>
> - data->delayedCtrls_ = std::make_unique<DelayedControls>(data->unicam_[Unicam::Image].dev(), params);<br>
> + data->delayedCtrls_ = std::make_unique<DelayedControls>(data->sensor_->device(), params);<br>
> data->sensorMetadata_ = sensorConfig.sensorMetadata;<br>
><br>
> /* Register the controls that the Raspberry Pi IPA can handle. */<br>
> @@ -1062,15 +1121,14 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)<br>
> * As part of answering the final question, we reset the camera to<br>
> * no transform at all.<br>
> */<br>
> -<br>
> - V4L2VideoDevice *dev = data->unicam_[Unicam::Image].dev();<br>
> - const struct v4l2_query_ext_ctrl *hflipCtrl = dev->controlInfo(V4L2_CID_HFLIP);<br>
> + const V4L2Subdevice *sensor = data->sensor_->device();<br>
> + const struct v4l2_query_ext_ctrl *hflipCtrl = sensor->controlInfo(V4L2_CID_HFLIP);<br>
> if (hflipCtrl) {<br>
> /* We assume it will support vflips too... */<br>
> data->supportsFlips_ = true;<br>
> data->flipsAlterBayerOrder_ = hflipCtrl->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT;<br>
><br>
> - ControlList ctrls(dev->controls());<br>
> + ControlList ctrls(data->sensor_->controls());<br>
> ctrls.set(V4L2_CID_HFLIP, 0);<br>
> ctrls.set(V4L2_CID_VFLIP, 0);<br>
> data->setSensorControls(ctrls);<br>
> @@ -1078,9 +1136,8 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)<br>
><br>
> /* Look for a valid Bayer format. */<br>
> BayerFormat bayerFormat;<br>
> - for (const auto &iter : dev->formats()) {<br>
> - V4L2PixelFormat v4l2Format = iter.first;<br>
> - bayerFormat = BayerFormat::fromV4L2PixelFormat(v4l2Format);<br>
> + for (const auto &iter : data->sensorFormats_) {<br>
> + bayerFormat = BayerFormat::fromMbusCode(iter.first);<br>
> if (bayerFormat.isValid())<br>
> break;<br>
> }<br>
> @@ -1263,7 +1320,7 @@ int RPiCameraData::configureIPA(const CameraConfiguration *config)<br>
> }<br>
> }<br>
><br>
> - entityControls.emplace(0, unicam_[Unicam::Image].dev()->controls());<br>
> + entityControls.emplace(0, sensor_->controls());<br>
> entityControls.emplace(1, isp_[Isp::Input].dev()->controls());<br>
><br>
> /* Always send the user transform to the IPA. */<br>
> @@ -1387,10 +1444,10 @@ void RPiCameraData::setSensorControls(ControlList &controls)<br>
> ControlList vblank_ctrl;<br>
><br>
> vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));<br>
> - unicam_[Unicam::Image].dev()->setControls(&vblank_ctrl);<br>
> + sensor_->setControls(&vblank_ctrl);<br>
> }<br>
><br>
> - unicam_[Unicam::Image].dev()->setControls(&controls);<br>
> + sensor_->setControls(&controls);<br>
> }<br>
><br>
> void RPiCameraData::unicamBufferDequeue(FrameBuffer *buffer)<br>
> --<br>
> 2.25.1<br>
><br>
</blockquote></div></div>