[libcamera-devel] [PATCH v4] py: cam.py: Provide live graph of request metadata
Daniel Oakley
daniel.oakley at ideasonboard.com
Fri Mar 3 18:47:42 CET 2023
Hey, sorry for bumping, but I would love for this to get a proper code
review. It would be nice to get this code into cam.py so people can
start using it if they need it :)
Thanks,
Daniel Oakley <daniel.oakley at ideasonboard.com>
On 02/02/2023 15:03, Daniel Oakley wrote:
> Metadata is very useful when improving specific camera configurations.
> Currently, there is an argument to display the metadata in text form,
> however this can be hard to visualise and spot changes or patterns over
> time. Therefore this proposed patch adds an argument to display this
> metadata in graph form.
>
> The metadata graph has 4 optional parameters:
> - refresh, number of times a second to update the graph
> - buffer, amount of historic/previous data to show
> - graphs, number of graphs to split the metadata between
> - autoscale, whether or not to autoscale the axis so all the data fits
>
> Displaying the graph does have some performance penalty, however this
> has been mostly mitigated through the refresh parameter. Despite this,
> graphing might not be the best of ideas when using the camera to record
> or save data. This is mainly for debugging purposes.
>
> Suggested-by: Kieran Bingham <kieran.bingham at ideasonboard.com>
> Signed-off-by: Daniel Oakley <daniel.oakley at ideasonboard.com>
> ---
> This is the 4th version of my graph-metadata patch.
> v3: hopefully addressing the issue of there being a lot of additional
> code in the main cam.py which is optional. Most of the code has been
> moved into a cam_metadata_graph.py file, which is imported at the start
> of cam.py (as opposed to importing it twice throughout the code to make
> the GraphDrawer and process the arguments). I have slightly tweaked the
> error handling with the metadata-graph's arguments to catch a division
> by zero error for refresh=0 and added a comment explanation for the
> metadata-graph argument processor.
> v4: I forgot to add the copyright information to the new file, hopefully
> I did it roughly correctly.
> There should, again, be no functional change between v1, v2 and v3.
>
> src/py/cam/cam.py | 22 +++
> src/py/cam/cam_metadata_graph.py | 236 +++++++++++++++++++++++++++++++
> 2 files changed, 258 insertions(+)
> create mode 100644 src/py/cam/cam_metadata_graph.py
>
> diff --git a/src/py/cam/cam.py b/src/py/cam/cam.py
> index 967a72f5..e5f6910a 100755
> --- a/src/py/cam/cam.py
> +++ b/src/py/cam/cam.py
> @@ -6,6 +6,7 @@
> from typing import Any
> import argparse
> import binascii
> +import cam_metadata_graph as metadata_graph
> import libcamera as libcam
> import libcamera.utils
> import sys
> @@ -21,6 +22,7 @@ class CameraContext:
> opt_strict_formats: bool
> opt_crc: bool
> opt_metadata: bool
> + opt_metadata_graph: dict
> opt_save_frames: bool
> opt_capture: int
>
> @@ -39,6 +41,7 @@ class CameraContext:
> self.id = 'cam' + str(idx)
> self.reqs_queued = 0
> self.reqs_completed = 0
> + self.graph_drawer = None
>
> def do_cmd_list_props(self):
> print('Properties for', self.id)
> @@ -171,6 +174,9 @@ class CameraContext:
> self.stream_names[stream] = 'stream' + str(idx)
> print('{}-{}: stream config {}'.format(self.id, self.stream_names[stream], stream.configuration))
>
> + def initialize_graph_drawer(self, graph_arguments):
> + self.graph_drawer = metadata_graph.GraphDrawer(graph_arguments, self.camera.id)
> +
> def alloc_buffers(self):
> allocator = libcam.FrameBufferAllocator(self.camera)
>
> @@ -271,6 +277,12 @@ class CaptureState:
> for ctrl, val in reqmeta.items():
> print(f'\t{ctrl} = {val}')
>
> + if ctx.opt_metadata_graph:
> + if not ctx.graph_drawer:
> + ctx.initialize_graph_drawer(ctx.opt_metadata_graph)
> +
> + ctx.graph_drawer.update_graph(ts, req.metadata)
> +
> for stream, fb in buffers.items():
> stream_name = ctx.stream_names[stream]
>
> @@ -393,6 +405,7 @@ def main():
> parser.add_argument('--crc', nargs=0, type=bool, action=CustomAction, help='Print CRC32 for captured frames')
> parser.add_argument('--save-frames', nargs=0, type=bool, action=CustomAction, help='Save captured frames to files')
> parser.add_argument('--metadata', nargs=0, type=bool, action=CustomAction, help='Print the metadata for completed requests')
> + parser.add_argument('--metadata-graph', nargs='*', type=str, const=True, action=CustomAction, help='Live graph of metadata. Default args: graphs=3 buffer=100 autoscale=true refresh=10')
> parser.add_argument('--strict-formats', type=bool, nargs=0, action=CustomAction, help='Do not allow requested stream format(s) to be adjusted')
> parser.add_argument('-s', '--stream', nargs='+', action=CustomAction)
> args = parser.parse_args()
> @@ -418,6 +431,15 @@ def main():
> ctx.opt_metadata = args.metadata.get(cam_idx, False)
> ctx.opt_strict_formats = args.strict_formats.get(cam_idx, False)
> ctx.opt_stream = args.stream.get(cam_idx, ['role=viewfinder'])
> +
> + ctx.opt_metadata_graph = args.metadata_graph.get(cam_idx, False)
> + if ctx.opt_metadata_graph is not False:
> + ctx.opt_metadata_graph = metadata_graph.process_args(ctx.opt_metadata_graph)
> + # Invalid argument parameters return False
> + if ctx.opt_metadata_graph is False:
> + print('Invalid --metadata_graph arguments, see --help')
> + sys.exit(-1)
> +
> contexts.append(ctx)
>
> for ctx in contexts:
> diff --git a/src/py/cam/cam_metadata_graph.py b/src/py/cam/cam_metadata_graph.py
> new file mode 100644
> index 00000000..7992c0c7
> --- /dev/null
> +++ b/src/py/cam/cam_metadata_graph.py
> @@ -0,0 +1,236 @@
> +# SPDX-License-Identifier: GPL-2.0-or-later
> +# Copyright (C) 2023, Daniel Oakley <daniel.oakley at ideasonboard.com>
> +
> +import libcamera.utils
> +
> +
> +# GraphDrawer is a class that manages drawing the graph. It sets up the graph;
> +# manages graph settings; holds the metadata buffer (previous + current values);
> +# and updates the graph.
> +# We store physical lines for each metadata as their x (time) and y (data) can
> +# be directly updated instead of redrawing each axis - improves performance.
> +class GraphDrawer:
> + graph_number: int
> + buffer_size: int
> + autoscale_axis: bool
> + refresh_time: float
> +
> + BLOCKED_TYPES = [list, tuple, libcamera._libcamera.Rectangle]
> +
> + def __init__(self, graph_arguments, camera_name):
> + # We only import matplotlib here to reduce mandatory dependencies
> + import matplotlib.pyplot as plt
> + self.plt = plt
> +
> + self.graph_number = graph_arguments["graphs"]
> + self.buffer_size = graph_arguments["buffer"]
> + self.autoscale_axis = graph_arguments["autoscale"]
> + # Convert FPS into a duration in nanoseconds
> + self.refresh_time = 1 / graph_arguments["refresh"] * 10**9
> + self.previous_time = 0
> +
> + self.figure = plt.figure(camera_name)
> + self.axis = []
> + self.axis_group = {}
> + self.metadata_buffer = {}
> + self.lines = {}
> + self.blit_manager = self.BlitManager(self.figure.canvas)
> +
> + # Define the margins for drawing the graphs
> + self.figure.subplots_adjust(
> + top=0.95,
> + bottom=0.05,
> + left=0.1,
> + right=0.95,
> + hspace=0.2,
> + wspace=0.2
> + )
> +
> + self.plt.show(block=False)
> +
> + def update_graph(self, time, metadata):
> +        # On the first request, allocate each metadata entry to an axis
> + if len(self.axis) == 0:
> + self.__divide_subplots(self.graph_number, metadata)
> + self.plt.draw()
> +
> + for ctrl, val in metadata.items():
> + if type(val) in self.BLOCKED_TYPES or ctrl.name in ["SensorTimestamp"]:
> + continue
> +
> + # The metadata_buffer holds an x array and a y array for each ctrl
> + # where x is the time series and y is the metadata value
> + if not self.metadata_buffer.get(ctrl.name, False):
> + self.metadata_buffer[ctrl.name] = [[], []]
> +
> + # Create the lines and configure their style on the graph
> + if not self.lines.get(ctrl.name, False):
> + self.lines[ctrl.name], = self.axis_group[ctrl].plot([], label=ctrl.name)
> + self.blit_manager.add_artist(self.lines[ctrl.name])
> + self.lines[ctrl.name].set_alpha(0.7)
> + self.lines[ctrl.name].set_linewidth(2.4)
> +
> + self.metadata_buffer[ctrl.name][0].append(time)
> + self.metadata_buffer[ctrl.name][1].append(val)
> + # Remove the oldest entry to keep the buffer fixed at its max size
> + if len(self.metadata_buffer[ctrl.name][0]) > self.buffer_size:
> + del self.metadata_buffer[ctrl.name][0][0]
> + del self.metadata_buffer[ctrl.name][1][0]
> +
> + if time - self.previous_time >= self.refresh_time:
> + self.__animate()
> + self.previous_time = time
> +
> + # This method allocates the metadata into the correct number of graphs based
> + # on how different their example metadata is from their consecutive neighbour
> + def __divide_subplots(self, number_of_graphs, example_metadata):
> +        # Create the correct number of axes positioned in a vertical stack
> + for i in range(1, number_of_graphs + 1):
> + axis = self.plt.subplot(number_of_graphs, 1, i)
> + # Remove the visible x axis as it is not redrawn with blit
> + axis.get_xaxis().set_ticks([])
> + self.axis.append(axis)
> +
> + cleansed_metadata = {}
> + for ctrl, val in example_metadata.items():
> + if not (type(val) in self.BLOCKED_TYPES or ctrl.name in ["SensorTimestamp"]):
> + cleansed_metadata[ctrl] = val
> +
> +        # Summary of what the following code does:
> + # We first sort the metadata items by value so we can identify the
> + # difference between them.
> + # From there we can split them up based on the differences between them.
> +        # We do this by sorting the ctrls by their differences, and then
> +        # adding N breaks into the standardly sorted list of ctrls (by values)
> +        # next to those names, where N is the number of graphs we want.
> + # We then go through the ctrls in order, adding them to the correct
> + # axis group (increasing their group if a break is found)
> +
> + # Sort the metadata lowest to highest so consecutive values can be compared
> + sorted_metadata = dict(sorted(cleansed_metadata.items(),
> + key=lambda item: item[1]))
> +
> + # Create the dictionary containing the {ctrl:percentage difference}
> + percent_diff = {}
> + prev_val = None
> + for ctrl,val in sorted_metadata.items():
> + if prev_val:
> + percent_diff[ctrl] = val/prev_val
> +
> + prev_val = val
> +
> + # Sort those percentage differences, highest to lowest, so we can find
> + # the appropriate break points to separate the metadata by
> + sorted_diffs = dict(sorted(percent_diff.items(),
> + key=lambda item: item[1], reverse=True))
> +        # This is the list of ctrls ordered by value, lowest to highest
> + sorted_ctrl = sorted(sorted_metadata, key=sorted_metadata.get)
> +
> +        # Add the correct number of breaks in, starting with the break that
> +        # separates the greatest distance
> + i = 0
> + for ctrl, _ in sorted_diffs.items():
> + i += 1
> + if i >= number_of_graphs:
> + break
> +
> + sorted_ctrl.insert(sorted_ctrl.index(ctrl), "~BREAK~")
> +
> + # Put the ctrls with their correct axis group, incrementing group when
> + # a break is found.
> + group = 0
> + for ctrl in sorted_ctrl:
> + if ctrl == "~BREAK~":
> + group += 1
> + else:
> + self.axis_group[ctrl] = self.axis[group]
> +
> + def __animate(self):
> + for ctrl, series in self.metadata_buffer.items():
> + self.lines[ctrl].set_xdata(series[0])
> + self.lines[ctrl].set_ydata(series[1])
> +
> + # Scale and display the legend on the axis
> + for axis in self.axis:
> + axis.relim()
> + axis.legend(loc="upper left")
> + axis.autoscale_view()
> +
> + # Adjust the y scale to be bigger once if manual scaling
> + if axis.get_autoscaley_on() and not self.autoscale_axis:
> + axis_ymin, axis_ymax = axis.get_ybound()
> + axis_yrange = axis_ymax - axis_ymin
> + axis.set_ybound([axis_ymin - axis_yrange * 0.25,
> + axis_ymax + axis_yrange * 0.25])
> +
> + axis.set_autoscaley_on(self.autoscale_axis)
> + axis.set_autoscalex_on(True)
> +
> + self.blit_manager.update()
> +
> + # This BlitManager is derived from: (comments removed and init simplified)
> + # matplotlib.org/devdocs/tutorials/advanced/blitting.html#class-based-example
> + # The BlitManager manages redrawing the graph in a performant way. It also
> + # prompts a full re-draw (axis and legend) whenever the window is resized.
> + class BlitManager:
> + def __init__(self, canvas):
> + self.canvas = canvas
> + self._bg = None
> + self._artists = []
> +
> + self.cid = canvas.mpl_connect("draw_event", self.on_draw)
> +
> + def on_draw(self, event):
> + cv = self.canvas
> + if event is not None:
> + if event.canvas != cv:
> + raise RuntimeError
> + self._bg = cv.copy_from_bbox(cv.figure.bbox)
> + self._draw_animated()
> +
> + def add_artist(self, art):
> + if art.figure != self.canvas.figure:
> + raise RuntimeError
> + art.set_animated(True)
> + self._artists.append(art)
> +
> + def _draw_animated(self):
> + fig = self.canvas.figure
> + for a in self._artists:
> + fig.draw_artist(a)
> +
> + def update(self):
> + cv = self.canvas
> + fig = cv.figure
> + if self._bg is None:
> + self.on_draw(None)
> + else:
> + cv.restore_region(self._bg)
> + self._draw_animated()
> + cv.blit(fig.bbox)
> + cv.flush_events()
> +
> +
> +# This function processes the --metadata-graph arguments into a dictionary. If
> +# an error occurs while doing that (invalid data type or a 0 value for refresh),
> +# False will be returned and the main script will exit with an error.
> +# Unrecognized arguments are ignored.
> +def process_args(arguments):
> + mdg_args = []
> + for i in arguments:
> + mdg_args += i.split("=")
> + try:
> + args_dict = {
> + "graphs": (3 if "graphs" not in mdg_args else
> + int(mdg_args[mdg_args.index("graphs") + 1])),
> + "buffer": (100 if "buffer" not in mdg_args else
> + int(mdg_args[mdg_args.index("buffer") + 1])),
> + "refresh": (10.0 if "refresh" not in mdg_args else
> + 1/1/float(mdg_args[mdg_args.index("refresh") + 1])),
> + "autoscale": (True if "autoscale" not in mdg_args else
> + (mdg_args[mdg_args.index("autoscale") + 1]).lower() == "true")
> + }
> + return args_dict
> +
> + except (ValueError, ZeroDivisionError):
> + return False
More information about the libcamera-devel
mailing list