My Super8 film scanner

So this is the code I have now:

#!/usr/bin/python3

import cv2
import numpy as np
from picamera2 import Picamera2, Preview
from libcamera import Transform

# Print metadata
def metaprint(m):
	#print(m)
	print(m["ExposureTime"], m["AnalogueGain"], m["DigitalGain"])
	
# Drop [count] number of captures from camera
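# (the pipeline still has frames in flight that were exposed with the previous settings)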
def drop(count):
	for c in range(count):
		request = picam2.capture_request()
		metaprint(request.get_metadata())
		request.release()

# Capture a specific exposure time from camera
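# (polls up to 10 requests and keeps the first one whose metadata reports the requested exposure)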
def capture(images, exposure):
	for c in range(10):
		request = picam2.capture_request()
		metadata = request.get_metadata()
		metaprint(metadata)
		if metadata["ExposureTime"] == int(baseExposure*2**exposure):
			if metadata["AnalogueGain"] != 1.0:
				print(f'Warning: AnalogueGain = {metadata["AnalogueGain"]}')
			if metadata["DigitalGain"] != 1.0:
				print(f'Warning: DigitalGain = {metadata["DigitalGain"]}')
			images.append(request.make_array("main"))
			request.release()
			return
		request.release()
	print(f'Error: EV{exposure:+} not found!')
		
# Capture 4 exposures, and merge them using mertens
def capture_mertens(path):
	print(f'Capturing HDR image @{path}')

	# Set exposure to EV-1, and drop 1st capture from camera queue
	picam2.controls.ExposureTime = int(baseExposure*2**-1)
	drop(1)

	# Set exposure to EV-0, and drop 2nd capture from camera queue
	picam2.controls.ExposureTime = int(baseExposure*2**0)
	drop(1)

	# Set exposure to EV+1, and drop 3rd capture from camera queue
	picam2.controls.ExposureTime = int(baseExposure*2**1)
	drop(1)

	# Set exposure to EV+2
	picam2.controls.ExposureTime = int(baseExposure*2**2)
	
	# Collect the four exposures (capture() discards any frames still at the previous exposure)
	images = []
	capture(images, -1)
	capture(images, 0)
	capture(images, 1)
	capture(images, 2)

	# Return exposure to EV-1
	picam2.controls.ExposureTime = int(baseExposure*2**-1)

	# Align images
	#alignMTB = cv2.createAlignMTB()
	#alignMTB.process(images, images)

	# Mertens merge all images
	print(f'- mertens merge')
	merge = cv2.createMergeMertens()
	merged = merge.process(images)
	# Normalize the image to 0.0 .. 1.0
	merged = cv2.normalize(merged, None, 0., 1., cv2.NORM_MINMAX)
	# Convert to 8bit
	merged = np.clip(merged * 255, 0, 255).astype(np.uint8)
	cv2.imwrite(f'{path}_result.jpg', merged)
	print(f'- done')

# Initialize camera, tuning and configuration
tuning = Picamera2.load_tuning_file("imx477_scientific.json")
picam2 = Picamera2(tuning=tuning)
capture_config = picam2.create_still_configuration(
	lores={},
	main={"size": (2028, 1520), "format": "RGB888"},
	raw={"size": picam2.sensor_resolution},
	transform=Transform(hflip=1),
	buffer_count=2,
	display="lores")
picam2.configure(capture_config)
picam2.controls.AeEnable = False
picam2.controls.AnalogueGain = 1.0
picam2.controls.ColourGains = (4.35, 1.05) # calibrated red/blue gain (white balance)
picam2.controls.FrameDurationLimits = (10, 100000) # allow frame durations up to 100 ms, so the longest exposure (EV+2 = 96 ms) still fits in one frame
baseExposure = 24000 # calibrated base exposure time

# Start the camera with preview window
#picam2.start_preview(Preview.QTGL)
picam2.start()

# Give the camera some time to settle (drop 10 captures)
picam2.controls.ExposureTime = int(baseExposure*2**-1)
drop(10)

# Capture the film, 10 frames for this test
for frame in range(10):
	capture_mertens(f'output/film01_frame{frame}')

# Stop the camera
picam2.stop()

The output is:


...

Capturing HDR image @output/film01_frame0
12000 1.0 1.0
12000 1.0 1.0
12000 1.0 1.0
12000 1.0 1.0
12000 1.0 1.0
12000 1.0 1.0
12000 1.0 1.0
24000 1.0 1.0
48000 1.0 1.0
96000 1.0 1.0
- mertens merge
- done
Capturing HDR image @output/film01_frame1
12000 1.0 1.0
12000 1.0 1.0
12000 1.0 1.0
12000 1.0 1.0
12000 1.0 1.0
12000 1.0 1.0
12000 1.0 1.0
24000 1.0 1.0
48000 1.0 1.0
96000 1.0 1.0
- mertens merge
- done

...

As you can see, I waste 6 captures and only use 4, so 10 captures in total, which comes to about 1 second per HDR scan.

Ah nice one! I'll try to improve the code further. If the loop over the 4 exposures is run in a separate thread, then we could perhaps use all of the captures coming from the camera.
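
For example, something along these lines might work (an untested sketch of the idea, reusing the picam2 object, baseExposure and the cv2/numpy imports from the script above; EV_STEPS, frames_done and exposure_cycler are just names I made up, and the pacing of the control updates would still need experimenting with): one thread advances the exposure cycle by one step per consumed frame, while the main loop grabs every request, bins the frames by the ExposureTime reported in the metadata, and merges as soon as it has all four.

import queue
import threading

EV_STEPS = (-1, 0, 1, 2) # same bracket as capture_mertens()
wanted = {int(baseExposure * 2**ev) for ev in EV_STEPS}
frames_done = queue.Queue() # main loop signals one entry per consumed frame

def exposure_cycler():
	# Advance the EV cycle by one step for every frame the main loop consumes.
	# The frame actually exposed with the new value arrives a few captures
	# later, but as long as the cycle keeps running, every capture should
	# carry one of the four wanted exposures and nothing is thrown away.
	i = 0
	while True:
		picam2.controls.ExposureTime = int(baseExposure * 2**EV_STEPS[i % len(EV_STEPS)])
		i += 1
		if not frames_done.get(): # main loop sends False when it is finished
			return

threading.Thread(target=exposure_cycler, daemon=True).start()

merge = cv2.createMergeMertens()
bracket = {} # exposure time -> image array
frame = 0
while frame < 10:
	request = picam2.capture_request()
	exposure = request.get_metadata()["ExposureTime"]
	if exposure in wanted:
		bracket[exposure] = request.make_array("main")
	request.release()
	frames_done.put(True) # let the cycler queue the next exposure
	if len(bracket) == len(EV_STEPS): # one image per EV step collected
		merged = merge.process(list(bracket.values()))
		merged = cv2.normalize(merged, None, 0., 1., cv2.NORM_MINMAX)
		merged = np.clip(merged * 255, 0, 255).astype(np.uint8)
		cv2.imwrite(f'output/film01_frame{frame}_result.jpg', merged)
		bracket = {}
		frame += 1

frames_done.put(False) # stop the cycler thread

The Mertens merge still runs inside the capture loop here; if that turns out to stall the pipeline, the next step would be handing the merge off to its own worker thread (and perhaps raising buffer_count).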

Thanks, I’ll try this.
