class VideoCapture:
    def __init__(self, device_id):
-        cv.CAP_GSTREAMER
-        self.camera = cv.VideoCapture(device_id)
+
+        self.camera = cv.VideoCapture(device_id + cv.CAP_GSTREAMER)
        self.camera.set(cv.CAP_PROP_FRAME_WIDTH, 1920.0)
        self.camera.set(cv.CAP_PROP_FRAME_HEIGHT, 1080.0)
-        self.queue = Queue.Queue()
+        self.queue = queue.Queue()
        read_thread = threading.Thread(target=self.reader)
        read_thread.daemon = True
        read_thread.start()

    # read frames as soon as they are available, keeping only the most recent one
    def reader(self):
        while True:
            ret, frame = self.camera.read()
            if not ret:
                break
            if not self.queue.empty():
                try:
                    # discard the stale frame so the queue never holds more than one
                    self.queue.get_nowait()
                except queue.Empty:
                    pass
            self.queue.put(frame)

+    # accept (and ignore) property sets so the wrapper is a drop-in replacement
+    def set(self, prop, value):
+        return
+
    def read(self):
-        return self.queue.get()
+        # match cv.VideoCapture.read()'s (ret, frame) return shape
+        return None, self.queue.get()
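
# Usage sketch (assumed, for illustration only): the wrapper keeps
# cv.VideoCapture's calling convention, so call sites just unpack the tuple
# and can ignore the ret placeholder:
#
#     cap = VideoCapture(2)   # "2" is an example device index
#     _, frame = cap.read()   # always the newest frame, never a buffered one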
class camera():
    def __init__(
        self.show_debug = debug
        self.dummy = dummy
-        cv.CAP_GSTREAMER
-
-        self.camera = cv.VideoCapture(device_id)
+        self.camera = VideoCapture(device_id)
-        self.camera.set(cv.CAP_PROP_BUFFERSIZE, 1)
+        self.camera.set(cv.CAP_PROP_BUFFERSIZE, 38)
        self.camera.set(cv.CAP_PROP_FRAME_WIDTH, 1920.0)
        self.camera.set(cv.CAP_PROP_FRAME_HEIGHT, 1080.0)
        cv.imshow("display", calibration_image)
        cv.waitKey(0)
-        capture = self.camera.read()
+        _, capture = self.camera.read()
        # detect SIFT keypoints
        sift = cv.SIFT_create()
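        # One plausible shape for the matching step this hunk elides (a sketch,
        # assuming calibration_image is what was shown on the display and capture
        # is the frame grabbed above; names follow that assumption):
        kp_display, des_display = sift.detectAndCompute(
            cv.cvtColor(calibration_image, cv.COLOR_BGR2GRAY), None)
        kp_capture, des_capture = sift.detectAndCompute(
            cv.cvtColor(capture, cv.COLOR_BGR2GRAY), None)
        matches = cv.BFMatcher().knnMatch(des_display, des_capture, k=2)
        # Lowe's ratio test keeps only confident matches
        good = [m for m, n in matches if m.distance < 0.75 * n.distance]
        src = np.float32([kp_capture[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        dst = np.float32([kp_display[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        # homography mapping captured pixels back into display coordinates,
        # as used by cv.warpPerspective() in read() below
        self.homography, _ = cv.findHomography(src, dst, cv.RANSAC, 5.0)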
        for y in range(0, self.window_height, vingette_compression):
            for x in range(0, self.window_size, vingette_compression):
-                self.lookup_vingette[v, y, x] = capture[y, x, 2] - v
+                self.lookup_vingette[v // vingette_compression, y // vingette_compression, x // vingette_compression] = capture[y, x, 2] - v
        color_compression = 90
-        self.lookup_color = np.array((
+        self.lookup_color = np.zeros((
            180 // color_compression + 1,
            255 // color_compression + 1,
            255 // color_compression + 1,
            3
-        ))
+        ), dtype=np.uint8)
        for h in range(0, 180, color_compression):
            for s in range(0, 255, color_compression):
            image = self.last_display
        else:
-            image = self.camera.read()
+            _, image = self.camera.read()
        self.last_capture = image
        if self.homography is not None:
            image = cv.warpPerspective(image, self.homography, self.display_size)
        image = cv.resize(image, (self.window_size, self.window_height))
        if self.lookup_vingette is not None and self.lookup_color is not None:
-            for row in image:
-                for pixel in row:
-                    pixel = self.lookup[pixel[0], pixel[1], pixel[2]]
+            pass
        self.last_recovered = image
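        # The per-pixel loop removed above never mutated image (rebinding the loop
        # variable has no effect) and indexed a self.lookup table this excerpt never
        # defines. A vectorized sketch of the intended correction, assuming the image
        # is HSV here and the table is indexed by compressed channel values:
        #
        #     h, s, v = image[..., 0], image[..., 1], image[..., 2]
        #     image = self.lookup_color[h // color_compression,
        #                               s // color_compression,
        #                               v // color_compression]
        #
        # (color_compression would need to be stored on self for this to run here.)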
"""
sample_rate, data = wavfile.read("/home/will/Downloads/Adducci - Around the Horn.wav")
-# data = data[...,0]
+#data = data[...,0]
-new_rate = 22050.
+new_rate = 11025.
sample_count = round(len(data) * new_rate / sample_rate)
data = sps.resample(data, sample_count)
sample_rate = int(new_rate)
-window_size = 176
-window_height = 99
+window_size = 192
+window_height = 108
hop_size = window_size // 2
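# A worked check on the new grid, assuming the 1920x1080 capture configured above:
# both 176 x 99 and 192 x 108 are 16:9, but only the new size divides the frame
# evenly (1920 / 192 = 1080 / 108 = 10, while 1920 / 176 is not an integer), and
# hop_size = 192 // 2 = 96 keeps 50% overlap between adjacent windows.
assert 1920 % 192 == 0 and 1080 % 108 == 0 and 1920 % 176 != 0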
camera = camera(
    window_height,
    (1840, 1000),
    device_id=2,
-    debug=False,
-    dummy=True
+    debug=True,
+    dummy=False
)
camera.calibrate()
-camera.get_lookup()
-print(camera.lookup_vingette)
-print(camera.lookup_color)
+#camera.get_lookup()
+#print(camera.lookup_vingette)
+#print(camera.lookup_color)
transform = fft(window_size, hop_size)
if segment_index == segment_count: segment_index = 0
slept = 0
-while len(audio) > 2 * segment_samples:
+while len(audio) > 3 * segment_samples:
    time.sleep(0.01)
    slept += 1
print(f"slept {slept} times")