OzVa Git service - rust_fft/commitdiff
moved python proj to better place
author Max Value <greenwoodw50@gmail.com>
Sat, 25 Oct 2025 00:25:24 +0000 (01:25 +0100)
committer Max Value <greenwoodw50@gmail.com>
Sat, 25 Oct 2025 00:25:24 +0000 (01:25 +0100)
15 files changed:
cube/__init__.py [new file with mode: 0644]
cube/camera.py [new file with mode: 0644]
cube/config.py [new file with mode: 0644]
cube/cube.py [new file with mode: 0755]
cube/frame.py [new file with mode: 0644]
cube/graph.py [new file with mode: 0755]
cube/test.py [new file with mode: 0755]
make.py [new file with mode: 0755]
requirements.txt [new file with mode: 0644]
src/cube/camera.py [deleted file]
src/cube/cube.py [deleted file]
src/cube/frame.py [deleted file]
src/cube/graph.py [deleted file]
src/cube/main.py [deleted file]
src/cube/test.py [deleted file]

diff --git a/cube/__init__.py b/cube/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/cube/camera.py b/cube/camera.py
new file mode 100644 (file)
index 0000000..f46caff
--- /dev/null
+++ b/cube/camera.py
@@ -0,0 +1,59 @@
+from .config import IMAGE_WIDTH, IMAGE_HEIGHT
+
+import numpy as np
+import cv2 as cv
+
+CAP_WAIT = 1
+
+class Camera():
+       def __init__(self, device):
+               cv.namedWindow("LUT Calibration", cv.WINDOW_GUI_NORMAL)
+
+               self.camera = cv.VideoCapture(device)
+               self.homography = None
+
+               #self.calibrate()
+
+       def get(self, image):
+               cv.imshow("LUT Calibration", image)
+               cv.waitKey(CAP_WAIT)
+
+               #_, capture = self.camera.read()
+               #capture = cv.warpPerspective(capture, self.homography, (IMAGE_WIDTH, IMAGE_HEIGHT))
+
+               return image
+               #return capture  # re-enable together with the capture/warp lines above
+
+       def calibrate(self):
+               calibration_image = cv.imread("../calibration.jpg")
+
+               # remove toolbar from named calibration window
+               cv.imshow("LUT Calibration", calibration_image)
+               cv.waitKey(0)
+
+               _, capture = self.camera.read()
+
+               sift = cv.SIFT_create()
+               kp1, des1 = sift.detectAndCompute(calibration_image, None)
+               kp2, des2 = sift.detectAndCompute(capture, None)
+
+               # get good matches between calibration image and the captured image
+               flann = cv.FlannBasedMatcher(
+                               {"algorithm": 1, "trees": 5},
+                               {"checks": 50}
+               )
+               matches = flann.knnMatch(des1, des2, k=2)
+
+               #get good matches via ratio test
+               good = []
+               for m,n in matches:
+                               if m.distance < 0.7 * n.distance:
+                                               good.append(m)
+
+               if len(good) > 10:
+                               src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
+                               dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
+                               self.homography, _ = cv.findHomography(dst_pts, src_pts, cv.RANSAC, 5.0)
+
+               else:
+                               raise Exception("Calibration failed")
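
Note: calibrate() above estimates a homography from SIFT/FLANN matches between the known calibration image and a camera capture, so later captures can be warped back into the displayed image's pixel grid; the warp itself is currently commented out in get(). A minimal sketch of how that disabled warp would be applied, assuming a homography has already been computed (the helper name below is illustrative, not part of the commit):

# Sketch only: apply a previously computed homography to a fresh capture.
# `camera` is an opened cv.VideoCapture, `homography` the 3x3 matrix
# returned by cv.findHomography() in Camera.calibrate().
import cv2 as cv

IMAGE_WIDTH, IMAGE_HEIGHT = 640, 360   # same values as cube/config.py

def warped_capture(camera, homography):
    ok, capture = camera.read()
    if not ok:
        raise RuntimeError("camera read failed")
    # map the captured frame back onto the displayed image's coordinates
    return cv.warpPerspective(capture, homography, (IMAGE_WIDTH, IMAGE_HEIGHT))
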
diff --git a/cube/config.py b/cube/config.py
new file mode 100644 (file)
index 0000000..a66128e
--- /dev/null
+++ b/cube/config.py
@@ -0,0 +1,7 @@
+LUT_SIZE = 12
+
+IMAGE_WIDTH = 640#1920
+IMAGE_HEIGHT = 360#1080
+
+QR_SIZE = 100
+QR_PADDING = 10
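
Note: LUT_SIZE controls how densely each channel is sampled by the Cube class below: 12 levels per channel spaced 255/11 ≈ 23.2 apart, i.e. 12³ = 1728 test colours in total. A small sketch of that grid, mirroring Cube.__init__() and Cube.process():

# Sketch of the sampling grid implied by LUT_SIZE (see cube/cube.py below).
LUT_SIZE = 12
s = 255 / (LUT_SIZE - 1)                   # scaler, ~23.18
levels = [i * s for i in range(LUT_SIZE)]  # 0.0 ... 255.0
print(len(levels) ** 3)                    # 1728 colours displayed and measured
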
diff --git a/cube/cube.py b/cube/cube.py
new file mode 100755 (executable)
index 0000000..b89256a
--- /dev/null
+++ b/cube/cube.py
@@ -0,0 +1,103 @@
+#!.venv/bin/python
+
+from .frame import blank, generate, result
+from .config import LUT_SIZE
+
+from scipy.interpolate import make_interp_spline
+from tqdm import tqdm
+import numpy as np
+import cv2 as cv
+import time
+
+WAIT_TIME = 0.1
+WAIT_TICKS = 100
+
+class Cube():
+       def __init__(self, camera):
+               self.camera = camera
+               self.size = LUT_SIZE
+               self.lut = np.zeros((LUT_SIZE, LUT_SIZE, LUT_SIZE, 3), dtype=np.uint8)
+               self.s = 255 / (self.size-1)
+
+               print(f"""
+creating LUT...
+
+size:\t{self.size}
+scaler:\t{self.s}
+colors:\t{self.size**3}
+               """)
+
+       def process(self):
+               seq = [i * self.s for i in range(0,self.size)]
+               for r in tqdm(seq):
+                       for g in seq:
+                               for b in seq:
+                                       self.check(color=[r, g, b])
+
+               for _ in range(WAIT_TICKS):
+                       pass#self.check()
+                       #time.sleep(WAIT_TIME)
+
+               self.invert()
+
+       def invert(self):
+
+               print("inverting LUT...")
+
+               def iter_channels(a):
+                       r = self.lut[:,:,:,0]
+                       g = self.lut[:,:,:,1]
+                       b = self.lut[:,:,:,2]
+
+                       r = resample_channel(np.rot90(r, 1, (0,2)))
+                       g = resample_channel(np.rot90(g, 1, (1,2)))
+
+                       r = np.rot90(r, -1, (0,2))
+                       g = np.rot90(g, -1, (1,2))
+
+                       b = resample_channel(b)
+
+                       return np.stack((r, g, b), axis=-1)
+
+               def resample_channel(c):
+                       c = np.reshape(c, (self.size * self.size, self.size))
+
+                       for i in range(self.size * self.size):
+                               seq = np.linspace(0, 255, self.size)
+
+                               """
+                                       This is the section that does all the heavy lifting by reinterpolating the curves
+                                       at the right ordinates. scipy b splines should work better but might introduce
+                                       strange loops in weird color situations.
+                               """
+
+                               spl = make_interp_spline(c[i], seq)
+                               c[i] = spl(seq)
+
+                               # Alternative np splines
+
+                               #c[i] = np.interp(seq, c[i], seq)
+
+                       c = np.reshape(c, (self.size, self.size, self.size))
+                       return c
+
+               self.lut = iter_channels(self.lut)
+
+       def check(self, color=None):
+
+               if color is None:
+                       image = blank()
+               else:
+                       image = generate(color)
+
+               capture = self.camera.get(image)
+
+               data, new = result(capture)
+
+               if data is not None:
+                       data = np.divide(data, self.s).round().astype(np.uint8)
+
+                       self.lut[data[0], data[1], data[2]] = new
+
+
+
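
Note: invert() above treats each row of the measured LUT as a response curve sampled at the uniformly spaced displayed levels and resamples it at uniform output ordinates, which is what turns the measured table into a correction table. A minimal one-curve sketch using the np.interp alternative mentioned in the comments; the synthetic response below is hypothetical and assumed monotonic, as the interpolation requires:

# Sketch: inverting a single measured 1-D response curve, as resample_channel()
# does row by row. np.interp(seq, measured, seq) answers: "which displayed
# level produced each target output value?"
import numpy as np

size = 12
seq = np.linspace(0, 255, size)          # displayed levels
measured = 255 * (seq / 255) ** 1.8      # hypothetical camera response (monotonic)
inverse = np.interp(seq, measured, seq)  # level to display to reach each target output
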
diff --git a/cube/frame.py b/cube/frame.py
new file mode 100644 (file)
index 0000000..110bd20
--- /dev/null
+++ b/cube/frame.py
@@ -0,0 +1,71 @@
+#!.venv/bin/python
+
+from .config import QR_SIZE, QR_PADDING, IMAGE_WIDTH, IMAGE_HEIGHT
+
+from pyzbar.pyzbar import decode
+import numpy as np
+import cv2 as cv
+import random
+import qrcode
+import os
+
+def cast(a):
+       a = np.array(a, dtype=np.float64)
+       a[...,0] = ((a[...,0] / 255.) ** 2) * 255.
+       a[...,1] = ((a[...,1] / 255.) ** 2) * 255.
+       a[...,2] = ((a[...,2] / 255.) ** 2) * 255.
+       a = np.clip(a, 0, 255).astype(np.uint8)
+
+       return a
+
+def generate(color):
+       # make qr code
+       qr = qrcode.QRCode(
+               version=1,
+               error_correction=qrcode.constants.ERROR_CORRECT_L,
+               border=4,
+       )
+       qr.add_data(" ".join(["{:03d}".format(int(x)) for x in color]))
+       qr.make(fit=True)
+
+       # transform qr into array with correct shape
+       qr_image = np.array(qr.get_matrix())
+       qr_image = np.where(qr_image, 0, 255).astype(np.uint8)
+       qr_image = np.repeat(qr_image[:, :, np.newaxis], 3, axis=2)
+       qr_image = cv.resize(qr_image, (QR_SIZE,QR_SIZE), interpolation=cv.INTER_NEAREST)
+
+       color = cast(color)
+
+       # create color image of correct shape
+       c_image = np.array([[color[::-1]]], dtype=np.uint8)
+       c_image = cv.resize(c_image, (IMAGE_WIDTH, IMAGE_HEIGHT))
+
+       # put qr codes in the corners
+       tl = np.s_[:QR_SIZE,:QR_SIZE]
+       tr = np.s_[:QR_SIZE,-QR_SIZE:]
+       bl = np.s_[-QR_SIZE:,:QR_SIZE]
+       br = np.s_[-QR_SIZE:,-QR_SIZE:]
+
+       c_image[tl] = c_image[tr] = c_image[bl] = c_image[br] = qr_image
+
+       return c_image
+
+def blank():
+       image = np.zeros((IMAGE_HEIGHT,IMAGE_WIDTH,3), dtype=np.uint8)
+       return image
+
+def result(capture):
+       l = QR_SIZE + QR_PADDING
+
+       new = np.mean(capture[l:-l,l:-l], axis=(0,1)).astype(np.uint8)
+
+       codes = decode(capture)
+
+       if codes == []: return None, None
+
+       codes.sort(key=lambda x:x.quality)
+       data = codes[0].data
+       data = [int(x) for x in data.split()]
+       data = np.array(data)
+
+       return data, new[::-1]
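
Note: generate() and result() above form an encode/decode pair: the displayed RGB triple is written into QR codes in the frame corners and read back out of the capture, so each measurement can be matched to the colour that was actually on screen even if the camera lags. A minimal round-trip sketch of that encoding, using the same qrcode and pyzbar calls (the 100-pixel size simply mirrors QR_SIZE):

# Sketch: the QR encode/decode round trip used by generate() and result().
import cv2 as cv
import numpy as np
import qrcode
from pyzbar.pyzbar import decode

color = [255, 128, 0]
payload = " ".join("{:03d}".format(int(x)) for x in color)   # "255 128 000"

qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_L, border=4)
qr.add_data(payload)
qr.make(fit=True)
qr_image = np.where(np.array(qr.get_matrix()), 0, 255).astype(np.uint8)
qr_image = cv.resize(qr_image, (100, 100), interpolation=cv.INTER_NEAREST)

codes = decode(qr_image)
print([int(x) for x in codes[0].data.split()])               # -> [255, 128, 0]
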
diff --git a/cube/graph.py b/cube/graph.py
new file mode 100755 (executable)
index 0000000..79139f0
--- /dev/null
+++ b/cube/graph.py
@@ -0,0 +1,49 @@
+#!.venv/bin/python
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+def show(a):
+       size = a.shape[0]
+       fig = plt.figure(figsize=plt.figaspect(1.))
+
+       ax = fig.add_subplot(2, 1, 1)
+
+       b = a.astype(np.float64)
+
+       ax.plot(b[1,1,...,2]+50, c="blue")
+       ax.plot(b[1,...,1,1]+25, c="green")
+       ax.plot(b[...,1,1,0], c="red")
+
+       ax = fig.add_subplot(2, 1, 2, projection='3d')
+
+       xs = []
+       ys = []
+       zs = []
+       cs = []
+
+       # look im going to do this in a lazy way please forgive me
+       for x in range(size):
+               for y in range(size):
+                       for z in range(size):
+                               xs.append(x)
+                               ys.append(y)
+                               zs.append(z)
+
+                               r, g, b = a[x][y][z]
+                               cs.append("#{0:02x}{1:02x}{2:02x}".format(r, g, b))
+
+       ax.scatter(xs, ys, zs, c=cs)
+       ax.set_xlabel("r")
+       ax.set_ylabel("g")
+       ax.set_zlabel("b")
+       plt.show()
+
+def compare(a, b):
+       plt.hist(a.flat, bins=range(100), fc='k', ec='k', color="red")
+       plt.hist(b.flat, bins=range(100), fc='k', ec='k', color="blue")
+       plt.show()
+
+if __name__ == "__main__":
+       a = np.load("../../cube.npy")
+       show(a)
diff --git a/cube/test.py b/cube/test.py
new file mode 100755 (executable)
index 0000000..f76e0e3
--- /dev/null
+++ b/cube/test.py
@@ -0,0 +1,69 @@
+from .frame import cast
+from .graph import compare
+
+import cv2 as cv
+import numpy as np
+import time
+
+def validate(cube):
+       print("testing LUT...")
+
+       image = cv.imread("src/calibration.jpg")
+       height, width, _ = image.shape
+       a = cast(np.flip(image, axis=-1)).astype(np.uint8)
+       casted = cast(np.flip(image, axis=-1)).astype(np.uint8)
+
+       start = time.time()
+
+       a = np.divide(a, cube.s)
+
+       c1 = np.floor(a).astype(np.uint8)
+       c2 = np.ceil(a).astype(np.uint8)
+       rem = np.remainder(a, 1)
+
+       def index_lut(a, i):
+               for ih in range(height):
+                       for iw in range(width):
+                               pos = i[ih,iw]
+                               pos = np.clip(pos, 0, a.shape[0] - 1)
+                               i[ih,iw] = a[pos[0], pos[1], pos[2]]
+
+               return i
+
+       c1 = index_lut(cube.lut, c1)
+       c2 = index_lut(cube.lut, c2)
+
+       a = c1 + np.array((c2 - c1) * rem, dtype=np.uint8)
+       a = np.flip(a, axis=-1)
+
+       dur = time.time() - start
+
+       casted = np.flip(casted, axis=-1)
+
+       # do the diff
+
+       diff = np.abs(image.sum(axis=-1, dtype=np.int16) - a.sum(axis=-1, dtype=np.int16))
+       diff = np.clip(diff, 0, 255).astype(np.uint8)
+       diff = np.stack((diff, diff, diff), axis=-1)
+
+       print(f"""
+cast mean:\t\t{np.mean(np.abs(casted - a))}
+
+max error:\t\t{diff.max()}
+mean error:\t\t{np.mean(diff)}
+standard deviation:\t{np.std(diff)}
+
+time taken:\t\t{dur}s
+          """)
+
+       # make the composite image
+
+       left = np.vstack((image, a), dtype=np.uint8)
+       right = np.vstack((casted, diff), dtype=np.uint8)
+
+       composite = np.hstack((left, right))
+
+       composite = cv.resize(composite, (640,360))
+       cv.imshow("LUT Calibration", composite)
+
+       cv.waitKey(0)
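
Note: index_lut() above walks the image pixel by pixel in Python; the same floor/ceil lookup-and-blend can be expressed with NumPy fancy indexing, which is much faster. A hedged sketch, assuming lut and s are the table and scaler from cube/cube.py and that the input image is already in RGB channel order (the flips in validate() handle OpenCV's BGR order):

# Sketch: vectorised equivalent of the floor/ceil LUT lookup in validate().
import numpy as np

def apply_lut(image_rgb, lut, s):
    a = image_rgb.astype(np.float64) / s
    lo = np.clip(np.floor(a).astype(int), 0, lut.shape[0] - 1)
    hi = np.clip(np.ceil(a).astype(int), 0, lut.shape[0] - 1)
    rem = np.remainder(a, 1)                                   # per-channel blend factor
    c1 = lut[lo[..., 0], lo[..., 1], lo[..., 2]].astype(np.float64)
    c2 = lut[hi[..., 0], hi[..., 1], hi[..., 2]].astype(np.float64)
    return np.clip(c1 + (c2 - c1) * rem, 0, 255).astype(np.uint8)
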
diff --git a/make.py b/make.py
new file mode 100755 (executable)
index 0000000..81232a8
--- /dev/null
+++ b/make.py
@@ -0,0 +1,23 @@
+#!.venv/bin/python
+
+from cube.camera import Camera
+from cube.graph import show
+from cube.test import validate
+from cube.cube import Cube
+
+from numpy import save
+import matplotlib.pyplot as plt
+
+if __name__ == "__main__":
+       eye = Camera(0)
+
+       lut = Cube(eye)
+       lut.process()
+
+       show(lut.lut)
+
+       validate(lut)
+
+       save("./cube.npy", lut.lut)
+
+
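
Note: make.py saves the finished table next to the script as ./cube.npy, so it can be reloaded later without redoing the capture. A minimal sketch, assuming it is run from the repository root so the cube package is importable:

# Sketch: reloading the saved LUT for later inspection.
import numpy as np
from cube.graph import show

lut = np.load("./cube.npy")   # shape (LUT_SIZE, LUT_SIZE, LUT_SIZE, 3), dtype uint8
show(lut)
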
diff --git a/requirements.txt b/requirements.txt
new file mode 100644 (file)
index 0000000..6b5e457
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,7 @@
+matplotlib
+opencv-python
+numpy
+pyzbar
+qrcode
+scipy
+tqdm
diff --git a/src/cube/camera.py b/src/cube/camera.py
deleted file mode 100644 (file)
index bed2a29..0000000
--- a/src/cube/camera.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import numpy as np
-import cv2 as cv
-
-CAP_WAIT = 1
-
-class Camera():
-       def __init__(self, device):
-               cv.namedWindow("LUT Calibration", cv.WINDOW_GUI_NORMAL)
-
-               self.camera = cv.VideoCapture(device)
-               self.homography = None
-
-               #self.calibrate()
-
-       def get(self, image):
-               import main
-
-               cv.imshow("LUT Calibration", image)
-               cv.waitKey(CAP_WAIT)
-
-               #_, capture = self.camera.read()
-               #capture = cv.warpPerspective(capture, self.homography, (main.IMAGE_WIDTH, main.IMAGE_HEIGHT))
-
-               return image
-               return capture
-
-       def calibrate(self):
-               calibration_image = cv.imread("../calibration.jpg")
-
-               # remove toolbar from named calibration window
-               cv.imshow("LUT Calibration", calibration_image)
-               cv.waitKey(0)
-
-               _, capture = self.camera.read()
-
-               sift = cv.SIFT_create()
-               kp1, des1 = sift.detectAndCompute(calibration_image, None)
-               kp2, des2 = sift.detectAndCompute(capture, None)
-
-               # get good matches between calibration image and the captured image
-               flann = cv.FlannBasedMatcher(
-                               {"algorithm": 1, "trees": 5},
-                               {"checks": 50}
-               )
-               matches = flann.knnMatch(des1, des2, k=2)
-
-               #get good matches via ratio test
-               good = []
-               for m,n in matches:
-                               if m.distance < 0.7 * n.distance:
-                                               good.append(m)
-
-               if len(good) > 10:
-                               src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
-                               dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
-                               self.homography, _ = cv.findHomography(dst_pts, src_pts, cv.RANSAC, 5.0)
-
-               else:
-                               raise Exception("Calibration failed")
diff --git a/src/cube/cube.py b/src/cube/cube.py
deleted file mode 100755 (executable)
index dadd661..0000000
--- a/src/cube/cube.py
+++ /dev/null
@@ -1,101 +0,0 @@
-#!.venv/bin/python
-
-from scipy.interpolate import make_interp_spline
-from tqdm import tqdm
-import numpy as np
-import cv2 as cv
-import time
-
-WAIT_TIME = 0.1
-WAIT_TICKS = 100
-
-class Cube():
-       def __init__(self, camera, size):
-               self.camera = camera
-               self.size = size
-               self.lut = np.zeros((size, size, size, 3), dtype=np.uint8)
-               self.s = 255 / (self.size-1)
-
-               print(f"""
-creating LUT...
-
-size:\t{self.size}
-scaler:\t{self.s}
-colors:\t{self.size**3}
-               """)
-
-       def process(self):
-               seq = [i * self.s for i in range(0,self.size)]
-               for r in tqdm(seq):
-                       for g in seq:
-                               for b in seq:
-                                       self.check(color=[r, g, b])
-
-               for _ in range(WAIT_TICKS):
-                       pass#self.check()
-                       #time.sleep(WAIT_TIME)
-
-               self.invert()
-
-       def invert(self):
-
-               print("inverting LUT...")
-
-               def iter_channels(a):
-                       r = self.lut[:,:,:,0]
-                       g = self.lut[:,:,:,1]
-                       b = self.lut[:,:,:,2]
-
-                       r = resample_channel(np.rot90(r, 1, (0,2)))
-                       g = resample_channel(np.rot90(g, 1, (1,2)))
-
-                       r = np.rot90(r, -1, (0,2))
-                       g = np.rot90(g, -1, (1,2))
-
-                       b = resample_channel(b)
-
-                       return np.stack((r, g, b), axis=-1)
-
-               def resample_channel(c):
-                       c = np.reshape(c, (self.size * self.size, self.size))
-
-                       for i in range(self.size * self.size):
-                               seq = np.linspace(0, 255, self.size)
-
-                               """
-                                       This is the section that does all the heavy lifting by reinterpolating the curves
-                                       at the right ordinates. scipy b splines should work better but might introduce
-                                       strange loops in weird color situations.
-                               """
-
-                               spl = make_interp_spline(c[i], seq)
-                               c[i] = spl(seq)
-
-                               # Alternative np splines
-
-                               #c[i] = np.interp(seq, c[i], seq)
-
-                       c = np.reshape(c, (self.size, self.size, self.size))
-                       return c
-
-               self.lut = iter_channels(self.lut)
-
-       def check(self, color=None):
-               from frame import blank, generate, result
-
-               if color is None:
-                       image = blank()
-               else:
-                       image = generate(color)
-
-               capture = self.camera.get(image)
-
-               data, new = result(capture)
-
-               if data is not None:
-                       data = np.divide(data, self.s).round().astype(np.uint8)
-
-                       self.lut[data[0], data[1], data[2]] = new
-
-
-
diff --git a/src/cube/frame.py b/src/cube/frame.py
deleted file mode 100644 (file)
index b8850aa..0000000
--- a/src/cube/frame.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!.venv/bin/python
-
-from pyzbar.pyzbar import decode
-import numpy as np
-import cv2 as cv
-import random
-import qrcode
-import os
-
-def cast(a):
-       a = np.array(a, dtype=np.float64)
-       a[...,0] = ((a[...,0] / 255.) ** 0.4) * 255.
-       a[...,1] = ((a[...,1] / 255.) ** 2) * 255.
-       a[...,2] = ((a[...,2] / 255.) ** 0.3) * 255.
-       a = np.clip(a.astype(np.uint8), 0, 255)
-
-       return a
-
-def generate(color):
-       import main
-
-       # make qr code
-       qr = qrcode.QRCode(
-               version=1,
-               error_correction=qrcode.constants.ERROR_CORRECT_L,
-               border=4,
-       )
-       qr.add_data(" ".join(["{:03d}".format(int(x)) for x in color]))
-       qr.make(fit=True)
-
-       # transform qr into array with correct shape
-       qr_image = np.array(qr.get_matrix())
-       qr_image = np.where(qr_image, 0, 255).astype(np.uint8)
-       qr_image = np.repeat(qr_image[:, :, np.newaxis], 3, axis=2)
-       qr_image = cv.resize(qr_image, (main.QR_SIZE,main.QR_SIZE), interpolation=cv.INTER_NEAREST)
-
-       color = cast(color)
-
-       # create color image of correct shape
-       c_image = np.array([[color[::-1]]], dtype=np.uint8)
-       c_image = cv.resize(c_image, (main.IMAGE_WIDTH, main.IMAGE_HEIGHT))
-
-       # put qr codes in the corners
-       tl = np.s_[:main.QR_SIZE,:main.QR_SIZE]
-       tr = np.s_[:main.QR_SIZE,-main.QR_SIZE:]
-       bl = np.s_[-main.QR_SIZE:,:main.QR_SIZE]
-       br = np.s_[-main.QR_SIZE:,-main.QR_SIZE:]
-
-       c_image[tl] = c_image[tr] = c_image[bl] = c_image[br] = qr_image
-
-       return c_image
-
-def blank():
-       import main
-
-       image = np.zeros((main.IMAGE_HEIGHT,main.IMAGE_WIDTH,3), dtype=np.uint8)
-       return image
-
-def result(capture):
-       import main
-
-       l = main.QR_SIZE + main.QR_PADDING
-
-       new = np.mean(capture[l:-l,l:-l], axis=(0,1)).astype(np.uint8)
-
-       codes = decode(capture)
-
-       if codes == []: return None, None
-
-       codes.sort(key=lambda x:x.quality)
-       data = codes[0].data
-       data = [int(x) for x in data.split()]
-       data = np.array(data)
-
-       return data, new[::-1]
diff --git a/src/cube/graph.py b/src/cube/graph.py
deleted file mode 100755 (executable)
index 79139f0..0000000
--- a/src/cube/graph.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!.venv/bin/python
-
-import matplotlib.pyplot as plt
-import numpy as np
-
-def show(a):
-       size = a.shape[0]
-       fig = plt.figure(figsize=plt.figaspect(1.))
-
-       ax = fig.add_subplot(2, 1, 1)
-
-       b = a.astype(np.float64)
-
-       ax.plot(b[1,1,...,2]+50, c="blue")
-       ax.plot(b[1,...,1,1]+25, c="green")
-       ax.plot(b[...,1,1,0], c="red")
-
-       ax = fig.add_subplot(2, 1, 2, projection='3d')
-
-       xs = []
-       ys = []
-       zs = []
-       cs = []
-
-       # look im going to do this in a lazy way please forgive me
-       for x in range(size):
-               for y in range(size):
-                       for z in range(size):
-                               xs.append(x)
-                               ys.append(y)
-                               zs.append(z)
-
-                               r, g, b = a[x][y][z]
-                               cs.append("#{0:02x}{1:02x}{2:02x}".format(r, g, b))
-
-       ax.scatter(xs, ys, zs, c=cs)
-       ax.set_xlabel("r")
-       ax.set_ylabel("g")
-       ax.set_zlabel("b")
-       plt.show()
-
-def compare(a, b):
-       plt.hist(a.flat, bins=range(100), fc='k', ec='k', color="red")
-       plt.hist(b.flat, bins=range(100), fc='k', ec='k', color="blue")
-       plt.show()
-
-if __name__ == "__main__":
-       a = np.load("../../cube.npy")
-       show(a)
diff --git a/src/cube/main.py b/src/cube/main.py
deleted file mode 100755 (executable)
index 02dccbd..0000000
--- a/src/cube/main.py
+++ /dev/null
@@ -1,32 +0,0 @@
-#!.venv/bin/python
-
-import cube
-from camera import Camera
-from numpy import save
-from graph import show
-from test import validate
-
-import matplotlib.pyplot as plt
-
-LUT_SIZE = 12
-
-IMAGE_WIDTH = 640#1920
-IMAGE_HEIGHT = 360#1080
-
-QR_SIZE = 100
-QR_PADDING = 10
-
-if __name__ == "__main__":
-       eye = Camera(0)
-
-       times = []
-       means = []
-
-       lut = cube.Cube(eye, LUT_SIZE)
-       lut.process()
-       show(lut.lut)
-       validate(lut)
-
-       save("../../cube.npy", lut.lut)
-
-
diff --git a/src/cube/test.py b/src/cube/test.py
deleted file mode 100755 (executable)
index f5d0834..0000000
--- a/src/cube/test.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from frame import cast
-import cv2 as cv
-import numpy as np
-from graph import compare
-import time
-
-def validate(cube):
-       print("testing LUT...")
-
-       image = cv.imread("../calibration.jpg")
-       height, width, _ = image.shape
-       a = cast(np.flip(image, axis=-1)).astype(np.uint8)
-       casted = cast(np.flip(image, axis=-1)).astype(np.uint8)
-
-       start = time.time()
-
-       a = np.divide(a, cube.s)
-
-       c1 = np.floor(a).astype(np.uint8)
-       c2 = np.ceil(a).astype(np.uint8)
-       rem = np.remainder(a, 1)
-
-       def index_lut(a, i):
-               for ih in range(height):
-                       for iw in range(width):
-                               pos = i[ih,iw]
-                               pos = np.clip(pos, 0, a.shape[0])
-                               i[ih,iw] = a[pos[0], pos[1], pos[2]]
-
-               return i
-
-       c1 = index_lut(cube.lut, c1)
-       c2 = index_lut(cube.lut, c2)
-
-       a = c1 + np.array((c2 - c1) * rem, dtype=np.uint8)
-       a = np.flip(a, axis=-1)
-
-       dur = time.time() - start
-
-       casted = np.flip(casted, axis=-1)
-
-       # do the diff
-
-       diff = np.abs(image.sum(axis=-1, dtype=np.int16) - a.sum(axis=-1, dtype=np.int16))
-       diff = np.clip(diff, 0, 255).astype(np.uint8)
-       diff = np.stack((diff, diff, diff), axis=-1)
-
-       print(f"""
-cast mean:\t\t{np.mean(np.abs(casted - a))}
-
-max error:\t\t{diff.max()}
-mean error:\t\t{np.mean(diff)}
-standard deviation:\t{np.std(diff)}
-
-time taken:\t\t{dur}s
-          """)
-
-       # make the composite image
-
-       left = np.vstack((image, a), dtype=np.uint8)
-       right = np.vstack((casted, diff), dtype=np.uint8)
-
-       composite = np.hstack((left, right))
-
-       composite = cv.resize(composite, (640,360))
-       cv.imshow("LUT Calibration", composite)
-
-       cv.waitKey(0)