rscam = "0.5.5"
npyz = "0.8.4"
rand = "0.9.2"
+nokhwa = {version = "0.10.10", features = ["input-native"]}
+relative-path = "2.0.1"
[build-dependencies]
cc = "1.0"
fn main() {
- println!("cargo::rerun-if-changed=src/perspective.cpp");
+ println!("cargo::rerun-if-changed=src/process.cpp");
+ println!("cargo::rerun-if-changed=src/homography.cpp");
+    println!("cargo::rerun-if-changed=src/color.cpp");
+    println!("cargo::rerun-if-changed=src/barrel.cpp");
println!("cargo::rustc-env=CXXFLAGS=-Wunused-parameter -lopencv_core -lopencv_highgui -lopencv_xfeatures2d -lopencv_calib3d -lopencv_videoio -lopencv_imgcodecs -lopencv_imgproc -lopencv_features2d");
cc::Build::new()
- .file("src/perspective.cpp")
+ .file("src/process.cpp")
.cpp(true)
.include("/usr/include/opencv4/")
- .compile("perspective.a");
+
+        // constants shared across the C++/Rust boundary; keep in sync with main.rs
+ .define("WINDOW_SIZE", "128")
+ .define("CHUNK_SIZE", "72")
+ .define("IMAGE_WIDTH", "1920")
+ .define("IMAGE_HEIGHT", "1080")
+ .define("LUT_SIZE", "12")
+
+ .compile("process.a");
println!("cargo::rustc-flags=-lopencv_core -lopencv_highgui -lopencv_xfeatures2d -lopencv_calib3d -lopencv_videoio -lopencv_imgcodecs -lopencv_imgproc -lopencv_features2d");
- println!("cargo:rustc-env=WGPU_BACKEND=gl") // fix show_image bug(?)
+ println!("cargo:rustc-env=WGPU_BACKEND=gl"); // fix show_image bug(?)
+    println!("cargo:rustc-env=WGPU_POWER_PREF=high"); // switch to "low" if there are performance issues
}
--- /dev/null
+import numpy as np
+
+SCALER = (12 - 1) / 255  # (LUT_SIZE - 1) / max channel value
+
+def run_color(color, a):
+
+    # interpolates between the floor corner and the per-axis ceil corner of
+    # the lattice; at the moment this is how the cpp works too.
+    # I think there might actually be something up with the LUT itself.
+
+ color = color.astype(np.float64) * SCALER
+
+ floor = np.floor(color).astype(np.uint8)
+ ceil = np.ceil(color).astype(np.uint8)
+ rem = np.remainder(color, 1)
+
+    # interpolate in float so the uint8 subtraction can't wrap around
+    lo = a[tuple(floor)].astype(np.float64)
+    hi = a[tuple(ceil)].astype(np.float64)
+
+    new_color = lo + ((hi - lo) * rem)
+    return np.round(new_color).astype(np.uint8)
+
+if __name__ == "__main__":
+ a = np.load("./data/cube.npy")
+
+ print(run_color(np.array([79,2,91]), a))
--- /dev/null
+#include "opencv4/opencv2/imgproc.hpp"
+
+using namespace cv;
+
+void ApplyUndistort(uint8_t *camera_ptr, float *xmat_ptr, float *ymat_ptr)
+{
+ Mat xmat (IMAGE_HEIGHT, IMAGE_WIDTH, CV_32F, xmat_ptr);
+ Mat ymat (IMAGE_HEIGHT, IMAGE_WIDTH, CV_32F, ymat_ptr);
+
+ Mat capture(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC3, camera_ptr);
+ Mat buffer = capture.clone();
+
+    /* This won't work on the GPU as-is because the Mats would have to be
+     * GpuMats; since we're wrapping raw pointers, it might be better to move
+     * everything onto the GPU and then do the warp transform all at once.
+     *
+     * This might be a bit messy.
+     *
+     * Also, if I'm writing CUDA code in C++ and then moving between Rust and
+     * C++, it might just be easier to do a lot of the color work in Rust. The
+     * question is whether it's more efficient to do it on the CPU or to move
+     * it over to the GPU and do it there...
+     */
+
+ remap(buffer, capture, xmat, ymat, INTER_NEAREST);
+}
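+
+#ifdef CUDA
+/* A minimal, untested sketch of the GPU path discussed above, assuming OpenCV
+ * was built with CUDA support (the cudawarping module). ApplyUndistortGpu is
+ * a placeholder name; in real code the maps should be uploaded once and
+ * cached, since they never change between frames.
+ */
+#include "opencv4/opencv2/cudawarping.hpp"
+
+void ApplyUndistortGpu(uint8_t *camera_ptr, float *xmat_ptr, float *ymat_ptr)
+{
+    Mat xmat (IMAGE_HEIGHT, IMAGE_WIDTH, CV_32F, xmat_ptr);
+    Mat ymat (IMAGE_HEIGHT, IMAGE_WIDTH, CV_32F, ymat_ptr);
+    Mat capture(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC3, camera_ptr);
+
+    // move the frame and both maps onto the device
+    cv::cuda::GpuMat d_src, d_dst, d_xmat, d_ymat;
+    d_src.upload(capture);
+    d_xmat.upload(xmat);
+    d_ymat.upload(ymat);
+
+    // remap on the GPU, then copy the result back over the camera buffer
+    cv::cuda::remap(d_src, d_dst, d_xmat, d_ymat, INTER_NEAREST);
+    d_dst.download(capture);
+}
+#endif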
--- /dev/null
+#include <cmath>
+#include <iostream> // std::cout in the catch handler
+#include "opencv4/opencv2/core.hpp"
+#include "opencv4/opencv2/highgui.hpp"
+#include "opencv4/opencv2/xfeatures2d.hpp"
+#include "opencv4/opencv2/calib3d.hpp"
+#include "opencv4/opencv2/imgproc.hpp"
+
+#ifndef LUT_SIZE
+const int LUT_SIZE = 12;
+#endif
+
+using namespace std;
+using namespace cv;
+
+// is this damn well right?!?!
+const float SCALER = ((float)LUT_SIZE - 1.) / 255.;
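+// SCALER maps a channel value in [0, 255] onto the lattice [0, LUT_SIZE - 1];
+// a full-scale channel (255 * SCALER == LUT_SIZE - 1) lands exactly on the
+// last lattice point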
+
+void ApplyCorrection(uint8_t *buffer_ptr, uint8_t *lut_ptr)
+{
+ try
+ {
+ Mat capture(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC3, buffer_ptr);
+
+ MatIterator_<Point3_<uint8_t>> it = capture.begin<Point3_<uint8_t>>();
+ MatIterator_<Point3_<uint8_t>> it_end = capture.end<Point3_<uint8_t>>();
+
+        /*
+         * Pretty sure that because we're not actually using OpenCV for
+         * anything here, other than some iterator machinery that may itself
+         * carry a lot of overhead, we don't actually need the pixels read in
+         * BGR format.
+         */
+
+ for (; it != it_end; it++) {
+ Point3_<uint8_t> pixel = (*it);
+
+ Point3_<float> scaled (pixel);
+ scaled *= SCALER;
+
+ int r = floor(scaled.z);
+ int g = floor(scaled.y);
+ int b = floor(scaled.x);
+
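+            // the LUT is laid out [r][g][b] row-major with 3 bytes per entry:
+            // LUT_SIZE^2 entries per r step, LUT_SIZE per g step, 1 per b step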
+ int idx = (((LUT_SIZE * LUT_SIZE) * r) + (LUT_SIZE * g)+ b) * 3;
+
+ uint8_t r_1 = *(lut_ptr + idx + 0);
+ uint8_t g_1 = *(lut_ptr + idx + 1);
+ uint8_t b_1 = *(lut_ptr + idx + 2);
+
+            // step to the neighbouring lattice point along each axis for the
+            // second sample, unless this channel is already on the last point
+            int offset = 0;
+            if (r < LUT_SIZE - 1) {
+                offset += LUT_SIZE * LUT_SIZE * 3;
+            }
+            if (g < LUT_SIZE - 1) {
+                offset += LUT_SIZE * 3;
+            }
+            if (b < LUT_SIZE - 1) {
+                offset += 3;
+            }
+
+ uint8_t r_2 = *(lut_ptr + idx + offset + 0);
+ uint8_t g_2 = *(lut_ptr + idx + offset + 1);
+ uint8_t b_2 = *(lut_ptr + idx + offset + 2);
+
+            double n;
+            // pretty sure we could avoid the dummy integral-part variable by
+            // passing a null pointer, but it didn't work when I tried just now
+
+ double r_w = modf(scaled.z, &n);
+ double g_w = modf(scaled.y, &n);
+ double b_w = modf(scaled.x, &n);
+
+ (*it).z = r_1 + (double)((r_2 - r_1) * r_w);
+ (*it).y = g_1 + (double)((g_2 - g_1) * g_w);
+ (*it).x = b_1 + (double)((b_2 - b_1) * b_w);
+ }
+ }
+ catch (const std::exception &e) // handle exceptions for rust
+ {
+ std::cout << "Exception " << e.what() << std::endl;
+ }
+}
+
mapx, mapy = cv.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w,h), 5)
dst = cv.remap(img, mapx, mapy, cv.INTER_LINEAR)
-full_map = np.stack((mapx, mapy))
-np.save("./data/map.npy", full_map)
+np.save("./data/mapx.npy", mapx)
+np.save("./data/mapy.npy", mapy)
# crop the image
x, y, w, h = roi
from lut.graph import show
from lut.test import validate
from lut.cube import Cube
+from lut.config import PASSTHROUGH, CAST_TEST
from numpy import save
import matplotlib.pyplot as plt
show(lut.lut)
# validate error of LUT
- validate(lut)
- save("../data/cube.npy", lut.lut)
+ if PASSTHROUGH and CAST_TEST:
+ validate(lut)
+
+ save("./data/cube.npy", lut.lut)
--- /dev/null
+#include <cstring>  // std::memcpy
+#include <iostream> // std::cout in the catch handler
+
+#include "opencv4/opencv2/xfeatures2d.hpp"
+#include "opencv4/opencv2/calib3d.hpp"
+#include "opencv4/opencv2/imgproc.hpp"
+#include "opencv4/opencv2/imgcodecs.hpp"
+
+using namespace cv;
+using namespace cv::xfeatures2d;
+
+void FuncGetHomography(uint8_t *camera_ptr, double *homography_ptr)
+{
+ try
+ {
+ Mat img1 = imread( samples::findFile("./test/calibration.jpg")/*, IMREAD_GRAYSCALE */);
+ Mat img2(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC3, camera_ptr);
+
+ // detect keypoints and compute descriptors
+ int minHessian = 400;
+ Ptr<SURF> detector = SURF::create( minHessian );
+
+ std::vector<KeyPoint> keypoints1, keypoints2;
+ Mat descriptors1, descriptors2;
+ detector->detectAndCompute( img1, noArray(), keypoints1, descriptors1 );
+ detector->detectAndCompute( img2, noArray(), keypoints2, descriptors2 );
+
+ // match descriptors
+ Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
+ std::vector< std::vector<DMatch> > knn_matches;
+
+ matcher->knnMatch( descriptors1, descriptors2, knn_matches, 2 );
+
+ // filter matches by the ratio test
+ const float ratio_thresh = 0.7f;
+ std::vector<DMatch> good_matches;
+ for (size_t i = 0; i < knn_matches.size(); i++)
+ {
+ if (knn_matches[i][0].distance < ratio_thresh * knn_matches[i][1].distance)
+ {
+ good_matches.push_back(knn_matches[i][0]);
+ }
+ }
+
+ // get the source and destination points
+ std::vector<Point2f> source_points, dst_points;
+ for (size_t i = 0; i < good_matches.size(); i++)
+ {
+ Point2f s_point = keypoints2[good_matches[i].trainIdx].pt;
+ Point2f d_point = keypoints1[good_matches[i].queryIdx].pt;
+ source_points.push_back(s_point);
+ dst_points.push_back(d_point);
+ }
+
+ // perform homography
+        double ransac_thresh = 5.0;
+ Mat homography = findHomography(source_points, dst_points, RANSAC, ransac_thresh);
+
+ // copy the result to the homography location
+ const double* result_ptr = homography.ptr<double>(0);
+        std::memcpy(homography_ptr, result_ptr, 9 * sizeof(double)); // [f64; 9]
+ }
+ catch (const std::exception &e) // handle exceptions for rust
+ {
+ std::cout << "Exception " << e.what() << std::endl;
+ }
+}
+
+void ApplyHomography(uint8_t *camera_ptr, uint8_t *buffer_ptr, double *homography_ptr)
+{
+ Mat capture(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC3, camera_ptr);
+ Mat buffer(CHUNK_SIZE, WINDOW_SIZE, CV_8UC3, buffer_ptr);
+ Mat homography(3, 3, CV_64F, homography_ptr);
+
+    // warpPerspective can't operate in-place, so warp into a scratch Mat
+    // before downscaling into the spectrogram-sized buffer
+    Mat warped;
+    warpPerspective(capture, warped, homography, capture.size());
+    resize(warped, buffer, buffer.size());
+}
+
--- /dev/null
+use nokhwa::Camera;
+use nokhwa::utils::{RequestedFormat, CameraIndex};
+use nokhwa::pixel_format::RgbFormat;
+
+use rustfft::num_complex::Complex;
+
+use npyz::NpyFile;
+
+use std::io::BufReader;
+use std::fs::File;
+
+use crate::{
+ GetHomography,
+ ProcessCapture,
+ LUT_LENGTH,
+ SPECTOGRAM_AREA,
+ IMAGE_AREA,
+ VOLUME_MIN,
+ VOLUME_REL,
+ AMPLITUDE_REL,
+ AMPLITUDE_MIN,
+ ANGLE_REL,
+ ANGLE_MIN
+};
+
+pub struct ImageArray {
+ pub data: Vec<u8>,
+ homography: [f64; 9],
+ lut: [u8; LUT_LENGTH],
+ camera_buffer: Vec<u8>,
+ camera: Camera,
+ chunks: usize
+}
+
+impl ImageArray {
+ pub fn new (homography: [f64; 9]) -> Self {
+
+        // digest the numpy array and set up the LUT
+ let file = BufReader::new(File::open("./data/cube.npy").unwrap());
+ let npy = NpyFile::new(file).unwrap();
+
+        // this is a little silly; these unwraps should be handled properly
+ let lut: [u8; LUT_LENGTH] = npy.into_vec().unwrap().try_into().unwrap();
+
+ // setup the camera
+ let index = CameraIndex::Index(0);
+ let requested = RequestedFormat::new::<RgbFormat>(
+ nokhwa::utils::RequestedFormatType::AbsoluteHighestFrameRate
+ );
+
+ let mut camera = Camera::new(index, requested).unwrap();
+
+ camera.set_resolution(
+ nokhwa::utils::Resolution { width_x: 1920, height_y: 1080}
+ ).expect("Resolution problem!");
+
+ // self
+ Self {
+ data: vec![0u8; SPECTOGRAM_AREA * 3],
+ homography,
+ lut,
+ camera_buffer: vec![0u8; IMAGE_AREA],
+ camera,
+ chunks: SPECTOGRAM_AREA
+ }
+ }
+
+ pub fn from_camera (&mut self) {
+ let frame = self.camera.frame().unwrap();
+
+ println!("{}", frame.resolution());
+
+ self.camera_buffer = frame.buffer()[..].try_into().expect("Image is wrong size");
+
+ unsafe {
+ ProcessCapture (
+ self.camera_buffer.as_ptr() as usize,
+ self.data.as_ptr() as usize,
+ self.homography.as_ptr() as usize,
+ self.lut.as_ptr() as usize
+ );
+ }
+ }
+
+ pub fn calibrate (&mut self) {
+        // hopefully nokhwa doesn't need the warm-up reads that rscam did!
+ // for _i in 0..10 {
+ // self.camera_buffer = self.camera.capture().unwrap()[..].try_into().expect("Image is wrong size"); //delete assignment
+ // }
+
+ // enter unsafe and get the homography array
+ unsafe {
+ GetHomography(self.camera_buffer.as_ptr() as usize, self.homography.as_ptr() as usize);
+ }
+ }
+
+ pub fn from_buffer (&mut self, buffer: &Vec<Complex<f32>>) -> () {
+ let mut r: f32;
+ let mut theta: f32;
+ let mut amplitude: f32;
+
+ let mut hue: f32;
+ let mut angle: f32;
+
+ let mut d:f32;
+ let mut s:f32;
+ let mut v:f32;
+ let mut c:f32;
+ let mut m:f32;
+ let mut x:f32;
+ let mut g:f32;
+ let mut b:f32;
+
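+        // each complex bin becomes one RGB pixel: amplitude (in dB, mapped
+        // into pixel range) drives hue and value, the phase angle drives
+        // saturation, and the HSV triple is converted to RGB sector by sector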
+ for i in 0..self.chunks {
+ (r, theta) = buffer[i].to_polar();
+
+ // make linear and normalize
+ amplitude = 20f32 * r.log10();
+ amplitude = ((amplitude - VOLUME_MIN) / (VOLUME_REL / AMPLITUDE_REL)) + AMPLITUDE_MIN;
+
+ hue = (180f32 / 255f32) * amplitude;
+
+ angle = (theta.to_degrees() + 180f32) * (ANGLE_REL / 360f32) + ANGLE_MIN;
+
+ d = hue * (1f32 / 30f32);
+ s = angle / 255f32;
+ v = amplitude / 255f32;
+
+ c = s * v;
+ m = v - c;
+ x = c * (1f32 - (d.rem_euclid(2f32) - 1f32).abs());
+
+ (r, g, b) = match d.floor() {
+ 0.0 => (c, x, 0f32),
+ 1.0 => (x, c, 0f32),
+ 2.0 => (0f32, c, x),
+ 3.0 => (0f32, x, c),
+ 4.0 => (x, 0f32, c),
+ _ => (c, 0f32, x)
+ };
+
+ self.data[i*3] = ((r + m) * 255f32) as u8;
+ self.data[i*3+1] = ((g + m) * 255f32) as u8;
+ self.data[i*3+2] = ((b + m) * 255f32) as u8;
+ }
+ }
+
+ pub fn to_buffer (&mut self, buffer: &mut Vec<Complex<f32>>) -> () {
+ let mut r: f32;
+ let mut amplitude: f32;
+
+ let mut angle: f32;
+
+ let mut s:f32;
+ let mut v:f32;
+ let mut c:f32;
+ let mut g:f32;
+ let mut b:f32;
+
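+        // inverse of from_buffer: recover amplitude from the value channel
+        // and phase from the saturation channel, then rebuild the complex bin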
+ for i in 0..self.chunks {
+ r = self.data[i*3] as f32;
+ g = self.data[i*3+1] as f32;
+ b = self.data[i*3+2] as f32;
+
+ v = r.max(g).max(b);
+ c = (v - r.min(g).min(b)) * 255f32;
+ s = if v == 0f32 { 0f32 } else { c / v };
+
+ amplitude = (v - AMPLITUDE_MIN) * (VOLUME_REL / AMPLITUDE_REL) + VOLUME_MIN;
+
+ amplitude = 10f32.powf(amplitude / 20f32);
+
+ angle = (s - ANGLE_MIN) / (ANGLE_REL / 360f32) - 180f32;
+ angle = angle.to_radians();
+
+ buffer[i] = Complex::from_polar(amplitude, angle);
+ }
+ }
+}
-from .config import IMAGE_WIDTH, IMAGE_HEIGHT, CAP_WAIT
+from .config import IMAGE_WIDTH, IMAGE_HEIGHT, CAP_WAIT, PASSTHROUGH
import numpy as np
import cv2 as cv
self.camera = cv.VideoCapture(device)
self.homography = None
- #self.calibrate()
+ if not PASSTHROUGH: self.calibrate()
# get image from camera and fix perspective
def get(self, image):
cv.imshow("LUT Calibration", small)
cv.waitKey(CAP_WAIT)
- #_, capture = self.camera.read()
- capture = image
+ if PASSTHROUGH:
+ return image
+
+ _, capture = self.camera.read()
if self.homography is not None:
capture = cv.warpPerspective(
(IMAGE_WIDTH, IMAGE_HEIGHT)
)
- return image
return capture
# standard calibration function
# how long to wait before capturing image (1 minimum)
CAP_WAIT = 1
+
+# whether to use the camera or not
+PASSTHROUGH = True
+CAST_TEST = False
-from .config import QR_SIZE, QR_PADDING, IMAGE_WIDTH, IMAGE_HEIGHT
+from .config import QR_SIZE, QR_PADDING, IMAGE_WIDTH, IMAGE_HEIGHT, CAST_TEST
from pyzbar.pyzbar import decode
import numpy as np
qr_image = np.repeat(qr_image[:, :, np.newaxis], 3, axis=2)
qr_image = cv.resize(qr_image, (QR_SIZE,QR_SIZE), interpolation=cv.INTER_NEAREST)
- color = cast(color)
+ if CAST_TEST:
+ color = cast(color)
# create color image of correct shape
c_image = np.array([[color[::-1]]], dtype=np.uint8)
use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
use std::sync::{Arc, Mutex};
use std::sync::mpsc;
-use std::sync::mpsc::Sender;
-use rscam::{Camera, Config};
use image::ImageReader;
+use relative_path::RelativePath;
-mod correct;
+mod image_array;
+mod sample_buffer;
-use crate::correct::Correcter;
-use crate::correct::_cast;
+use image_array::ImageArray;
+use sample_buffer::SampleBuffer;
const WINDOW_SIZE: usize = 128;
const CHUNK_SIZE: usize = 72;
const IMAGE_WIDTH: usize = 1920;
const IMAGE_HEIGHT: usize = 1080;
const IMAGE_AREA: usize = IMAGE_WIDTH * IMAGE_HEIGHT * 3;
-const FPS: usize = 30;
+// const FPS: usize = 30;
// maximum and minimum pixel values of angle and amplitude. could be confined to
// improve performance for quiet sounds.
const VOLUME_MIN: f32 = -40.0;
const VOLUME_REL: f32 = VOLUME_MAX - VOLUME_MIN;
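+// must match the LUT_SIZE define passed to the C++ compiler in build.rs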
+const LUT_SIZE: usize = 12;
+const LUT_LENGTH: usize = LUT_SIZE * LUT_SIZE * LUT_SIZE * 3;
+
const DEBUG_MODE: bool = true;
+const CALIBRATION_PATH: &str = "./test/calibration.jpg";
+const AUDIO_PATH: &str = "/home/will/Downloads/Adducci - Around the Horn.wav";
+
extern "C" {
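+    // pointers cross the FFI boundary as usize and are cast back to raw
+    // pointers on the C++ side (see process.cpp)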
fn GetHomography(camera_ptr: usize, homography_ptr: usize);
- fn ApplyHomography(camera_ptr: usize, buffer_ptr: usize, homography_ptr: usize);
-}
-
-struct ImageArray {
- data: Vec<u8>,
- homography: [f64; 9],
- corrector: Correcter,
- camera_buffer: Vec<u8>,
- camera: Camera,
- chunks: usize
-}
-
-impl ImageArray {
- fn new (homography: [f64; 9]) -> Self {
- let mut array = Self {
- data: vec![0u8; SPECTOGRAM_AREA * 3],
- homography,
- corrector: Correcter::new("./cube.npy"),
-
- camera_buffer: vec![0u8; IMAGE_AREA],
- camera: Camera::new("/dev/v4l/by-id/usb-MACROSILICON_USB_Video-video-index0").unwrap(), // should be video2
-
- chunks: SPECTOGRAM_AREA
- };
-
- array.camera.start(&Config {
- interval: (1, FPS as u32),
- resolution: (IMAGE_WIDTH as u32, IMAGE_HEIGHT as u32),
- format: b"RGB3",
- ..Default::default()
- }).unwrap();
- array
- }
-
- fn from_camera (&mut self) {
- self.camera_buffer = self.camera.capture().unwrap()[..].try_into().expect("Image is wrong size");
-
- unsafe{ApplyHomography (self.camera_buffer.as_ptr() as usize, self.data.as_ptr() as usize, self.homography.as_ptr() as usize);}
-
- for i in 0..(self.data.len()-1)/3 {
- let mut r = self.data[i*3];
- let mut g = self.data[(i*3)+1];
- let mut b = self.data[(i*3)+3];
-
- [r, g, b] = self.corrector.correct(r, g, b);
-
- self.data[i*3] = r;
- self.data[(i*3)+1] = g;
- self.data[(i*3)+3] = b;
- }
- }
-
- fn calibrate (&mut self) {
- // rscam gives and empty image if its not prompted a couple times
- for _i in 0..10 {
- self.camera_buffer = self.camera.capture().unwrap()[..].try_into().expect("Image is wrong size"); //delete assignment
-
- }
-
- // enter unsafe and get the homography array
- unsafe {
- GetHomography(self.camera_buffer.as_ptr() as usize, self.homography.as_ptr() as usize);
- }
- }
-
- fn from_buffer (&mut self, buffer: &Vec<Complex<f32>>) -> () {
- let mut r: f32;
- let mut theta: f32;
- let mut amplitude: f32;
-
- let mut hue: f32;
- let mut angle: f32;
-
- let mut d:f32;
- let mut s:f32;
- let mut v:f32;
- let mut c:f32;
- let mut m:f32;
- let mut x:f32;
- let mut g:f32;
- let mut b:f32;
-
- for i in 0..self.chunks {
- (r, theta) = buffer[i].to_polar();
-
- // make linear and normalize
- amplitude = 20f32 * r.log10();
- amplitude = ((amplitude - VOLUME_MIN) / (VOLUME_REL / AMPLITUDE_REL)) + AMPLITUDE_MIN;
-
- hue = (180f32 / 255f32) * amplitude;
-
- angle = (theta.to_degrees() + 180f32) * (ANGLE_REL / 360f32) + ANGLE_MIN;
-
- d = hue * (1f32 / 30f32);
- s = angle / 255f32;
- v = amplitude / 255f32;
-
- c = s * v;
- m = v - c;
- x = c * (1f32 - (d.rem_euclid(2f32) - 1f32).abs());
-
- (r, g, b) = match d.floor() {
- 0.0 => (c, x, 0f32),
- 1.0 => (x, c, 0f32),
- 2.0 => (0f32, c, x),
- 3.0 => (0f32, x, c),
- 4.0 => (x, 0f32, c),
- _ => (c, 0f32, x)
- };
-
- self.data[i*3] = ((r + m) * 255f32) as u8;
- self.data[i*3+1] = ((g + m) * 255f32) as u8;
- self.data[i*3+2] = ((b + m) * 255f32) as u8;
- }
- }
-
- fn to_buffer (&mut self, buffer: &mut Vec<Complex<f32>>) -> () {
- let mut r: f32;
- let mut amplitude: f32;
-
- let mut angle: f32;
-
- let mut s:f32;
- let mut v:f32;
- let mut c:f32;
- let mut g:f32;
- let mut b:f32;
-
- for i in 0..self.chunks {
- r = self.data[i*3] as f32;
- g = self.data[i*3+1] as f32;
- b = self.data[i*3+2] as f32;
-
- v = r.max(g).max(b);
- c = (v - r.min(g).min(b)) * 255f32;
- s = if v == 0f32 { 0f32 } else { c / v };
-
- amplitude = (v - AMPLITUDE_MIN) * (VOLUME_REL / AMPLITUDE_REL) + VOLUME_MIN;
-
- amplitude = 10f32.powf(amplitude / 20f32);
-
- angle = (s - ANGLE_MIN) / (ANGLE_REL / 360f32) - 180f32;
- angle = angle.to_radians();
-
- buffer[i] = Complex::from_polar(amplitude, angle);
- }
- }
-}
-
-struct SampleBuffer {
- buffer: Arc<Mutex<[i16; 2 * SPECTOGRAM_AREA]>>,
- index: usize,
- tx: Sender<bool>
-}
-
-impl SampleBuffer {
- fn new(buffer: Arc<Mutex<[i16; 2 * SPECTOGRAM_AREA]>>, tx: Sender<bool>) -> Self {
- Self {
- buffer,
- index: 0,
- tx
- }
- }
-
- fn get_data(&mut self, data: &mut [i16]) {
- let mut buffer = self.buffer.lock().unwrap();
- let length = data.len()/2;
-
- for i in 0..length {
- data[i*2] = buffer[i + self.index];
- }
-
- self.index += length;
- if self.index > SPECTOGRAM_AREA {
- for i in 0..SPECTOGRAM_AREA {
- buffer[i] = buffer[i + SPECTOGRAM_AREA];
- }
- self.index -= SPECTOGRAM_AREA;
-
- let _ = self.tx.send(true);
- }
- }
+ fn ProcessCapture(camera_ptr: usize, buffer_ptr: usize, homography_ptr: usize, lut_ptr: usize);
}
#[show_image::main]
// create the debug window
let debug_window = create_window("Debug", Default::default())?;
+ // get calibration image path
+ let calibration_path = RelativePath::new(CALIBRATION_PATH).as_str();
+
// create window for displaying images and display calibration image
let display_window = create_window("Display", Default::default())?;
- let calibration_image = ImageReader::open("src/calibration.jpg")?.decode()?;
+ let calibration_image = ImageReader::open(calibration_path)?.decode()?;
display_window.set_image("Display", calibration_image)?;
// calibrate camera
image_array.calibrate();
+ // get audio path
+ let audio_path = RelativePath::new(AUDIO_PATH).as_str();
+
// open audio file
- let mut reader = hound::WavReader::open("/home/will/Downloads/asq.wav").unwrap();
+ let mut reader = hound::WavReader::open(audio_path).unwrap();
let file_rate = reader.spec().sample_rate;
// setup audio output and build output stream
+++ /dev/null
-// g++ ./perspective.cpp -I/usr/share/include/opencv4/ -lopencv_core -lopencv_calib3d -lopencv_highgui -lopencv_xfeatures2d -lopencv_features2d -lopencv_imgproc -lopencv_videoio -lopencv_imgcodecs -lopencv_features2d -o perspective.a
-//
-#include "opencv4/opencv2/core.hpp"
-#include "opencv4/opencv2/highgui.hpp"
-#include "opencv4/opencv2/xfeatures2d.hpp"
-#include "opencv4/opencv2/calib3d.hpp"
-#include "opencv4/opencv2/imgproc.hpp"
-
-#ifdef CUDA
-#include "opencv4/opencv2/cudawarping.hpp"
-#endif
-
-using namespace cv;
-using namespace cv::xfeatures2d;
-
-const size_t WINDOW_SIZE = 128;
-const size_t CHUNK_SIZE = 72;
-const size_t SPECTOGRAM_AREA = WINDOW_SIZE * CHUNK_SIZE;
-const int FLAT_AREA = SPECTOGRAM_AREA * 3;
-const Size DSIZE_AREA = Size(WINDOW_SIZE, CHUNK_SIZE);
-
-const size_t IMAGE_WIDTH = 1920;
-const size_t IMAGE_HEIGHT = 1080;
-
-extern "C"
-{
- void GetHomography(uint8_t *camera_ptr, double *homography_ptr)
- {
- try
- {
- Mat img1 = imread( samples::findFile("src/calibration.jpg")/*, IMREAD_GRAYSCALE */);
- Mat img2(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC3, camera_ptr);
-
- // detect keypoints and compute descriptors
- int minHessian = 400;
- Ptr<SURF> detector = SURF::create( minHessian );
-
- std::vector<KeyPoint> keypoints1, keypoints2;
- Mat descriptors1, descriptors2;
- detector->detectAndCompute( img1, noArray(), keypoints1, descriptors1 );
- detector->detectAndCompute( img2, noArray(), keypoints2, descriptors2 );
-
- // match descriptors
- Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
- std::vector< std::vector<DMatch> > knn_matches;
-
- matcher->knnMatch( descriptors1, descriptors2, knn_matches, 2 );
-
- // filter matches by the ratio test
- const float ratio_thresh = 0.7f;
- std::vector<DMatch> good_matches;
- for (size_t i = 0; i < knn_matches.size(); i++)
- {
- if (knn_matches[i][0].distance < ratio_thresh * knn_matches[i][1].distance)
- {
- good_matches.push_back(knn_matches[i][0]);
- }
- }
-
- // get the source and destination points
- std::vector<Point2f> source_points, dst_points;
- for (size_t i = 0; i < good_matches.size(); i++)
- {
- Point2f s_point = keypoints2[good_matches[i].trainIdx].pt;
- Point2f d_point = keypoints1[good_matches[i].queryIdx].pt;
- source_points.push_back(s_point);
- dst_points.push_back(d_point);
- }
-
- // perform homography
- double ransac_thresh = 5.0f;
- Mat homography = findHomography(source_points, dst_points, RANSAC, ransac_thresh);
-
- // copy the result to the homography location
- const double* result_ptr = homography.ptr<double>(0);
- std::memcpy(homography_ptr, result_ptr, 72); // size of [f64; 9]
- }
- catch (const std::exception &e) // handle exceptions for rust
- {
- std::cout << "Exception " << e.what() << std::endl;
- }
- }
-
- void ApplyHomography(uint8_t *camera_ptr, uint8_t *buffer_ptr, double *homography_ptr)
- {
- Mat capture(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC3, camera_ptr);
- Mat buffer(CHUNK_SIZE, WINDOW_SIZE, CV_8UC3, buffer_ptr);
- Mat homography(3, 3, CV_64F, homography_ptr);
-
- warpPerspective(capture, capture, homography, capture.size());
- resize(capture, buffer, buffer.size());
- }
-
- void ApplyUndistort(uint8_t *camera_ptr, float *xmat_ptr, float *ymat_ptr)
- {
- Mat xmat (IMAGE_HEIGHT, IMAGE_WIDTH, CV_32F, xmat_ptr);
- Mat ymat (IMAGE_HEIGHT, IMAGE_WIDTH, CV_32F, ymat_ptr);
-
- Mat capture(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC3, camera_ptr);
- Mat buffer = capture.clone();
-
-/* This wont work because the Mats have to be GpuMats, since we're getting a
- * pointer for them, it might be better to move it all over onto the gpu and
- * then do the warp transform all at the same time.
- *
- * This might be a bit messy.
- *
- * Also if im writing CUDA code in cpp and then moving between rust and cpp it
- * might just be easier to make a lot of the color stuff in rust. Question is is
- * it more efficient to just do it on the CPU or move it over to the GPU and
- * then do it there...
- */
-
-#ifdef CUDA
- cv::cuda::remap(buffer, capture, xmat, ymat, INTER_NEAREST);
-#else
- remap(buffer, capture, xmat, ymat, INTER_NEAREST);
-#endif
- }
-}
-
--- /dev/null
+#include "barrel.cpp"
+#include "homography.cpp"
+#include "color.cpp"
+
+extern "C"
+{
+ void ProcessCapture(uint8_t *camera_ptr, uint8_t *buffer_ptr, double *homography_ptr, uint8_t *lut_ptr)
+ {
+        /*
+         * Here's the plan:
+         *
+         * Take the image from the camera buffer,
+         * apply the homography to undistort it,
+         * apply the LUT to color correct it,
+         * then do the FFT to turn it into audio.
+         *
+         * This is a lot of new code to write rather than just refactoring old
+         * code, but it lets us talk to the GPU a lot better and keeps things
+         * in one place.
+         *
+         * It also means that we can compile with explicit CUDA support
+         * without even thinking about it on the Rust side.
+         */
+
+ ApplyHomography(camera_ptr, buffer_ptr, homography_ptr);
+ //ApplyCorrection(buffer_ptr, lut_ptr);
+ }
+
+ // get homography function (see "homography.cpp")
+ void GetHomography(uint8_t *camera_ptr, double *homography_ptr)
+ {
+ FuncGetHomography(camera_ptr, homography_ptr);
+ }
+}
--- /dev/null
+use std::sync::mpsc::Sender;
+use std::sync::{Arc, Mutex};
+
+use crate::SPECTOGRAM_AREA;
+
+pub struct SampleBuffer {
+ buffer: Arc<Mutex<[i16; 2 * SPECTOGRAM_AREA]>>,
+ index: usize,
+ tx: Sender<bool>
+}
+
+impl SampleBuffer {
+ pub fn new(buffer: Arc<Mutex<[i16; 2 * SPECTOGRAM_AREA]>>, tx: Sender<bool>) -> Self {
+ Self {
+ buffer,
+ index: 0,
+ tx
+ }
+ }
+
+ pub fn get_data(&mut self, data: &mut [i16]) {
+ let mut buffer = self.buffer.lock().unwrap();
+ let length = data.len()/2;
+
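+        // the output is interleaved stereo: only the left channel is written,
+        // leaving the right channel free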
+ for i in 0..length {
+ data[i*2] = buffer[i + self.index];
+ }
+
+ self.index += length;
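+        // once a whole spectrogram window has been consumed, slide the second
+        // half of the buffer down and signal the producer to refill it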
+ if self.index > SPECTOGRAM_AREA {
+ for i in 0..SPECTOGRAM_AREA {
+ buffer[i] = buffer[i + SPECTOGRAM_AREA];
+ }
+ self.index -= SPECTOGRAM_AREA;
+
+ let _ = self.tx.send(true);
+ }
+ }
+}
todo:
-- make calibrator for casts on the image
- - could do this with HSV, i suspect that if we graph changes in HS across
- the surface of the image, we would see the casts, if they collate with each
- other then the new "LUT" is x,y,H. this should be simple to build into the
- current LUT system
-
- test the lut system in-situ, graph for optimal
-- make the LUT corrector (rust) more efficient (multithreading?)
- possibly see if a version can be made that takes mic input
- implement recording for testing
- implement image display in unused channel
- write cpp code for using cuFFT (not supported by rust-cuda)
- potentially write rust-cuda kernel for the color conversion
+
+QUICKLY:
+- make functional with windows
+ - some things that might not be windows compatible:
+ - show-image (YES but might need to mess with the backend)
+ - hound for reading the wav files (YES)
+ - cpal for the audio out (YES look into asio extra feature for lower latency)
+ - rscam for the camera (NO drop in for nokhwa) (IMPLEMENTED)
+ - the file paths (IMPLEMENTED)
+- evaluate if cuda is necessary
+
+LIBRARIES:
+- FFTW (https://www.fftw.org/fftw3_doc/) for the c++ (see sketch below)
+- nokhwa (IMPLEMENTED)
+- fon
+- rayon
+
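+FFTW SKETCH (a minimal sketch assuming the single-precision fftw3f library is
+linked; the function name and chunking are placeholders, not existing code):
+
+  #include <fftw3.h>
+
+  // one forward real-to-complex FFT over n samples
+  void forward_fft(float *samples, fftwf_complex *bins, int n)
+  {
+      // in real code, create the plan once per size and reuse it
+      fftwf_plan plan = fftwf_plan_dft_r2c_1d(n, samples, bins, FFTW_ESTIMATE);
+      fftwf_execute(plan);
+      fftwf_destroy_plan(plan);
+  }
+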
+LATENCY:
+- cpal asio
+- reduce the image size
+- gpu support for the c++ code
+
+GPU PERFORMANCE ISSUES:
+- bring down show_image env flag
+
+CAPTURE ISSUES:
+- check nokhwa fps, res, etc
+- reinstate the read loop