From ab3264342b6716f3f14b4407469c762309e9c520 Mon Sep 17 00:00:00 2001 From: Vixea <112600048+Vixea@users.noreply.github.com> Date: Sun, 9 Jul 2023 22:08:35 -0500 Subject: [PATCH 01/28] Linux: Add wayland drm_lease_shim library (#1728) * Linux: Add wayland drm_lease_shim library * Linux: Copy Shim (#1707) * rustfmt --------- Co-authored-by: David Rosca Co-authored-by: Riccardo Zaglia --- Cargo.lock | 2 + alvr/filesystem/src/lib.rs | 4 + alvr/vrcompositor_wrapper/Cargo.toml | 6 + alvr/vrcompositor_wrapper/build.rs | 15 ++ alvr/vrcompositor_wrapper/drm-lease-shim.cpp | 227 +++++++++++++++++++ alvr/vrcompositor_wrapper/src/main.rs | 14 ++ alvr/vulkan_layer/layer/device_api.cpp | 18 ++ alvr/vulkan_layer/layer/device_api.hpp | 9 + alvr/vulkan_layer/layer/layer.cpp | 2 + alvr/xtask/src/build.rs | 5 + 10 files changed, 302 insertions(+) create mode 100644 alvr/vrcompositor_wrapper/build.rs create mode 100644 alvr/vrcompositor_wrapper/drm-lease-shim.cpp diff --git a/Cargo.lock b/Cargo.lock index b5e1cb3a11..fecd1f2ffd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -424,7 +424,9 @@ dependencies = [ name = "alvr_vrcompositor_wrapper" version = "21.0.0-dev00" dependencies = [ + "alvr_filesystem", "exec", + "xshell", ] [[package]] diff --git a/alvr/filesystem/src/lib.rs b/alvr/filesystem/src/lib.rs index 964b20129e..969206c621 100644 --- a/alvr/filesystem/src/lib.rs +++ b/alvr/filesystem/src/lib.rs @@ -255,6 +255,10 @@ impl Layout { self.vrcompositor_wrapper_dir.join("vrcompositor-wrapper") } + pub fn drm_lease_shim(&self) -> PathBuf { + self.vrcompositor_wrapper_dir.join("alvr_drm_lease_shim.so") + } + pub fn vulkan_layer(&self) -> PathBuf { self.libraries_dir.join(dynlib_fname("alvr_vulkan_layer")) } diff --git a/alvr/vrcompositor_wrapper/Cargo.toml b/alvr/vrcompositor_wrapper/Cargo.toml index b274a7a62e..a33774d5e0 100644 --- a/alvr/vrcompositor_wrapper/Cargo.toml +++ b/alvr/vrcompositor_wrapper/Cargo.toml @@ -6,5 +6,11 @@ rust-version.workspace = true authors.workspace = true license.workspace = true +[dependencies] +alvr_filesystem.workspace = true + +[build-dependencies] +xshell = "0.2" + [target.'cfg(target_os = "linux")'.dependencies] exec = "0.3.1" diff --git a/alvr/vrcompositor_wrapper/build.rs b/alvr/vrcompositor_wrapper/build.rs new file mode 100644 index 0000000000..7d67110e76 --- /dev/null +++ b/alvr/vrcompositor_wrapper/build.rs @@ -0,0 +1,15 @@ +#[cfg(target_os = "linux")] +fn main() { + use std::{env, path::PathBuf}; + use xshell::{cmd, Shell}; + + let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); + let target_dir = out_dir.join("../../.."); + + let sh = Shell::new().unwrap(); + let command = format!("g++ -shared -fPIC $(pkg-config --cflags libdrm) drm-lease-shim.cpp -o {}/alvr_drm_lease_shim.so", target_dir.display()); + cmd!(sh, "bash -c {command}").run().unwrap(); +} + +#[cfg(not(target_os = "linux"))] +fn main() {} diff --git a/alvr/vrcompositor_wrapper/drm-lease-shim.cpp b/alvr/vrcompositor_wrapper/drm-lease-shim.cpp new file mode 100644 index 0000000000..a947ef5cfe --- /dev/null +++ b/alvr/vrcompositor_wrapper/drm-lease-shim.cpp @@ -0,0 +1,227 @@ +#include +#include +#include +#include +#include +#include +#include + +#include + +#define PICOJSON_USE_INT64 +#include "../server/cpp/alvr_server/include/picojson.h" + +#define LOAD_FN(f) \ + if (!real_##f) { \ + real_##f = reinterpret_cast(dlsym(RTLD_NEXT, #f)); \ + if (!real_##f) { \ + ERR("Failed to load %s", #f); \ + abort(); \ + } \ + } \ + +#define LOG(f, ...) printf(f "\n" __VA_OPT__(,) __VA_ARGS__) +#define ERR(f, ...) 
fprintf(stderr, f "\n" __VA_OPT__(,) __VA_ARGS__) + +template +static constexpr bool compare_ptr(X x, Y y) +{ + return reinterpret_cast(x) == reinterpret_cast(y); +} + +struct wl_registry_listener { + void (*global)(void *data, struct wl_registry *wl_registry, uint32_t name, const char *interface, uint32_t version); + void (*global_remove)(void *data, struct wl_registry *wl_registry, uint32_t name); +}; + +struct wp_drm_lease_device_v1_listener { + void (*drm_fd)(void *data, struct wp_drm_lease_device_v1 *wp_drm_lease_device_v1, int32_t fd); + void (*connector)(void *data, struct wp_drm_lease_device_v1 *wp_drm_lease_device_v1, struct wp_drm_lease_connector_v1 *id); + void (*done)(void *data, struct wp_drm_lease_device_v1 *wp_drm_lease_device_v1); + void (*released)(void *data, struct wp_drm_lease_device_v1 *wp_drm_lease_device_v1); +}; + +struct wp_drm_lease_connector_v1_listener { + void (*name)(void *data, struct wp_drm_lease_connector_v1 *wp_drm_lease_connector_v1, const char *name); + void (*description)(void *data, struct wp_drm_lease_connector_v1 *wp_drm_lease_connector_v1, const char *description); + void (*connector_id)(void *data, struct wp_drm_lease_connector_v1 *wp_drm_lease_connector_v1, uint32_t connector_id); + void (*done)(void *data, struct wp_drm_lease_connector_v1 *wp_drm_lease_connector_v1); + void (*withdrawn)(void *data, struct wp_drm_lease_connector_v1 *wp_drm_lease_connector_v1); +}; + +struct wp_drm_lease_v1_listener { + void (*lease_fd)(void *data, struct wp_drm_lease_v1 *wp_drm_lease_v1, int32_t leased_fd); + void (*finished)(void *data, struct wp_drm_lease_v1 *wp_drm_lease_v1); +}; + +static struct wp_drm_lease_device_v1 {} fake_device_id; +static struct wp_drm_lease_connector_v1 {} fake_connector_id; +static struct wp_drm_lease_request_v1 {} fake_lease_request_id; +static struct wp_drm_lease_v1 {} fake_lease_id; + +static int drm_fd = -1; +static int drm_connector_id = -1; + +static void open_drm_fd() +{ + static drmModeResPtr (*real_drmModeGetResources)(int fd) = nullptr; + LOAD_FN(drmModeGetResources); + + drm_fd = open("/dev/dri/card0", O_RDONLY); + auto res = real_drmModeGetResources(drm_fd); + if (res && res->count_connectors) { + drm_connector_id = res->connectors[0]; + } + LOG("DRM: fd=%d, connector_id=%d", drm_fd, drm_connector_id); +} + +static int (*real_wl_proxy_add_listener)(struct wl_proxy *proxy, void (**implementation)(void), void *data); +static int hooked_wl_proxy_add_listener(struct wl_proxy *proxy, void (**implementation)(void), void *data) +{ + // wp_drm_lease_connector_v1 + if (compare_ptr(proxy, &fake_connector_id)) { + LOG("LISTENER wp_drm_lease_connector_v1"); + auto listener = reinterpret_cast(implementation); + listener->name(data, &fake_connector_id, "ALVR_name"); + listener->description(data, &fake_connector_id, "ALVR_description"); + listener->connector_id(data, &fake_connector_id, drm_connector_id); + listener->done(data, &fake_connector_id); + LOG("LISTENER done"); + return 0; + } + + // wp_drm_lease_v1 + if (compare_ptr(proxy, &fake_lease_id)) { + LOG("LISTENER wp_drm_lease_v1"); + auto listener = reinterpret_cast(implementation); + listener->lease_fd(data, &fake_lease_id, drm_fd); + LOG("LISTENER done"); + return 0; + } + + // wp_drm_lease_device_v1 + if (compare_ptr(proxy, &fake_device_id)) { + LOG("LISTENER wp_drm_lease_device_v1"); + auto listener = reinterpret_cast(implementation); + open_drm_fd(); + listener->drm_fd(data, &fake_device_id, drm_fd); + if (drm_connector_id != -1) { + listener->connector(data, &fake_device_id, 
&fake_connector_id); + } + listener->done(data, &fake_device_id); + LOG("LISTENER done"); + return 0; + } + + const char *name = *(*reinterpret_cast(proxy)); + + if (strcmp(name, "wl_registry") == 0) { + LOG("LISTENER wl_registry"); + auto listener = reinterpret_cast(implementation); + listener->global(data, reinterpret_cast(proxy), 0, "wp_drm_lease_device_v1", 1); + LOG("LISTENER done"); + return 0; + } + + return real_wl_proxy_add_listener(proxy, implementation, data); +} + +static struct wl_proxy *(*real_wl_proxy_marshal_flags)(struct wl_proxy *proxy, uint32_t opcode, const struct wl_interface *interface, uint32_t version, uint32_t flags, ...); +static struct wl_proxy *hooked_wl_proxy_marshal_flags(struct wl_proxy *proxy, uint32_t opcode, const struct wl_interface *interface, uint32_t version, uint32_t flags, ...) +{ + // wp_drm_lease_connector_v1 + if (compare_ptr(proxy, &fake_connector_id)) { + if (opcode == 0) { + LOG("CALL wp_drm_lease_connector_v1_destroy"); + } else { + ERR("Unknown wp_drm_lease_connector_v1 opcode=%u", opcode); + } + return nullptr; + } + + // wp_drm_lease_request_v1 + if (compare_ptr(proxy, &fake_lease_request_id)) { + if (opcode == 0) { + LOG("CALL wp_drm_lease_request_v1_request_connector"); + } else if (opcode == 1) { + LOG("CALL wp_drm_lease_request_v1_submit"); + return reinterpret_cast(&fake_lease_id); + } else { + ERR("Unknown wp_drm_lease_request_v1 opcode=%u", opcode); + } + return nullptr; + } + + // wp_drm_lease_device_v1 + if (compare_ptr(proxy, &fake_device_id)) { + if (opcode == 0) { + LOG("CALL wp_drm_lease_device_v1_create_lease_request"); + return reinterpret_cast(&fake_lease_request_id); + } else if (opcode == 1) { + LOG("CALL wp_drm_lease_device_v1_release"); + } else { + ERR("Unknown wp_drm_lease_device_v1 opcode=%u", opcode); + } + return nullptr; + } + + const char *name = **reinterpret_cast(proxy); + const char *iname = *reinterpret_cast(const_cast(interface)); + + if (strcmp(name, "wl_registry") == 0 && strcmp(iname, "wp_drm_lease_device_v1") == 0 && opcode == 0) { + LOG("CALL wl_registry_bind - wp_drm_lease_device_v1"); + return reinterpret_cast(&fake_device_id); + } + + __builtin_return(__builtin_apply(reinterpret_cast(real_wl_proxy_marshal_flags), __builtin_apply_args(), 1024)); +} + +extern "C" void *SDL_LoadFunction(void *handle, const char *name) +{ + static void *(*real_SDL_LoadFunction)(void *handle, const char *name) = nullptr; + LOAD_FN(SDL_LoadFunction); + +#define HOOK(f) \ + if (strcmp(name, #f) == 0) { \ + LOG("HOOK %s", #f); \ + real_##f = reinterpret_cast(real_SDL_LoadFunction(handle, #f)); \ + return reinterpret_cast(hooked_##f); \ + } \ + + HOOK(wl_proxy_add_listener); + HOOK(wl_proxy_marshal_flags); + +#undef HOOK + + return real_SDL_LoadFunction(handle, name); +} + +extern "C" drmModeConnectorPtr drmModeGetConnector(int fd, uint32_t connectorId) +{ + LOG("CALL drmModeGetConnector(%d, %u)", fd, connectorId); + + static drmModeConnectorPtr (*real_drmModeGetConnector)(int fd, uint32_t connectorId) = nullptr; + LOAD_FN(drmModeGetConnector); + + auto con = real_drmModeGetConnector(fd, connectorId); + if (con) { + auto sessionFile = std::ifstream(getenv("ALVR_SESSION_JSON")); + auto json = std::string(std::istreambuf_iterator(sessionFile), std::istreambuf_iterator()); + picojson::value v; + picojson::parse(v, json); + auto config = v.get("openvr_config"); + + con->count_modes = 1; + con->modes = (drmModeModeInfo*)calloc(1, sizeof(drmModeModeInfo)); + con->modes->hdisplay = config.get("eye_resolution_width").get() * 2; + 
con->modes->vdisplay = config.get("eye_resolution_height").get(); + } + return con; +} + +__attribute__((constructor)) static void lib_init() +{ + LOG("ALVR: drm-lease shim loaded"); + + unsetenv("LD_PRELOAD"); +} diff --git a/alvr/vrcompositor_wrapper/src/main.rs b/alvr/vrcompositor_wrapper/src/main.rs index 8120f5fb34..3a039fa4ac 100644 --- a/alvr/vrcompositor_wrapper/src/main.rs +++ b/alvr/vrcompositor_wrapper/src/main.rs @@ -22,6 +22,20 @@ fn main() { "VK_LAYER_ALVR_capture,VK_LAYER_MESA_device_select", ); std::env::set_var("VK_LOADER_LAYERS_DISABLE", "*"); + if std::env::var("WAYLAND_DISPLAY").is_ok() { + let drm_lease_shim_path = match std::fs::read_link(&argv0) { + Ok(path) => path.parent().unwrap().join("alvr_drm_lease_shim.so"), + Err(err) => panic!("Failed to read vrcompositor symlink: {err}"), + }; + std::env::set_var("LD_PRELOAD", drm_lease_shim_path); + std::env::set_var( + "ALVR_SESSION_JSON", + alvr_filesystem::filesystem_layout_invalid() + .session() + .to_string_lossy() + .to_string(), + ); + } let err = exec::execvp(argv0 + ".real", std::env::args()); println!("Failed to run vrcompositor {err}"); diff --git a/alvr/vulkan_layer/layer/device_api.cpp b/alvr/vulkan_layer/layer/device_api.cpp index edf6f6e74b..ba2d2c7d9f 100644 --- a/alvr/vulkan_layer/layer/device_api.cpp +++ b/alvr/vulkan_layer/layer/device_api.cpp @@ -40,6 +40,7 @@ VKAPI_ATTR VkResult VKAPI_CALL wsi_layer_vkGetDisplayModePropertiesKHR( VkPhysicalDevice device, VkDisplayKHR display, uint32_t *pPropertyCount, VkDisplayModePropertiesKHR *pProperties) { if (display != alvr_display_handle) { + *pPropertyCount = 0; return VK_ERROR_OUT_OF_HOST_MEMORY; } if (!pProperties) { @@ -78,6 +79,23 @@ VKAPI_ATTR VkResult VKAPI_CALL wsi_layer_vkAcquireXlibDisplayEXT(VkPhysicalDevic return VK_SUCCESS; } +VKAPI_ATTR VkResult VKAPI_CALL wsi_layer_vkGetDrmDisplayEXT(VkPhysicalDevice physicalDevice, + int32_t drmFd, + uint32_t connectorId, + VkDisplayKHR *display) { + *display = alvr_display_handle; + return VK_SUCCESS; +} + +VKAPI_ATTR VkResult VKAPI_CALL wsi_layer_vkAcquireDrmDisplayEXT(VkPhysicalDevice physicalDevice, + int32_t drmFd, + VkDisplayKHR display) { + if (display != alvr_display_handle) { + return VK_ERROR_INITIALIZATION_FAILED; + } + return VK_SUCCESS; +} + VKAPI_ATTR VkResult VKAPI_CALL wsi_layer_vkGetDisplayPlaneSupportedDisplaysKHR( VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) { diff --git a/alvr/vulkan_layer/layer/device_api.hpp b/alvr/vulkan_layer/layer/device_api.hpp index ea8d869c7c..34641aeefa 100644 --- a/alvr/vulkan_layer/layer/device_api.hpp +++ b/alvr/vulkan_layer/layer/device_api.hpp @@ -18,6 +18,15 @@ VKAPI_ATTR VkResult VKAPI_CALL wsi_layer_vkAcquireXlibDisplayEXT(VkPhysicalDevic Display *dpy, VkDisplayKHR display); +VKAPI_ATTR VkResult VKAPI_CALL wsi_layer_vkGetDrmDisplayEXT(VkPhysicalDevice physicalDevice, + int32_t drmFd, + uint32_t connectorId, + VkDisplayKHR *display); + +VKAPI_ATTR VkResult VKAPI_CALL wsi_layer_vkAcquireDrmDisplayEXT(VkPhysicalDevice physicalDevice, + int32_t drmFd, + VkDisplayKHR display); + VKAPI_ATTR VkResult VKAPI_CALL wsi_layer_vkGetDisplayPlaneSupportedDisplaysKHR( VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t *pDisplayCount, VkDisplayKHR *pDisplays); diff --git a/alvr/vulkan_layer/layer/layer.cpp b/alvr/vulkan_layer/layer/layer.cpp index 1baa7f5db9..8bb35a7597 100644 --- a/alvr/vulkan_layer/layer/layer.cpp +++ b/alvr/vulkan_layer/layer/layer.cpp @@ -402,6 +402,8 @@ 
wsi_layer_vkGetInstanceProcAddr(VkInstance instance, const char *funcName) { GET_PROC_ADDR(vkGetDisplayModePropertiesKHR); GET_PROC_ADDR(vkGetPhysicalDeviceDisplayPlanePropertiesKHR); GET_PROC_ADDR(vkAcquireXlibDisplayEXT); + GET_PROC_ADDR(vkGetDrmDisplayEXT); + GET_PROC_ADDR(vkAcquireDrmDisplayEXT); GET_PROC_ADDR(vkGetDisplayPlaneSupportedDisplaysKHR); GET_PROC_ADDR(vkCreateDisplayPlaneSurfaceKHR); GET_PROC_ADDR(vkCreateDisplayModeKHR); diff --git a/alvr/xtask/src/build.rs b/alvr/xtask/src/build.rs index 5a9f697d7c..a023c7634d 100644 --- a/alvr/xtask/src/build.rs +++ b/alvr/xtask/src/build.rs @@ -143,6 +143,11 @@ pub fn build_streamer( build_layout.vrcompositor_wrapper(), ) .unwrap(); + sh.copy_file( + artifacts_dir.join("alvr_drm_lease_shim.so"), + build_layout.drm_lease_shim(), + ) + .unwrap(); // build vulkan layer let _push_guard = sh.push_dir(afs::crate_dir("vulkan_layer")); From 6a06ca3bf6d26e18dd7175534665ca23001c88dc Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Mon, 10 Jul 2023 14:53:02 +0800 Subject: [PATCH 02/28] Progress on sync sockets (12) --- alvr/client_core/src/connection.rs | 22 +++++++++++++++------- alvr/client_core/src/lib.rs | 2 +- alvr/server/src/connection.rs | 23 ++++++++++++----------- alvr/server/src/lib.rs | 3 +-- alvr/server/src/statistics.rs | 5 +++-- 5 files changed, 32 insertions(+), 23 deletions(-) diff --git a/alvr/client_core/src/connection.rs b/alvr/client_core/src/connection.rs index 637c06b7b5..e93dedaf46 100644 --- a/alvr/client_core/src/connection.rs +++ b/alvr/client_core/src/connection.rs @@ -6,7 +6,7 @@ use crate::{ sockets::AnnouncerSocket, statistics::StatisticsManager, storage::Config, - ClientCoreEvent, CONTROL_CHANNEL_SENDER, DISCONNECT_NOTIFIER, EVENT_QUEUE, IS_ALIVE, + ClientCoreEvent, CONTROL_CHANNEL_SENDER, DISCONNECT_SERVER_NOTIFIER, EVENT_QUEUE, IS_ALIVE, IS_RESUMED, IS_STREAMING, STATISTICS_MANAGER, }; use alvr_audio::AudioDevice; @@ -348,14 +348,10 @@ fn connection_pipeline( let video_receive_loop = async move { let mut receiver_buffer = ReceiverBuffer::new(); let mut stream_corrupted = false; - loop { + while IS_STREAMING.value() { video_receiver.recv_buffer(&mut receiver_buffer).await?; let (header, nal) = receiver_buffer.get()?; - if !IS_RESUMED.value() { - break Ok(()); - } - if let Some(stats) = &mut *STATISTICS_MANAGER.lock() { stats.report_video_packet_received(header.timestamp); } @@ -382,6 +378,8 @@ fn connection_pipeline( warn!("Dropped video packet. Reason: Waiting for IDR frame") } } + + Ok(()) }; let haptics_receive_loop = async move { @@ -485,6 +483,14 @@ fn connection_pipeline( let receive_loop = async move { stream_socket.receive_loop().await }; + let lifecycle_check_thread = thread::spawn(|| { + while IS_STREAMING.value() && IS_RESUMED.value() && IS_ALIVE.value() { + thread::sleep(Duration::from_millis(100)); + } + + DISCONNECT_SERVER_NOTIFIER.notify_waiters(); + }); + let res = CONNECTION_RUNTIME.read().as_ref().unwrap().block_on(async { // Run many tasks concurrently. Threading is managed by the runtime, for best performance. tokio::select! 
{ @@ -508,7 +514,7 @@ fn connection_pipeline( res = keepalive_sender_loop => res, res = control_receive_loop => res, - _ = DISCONNECT_NOTIFIER.notified() => Ok(()), + _ = DISCONNECT_SERVER_NOTIFIER.notified() => Ok(()), } }); @@ -527,5 +533,7 @@ fn connection_pipeline( *crate::decoder::DECODER_DEQUEUER.lock() = None; } + lifecycle_check_thread.join().ok(); + res.map_err(to_int_e!()) } diff --git a/alvr/client_core/src/lib.rs b/alvr/client_core/src/lib.rs index 3ac618dfce..d0467d4644 100644 --- a/alvr/client_core/src/lib.rs +++ b/alvr/client_core/src/lib.rs @@ -49,7 +49,7 @@ static STATISTICS_MANAGER: Lazy>> = Lazy::new(|| static CONTROL_CHANNEL_SENDER: Lazy>>> = Lazy::new(|| Mutex::new(None)); -static DISCONNECT_NOTIFIER: Lazy = Lazy::new(Notify::new); +static DISCONNECT_SERVER_NOTIFIER: Lazy = Lazy::new(Notify::new); static EVENT_QUEUE: Lazy>> = Lazy::new(|| Mutex::new(VecDeque::new())); diff --git a/alvr/server/src/connection.rs b/alvr/server/src/connection.rs index 7a1008458d..8b12470f0b 100644 --- a/alvr/server/src/connection.rs +++ b/alvr/server/src/connection.rs @@ -7,8 +7,8 @@ use crate::{ statistics::StatisticsManager, tracking::{self, TrackingManager}, FfiButtonValue, FfiFov, FfiViewsConfig, VideoPacket, BITRATE_MANAGER, DECODER_CONFIG, - DISCONNECT_CLIENT_NOTIFIER, RESTART_NOTIFIER, SERVER_DATA_MANAGER, SHUTDOWN_NOTIFIER, - STATISTICS_MANAGER, VIDEO_MIRROR_SENDER, VIDEO_RECORDING_FILE, + DISCONNECT_CLIENT_NOTIFIER, RESTART_NOTIFIER, SERVER_DATA_MANAGER, STATISTICS_MANAGER, + VIDEO_MIRROR_SENDER, VIDEO_RECORDING_FILE, }; use alvr_audio::AudioDevice; use alvr_common::{ @@ -1032,6 +1032,14 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { } }); + let lifecycle_check_thread = thread::spawn(|| { + while SHOULD_CONNECT_TO_CLIENTS.value() && CONNECTION_RUNTIME.read().is_some() { + thread::sleep(Duration::from_millis(500)); + } + + DISCONNECT_CLIENT_NOTIFIER.notify_waiters(); + }); + { let on_connect_script = settings.connection.on_connect_script; @@ -1060,12 +1068,6 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { thread::spawn(move || { let _connection_drop_guard = _connection_drop_guard; - let shutdown_detector = async { - while SHOULD_CONNECT_TO_CLIENTS.value() { - time::sleep(Duration::from_secs(1)).await; - } - }; - let res = CONNECTION_RUNTIME .read() .as_ref() @@ -1094,9 +1096,7 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { Ok(()) } - _ = SHUTDOWN_NOTIFIER.notified() => Ok(()), _ = DISCONNECT_CLIENT_NOTIFIER.notified() => Ok(()), - _ = shutdown_detector => Ok(()), } }); if let Err(e) = res { @@ -1104,9 +1104,9 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { } // This requests shutdown from threads + *CONNECTION_RUNTIME.write() = None; *VIDEO_CHANNEL_SENDER.lock() = None; *HAPTICS_SENDER.lock() = None; - *CONNECTION_RUNTIME.write() = None; *VIDEO_RECORDING_FILE.lock() = None; @@ -1134,6 +1134,7 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { statistics_thread.join().ok(); control_thread.join().ok(); keepalive_thread.join().ok(); + lifecycle_check_thread.join().ok(); }); Ok(()) diff --git a/alvr/server/src/lib.rs b/alvr/server/src/lib.rs index bee07d9427..17d60fb8e1 100644 --- a/alvr/server/src/lib.rs +++ b/alvr/server/src/lib.rs @@ -82,7 +82,6 @@ static VIDEO_RECORDING_FILE: Lazy>> = Lazy::new(|| Mutex::new static DISCONNECT_CLIENT_NOTIFIER: Lazy = Lazy::new(Notify::new); static RESTART_NOTIFIER: Lazy = Lazy::new(Notify::new); -static SHUTDOWN_NOTIFIER: Lazy = Lazy::new(Notify::new); static FRAME_RENDER_VS_CSO: &[u8] = 
include_bytes!("../cpp/platform/win32/FrameRenderVS.cso"); static FRAME_RENDER_PS_CSO: &[u8] = include_bytes!("../cpp/platform/win32/FrameRenderPS.cso"); @@ -167,7 +166,7 @@ pub extern "C" fn shutdown_driver() { } } - SHUTDOWN_NOTIFIER.notify_waiters(); + DISCONNECT_CLIENT_NOTIFIER.notify_waiters(); // apply openvr config for the next launch SERVER_DATA_MANAGER.write().session_mut().openvr_config = connection::contruct_openvr_config(); diff --git a/alvr/server/src/statistics.rs b/alvr/server/src/statistics.rs index b591219a73..55bb187e96 100644 --- a/alvr/server/src/statistics.rs +++ b/alvr/server/src/statistics.rs @@ -239,8 +239,9 @@ impl StatisticsManager { video_packets_per_sec: (self.video_packets_partial_sum as f32 / interval_secs) as _, video_mbytes_total: (self.video_bytes_total as f32 / 1e6) as usize, - video_mbits_per_sec: self.video_bytes_partial_sum as f32 / interval_secs * 8. - / 1e6, + video_mbits_per_sec: self.video_bytes_partial_sum as f32 * 8. + / 1e6 + / interval_secs, total_latency_ms: client_stats.total_pipeline_latency.as_secs_f32() * 1000., network_latency_ms: network_latency.as_secs_f32() * 1000., encode_latency_ms: encoder_latency.as_secs_f32() * 1000., From 9dc9d7a5f4f0d2557604c1e7e3f995a57179361e Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Mon, 10 Jul 2023 15:25:29 +0800 Subject: [PATCH 03/28] Progress on sync sockets (13) --- alvr/client_core/src/connection.rs | 69 +++++++++++++++++++++--------- 1 file changed, 49 insertions(+), 20 deletions(-) diff --git a/alvr/client_core/src/connection.rs b/alvr/client_core/src/connection.rs index e93dedaf46..49fc9e9a4f 100644 --- a/alvr/client_core/src/connection.rs +++ b/alvr/client_core/src/connection.rs @@ -345,12 +345,30 @@ fn connection_pipeline( EVENT_QUEUE.lock().push_back(streaming_start_event); - let video_receive_loop = async move { + let video_receive_thread = thread::spawn(move || { let mut receiver_buffer = ReceiverBuffer::new(); let mut stream_corrupted = false; - while IS_STREAMING.value() { - video_receiver.recv_buffer(&mut receiver_buffer).await?; - let (header, nal) = receiver_buffer.get()?; + loop { + if let Some(runtime) = &*CONNECTION_RUNTIME.read() { + let res = runtime.block_on(async { + tokio::select! { + res = video_receiver.recv_buffer(&mut receiver_buffer) => Some(res), + _ = time::sleep(Duration::from_secs(1)) => None, + } + }); + + match res { + Some(Ok(())) => (), + Some(Err(_)) => return, + None => continue, + } + } else { + return; + } + + let Ok((header, nal)) = receiver_buffer.get() else { + return + }; if let Some(stats) = &mut *STATISTICS_MANAGER.lock() { stats.report_video_packet_received(header.timestamp); @@ -378,22 +396,33 @@ fn connection_pipeline( warn!("Dropped video packet. Reason: Waiting for IDR frame") } } + }); - Ok(()) - }; - - let haptics_receive_loop = async move { - loop { - let haptics = haptics_receiver.recv_header_only().await?; - - EVENT_QUEUE.lock().push_back(ClientCoreEvent::Haptics { - device_id: haptics.device_id, - duration: haptics.duration, - frequency: haptics.frequency, - amplitude: haptics.amplitude, + let haptics_receive_thread = thread::spawn(move || loop { + let haptics = if let Some(runtime) = &*CONNECTION_RUNTIME.read() { + let res = runtime.block_on(async { + tokio::select! 
{ + res = haptics_receiver.recv_header_only() => Some(res), + _ = time::sleep(Duration::from_secs(1)) => None, + } }); - } - }; + + match res { + Some(Ok(packet)) => packet, + Some(Err(_)) => return, + None => continue, + } + } else { + return; + }; + + EVENT_QUEUE.lock().push_back(ClientCoreEvent::Haptics { + device_id: haptics.device_id, + duration: haptics.duration, + frequency: haptics.frequency, + amplitude: haptics.amplitude, + }); + }); // Poll for events that need a constant thread (mainly for the JNI env) #[cfg(target_os = "android")] @@ -506,8 +535,6 @@ fn connection_pipeline( }, res = spawn_cancelable(game_audio_loop) => res, res = spawn_cancelable(microphone_loop) => res, - res = spawn_cancelable(video_receive_loop) => res, - res = spawn_cancelable(haptics_receive_loop) => res, res = spawn_cancelable(control_send_loop) => res, // keep these loops on the current task @@ -533,6 +560,8 @@ fn connection_pipeline( *crate::decoder::DECODER_DEQUEUER.lock() = None; } + video_receive_thread.join().ok(); + haptics_receive_thread.join().ok(); lifecycle_check_thread.join().ok(); res.map_err(to_int_e!()) From 0834d3861ef0814ab59dc082512da66166ec1626 Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Mon, 10 Jul 2023 16:49:41 +0800 Subject: [PATCH 04/28] Progress on sync sockets (14) --- alvr/client_core/src/connection.rs | 53 +++++++++++++++++++----------- 1 file changed, 34 insertions(+), 19 deletions(-) diff --git a/alvr/client_core/src/connection.rs b/alvr/client_core/src/connection.rs index 49fc9e9a4f..3df49277b0 100644 --- a/alvr/client_core/src/connection.rs +++ b/alvr/client_core/src/connection.rs @@ -29,7 +29,13 @@ use alvr_sockets::{ }; use futures::future::BoxFuture; use serde_json as json; -use std::{collections::HashMap, future, sync::Arc, thread, time::Duration}; +use std::{ + collections::HashMap, + future, + sync::Arc, + thread, + time::{Duration, Instant}, +}; use tokio::{runtime::Runtime, sync::mpsc as tmpsc, time}; #[cfg(target_os = "android")] @@ -427,8 +433,6 @@ fn connection_pipeline( // Poll for events that need a constant thread (mainly for the JNI env) #[cfg(target_os = "android")] thread::spawn(|| { - use std::time::Instant; - const BATTERY_POLL_INTERVAL: Duration = Duration::from_secs(5); let mut previous_hmd_battery_status = (0.0, false); @@ -461,25 +465,37 @@ fn connection_pipeline( } }); - let keepalive_sender_loop = { + let keepalive_sender_thread = thread::spawn({ let control_sender = Arc::clone(&control_sender); - async move { + move || { + let mut deadline = Instant::now(); loop { - let res = control_sender - .lock() - .await - .send(&ClientControlPacket::KeepAlive) - .await; - if let Err(e) = res { - info!("Server disconnected. Cause: {e}"); - set_hud_message(SERVER_DISCONNECTED_MESSAGE); - break Ok(()); + if let Some(runtime) = &*CONNECTION_RUNTIME.read() { + let res = runtime.block_on(async { + control_sender + .lock() + .await + .send(&ClientControlPacket::KeepAlive) + .await + }); + if let Err(e) = res { + info!("Server disconnected. 
Cause: {e}"); + set_hud_message(SERVER_DISCONNECTED_MESSAGE); + DISCONNECT_SERVER_NOTIFIER.notify_waiters(); + + return; + } + } else { + return; } - time::sleep(NETWORK_KEEPALIVE_INTERVAL).await; + deadline += NETWORK_KEEPALIVE_INTERVAL; + while Instant::now() < deadline && IS_STREAMING.value() { + thread::sleep(Duration::from_millis(500)); + } } } - }; + }); let control_send_loop = async move { while let Some(packet) = control_channel_receiver.recv().await { @@ -514,7 +530,7 @@ fn connection_pipeline( let lifecycle_check_thread = thread::spawn(|| { while IS_STREAMING.value() && IS_RESUMED.value() && IS_ALIVE.value() { - thread::sleep(Duration::from_millis(100)); + thread::sleep(Duration::from_millis(500)); } DISCONNECT_SERVER_NOTIFIER.notify_waiters(); @@ -537,8 +553,6 @@ fn connection_pipeline( res = spawn_cancelable(microphone_loop) => res, res = spawn_cancelable(control_send_loop) => res, - // keep these loops on the current task - res = keepalive_sender_loop => res, res = control_receive_loop => res, _ = DISCONNECT_SERVER_NOTIFIER.notified() => Ok(()), @@ -562,6 +576,7 @@ fn connection_pipeline( video_receive_thread.join().ok(); haptics_receive_thread.join().ok(); + keepalive_sender_thread.join().ok(); lifecycle_check_thread.join().ok(); res.map_err(to_int_e!()) From 6a2d3597ae46422c93853c1ea26c3b5d6c7d2224 Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Mon, 10 Jul 2023 17:30:03 +0800 Subject: [PATCH 05/28] Progress on sync sockets (15) --- alvr/client_core/src/connection.rs | 73 ++++++++++++++++++------------ alvr/packets/src/lib.rs | 2 +- alvr/server/src/connection.rs | 8 ++-- 3 files changed, 48 insertions(+), 35 deletions(-) diff --git a/alvr/client_core/src/connection.rs b/alvr/client_core/src/connection.rs index 3df49277b0..0875f5c1de 100644 --- a/alvr/client_core/src/connection.rs +++ b/alvr/client_core/src/connection.rs @@ -230,7 +230,7 @@ fn connection_pipeline( match runtime.block_on(async { tokio::select! { res = control_receiver.recv() => res, - _ = time::sleep(Duration::from_millis(1)) => fmt_e!("Timeout"), + _ = time::sleep(Duration::from_secs(1)) => fmt_e!("Timeout"), } }) { Ok(ServerControlPacket::StartStream) => { @@ -263,7 +263,7 @@ fn connection_pipeline( let stream_socket_builder = runtime.block_on(async { tokio::select! { res = listen_for_server_future => res.map_err(to_int_e!()), - _ = time::sleep(Duration::from_millis(1)) => int_fmt_e!("Timeout while binding stream socket"), + _ = time::sleep(Duration::from_secs(1)) => int_fmt_e!("Timeout while binding stream socket"), } })?; @@ -359,7 +359,7 @@ fn connection_pipeline( let res = runtime.block_on(async { tokio::select! { res = video_receiver.recv_buffer(&mut receiver_buffer) => Some(res), - _ = time::sleep(Duration::from_secs(1)) => None, + _ = time::sleep(Duration::from_millis(500)) => None, } }); @@ -409,7 +409,7 @@ fn connection_pipeline( let res = runtime.block_on(async { tokio::select! { res = haptics_receiver.recv_header_only() => Some(res), - _ = time::sleep(Duration::from_secs(1)) => None, + _ = time::sleep(Duration::from_millis(500)) => None, } }); @@ -497,6 +497,41 @@ fn connection_pipeline( } }); + let control_receive_thread = thread::spawn(move || loop { + let maybe_packet = if let Some(runtime) = &*CONNECTION_RUNTIME.read() { + runtime.block_on(async { + tokio::select! 
{ + res = control_receiver.recv() => Some(res), + _ = time::sleep(Duration::from_millis(500)) => None, + } + }) + } else { + return; + }; + + match maybe_packet { + Some(Ok(ServerControlPacket::InitializeDecoder(config))) => { + decoder::create_decoder(config); + } + Some(Ok(ServerControlPacket::Restarting)) => { + info!("{SERVER_RESTART_MESSAGE}"); + set_hud_message(SERVER_RESTART_MESSAGE); + DISCONNECT_SERVER_NOTIFIER.notify_waiters(); + + return; + } + Some(Ok(_)) => (), + Some(Err(e)) => { + info!("{SERVER_DISCONNECTED_MESSAGE} Cause: {e}"); + set_hud_message(SERVER_DISCONNECTED_MESSAGE); + DISCONNECT_SERVER_NOTIFIER.notify_waiters(); + + return; + } + None => (), + } + }); + let control_send_loop = async move { while let Some(packet) = control_channel_receiver.recv().await { control_sender.lock().await.send(&packet).await.ok(); @@ -505,27 +540,6 @@ fn connection_pipeline( Ok(()) }; - let control_receive_loop = async move { - loop { - match control_receiver.recv().await { - Ok(ServerControlPacket::InitializeDecoder(config)) => { - decoder::create_decoder(config); - } - Ok(ServerControlPacket::Restarting) => { - info!("{SERVER_RESTART_MESSAGE}"); - set_hud_message(SERVER_RESTART_MESSAGE); - break Ok(()); - } - Ok(_) => (), - Err(e) => { - info!("{SERVER_DISCONNECTED_MESSAGE} Cause: {e}"); - set_hud_message(SERVER_DISCONNECTED_MESSAGE); - break Ok(()); - } - } - } - }; - let receive_loop = async move { stream_socket.receive_loop().await }; let lifecycle_check_thread = thread::spawn(|| { @@ -553,16 +567,14 @@ fn connection_pipeline( res = spawn_cancelable(microphone_loop) => res, res = spawn_cancelable(control_send_loop) => res, - res = control_receive_loop => res, - _ = DISCONNECT_SERVER_NOTIFIER.notified() => Ok(()), } }); IS_STREAMING.set(false); - CONNECTION_RUNTIME.write().take(); - TRACKING_SENDER.lock().take(); - STATISTICS_SENDER.lock().take(); + *CONNECTION_RUNTIME.write() = None; + *TRACKING_SENDER.lock() = None; + *STATISTICS_SENDER.lock() = None; EVENT_QUEUE .lock() @@ -576,6 +588,7 @@ fn connection_pipeline( video_receive_thread.join().ok(); haptics_receive_thread.join().ok(); + control_receive_thread.join().ok(); keepalive_sender_thread.join().ok(); lifecycle_check_thread.join().ok(); diff --git a/alvr/packets/src/lib.rs b/alvr/packets/src/lib.rs index 109c0e8229..2fb5aaf51b 100644 --- a/alvr/packets/src/lib.rs +++ b/alvr/packets/src/lib.rs @@ -54,7 +54,7 @@ pub enum ServerControlPacket { InitializeDecoder(DecoderInitializationConfig), Restarting, KeepAlive, - ServerPredictionAverage(Duration), + ServerPredictionAverage(Duration), // todo: remove Reserved(String), ReservedBuffer(Vec), } diff --git a/alvr/server/src/connection.rs b/alvr/server/src/connection.rs index 8b12470f0b..fdecbb9432 100644 --- a/alvr/server/src/connection.rs +++ b/alvr/server/src/connection.rs @@ -667,7 +667,7 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { let video_send_thread = thread::spawn(move || loop { let VideoPacket { header, payload } = - match video_channel_receiver.recv_timeout(Duration::from_millis(100)) { + match video_channel_receiver.recv_timeout(Duration::from_millis(500)) { Ok(packet) => packet, Err(RecvTimeoutError::Timeout) => continue, Err(RecvTimeoutError::Disconnected) => return, @@ -705,7 +705,7 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { let maybe_tracking = runtime.block_on(async { tokio::select! 
{ res = tracking_receiver.recv_header_only() => Some(res), - _ = time::sleep(Duration::from_millis(100)) => None, + _ = time::sleep(Duration::from_millis(500)) => None, } }); match maybe_tracking { @@ -823,7 +823,7 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { let maybe_client_stats = runtime.block_on(async { tokio::select! { res = statics_receiver.recv_header_only() => Some(res), - _ = time::sleep(Duration::from_millis(100)) => None, + _ = time::sleep(Duration::from_millis(500)) => None, } }); match maybe_client_stats { @@ -892,7 +892,7 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { let maybe_packet = runtime.block_on(async { tokio::select! { res = control_receiver.recv() => Some(res), - _ = time::sleep(Duration::from_millis(100)) => None, + _ = time::sleep(Duration::from_millis(500)) => None, } }); match maybe_packet { From e8422abd15140b8910aaf0df7ac741b3f4913a92 Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Mon, 10 Jul 2023 19:36:57 +0800 Subject: [PATCH 06/28] Progress on sync sockets (16) --- alvr/client_core/src/connection.rs | 91 +++++++++++-------------- alvr/client_core/src/decoder.rs | 2 +- alvr/client_core/src/lib.rs | 7 +- alvr/client_core/src/logging_backend.rs | 2 +- 4 files changed, 42 insertions(+), 60 deletions(-) diff --git a/alvr/client_core/src/connection.rs b/alvr/client_core/src/connection.rs index 0875f5c1de..9fd2562455 100644 --- a/alvr/client_core/src/connection.rs +++ b/alvr/client_core/src/connection.rs @@ -6,8 +6,7 @@ use crate::{ sockets::AnnouncerSocket, statistics::StatisticsManager, storage::Config, - ClientCoreEvent, CONTROL_CHANNEL_SENDER, DISCONNECT_SERVER_NOTIFIER, EVENT_QUEUE, IS_ALIVE, - IS_RESUMED, IS_STREAMING, STATISTICS_MANAGER, + ClientCoreEvent, EVENT_QUEUE, IS_ALIVE, IS_RESUMED, IS_STREAMING, STATISTICS_MANAGER, }; use alvr_audio::AudioDevice; use alvr_common::{ @@ -32,11 +31,11 @@ use serde_json as json; use std::{ collections::HashMap, future, - sync::Arc, + sync::{mpsc, Arc}, thread, time::{Duration, Instant}, }; -use tokio::{runtime::Runtime, sync::mpsc as tmpsc, time}; +use tokio::{runtime::Runtime, sync::Notify, time}; #[cfg(target_os = "android")] use crate::audio; @@ -64,12 +63,19 @@ const RETRY_CONNECT_MIN_INTERVAL: Duration = Duration::from_secs(1); const NETWORK_KEEPALIVE_INTERVAL: Duration = Duration::from_secs(1); const CONNECTION_RETRY_INTERVAL: Duration = Duration::from_secs(1); +static DISCONNECT_SERVER_NOTIFIER: Lazy = Lazy::new(Notify::new); + pub static CONNECTION_RUNTIME: Lazy>> = Lazy::new(|| RwLock::new(None)); pub static TRACKING_SENDER: Lazy>>> = Lazy::new(|| Mutex::new(None)); pub static STATISTICS_SENDER: Lazy>>> = Lazy::new(|| Mutex::new(None)); +// Note: the ControlSocketSender cannot be shared directly. this is because it is used inside the +// logging callback and that could lead to double lock. +pub static CONTROL_CHANNEL_SENDER: Lazy>>> = + Lazy::new(|| Mutex::new(None)); + fn set_hud_message(message: &str) { let message = format!( "ALVR v{}\nhostname: {}\nIP: {}\n\n{message}", @@ -224,8 +230,7 @@ fn connection_pipeline( }, )); - let (control_sender, mut control_receiver) = proto_control_socket.split(); - let control_sender = Arc::new(tokio::sync::Mutex::new(control_sender)); + let (mut control_sender, mut control_receiver) = proto_control_socket.split(); match runtime.block_on(async { tokio::select! 
{ @@ -267,13 +272,7 @@ fn connection_pipeline( } })?; - if let Err(e) = runtime.block_on(async { - control_sender - .lock() - .await - .send(&ClientControlPacket::StreamReady) - .await - }) { + if let Err(e) = runtime.block_on(control_sender.send(&ClientControlPacket::StreamReady)) { info!("Server disconnected. Cause: {e}"); set_hud_message(SERVER_DISCONNECTED_MESSAGE); return Ok(()); @@ -294,10 +293,6 @@ fn connection_pipeline( info!("Connected to server"); - // create this before initializing the stream on cpp side - let (control_channel_sender, mut control_channel_receiver) = tmpsc::unbounded_channel(); - *CONTROL_CHANNEL_SENDER.lock() = Some(control_channel_sender); - { let config = &mut *DECODER_INIT_CONFIG.lock(); @@ -349,6 +344,9 @@ fn connection_pipeline( *TRACKING_SENDER.lock() = Some(tracking_sender); *STATISTICS_SENDER.lock() = Some(statistics_sender); + let (control_channel_sender, control_channel_receiver) = mpsc::channel(); + *CONTROL_CHANNEL_SENDER.lock() = Some(control_channel_sender); + EVENT_QUEUE.lock().push_back(streaming_start_event); let video_receive_thread = thread::spawn(move || { @@ -465,33 +463,29 @@ fn connection_pipeline( } }); - let keepalive_sender_thread = thread::spawn({ - let control_sender = Arc::clone(&control_sender); - move || { - let mut deadline = Instant::now(); - loop { - if let Some(runtime) = &*CONNECTION_RUNTIME.read() { - let res = runtime.block_on(async { - control_sender - .lock() - .await - .send(&ClientControlPacket::KeepAlive) - .await - }); - if let Err(e) = res { - info!("Server disconnected. Cause: {e}"); - set_hud_message(SERVER_DISCONNECTED_MESSAGE); - DISCONNECT_SERVER_NOTIFIER.notify_waiters(); - - return; - } - } else { - return; - } + let keepalive_sender_thread = thread::spawn(move || { + let mut deadline = Instant::now(); + while IS_STREAMING.value() { + if let Some(sender) = &*CONTROL_CHANNEL_SENDER.lock() { + sender.send(ClientControlPacket::KeepAlive).ok(); + } - deadline += NETWORK_KEEPALIVE_INTERVAL; - while Instant::now() < deadline && IS_STREAMING.value() { - thread::sleep(Duration::from_millis(500)); + deadline += NETWORK_KEEPALIVE_INTERVAL; + while Instant::now() < deadline && IS_STREAMING.value() { + thread::sleep(Duration::from_millis(500)); + } + } + }); + + let control_send_thread = thread::spawn(move || { + while let Ok(packet) = control_channel_receiver.recv() { + if let Some(runtime) = &*CONNECTION_RUNTIME.read() { + if let Err(e) = runtime.block_on(control_sender.send(&packet)) { + info!("Server disconnected. 
Cause: {e}"); + set_hud_message(SERVER_DISCONNECTED_MESSAGE); + DISCONNECT_SERVER_NOTIFIER.notify_waiters(); + + return; } } } @@ -532,14 +526,6 @@ fn connection_pipeline( } }); - let control_send_loop = async move { - while let Some(packet) = control_channel_receiver.recv().await { - control_sender.lock().await.send(&packet).await.ok(); - } - - Ok(()) - }; - let receive_loop = async move { stream_socket.receive_loop().await }; let lifecycle_check_thread = thread::spawn(|| { @@ -565,7 +551,6 @@ fn connection_pipeline( }, res = spawn_cancelable(game_audio_loop) => res, res = spawn_cancelable(microphone_loop) => res, - res = spawn_cancelable(control_send_loop) => res, _ = DISCONNECT_SERVER_NOTIFIER.notified() => Ok(()), } @@ -575,6 +560,7 @@ fn connection_pipeline( *CONNECTION_RUNTIME.write() = None; *TRACKING_SENDER.lock() = None; *STATISTICS_SENDER.lock() = None; + *CONTROL_CHANNEL_SENDER.lock() = None; EVENT_QUEUE .lock() @@ -589,6 +575,7 @@ fn connection_pipeline( video_receive_thread.join().ok(); haptics_receive_thread.join().ok(); control_receive_thread.join().ok(); + control_send_thread.join().ok(); keepalive_sender_thread.join().ok(); lifecycle_check_thread.join().ok(); diff --git a/alvr/client_core/src/decoder.rs b/alvr/client_core/src/decoder.rs index 8a37327d8b..f2574b0877 100644 --- a/alvr/client_core/src/decoder.rs +++ b/alvr/client_core/src/decoder.rs @@ -60,7 +60,7 @@ pub fn create_decoder(lazy_config: DecoderInitializationConfig) { *DECODER_ENQUEUER.lock() = Some(enqueuer); *DECODER_DEQUEUER.lock() = Some(dequeuer); - if let Some(sender) = &*crate::CONTROL_CHANNEL_SENDER.lock() { + if let Some(sender) = &*crate::connection::CONTROL_CHANNEL_SENDER.lock() { sender .send(alvr_packets::ClientControlPacket::RequestIdr) .ok(); diff --git a/alvr/client_core/src/lib.rs b/alvr/client_core/src/lib.rs index d0467d4644..6341594206 100644 --- a/alvr/client_core/src/lib.rs +++ b/alvr/client_core/src/lib.rs @@ -33,7 +33,7 @@ use alvr_common::{ }; use alvr_packets::{BatteryPacket, ButtonEntry, ClientControlPacket, Tracking, ViewsConfig}; use alvr_session::{CodecType, Settings}; -use connection::{CONNECTION_RUNTIME, STATISTICS_SENDER, TRACKING_SENDER}; +use connection::{CONNECTION_RUNTIME, CONTROL_CHANNEL_SENDER, STATISTICS_SENDER, TRACKING_SENDER}; use decoder::EXTERNAL_DECODER; use serde::{Deserialize, Serialize}; use statistics::StatisticsManager; @@ -43,14 +43,9 @@ use std::{ time::Duration, }; use storage::Config; -use tokio::{sync::mpsc, sync::Notify}; static STATISTICS_MANAGER: Lazy>> = Lazy::new(|| Mutex::new(None)); -static CONTROL_CHANNEL_SENDER: Lazy>>> = - Lazy::new(|| Mutex::new(None)); -static DISCONNECT_SERVER_NOTIFIER: Lazy = Lazy::new(Notify::new); - static EVENT_QUEUE: Lazy>> = Lazy::new(|| Mutex::new(VecDeque::new())); diff --git a/alvr/client_core/src/logging_backend.rs b/alvr/client_core/src/logging_backend.rs index 9c24267fe4..6e41a5cb13 100644 --- a/alvr/client_core/src/logging_backend.rs +++ b/alvr/client_core/src/logging_backend.rs @@ -1,4 +1,4 @@ -use crate::CONTROL_CHANNEL_SENDER; +use crate::connection::CONTROL_CHANNEL_SENDER; use alvr_common::{ log::{Level, Record}, once_cell::sync::Lazy, From fc2c729bb76b2990a2253acfc913c497019bd051 Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Mon, 10 Jul 2023 18:12:15 +0200 Subject: [PATCH 07/28] Update "How ALVR works" wiki page (#1703) --- wiki/How-ALVR-works.md | 374 ++++++++++++++++++++++++++--------------- 1 file changed, 243 insertions(+), 131 deletions(-) diff --git a/wiki/How-ALVR-works.md 
b/wiki/How-ALVR-works.md index 1b2205251e..347bbbd65d 100644 --- a/wiki/How-ALVR-works.md +++ b/wiki/How-ALVR-works.md @@ -4,228 +4,340 @@ This document details some technologies used by ALVR. If you have any doubt about what is (or isn't) written in here you can contact @zarik5, preferably on Discord. -**Note: At the time of writing, not all features listed here are implemented** +This document was last updated on June 27th 2023 and refers to the master branch. + +## Table of contents + +* Architecture + * The packaged application + * Programming languages + * Source code organization +* Logging and error management + * The event system +* Session and settings + * Procedural generation of code and UI +* The dashboard + * The user interface + * Driver communication + * Driver lifecycle +* The streaming pipeline: Overview +* Client-driver communication + * Discovery + * Streaming +* SteamVR driver +* Client and driver compositors + * Foveated rendering + * Color correction +* Video transcoding +* Audio +* Tracking and display timing +* Other streams +* Upcoming + * Phase sync + * Sliced encoding ## Architecture -### The built application +### The packaged application -ALVR is made of two applications: the server and client. The server is installed on the PC and the client is installed on the headset. While the client is a single APK, the server is made of three parts: the launcher, the driver and the dashboard. The launcher (`ALVR Launcher.exe`) is the single executable found at the root of the server app installation. The driver is located in `bin/win64/` and named `driver_alvr_server.dll`. The dashboard is a collection of files located in `dashboard/`. +ALVR is made of two applications: the streamer and client. The streamer can be installed on Windows and Linux, while the client is installed on Android VR headsets. The client communicates with the driver through TCP or UDP sockets. -The launcher sets up the PC environment and then opens SteamVR, which loads the ALVR driver. The driver is responsible for loading the dashboard and connecting to the client. +The client is a single unified APK, named `alvr_client_android.apk`. It is powered by OpenXR and it is compatible with Quest headsets, recent Pico headsets and HTC Focus 3 and XR Elite. + +The streamer is made of two main parts: the dashboard and the driver (also known as server). The driver is dynamically loaded by SteamVR. This is the file structure on Windows: + +* `bin/win64/` + * `driver_alvr_server.dll`: The main binary, responsible for client discovery and streaming. Loaded by SteamVR. + * `driver_alvr_server.pdb`: Debugging symbols + * `openvr_api.dll`: OpenVR SDK used for updating the chaperone. + * `vcruntime140_1.dll`: Windows SDK used by C++ code in the driver. +* `ALVR Dashboad.exe`: Dashboard binary used to change settings, manage clients, monitor statistics and do installation actions. It can launch SteamVR. +* `driver.vrdrivermanifest`: Auxiliary config file used by the driver. + +At runtime, some other files are created: + +* `session.json`: This contains unified configuration data used by ALVR, such as settings and client records. +* `session_log.txt`: Main log file. Each line is a json structure and represents an event generated by the driver. This gets cleared each time a client connects. +* `crash_log.txt`: Auxiliary log file. Same as `session_log.txt`, except only error logs are saved, and does not get cleared. 
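As a rough sketch of how that per-line JSON can be consumed (the `Event` struct below is only an illustrative mirror of the layout described in "The event system" section further down; the real definitions live in the `alvr_events` crate and the sample values are made up):

```rust
use serde::Deserialize;

// Illustrative stand-in for the event layout described later in this page;
// the real types live in the `alvr_events` crate and may differ in detail.
#[derive(Deserialize, Debug)]
struct Event {
    timestamp: String,
    // Tagged enum serialized as { "id": "...", "content": { ... } }
    event_type: serde_json::Value,
}

fn main() {
    // One line as it could appear in session_log.txt (values are made up)
    let line = r#"{"timestamp":"12:00:00.000","event_type":{"id":"Log","content":{"severity":"Info","content":"Client connected"}}}"#;

    let event: Event = serde_json::from_str(line).expect("valid event line");
    println!("{} {:?}", event.timestamp, event.event_type);
}
```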
### Programming languages -ALVR is written in multiple languages: Rust, C, C++, Java, HTML, Javascript, HLSL, GLSL. C++ is the most present language in the codebase but Rust is the language that plays the most important role, as it is used as glue and more and more code is getting rewritten in Rust. -Rust is a system programming language focused on memory safety and ease of use. It is as performant as C++ but code written on it is less likely to be affected by bugs at runtime. A feature of Rust that is extensively used by ALVR is enums, that correspond to tagged unions in C++. Rust's enums are a data type that can store different kinds of data, but only one type can be accessed at a time. For example the type `Result` can contain either an `Ok` value or an `Err` value but not both. Together with pattern matching, this is the foundation of error management in Rust applications. -C++ and Java code in ALVR is legacy code inherited by the developer @polygraphene; it is almost unmaintained and it is getting replaced by Rust. HTML and Javascript are used to write the dashboard. +ALVR is written in multiple languages: Rust, C, C++, HLSL, GLSL. The main language used in the codebase is Rust, which is used for the dashboard, networking, video decoding and audio code. C and C++ are used for graphics, video encoding and SteamVR integration. HLSL is used for graphics shaders on the Windows driver, GLSL is used on the Linux driver and the client. Moving forward, more code will be rewritten from C/C++ to Rust and HLSL code will be moved to GLSL or WGSL. + +Rust is a system programming language focused on memory safety and ease of use. It is as performant as C++ but Rust code is less likely to be affected by runtime bugs. The prime feature Rust feature used by ALVR is enums, that correspond to tagged unions in C++. Rust's enum is a data type that stores different kinds of data, but only one type can be accessed at a time. For example the type `Result` can contain either an `Ok` value or an `Err` value but not both. Together with pattern matching, this is the foundation of error management in Rust applications. ### Source code organization -* `alvr/`: This is where most of the code resides. Each subfolder is a Rust crate ("crate" means a code library or executable). - * `alvr/client/`: Crate that builds the client application. `alvr/client/android/` is the Android Studio project that builds the final APK. - * `alvr/common/`: Code shared by both client and server. It contains code for settings generation, networking, audio and logging. - * `alvr/launcher/`: This crate build the launcher executable. - * `alvr/server/`: This crate builds the driver DLL. `alvr/server/cpp/` contains the legacy code. - * `alvr/settings-schema/` and `alvr/settings-schema-derive/`: Utilities for settings code generation. - * `alvr/xtask/`: Build utilities. The code contained in this crate does not actually end up in the final ALVR applications. -* `server_release_template/`: Contains every file for ALVR server that does not require a build pass. This includes the dashboard. -* `wix/`: WIX project used to crate the ALVR installer on Windows. +ALVR code is hosted in a monorepo. This is an overview of the git tree: + +* `.github/`: Contains scripts used by the GitHub CI. +* `alvr/`: Each subfolder is a Rust crate ("crate" means a code library or executable). + * `audio/`: Utility crate hosting audio related code shared by client and driver. + * `client_core/`: Platform agnostic code for the client. 
It is used as a Rust library for `alvr_client_openxr` and can also compiled to a C ABI shared library with a .h header for integration with other projects. + * `client_mock/`: Client mock implemented as a thin wrapper around `alvr_client_core`. + * `client_openxr/`: Client implementation using OpenXR, compiled to a APK binary. + * `common/`: Some common code shared by other crates. It contains code for versioning, logging, struct primitives, and OpenXR paths. + * `dashboard/`: The dashboard application. + * `events/`: Utility crate hosting code related to events. + * `filesystem/`: Utility crate hosting code for filesystem abstraction between Windows and Linux. + * `packets/`: Utility crate containing packet definitions for communication between client, driver and dashboard. + * `server/`: The driver shared library loaded by SteamVR. + * `server_io/`: Common functionality shared by dashboard and driver, for interaction with the host system. This allows dashboard and driver to work independently from each other. + * `session/`: Utility crate related to session file and data management. + * `sockets/`: Utility crate shared by client and driver with socket and protocol implementation. + * `vrcompositor_wrapper/`: Small script used on Linux to correctly load the ALVR Vulkan layer by SteamVR. + * `vulkan_layer/`: Vulkan WSI layer used on Linux to work around limitations of the OpenVR API on Linux. This is mostly patchwork and hopefully will be removed in the future. + * `xtask/`: Utility CLI hosting a variety of scripts for environment setting, building, and packaging ALVR. Should be called with `cargo xtask`. +* `resources/`: resources for the README. +* `wiki/`: Contains the source for the Github ALVR wiki. Changes are mirrored to the actual wiki once committed. +* `about.toml`: Controls what dependency licenses are allowed in the codebase, and helps with generating the licenses file in the packaged ALVR streamer. +* `Cargo.lock`: Contains versioning information about Rust dependencies used by ALVR. +* `Cargo.toml`: Defines the list of Rust crates contained in the repository, and hosts some other workspace-level Rust configuration. ## Logging and error management -In ALVR codebase, logging is split into interface and implementation. The interface is defined in `alvr/common/src/logging.rs`, the implementations are defined in `alvr/server/src/logging_backend.rs` and `alvr/client/src/logging_backend.rs`. - -ALVR logging system is based on the crate [log](https://crates.io/crates/log). `log` is already very powerful on its own, since the macros `error!`, `warn!`, `info!`, `debug!` and `trace!` can collect messages, file and line number of the invocation. But I needed something more that can reduce boilerplate when doing error management (*Disclaimer: I know that there are tens of already established error management crates but I wanted to have something even more opinionated and custom fitted*). +Logging is split into interface and implementation. The interface is defined in `alvr/common/src/logging.rs`, the implementations are defined in each binary crate as `logging_backend.rs`. -ALVR defines some macros and functions to ease error management. The base type used for error management is `StrResult` that is an alias for `Result`. Read more about Rust's Result type [here](https://doc.rust-lang.org/std/result/). -`trace_err!` is a macro that takes as input a generic result and outputs and converts it into a `StrResult`. 
It does not support custom error messages and it should be used only to wrap `Result` types to convert them to `StrResult` when the result is actually not likely to return an error. This way we avoid calling `.unwrap()` that makes the program crash directly. In case of error, the `Err` type is converted to string and is prefixed with the current source code path and line number. -`trace_none!` works similarly to `trace_err!` but it accepts an `Option` as argument. `None` is mapped to `StrResult::Err()` with no converted error message (because there is none). -`fmt_e!` is a macro to create a `StrResult` from a hand specified error message. The result will be always `Err`. +ALVR logging system is based on the crate [log](https://crates.io/crates/log). `log` is already very powerful on its own, since its macros can collect messages, file and line number of the invocation. -When chaining `trace_err!` from one function to the other, a stack trace is formed. Unlike other error management crates, I can decide in which point in the stack to insert trace information to make error messages more concise. +ALVR defines some structures, macros and functions to ease error management. The base type used for error management is `StrResult` that is an alias for `Result`. Read more about Rust's Result type [here](https://doc.rust-lang.org/std/result/). -To show an error (if present) the function `show_err` is defined. It shows an error popup if supported by the OS (currently only on Windows) and the message is also forwarded to `error!`. -Other similar functions are defined: `show_e` shows an error unconditionally, `show_err_blocking` blocks the current thread until the popup is closed, `show_warn` opens a warning popup. More similar functions are in `alvr/common/src/logging.rs`. +There are many ways of logging in ALVR, each one for different use-cases. To make use of them you should add `use alvr_common::prelude::*` at the top of the Rust source file. -### The messaging system +* `error!()`, `warn!()`, `info!()`, `debug!()` (reexported macros from the `log` crate). Log is processed depending on the logging backend. +* `show_e()` and `show_w()` are used to log a string message, additionally showing a popup. +* `show_err()`, `show_warn()` work similarly to `show_e()` and `show_w()`, but they accept a `Result<>` and log only if the result is `Err()`. +* `fmt_e!()` adds tracing information to a message and produces a `Err()`, that can be returned. +* `err!()` and `enone!()` used respectively with .`.map_err()` and `.ok_or_else()`, to map a `Result` or `Option` to a `StrResult`, adding tracing information. +* Some other similarly named functions and macros with similar functionality -The communication between driver and the dashboard uses two methods. The dashboard can interrogate the server through an HTTP API. The server can notify the dashboard through logging. The server uses the function `log_id` to log a `LogId` instance (as JSON text). All log lines are sent to the dashboard though a websocket. The dashboard registers all log lines and searches for the log ID structures contained; the dashboard then reacts accordingly. -While log IDs can contain any (serializable) type of data, it is preferred to use them only as notifications. Any type of data needed by the dashboard that should be persistent is stored in the session structure (more on this later), and the dashboard can request it any time. 
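As a hedged illustration of how these helpers combine (a minimal sketch assuming `alvr_common::prelude` re-exports the items listed above and that `StrResult<T>` is generic over the success type; `read_session` is a made-up helper, not an ALVR function):

```rust
use alvr_common::prelude::*; // assumed to re-export StrResult, fmt_e!, err!, show_err, ...
use std::fs;

// Made-up helper: read a file into a string, mapping failures into StrResult.
fn read_session(path: &str) -> StrResult<String> {
    if path.is_empty() {
        // fmt_e!() builds an Err with a hand-written message plus tracing information
        return fmt_e!("No session path provided");
    }

    // err!() is used with .map_err() to convert the io::Error, adding file/line info
    fs::read_to_string(path).map_err(err!())
}

fn main() {
    // show_err() logs the error (and may show a popup) only if the result is Err
    show_err(read_session("session.json"));
}
```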
+### The event system -## The launcher +Events are messages used internally in the driver and sent to dashboard instances. Events are generated with `send_event()`; the event system is implemented on top of the logging system. -The launcher is the entry point for the server application. It first checks that SteamVR is installed and setup properly and then launches it. -The launcher requires `%LOCALAPPDATA%/openvr/` to contain a valid UTF-8 formatted file `openvrpaths.vrpath`. This file is crucial because it contains the path of the installation folder of SteamVR, the paths of the current registered drivers and the path of the Steam `config/` folder. +This is the layout of `Event`, in JSON form: -### The bootstrap lifecycle +```json +{ + "timestamp": "", + "event_type": { + "id": "", + "content": { } + } +} +``` -1. The launcher is opened. First `openvrpaths.vrpath` is checked to exist and to be valid. -2. From `openvrpaths.vrpath`, the list of registered drivers is obtained. If the current instance of ALVR is registered do nothing. Otherwise stash all driver paths to a file `alvr_drivers_paths_backup.txt` in `%TEMP%` and register the current ALVR path. -3. SteamVR is killed and then launched using the URI `steam://rungameid/250820`. -4. The launcher tries to GET `http://127.0.0.1:8082` until success. -5. The launcher closes itself. -6. Once the driver loads, `alvr_drivers_paths_backup.txt` is restored into `openvrpaths.vrpath`. +A log message is a special kind of event: -### Other launcher functions +```json +{ + "timestamp": "", + "event_type": { + "id": "Log", + "content": { + "severity": "Error or Warn or Info or Debug", + "content": "" + } + } +} +``` -The launcher has the button `Reset drivers and retry` that attempts to fix the current ALVR installation. It works as follows: +The driver logs events in JSON form to `session.json`, one per line. -1. SteamVR is killed. -2. `openvrpaths.vrpath` is deleted and ALVR add-on is unblocked (in `steam/config/steamvr.vrsettings`). -3. SteamVR is launched and then killed again after a timeout. This is done to recreate the file `openvrpaths.vrpath`. -4. The current ALVR path is registered and SteamVR is launched again. +Currently its use is limited, but eventually this will replace the current logging system, and logging will be built on top of the event system. The goal is to create a unified star-shaped network where each client and dashboard instance sends events to the server and the server broadcasts events to all other clients and dashboard instances. This should also unify the way the server communicates with clients and dashboards, making the dashboard just another client. -The launcher can also be launched in "restart" mode, that is headless (no window is visible). This is invoked by the driver to bootstrap a SteamVR restart (since the driver cannot restart itself since it is a DLL loaded by SteamVR). +## Session and settings -## Settings generation and data storage +ALVR uses a unified configuration file, `session.json`. It is generated the first time ALVR is launched. This file contains the following top-level fields: -A common programming paradigm is to have a strict separation between UI and background logic. This generally helps with maintainability, but for settings management this becomes a burden, because for each change of the settings structure on the backend the UI must be manually updated. ALVR solves by heavily relying on code generation. +* `"server_version"`: the current version of the streamer. It helps during a version upgrade.
+* `"drivers_backup"`: temporary storage for SteamVR driver paths. Used by the dashboard. +* `"openvr_config"`: contains a list of settings that have been checked for a diff. It is used by C++ code inside the driver. +* `"client_connections"`: contains entries corresponding to known clients. +* `"session_settings"`: all ALVR settings, laid out in a tree structure. -### Code generation on the backend (Rust) +### Procedural generation of code and UI -On ALVR, settings are defined in one and only place, that is `alvr/common/src/data/settings.rs`. Rust structures and enums are used to construct a tree-like representation of the settings. Structs and enums are decorated with the derive macro `SettingsSchema` that deals with the backend side of the code generation. -While the hand-defined structs and enums represent the concrete realization of a particular settings configuration, `SettingsSchema` generates two other settings representations, namely the schema and the "default" representation (aka session settings). -The schema representation defines the structure and metadata of the settings (not the concrete values). While arrangement and position of the fields is inferred by the definition itself of the structures, the fields can also be decorated with metadata like `advanced`, `min`/`max`/`step`, `gui` type, etc. that is needed by the user interface. -The second generated representation is the "default" representation. This representation has a dual purpose: it is used to define the default values of the settings (used in turn by the schema generation step) and to store the settings values on disk (`session.json`). -But why not use the original hand-defined structures to store the settings on disk? This is because enums (that are tagged unions) creates branching. -The branching is a desired behavior. Take the `Controllers` setting in the Headset tab as an example. If you uncheck it it means you *now* don't care about any other settings related to controllers. If we store this on disk using the original settings representation, all modifications to the settings related to the controllers are lost, but *then* you may want to recover these settings. -To solve this problem, the default/session representation transforms every enum into a struct, where every branch becomes a field, so every branch coexist at once, even unused ones. +ALVR lays out settings in a tree-like structure, in a way that the code itself can efficiently make use of. Settings can contain variants (specified in PascalCase in `session.json`), which represent mutually exclusive options. -### Code generation on the frontend (Javascript) +ALVR uses the macro `SettingsSchema` in the `settings-schema` crate to generate auxiliary code, i.e. a schema and the "default representation" of the settings. This crate was created specifically for ALVR, but it can be used by other projects too. -One of the main jobs of the dashboard is to let the user interact with settings. The dashboard gets the schema from the driver and uses it to generate the user interface. The schema has every kind of data that the UI needs except for translations which are defined in `server_release_template/dashboard/js/app/nls`. This is because this type of metadata would obscure the original settings definition if it was defined inline, due to the large amount of text. The schema is also used to interpret the session data loaded from the server. +The schema is made of nested `SchemaNode`s that contain metadata.
Some of the metadata is specified directly inside inline attributes in structures and enums. -### The schema representation +The "default representation" (whose type names are generated by concatenating the structure/enum name with `Default`) consists of structures that can hold settings in a way that does not lose information about unselected variants; enums are converted to structs, and variants that hold a value are converted to fields. The main goal of this is to meet the user expectation of not losing nested configuration when changing some options. The default representation is exactly what is saved inside `session.json` in `"session_settings"`. -While the original structs and enums that define settings are named, the schema representation loses the type names; it is based on a single base enum `SchemaNode` that can be nested. `SchemaNode` defines the following variants: +Info about the various types of schema nodes can be found [here](https://github.com/zarik5/settings-schema-rs). -* `Section`: This is translated from `struct`s and struct-like `enum` variants data. It contains a list of named fields, that can be set to `advanced`. In the UI it is represented by a collapsible group of settings controls. The top level section is treated specially and it generates the tabs (Video, Audio, etc). -* `Choice`: This is translated from `enums`. Each variant can have one or zero childs. In the UI this is represented by a stateful button group. Only the active branch content is displayed. -* `Switch`: This is generated by the special struct `Switch`. This node type is used when a settings make sense to be "turned off", and it also had some associated specialized settings only when in the "on" state. In the UI this is similar to `Section` but has also a checkbox. In the future this should be graphically changed to a switch. -* `Boolean`: translated from `bool`. -* `Integer`/`Float`: Translated from integer and floating point type. They accept the metadata `min`, `max`, `step`, `gui`. `gui` can be either `textBox`, `upDown` and `slider`. Only certain combinations of `min`/`max`/`step`/`gui` is valid. -* `Text`: Translated from `String`. In the UI this is a simple textbox. -* `Array`: Translated from rust arrays. In the UI this is represented similarly to `Section`s, with the index as the field name. In the future this should be changed to look more like a table. +The dashboard makes use of schema metadata and the default representation to generate the settings UI. The end result is that the settings UI layout closely matches the structures used internally in the code, and this helps in understanding the inner workings of the code. -There are also currently unused node types: +When upgrading ALVR, the session might have a slightly different layout: usually some settings have been added/removed/moved/renamed. ALVR is able to handle this with an extrapolation process: it starts from the default session and replaces values taken from the old session file, with the help of the settings schema. -* `Optional`: This is translated from `Option`. Similarly to `Switch`, this is generated from an enum that has one variant with data and one that doesn't. The reason behind the distinction is about the intention/meaning of the setting. Optional settings can either be "set" or "default". "Default" does not mean that the setting is set to a fixed default value, it means that ALVR can dynamically decide the value or let some other independent source decide the value, that ALVR might not even be aware of.
-* `Vector` and `Dictionary`: Translated from `Vec` and `Vec<(String, T)>` respectively. These types are unimplemented in the UI. They should represent a variable-sized collection of values. +## The dashboard -### The session +The dashboard is the main way of interacting with ALVR. Functionality is organized in tabs. -Settings (in the session settings representation) are stored inside `session.json`, together with other session data. The session structure is defined in `alvr/common/src/data/session.rs`. The session supports extrapolation, that is the recovery of data when the structure of `session.json` does not match the schema. This often happens during a server version update. The extrapolation is also used when the dashboard requests saving the settings, where the payload can be a preset, that is a deliberately truncated session file. +### The User Interface -## The connection lifecycle +These are the main components: -The code responsible for the connection lifecycle is located in `alvr/client/src/connection.rs` and `alvr/server/src/connection.rs`. +TODO: Add screenshots -The connection lifecycle can be divided into 3 steps: discovery, connection handshake and streaming. +* Sidebar: is used to select the tab for the main content page. +* Connections tab: used to trust clients or add them manually by specifying their IP. +* Statistics tab: shows graphs for latency and FPS and a summary page. +* Settings tab: settings page split between `Presets` and `All Settings`. `All Settings` are procedurally generated from a schema. `Presets` are controls that modify other settings. +* Installation tab: utilities for installation: setting firewall rules, registering the driver, launching the setup wizard. +* Logs tab: shows logs and events in a table. +* Debug tab: debugging actions. +* About tab: information about ALVR. +* Lower sidebar button: can be either "Launch SteamVR" or "Restart SteamVR", depending on the driver connection status. +* Notification bar: shows logs in an unobtrusive way. -During multiple connection steps, the client behaves like a server and the server behaves like a client. This is because of the balance in responsibility of the two peers. The client becomes the portal though a PC, that can contain sensitive data. For this reason the server has to trust the client before initiating the connection. +### Driver communication -### Discovery +The dashboard communicates with the driver in order to update its information and save configuration. This is done through an HTTP API, with base URL `http://localhost:8082`. These are the endpoints: -ALVR discovery protocol has initial support for a cryptographic handshake but it is currently unused. +* `/api/dashboard-request`: This is the main URL used by the dashboard to send messages and data to the server. The body contains the specific type and body of the request. +* `/api/events`: This endpoint is upgraded to a websocket and is used for listening to events from the driver. +* `/api/ping`: returns code 200 when the driver is alive (a usage sketch is shown below). -When ALVR is launched for the first time on the headset, a hostname, certificate and secret are generated. The client then broadcasts its hostname, certificate and ALVR version (`ClientHandshakePacket`). The server has a looping task that listens for these packets and registers the client entry, saving hostname and certificate, if the client version is compatible. -If the client is visible and trusted on the server side, the connection handshake begins.
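For illustration only (this is not the dashboard's actual code), a liveness check against `/api/ping` could look like the following sketch; the `ureq` crate is used here just for brevity and is an assumption, not a stated ALVR dependency:

```rust
use std::{thread, time::Duration};

// Hypothetical sketch: poll the driver's ping endpoint until it answers.
fn wait_for_driver() {
    loop {
        match ureq::get("http://localhost:8082/api/ping").call() {
            // Code 200 means the driver is alive
            Ok(response) if response.status() == 200 => return,
            // Driver not up yet (or request failed): retry after a short delay
            _ => thread::sleep(Duration::from_millis(500)),
        }
    }
}
```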
+The dashboard retains some functionality when the driver is not launched. It can manage settings, clients and perform installation actions, but clients cannot be discovered. Once the driver is launched, all these actions are performed by the server, requested through the HTTP API. This mechanism ensures that there are no data races. -### Connection handshake +### Driver lifecycle -The client listens for incoming TCP connections with the `ControlSocket` from the server. Once connected the client sends its headset specifications (`HeadsetInfoPacket`). The server then combines this data with the settings to create the configuration used for streaming (`ClientConfigPacket`) that is sent to the client. In particular, this last packet contains the dashboard URL, so the client can access the server dashboard. If this streaming configuration is found to invalidate the current ALVR OpenVR driver initialization settings (`OpenvrConfig` inside the session), SteamVR is restarted. -After this, if everything went right, the client discovery task is terminated, and after the server sends the control message `StartStream` the two peers are considered connected, but the procedure is not concluded. The next step is the setup of streams with `StreamSocket`. +The dashboard is able to launch and restart SteamVR, in order to manage the driver's lifecycle. -### Streaming +The driver launch procedure is as follows: -The streams created from `StreamSocket` (audio, video, tracking, etc) are encapsulated in async loops that are all awaited concurrently. One of these loops is the receiving end of the `ControlSocket`. -While streaming, the server only sends the control message `KeepAlive` periodically. The client can send `PlayspaceSync` (when the view is recentered), `RequestIDR` (in case of packet loss), and `KeepAlive`. +* The driver is registered according to the "Driver launch action" setting, if needed. By default, current SteamVR drivers are unregistered and backed up inside `session.json`. +* On Linux, the vrcompositor wrapper is installed if needed. +* SteamVR is launched. -### Disconnection +Once the driver shuts down, if there are backed-up drivers, these are restored. -When the control sockets encounters an error while sending or receiving a packet (for example with `KeepAlive`) the connection pipeline is interrupted and all looping tasks are canceled. A destructor callback (guard) is then run for objects or tasks that do not directly support canceling. +The driver restart procedure is as follows: -## The streaming socket +* The dashboard notifies the driver that it should be restarted. +* The driver sends a request for restart to the dashboard. +* The driver asks SteamVR to shut down, without unregistering any drivers. +* The dashboard waits for SteamVR to shut down, killing it after a timeout otherwise. +* The dashboard relaunches SteamVR. -`StreamSocket` is an abstraction layer over multiple network protocols. It currently supports UDP and TCP but it is designed to support also QUIC without a big API refactoring. `StreamSocket` API is inspired by the QUIC protocol, where multiple streams can be multiplexed on the same socket. +This might seem unnecessarily complicated. The reason for the first message round trip is to plug into the existing restarting system used by settings invalidation, which is invoked from the driver itself. The reason the driver cannot restart SteamVR autonomously is that any auxiliary process spawned by the driver would block SteamVR's shutdown or leave it in a zombie state.
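The "wait for shutdown, otherwise kill after a timeout" step could be sketched as follows (hypothetical code: `is_steamvr_running` and `kill_steamvr` are placeholder callbacks, and the timeout value is an assumption):

```rust
use std::{
    thread,
    time::{Duration, Instant},
};

// Hypothetical sketch of the dashboard-side shutdown wait.
fn wait_or_kill_steamvr(is_steamvr_running: impl Fn() -> bool, kill_steamvr: impl Fn()) {
    let deadline = Instant::now() + Duration::from_secs(10); // assumed timeout

    while is_steamvr_running() {
        if Instant::now() > deadline {
            // SteamVR did not shut down in time: kill it so it can be relaunched
            kill_steamvr();
            break;
        }
        thread::sleep(Duration::from_millis(250));
    }
}
```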
-Why not using one socket per stream? Regarding UDP, this does not have any particular advantage. The maximum transmission speed is still determined by the physical network controller and router. Regarding TCP, having multiple concurrent open sockets is even disadvantageous. TCP is a protocol that makes adjustments to the transmission speed depending on periodic network tests. Multiple TCP sockets can compete with each other for the available bandwidth, potentially resulting in unbalanced and unpredictable bandwidth between the sockets. Having one single multiplexed socket solves this by moving the bandwidth allocation problem to the application side. +## The streaming pipeline: Overview -### Packet layout +The goal of ALVR is to bridge input and output of a PCVR application to a remote headset. In order to do this ALVR implements pipelines to handle input, video and audio. The tracking-video pipeline (as known as the motion-to-photon pipeline) is the most complex one and it can be summarized in the following steps: -A packet is laid out as follows: +* Poll tracking data on the client +* Send tracking to the driver +* Execute the PCVR game logic and render layers +* Compose layers into a frame +* Encode the video frame +* Send the encoded video frame to the client through the network +* Decode the video frame on the client +* Perform more compositor transformations +* Submit the frame to the VR runtime +* The runtime renders the frame during a vsync. -| Stream ID | Packet index | Header | Raw buffer | -| :-------: | :----------: | :------: | :--------: | -| 1 byte | 8 bytes | variable | variable | +## Client-driver communication -The packet index is relative to a single stream. It is used to detect packet loss. -Both header and raw buffer can have variable size, even from one packet to the other in the same stream. The header is serialized and deserialized using [bincode](https://github.com/servo/bincode) and so the header size can be obtained deterministically. +ALVR uses a custom protocol for client-driver communication. ALVR supports UDP and TCP transports. USB connection is supported although not as a first class feature; you can read more about it [here](https://github.com/alvr-org/ALVR/wiki/ALVR-wired-setup-(ALVR-over-USB)). -### Throttling buffer +### Discovery -A throttling buffer is a traffic shaping tool to avoid packet bursts, that often lead to packet loss. +Usually the first step to establish a connection is discovery. When the server discovers a client it shows it in the "New clients" section in the Connection tab. The user can then trust the client and the connection is established. -If the throttling buffer is enabled, the packets are fragmented/recombined into buffers of a predefined size. The size should be set according to the supported MTU of the current network configuration, to avoid undetected packet fragmentation at the IP layer. +ALVR uses a UDP socket at 9943 for discovery. The client broadcasts a packet and waits for the driver to respond. It's the client that broadcasts and it's the driver that then asks for a connection: this is because of the balance in responsibility of the two peers. The client becomes the portal though a PC, that can contain sensitive data. For this reason the server has to trust the client before initiating the connection. 
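A simplified sketch of the client-side broadcast loop (illustrative only; the real packet layout is shown below, and the actual implementation lives in the `sockets/` crate):

```rust
use std::{net::UdpSocket, thread, time::Duration};

// Hypothetical discovery loop: periodically broadcast a pre-built packet on
// UDP port 9943 and let the driver initiate the actual connection.
fn broadcast_presence(packet: &[u8]) -> std::io::Result<()> {
    let socket = UdpSocket::bind("0.0.0.0:0")?;
    socket.set_broadcast(true)?;

    loop {
        socket.send_to(packet, ("255.255.255.255", 9943))?;
        thread::sleep(Duration::from_secs(1));
    }
}
```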
-The current implementation is similar to the leaky bucket algorithm, but it uses some statistical machinery (`EventTiming` in fixed latency mode to 0) to dynamically determine the optimal time interval between packets such as the "bucket" does not overflow and the latency remains minimal. +This is the layout of the discovery packet -## Event timing +| Prefix | Protocol ID | Hostname | +| :---------------: | :---------: | :------: | +| "ALVR" + 0x0 x 12 | 8 bytes | 32 bytes | -`EventTiming` is a general purpose mathematical tool used to manage timing for cyclical processes. Some "enqueue" and "dequeue" events are registered and `EventTiming` outputs some timing hints to minimize the queuing time for the next events. +* The prefix is used to filter packets and ensure a packet is really sent by an ALVR client +* The protocol ID is a unique version identifier calculated from the semver version of the client. If the client version is *semver-compatible* with the streamer, the protocol ID will match. +* Hostname: the hostname is a unique identifier for a client. When a client is launched for the first time, an hostname is chosen and it persists for then successive launches. It is reset when the app is upgraded or downgraded. -Currently, `EventTiming` is used for the stream socket throttling buffer and audio implementations, but it will be also used for video frame timing (to reduce latency and jitter), total video latency estimation (to reduce the black pull and positional lag), controller timing and maybe also controller jitter. +The format of the packet can change between major versions, but the prefix must remain unchanged, and the protocol ID must be 8 bytes. -`EventTiming` supports two operation modes: fixed latency and automatic latency. +### Streaming -### Fixed latency mode +ALVR uses two sockets for streaming: the control socket and stream socket. Currently these are implemented with async code; there's a plan to move this back to sync code. -In fixed latency mode, `EventTiming` calculates the average latency between corresponding enqueue and dequeue events. +The control socket uses the TCP transport; it is used to exchange small messages between client and server, ALVR requires TCP to ensure reliability. -Todo +The stream socket can use UDP or TCP; it is used to send large packets and/or packets that do not require reliability, ALVR is robust to packet losses and packet reordering. -### Automatic latency mode +The specific packet format used over the network is not clearly defined since ALVR uses multiple abstraction layers to manipulate the data (bincode, tokio Length Delimited Coding). Furthermore, packets are broken up into shards to ensure they can support the MTU when using UDP. -Todo +Since the amount of data streamed is large, the socket buffer size is increased both on the driver side and on the client. -## Motion-to-photon pipeline +## SteamVR driver -Todo +The driver is the component responsible for most of the streamer functionality. It is implemented as a shared library loaded by SteamVR. It implements the [OpenVR API](https://github.com/ValveSoftware/openvr) in order to interface with SteamVR. -## Foveated encoding +Using the OpenVR API, ALVR pushes tracking and button data to SteamVR using `vr::VRServerDriverHost()->TrackedDevicePoseUpdated()`. SteamVR then returns a rendered game frame with associated pose used for rendering. 
On Windows, frames are retrieved implementing the `IVRDriverDirectModeComponent` interface: SteamVR calls `IVRDriverDirectModeComponent::Present()`. On Linux this API doesn't work, and so ALVR uses a WSI Vulkan layer to intercept display driver calls done by vrcompositor. The pose associated to the frame is obtained from the vrcompositor execution stack with the help of libunwind. -Foveated encoding is a technique where frame images are individually compressed in a way that the human eye barely detects the compression. Particularly, the center of the image is kept at original resolution, and the rest is compressed. In practice, first the frames are re-rendered on the server with the outskirts of the frame "squished". The image is then transmitted to the client and then it gets re-expanded by using an inverse procedure. +## Client and driver compositors -But why does this work? The human eye has increased acuity in the center of the field of vision (the fovea) with respect to the periphery. +ALVR is essentially a bridge between PC and headset that transmits tracking, audio and video. But it also implements some additional features to improve image quality and streaming performance. To this goal, ALVR implements Fixed Foveated Rendering (FFR) and color correction. -Foveated encoding should not be confused with foveated rendering, where the image is rendered to begin with at a lower resolution in certain spots. Foveated encoding will NOT lower your GPU usage, only the network usage. +The client compositor is implemented in OpenGL, while on the server it's either implemented with DirectX 11 on Windows or Vulkan on Linux. There are plans to move all compositor code to the graphics abstraction layer [wgpu](https://github.com/gfx-rs/wgpu), mainly for unifying the codebase. -Currently ALVR does not directly support foveated encoding in the strict sense, instead it uses *fixed* foveated encoding. In a traditional foveated encoding application, the eyes are tracked, so that only what is directly looked at is rendered at higher resolution. But currently none of the headset supported by ALVR support eye tracking. For this reason, ALVR does foveated encoding by pretending the user is looking straight at the center of the image, which most of time is true. +It's important to note that ALVR's compositors are separate from the headset runtime compositor and SteamVR compositors. The headset runtime compositor is part of the headset operative system and controls compositing between different apps and overlays, and prepares the image for display (with lens distortion correction, chroma aberration correction, mura and ghosting correction). On the driver side, on Windows ALVR takes responsibility for compositing layers returned by SteamVR. The only responsibility of SteamVR is converting the frame into a valid DXGI texture if the game uses OpenGL or Vulkan graphics. On Linux ALVR grabs Vulkan frames that are already composited by vrcompostor. This introduced additional challenges since vrcompositor implements async reprojection which disrupts our head tracking mechanism. -Here are explained three foveated encoding algorithms. +### Foveated encoding -### Warp +Foveated rendering is a technique where frame images are individually compressed in a way that the human eye barely detects the compression. Particularly, the center of the image is kept at original resolution, and the rest is compressed. ALVR refers to foveated rendering as "Foveated encoding" to clarify its scope. 
In native standalone or PCVR apps, foveated rendering reduces the load on the GPU by rendering parts of the image at lower resolution. In ALVR's case frames are still rendered at full resolution, but are then "encoded" (compressing the outskirts of the image) before actually encoding and transmitting them. The image is then reexpanded on the client side after decoding and before display. -Developed by @zarik5. This algorithm applies an image compression that most adapts to the actual acuity graph of the human eye. It compresses the image radially (with an ellipse as the base) from a chosen spot in the image, with a chosen monotonic function. This algorithm makes heavy use of derivatives and inverse functions. It is implemented using a chain of shaders (shaders are a small piece of code that is run on the GPU for performance reasons). You can explore an interactive demo at [this link](https://www.shadertoy.com/view/3l2GRR). +Currently ALVR supports only fixed foveation, but support for tracked eye foveation is planned. -This algorithm is actually NOT used by ALVR. It used to be, but it got replaced by the "slices" method. The warp method has a fatal flaw: the pixel alignment is not respected. This causes resampling that makes the image look blurry. +In its history, ALVR implemented different algorithms for foveated encoding. The first one is "Warp", where the image is compressed in an elliptical pattern using the tangent function to define the compression ratio radially. A problem with this algorithm is that it causes the image to become blurry. [Here](https://www.shadertoy.com/view/3l2GRR) is a demo of this algorithm in action. The second algorithm used was "Slices", where the image is sliced up into 9 sections (center, edges, corners), compressed to different degrees and then re-packed together into a single rectangular frame. The main issue with this algorithm was its complexity. You can find a demo [here](https://www.shadertoy.com/view/WddGz8). The current algorithm in use is a reimplementation of Oculus's AADT (Axis-Aligned Distorted Transfer), which simply compresses the lateral edges of the image horizontally and the vertical edges vertically. This algorithm has less compression power but it's much simpler and less taxing on the Quest's GPU. -### Slices +### Color correction -Developed by @zarik5. This is the current algorithm used by ALVR for foveated encoding. The frame is cut into 9 rectangles (with 2 vertical and 2 horizontal cuts). Each rectangle is rendered at a different compression level. The center rectangle is uncompressed, the top/bottom/left/right rectangle is compressed 2x, the corner rectangles are compressed 4x. These cuts are actually virtual (mathematical) cuts, that are executed all at once in a single shader pass. All slices are neatly packed to form a new rectangular image. You can explore an interactive demo at [this link](https://www.shadertoy.com/view/WddGz8). +Color correction is implemented on the server and adds simple brightness, contrast, saturation, gamma and sharpening controls. It's implemented on the server for performance reasons and to avoid amplifying image artifacts caused by transcoding (a rough sketch of such an adjustment is shown below). -This algorithm is much simpler than the warp method but it is still quite complex. The implementation takes into account pixel alignment and uses some margins in the rectangles to avoid color bleeding. Like the warp algorithm, the slices method was designed to support eye tracking support when it will be available in consumer hardware.
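As a rough illustration of the kind of per-pixel adjustment the color correction described above involves (a generic formulation, not ALVR's actual shader code):

```rust
// Generic brightness/contrast/gamma adjustment of a single normalized [0, 1] channel.
// Saturation and sharpening are omitted: they need the full pixel and its
// neighborhood respectively, not a single channel value.
fn correct_channel(value: f32, brightness: f32, contrast: f32, gamma: f32) -> f32 {
    let v = (value - 0.5) * contrast + 0.5 + brightness;
    v.clamp(0.0, 1.0).powf(1.0 / gamma)
}
```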
+## Video transcoding -### Axis-Aligned Distorted Transfer (AADT) +To be able to send frames from driver to client through the network, they have to be compressed since current WiFi technology doesn't allow sending the amount of data of raw frames. Doing a quick conservative calculation, let's say we have 2 x 2048x2048 eye images, 3 color channels, 8 bits per channel, sent 72 times per second, that would amount to almost 15 Gbps. -This algorithm was developed by Oculus for the Oculus Link implementation. It is simpler than the other two methods, the end result looks better but it has less compression power. Like the slices algorithm, the image is cut into 9 rectangles where each rectangle is compressed independently. But actually the top and bottom rectangles are compressed only vertically, and the left and right only horizontally. This type of compression lends itself well to be used for images rendered in VR headsets, since it works in the same direction (and not against) the image distortion needed for lens distortion correction. +ALVR uses h264 and HEVC video codecs for compression. These codecs are chosen since they have hardware decoding support on Android and generally hardware encoding support on the PC side. On Windows, the driver uses NvEnc for Nvidia GPUs and AMF for AMD GPUs; on Linux ALVR supports VAAPI, NvEnc and AMF through FFmpeg. In case the GPU doesn't support hardware encoding, on both Windows and Linux ALVR supports software encoding with x264 (through FFmpeg), although the performance is often insufficient for a smooth experience. The client supports only MediaCodec, which is the API to access hardware video codecs on Android. -It is planned to replace the slices method with AADT in the future. +h264 and HEVC compression works on the assumption that consecutive frames are similar to each other. Each frame is reconstructed from past frames + some small additional data. For this reason, packet losses may cause glitches that persist many frames after the missing frame. When ALVR detects packet losses, it requests a new IDR frame from the encoder. An IDR frame is a packet that contains all the information to build a whole frame by itself; the encoder will ensure that successive frames will not rely on older frames than the last requested IDR. ## Audio -Todo +Game audio is captured on the PC and sent to the client, and microphone audio is captured on the client and sent to the PC. The Windows and Linux implementations once again differ. On Windows, game audio is captured from a loopback device; microphone audio is sent to virtual audio cable software to expose audio data from a (virtual) input device. On Linux the microphone does not work out-of-the-box, but there is a bash script available for creating and plugging into pipewire audio devices. +Unlike for video, audio is sent as a raw PCM waveform and new packets do not rely on old packets. But packet losses may still cause popping, which happens when there is a sudden jump in the waveform. To mitigate this, when ALVR detects a packet loss (or a buffer overflow or underflow) it will render a fadeout or cross-fade. +## Tracking and display timing +Handling head and controller tracking is tricky for VR applications, and even more for VR streaming applications. +In a normal native VR application, tracking is polled at the beginning of the rendering cycle; it is used to render the eye views from a certain perspective and render the controller or hand models.
When the game finishes rendering the frame, it submits it to the VR runtime, which will display it on screen. From the time tracking is polled to the time the frame is displayed on screen, 1 or more frame durations may have passed (for example at 72fps the frame duration is about 14ms). Our eyes are very sensitive to latency, especially for orientation, so VR runtimes implement image reprojection (Oculus calls it Asynchronous Time Warp). Reprojection works by rendering the frame rotated in 3D to compensate for the difference in orientation between the tracking pose polled at the beginning of the rendering cycle and the actual pose of the headset at the time of vsync when the image should be pushed to the display. To be able to correctly rotate the image, the runtime will also need to know the timestamp used for polling tracking, which can be the time of poll, or better, the predicted time of the vsync. If a time in the future is used for the tracking poll, the polled tracking will be extrapolated. +For VR streaming applications, the pipeline is similar, except that tracking is polled for a more distant point in the future, to compensate for the whole transcoding pipeline, and it's not trivial to decide how much to predict into the future. ALVR calculates the prediction offset by reading how much time passes between the tracking poll time and the time a frame rendered with the same tracking is submitted. These interval samples are averaged and then used for future tracking polls. (To calculate the correct total latency you also need to add the VR runtime compositor latency, which in the dashboard latency graph is shown as "Client VSync"). +On the streamer side, ALVR needs to work around an OpenVR API limitation. SteamVR returns frames with its pose, but then ALVR is responsible for matching the pose with one of the poses submitted previously and re-associating its timestamp. +## Other streams +There are some other kinds of data which can be streamed without requiring any special timing. These are button presses and haptics, respectively sent from client to driver and from driver to client. +## Upcoming +### Phase sync +Phase sync is not a single algorithm but many that share a similar objective: reducing latency or jitter in the rendering/streaming pipeline. The term "phase sync" comes from Oculus, which describes its algorithm for reducing latency in its OpenXR runtime by starting the rendering cycle as late as possible to reduce waiting time before the vsync. +In general, a phase sync algorithm is composed of two parts: a queue that holds data resources or pointers, and a statistical model to predict event times. The statistical model is fed with duration or other kinds of timing samples and as output it returns a refined time prediction for a recurring event. The statistical model could be simple and just aim for an average-controlled event, or more complex and aim at submitting before a deadline; the second case needs to take into account the variance of the timing samples. Unlike Oculus's implementation, these statistical models can be highly configurable to tune the target mean or target variance. +There are a few phase sync algorithms planned to be implemented: frame submission timing (to reduce frame queueing on the client, controlled by shifting the phase of the driver rendering cycle), SteamVR tracking submission timing (to make sure SteamVR is using exactly the tracking sample we want) and tracking poll timing (to reduce queuing on the server side).
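A bare-bones sketch of the statistical-model half of such an algorithm (hypothetical; the planned implementation is more configurable and elaborate than this):

```rust
// Exponentially-weighted estimate of the mean and variance of duration samples,
// used to predict how long the next occurrence of a recurring event will take.
struct EventTimingModel {
    mean: f32,
    variance: f32,
    weight: f32,      // smoothing factor in (0, 1)
    margin_stds: f32, // how conservative the prediction is, in standard deviations
}

impl EventTimingModel {
    fn submit_sample(&mut self, duration_s: f32) {
        let delta = duration_s - self.mean;
        self.mean += self.weight * delta;
        self.variance = (1.0 - self.weight) * (self.variance + self.weight * delta * delta);
    }

    fn predicted_duration(&self) -> f32 {
        // Target a deadline: mean plus a margin proportional to the standard deviation
        self.mean + self.margin_stds * self.variance.sqrt()
    }
}
```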
+ +## Sliced encoding ---------------------------- -Document written by @zarik5 +Sliced encoding is another algorithm showcased by Oculus and it's about reducing latency by parallelizing work. In a simple streaming pipeline, frames are processed sequentially: rendering, then encoding, then transmission, then decoding. There is already some degree of parallelism, as rendering, encoding, transmission, and decoding can happen at the same time. Sliced encoding can help in reducing encoding and decoding time, as the frames are split into "slices". This allows for more efficient utilization of hardware encoders/decoders, or even use hardware and software codecs in parallel. It's crucial to note that network latency cannot be optimized. Given the constraint of network, sliced encoding can reduce waiting times between encoder/transmission and transmission/decoder as each encoded slice can be transmitted immediately and doesn't have to wait for the rest of the frame to be encoded (and a similar reasoning applies for the decoding side). From 6750c9eaf12012bdde94d76d1374e06c28c20df1 Mon Sep 17 00:00:00 2001 From: Charlie Le <20309750+CharlieQLe@users.noreply.github.com> Date: Mon, 10 Jul 2023 15:50:16 -0400 Subject: [PATCH 08/28] Update Flatpak docs (#1724) * Update wiki for Flatpak * Update Flatpak page and install guide --- wiki/Flatpak.md | 10 +++++++++- wiki/Installation-guide.md | 3 +++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/wiki/Flatpak.md b/wiki/Flatpak.md index 2aaaa32e47..f7d1c5a1ef 100644 --- a/wiki/Flatpak.md +++ b/wiki/Flatpak.md @@ -12,7 +12,7 @@ 5. Any scripts that affect the host will run within the sandbox -6. Sometimes, a new instance of Steam will launch when launching the dashboard. To fix this, close both ALVR and Steam then launch Steam. As soon as Steam is opens to the storefront, launch the ALVR dashboard. +6. Sometimes, a new instance of Steam will launch when launching the dashboard. To fix this, close both ALVR and Steam then launch Steam. As soon as Steam opens to the storefront, launch the ALVR dashboard. 7. The ALVR Dashboard is not available in the Applications menu. To run the dashboard, run the following command to run `alvr_dashboard` in the Steam Flatpak environment: @@ -20,6 +20,8 @@ flatpak run --command=alvr_dashboard com.valvesoftware.Steam ``` +8. This only works with the Steam Flatpak. For non-Flatpak Steam, use the AppImage instead + ## Dependencies First, flatpak must be installed from your distro's repositories. Refer to [this page](https://flatpak.org/setup/) to find the instructions for your distro. @@ -64,6 +66,12 @@ Once inside the repository, simply run the following command to build and instal flatpak run org.flatpak.Builder --user --install --force-clean .flatpak-build-dir alvr/xtask/flatpak/com.valvesoftware.Steam.Utility.alvr.json ``` +If ALVR is not cloned under the home directory, permission to access the directory may need to be given to the build command. An example of this is given below. 
+ +``` +flatpak run --filesystem="$(pwd)" org.flatpak.Builder --user --install --force-clean .flatpak-build-dir alvr/xtask/flatpak/com.valvesoftware.Steam.Utility.alvr.json +``` + ## Notes ### Running the dashboard diff --git a/wiki/Installation-guide.md b/wiki/Installation-guide.md index 24a35b802f..b2c3d8554f 100644 --- a/wiki/Installation-guide.md +++ b/wiki/Installation-guide.md @@ -94,6 +94,9 @@ somewhere in your home directory (steam doesn't like long paths) #### AppImage You can get appimage for latest stable version from [here](https://github.com/alvr-org/ALVR/releases/latest). +#### Flatpak +For Flatpak users, refer to the instructions [here](https://github.com/alvr-org/ALVR/wiki/Flatpak) + #### Portable tar.gz * Install FFmpeg with VAAPI/NVENC + DRM + Vulkan + x264/x265 support. You can use this [ppa:savoury1/ffmpeg5](https://launchpad.net/~savoury1/+archive/ubuntu/ffmpeg5) under Ubuntu. From 972b5c37e8e00bc1dde52236fe5e030f6bb36b63 Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Tue, 11 Jul 2023 14:53:11 +0800 Subject: [PATCH 09/28] Remove unused vulkan-sdk --- .github/workflows/rust.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 00c207ed84..795d3a0302 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -24,10 +24,6 @@ jobs: components: clippy - uses: Swatinem/rust-cache@v1 - - uses: crazy-max/ghaction-chocolatey@v1 - with: - args: install vulkan-sdk - - uses: ErichDonGubler/clippy-check@fix-windows-lf-breaking-reports with: token: ${{ secrets.GITHUB_TOKEN }} From 8cfb716bc2a992e2d899f9b6a8e79a8af344b939 Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Tue, 11 Jul 2023 15:20:17 +0800 Subject: [PATCH 10/28] Progress on sync sockets (17) --- alvr/audio/src/lib.rs | 68 ++++++++++++++++----------- alvr/client_core/src/audio.rs | 68 +++++++++++++-------------- alvr/client_core/src/connection.rs | 42 +++++++++-------- alvr/server/src/connection.rs | 75 ++++++++++++++++-------------- 4 files changed, 135 insertions(+), 118 deletions(-) diff --git a/alvr/audio/src/lib.rs b/alvr/audio/src/lib.rs index 0bb3cbe86f..6ff2588ec6 100644 --- a/alvr/audio/src/lib.rs +++ b/alvr/audio/src/lib.rs @@ -1,4 +1,8 @@ -use alvr_common::{once_cell::sync::Lazy, parking_lot::Mutex, prelude::*}; +use alvr_common::{ + once_cell::sync::Lazy, + parking_lot::{Mutex, RwLock}, + prelude::*, +}; use alvr_session::{ AudioBufferingConfig, CustomAudioDeviceConfig, LinuxAudioBackend, MicrophoneDevicesConfig, }; @@ -12,8 +16,9 @@ use std::{ collections::{HashMap, VecDeque}, sync::{mpsc as smpsc, Arc}, thread, + time::Duration, }; -use tokio::sync::mpsc as tmpsc; +use tokio::{runtime::Runtime, sync::mpsc as tmpsc, time}; static VIRTUAL_MICROPHONE_PAIRS: Lazy> = Lazy::new(|| { [ @@ -441,7 +446,8 @@ pub fn get_next_frame_batch( // underflow, overflow, packet loss). In case the computation takes too much time, the audio // callback will gracefully handle an interruption, and the callback timing and sound wave // continuity will not be affected. -pub async fn receive_samples_loop( +pub fn receive_samples_loop( + runtime: &RwLock>, mut receiver: StreamReceiver<()>, sample_buffer: Arc>>, channels_count: usize, @@ -451,7 +457,22 @@ pub async fn receive_samples_loop( let mut receiver_buffer = ReceiverBuffer::new(); let mut recovery_sample_buffer = vec![]; loop { - receiver.recv_buffer(&mut receiver_buffer).await?; + if let Some(runtime) = &*runtime.read() { + let res = runtime.block_on(async { + tokio::select! 
{ + res = receiver.recv_buffer(&mut receiver_buffer) => Some(res), + _ = time::sleep(Duration::from_millis(500)) => None, + } + }); + match res { + Some(Ok(())) => (), + Some(err_res) => return err_res.map_err(err!()), + None => continue, + } + } else { + return Ok(()); + } + let (_, packet) = receiver_buffer.get()?; let new_samples = packet @@ -585,7 +606,8 @@ impl Iterator for StreamingSource { } } -pub async fn play_audio_loop( +pub fn play_audio_loop( + runtime: &RwLock>, device: AudioDevice, channels_count: u16, sample_rate: u32, @@ -601,34 +623,28 @@ pub async fn play_audio_loop( let sample_buffer = Arc::new(Mutex::new(VecDeque::new())); - // Store the stream in a thread (because !Send) - let (_shutdown_notifier, shutdown_receiver) = smpsc::channel::<()>(); - thread::spawn({ - let sample_buffer = Arc::clone(&sample_buffer); - move || -> StrResult { - let (_stream, handle) = OutputStream::try_from_device(&device.inner).map_err(err!())?; - - let source = StreamingSource { - sample_buffer, - current_batch: vec![], - current_batch_cursor: 0, - channels_count: channels_count as _, - sample_rate, - batch_frames_count, - }; - handle.play_raw(source).map_err(err!())?; + let (_stream, handle) = OutputStream::try_from_device(&device.inner).map_err(err!())?; - shutdown_receiver.recv().ok(); - Ok(()) - } - }); + handle + .play_raw(StreamingSource { + sample_buffer: Arc::clone(&sample_buffer), + current_batch: vec![], + current_batch_cursor: 0, + channels_count: channels_count as _, + sample_rate, + batch_frames_count, + }) + .map_err(err!())?; receive_samples_loop( + runtime, receiver, sample_buffer, channels_count as _, batch_frames_count, average_buffer_frames_count, ) - .await + .ok(); + + Ok(()) } diff --git a/alvr/client_core/src/audio.rs b/alvr/client_core/src/audio.rs index 2589764350..60d34ef6a6 100644 --- a/alvr/client_core/src/audio.rs +++ b/alvr/client_core/src/audio.rs @@ -1,5 +1,8 @@ use alvr_audio::AudioDevice; -use alvr_common::{parking_lot::Mutex, prelude::*}; +use alvr_common::{ + parking_lot::{Mutex, RwLock}, + prelude::*, +}; use alvr_session::AudioBufferingConfig; use alvr_sockets::{StreamReceiver, StreamSender}; use oboe::{ @@ -13,7 +16,7 @@ use std::{ sync::{mpsc as smpsc, Arc}, thread, }; -use tokio::sync::mpsc as tmpsc; +use tokio::{runtime::Runtime, sync::mpsc as tmpsc}; struct RecorderCallback { sender: tmpsc::UnboundedSender>, @@ -115,7 +118,8 @@ impl AudioOutputCallback for PlayerCallback { } #[allow(unused_variables)] -pub async fn play_audio_loop( +pub fn play_audio_loop( + runtime: &RwLock>, device: AudioDevice, channels_count: u16, sample_rate: u32, @@ -134,45 +138,37 @@ pub async fn play_audio_loop( let sample_buffer = Arc::new(Mutex::new(VecDeque::new())); - // store the stream in a thread (because !Send) and extract the playback handle - let (_shutdown_notifier, shutdown_receiver) = smpsc::channel::<()>(); - thread::spawn({ - let sample_buffer = Arc::clone(&sample_buffer); - move || -> StrResult { - let mut stream = AudioStreamBuilder::default() - .set_shared() - .set_performance_mode(PerformanceMode::LowLatency) - .set_sample_rate(sample_rate as _) - .set_sample_rate_conversion_quality(SampleRateConversionQuality::Fastest) - .set_stereo() - .set_f32() - .set_frames_per_callback(batch_frames_count as _) - .set_output() - .set_usage(Usage::Game) - .set_callback(PlayerCallback { - sample_buffer, - batch_frames_count, - }) - .open_stream() - .map_err(err!())?; - - stream.start().map_err(err!())?; - - shutdown_receiver.recv().ok(); - - // Note: Oboe crahes if 
stream.stop() is NOT called on AudioPlayer - stream.stop_with_timeout(0).ok(); - - Ok(()) - } - }); + let mut stream = AudioStreamBuilder::default() + .set_shared() + .set_performance_mode(PerformanceMode::LowLatency) + .set_sample_rate(sample_rate as _) + .set_sample_rate_conversion_quality(SampleRateConversionQuality::Fastest) + .set_stereo() + .set_f32() + .set_frames_per_callback(batch_frames_count as _) + .set_output() + .set_usage(Usage::Game) + .set_callback(PlayerCallback { + sample_buffer: Arc::clone(&sample_buffer), + batch_frames_count, + }) + .open_stream() + .map_err(err!())?; + + stream.start().map_err(err!())?; alvr_audio::receive_samples_loop( + runtime, receiver, sample_buffer, 2, batch_frames_count, average_buffer_frames_count, ) - .await + .ok(); + + // Note: Oboe crahes if stream.stop() is NOT called on AudioPlayer + stream.stop_with_timeout(0).ok(); + + Ok(()) } diff --git a/alvr/client_core/src/connection.rs b/alvr/client_core/src/connection.rs index 9fd2562455..8190e28462 100644 --- a/alvr/client_core/src/connection.rs +++ b/alvr/client_core/src/connection.rs @@ -301,27 +301,13 @@ fn connection_pipeline( config.options = settings.video.mediacodec_extra_options; } - let tracking_sender = stream_socket.request_stream(TRACKING); - let statistics_sender = stream_socket.request_stream(STATISTICS); let mut video_receiver = runtime.block_on(stream_socket.subscribe_to_stream::(VIDEO)); + let game_audio_receiver = runtime.block_on(stream_socket.subscribe_to_stream(AUDIO)); + let tracking_sender = stream_socket.request_stream(TRACKING); let mut haptics_receiver = runtime.block_on(stream_socket.subscribe_to_stream::(HAPTICS)); - - let game_audio_loop: BoxFuture<_> = if let Switch::Enabled(config) = settings.audio.game_audio { - let device = AudioDevice::new_output(None, None).map_err(to_int_e!())?; - - let game_audio_receiver = runtime.block_on(stream_socket.subscribe_to_stream(AUDIO)); - Box::pin(audio::play_audio_loop( - device, - 2, - game_audio_sample_rate, - config.buffering, - game_audio_receiver, - )) - } else { - Box::pin(future::pending()) - }; + let statistics_sender = stream_socket.request_stream(STATISTICS); let microphone_loop: BoxFuture<_> = if matches!(settings.audio.microphone, Switch::Enabled(_)) { let device = AudioDevice::new_input(None).map_err(to_int_e!())?; @@ -402,6 +388,23 @@ fn connection_pipeline( } }); + let game_audio_thread = if let Switch::Enabled(config) = settings.audio.game_audio { + let device = AudioDevice::new_output(None, None).map_err(to_int_e!())?; + + thread::spawn(move || { + alvr_common::show_err(audio::play_audio_loop( + &CONNECTION_RUNTIME, + device, + 2, + game_audio_sample_rate, + config.buffering, + game_audio_receiver, + )); + }) + } else { + thread::spawn(|| ()) + }; + let haptics_receive_thread = thread::spawn(move || loop { let haptics = if let Some(runtime) = &*CONNECTION_RUNTIME.read() { let res = runtime.block_on(async { @@ -526,8 +529,6 @@ fn connection_pipeline( } }); - let receive_loop = async move { stream_socket.receive_loop().await }; - let lifecycle_check_thread = thread::spawn(|| { while IS_STREAMING.value() && IS_RESUMED.value() && IS_ALIVE.value() { thread::sleep(Duration::from_millis(500)); @@ -537,6 +538,7 @@ fn connection_pipeline( }); let res = CONNECTION_RUNTIME.read().as_ref().unwrap().block_on(async { + let receive_loop = async move { stream_socket.receive_loop().await }; // Run many tasks concurrently. Threading is managed by the runtime, for best performance. tokio::select! 
{ res = spawn_cancelable(receive_loop) => { @@ -549,7 +551,6 @@ fn connection_pipeline( Ok(()) }, - res = spawn_cancelable(game_audio_loop) => res, res = spawn_cancelable(microphone_loop) => res, _ = DISCONNECT_SERVER_NOTIFIER.notified() => Ok(()), @@ -573,6 +574,7 @@ fn connection_pipeline( } video_receive_thread.join().ok(); + game_audio_thread.join().ok(); haptics_receive_thread.join().ok(); control_receive_thread.join().ok(); control_send_thread.join().ok(); diff --git a/alvr/server/src/connection.rs b/alvr/server/src/connection.rs index fdecbb9432..400233f3fa 100644 --- a/alvr/server/src/connection.rs +++ b/alvr/server/src/connection.rs @@ -556,14 +556,14 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { .map_err(to_int_e!())?; let stream_socket = Arc::new(stream_socket); + let mut video_sender = stream_socket.request_stream(VIDEO); + let microphone_receiver = runtime.block_on(stream_socket.subscribe_to_stream(AUDIO)); let mut tracking_receiver = runtime.block_on(stream_socket.subscribe_to_stream::(TRACKING)); + let haptics_sender = stream_socket.request_stream(HAPTICS); let mut statics_receiver = runtime.block_on(stream_socket.subscribe_to_stream::(STATISTICS)); - let mut video_sender = stream_socket.request_stream(VIDEO); - let haptics_sender = stream_socket.request_stream(HAPTICS); - let game_audio_loop: BoxFuture<_> = if let Switch::Enabled(config) = settings.audio.game_audio { let sender = stream_socket.request_stream(AUDIO); Box::pin(async move { @@ -622,38 +622,6 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { } else { Box::pin(future::pending()) }; - let microphone_loop: BoxFuture<_> = if let Switch::Enabled(config) = settings.audio.microphone { - #[allow(unused_variables)] - let (sink, source) = AudioDevice::new_virtual_microphone_pair( - Some(settings.audio.linux_backend), - config.devices, - ) - .map_err(to_int_e!())?; - let receiver = runtime.block_on(stream_socket.subscribe_to_stream(AUDIO)); - - #[cfg(windows)] - if let Ok(id) = alvr_audio::get_windows_device_id(&source) { - unsafe { - crate::SetOpenvrProperty( - *alvr_common::HEAD_ID, - crate::openvr_props::to_ffi_openvr_prop( - alvr_session::OpenvrPropertyKey::AudioDefaultRecordingDeviceId, - alvr_session::OpenvrPropValue::String(id), - ), - ) - } - } - - Box::pin(alvr_audio::play_audio_loop( - sink, - 1, - streaming_caps.microphone_sample_rate, - config.buffering, - receiver, - )) - } else { - Box::pin(future::pending()) - }; // Note: here we create CONNECTION_RUNTIME. The rest of the function MUST be infallible, as // CONNECTION_RUNTIME must be destroyed in the thread defined at the end of the function. 
@@ -681,6 +649,41 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { } }); + let microphone_thread = if let Switch::Enabled(config) = settings.audio.microphone { + #[allow(unused_variables)] + let (sink, source) = AudioDevice::new_virtual_microphone_pair( + Some(settings.audio.linux_backend), + config.devices, + ) + .map_err(to_int_e!())?; + + #[cfg(windows)] + if let Ok(id) = alvr_audio::get_windows_device_id(&source) { + unsafe { + crate::SetOpenvrProperty( + *alvr_common::HEAD_ID, + crate::openvr_props::to_ffi_openvr_prop( + alvr_session::OpenvrPropertyKey::AudioDefaultRecordingDeviceId, + alvr_session::OpenvrPropValue::String(id), + ), + ) + } + } + + thread::spawn(move || { + alvr_common::show_err(alvr_audio::play_audio_loop( + &CONNECTION_RUNTIME, + sink, + 1, + streaming_caps.microphone_sample_rate, + config.buffering, + microphone_receiver, + )); + }) + } else { + thread::spawn(|| ()) + }; + let tracking_manager = Arc::new(Mutex::new(TrackingManager::new())); let tracking_receive_thread = thread::spawn({ @@ -1084,7 +1087,6 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { // Spawn new tasks and let the runtime manage threading res = spawn_cancelable(game_audio_loop) => res, - res = spawn_cancelable(microphone_loop) => res, _ = RESTART_NOTIFIER.notified() => { control_sender @@ -1130,6 +1132,7 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { // ensure shutdown of threads video_send_thread.join().ok(); + microphone_thread.join().ok(); tracking_receive_thread.join().ok(); statistics_thread.join().ok(); control_thread.join().ok(); From 72f27e4fe4545d06c4ff101c8baa04930650a89d Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Tue, 11 Jul 2023 16:06:35 +0800 Subject: [PATCH 11/28] Move windows audio code to a separate module --- alvr/audio/src/lib.rs | 95 +++------------------------------------ alvr/audio/src/windows.rs | 87 +++++++++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+), 88 deletions(-) create mode 100644 alvr/audio/src/windows.rs diff --git a/alvr/audio/src/lib.rs b/alvr/audio/src/lib.rs index 6ff2588ec6..01873f416a 100644 --- a/alvr/audio/src/lib.rs +++ b/alvr/audio/src/lib.rs @@ -1,3 +1,9 @@ +#[cfg(windows)] +mod windows; + +#[cfg(windows)] +pub use crate::windows::*; + use alvr_common::{ once_cell::sync::Lazy, parking_lot::{Mutex, RwLock}, @@ -206,93 +212,6 @@ pub fn is_same_device(device1: &AudioDevice, device2: &AudioDevice) -> bool { } } -#[cfg(windows)] -fn get_windows_device(device: &AudioDevice) -> StrResult { - use widestring::U16CStr; - use windows::Win32::{ - Devices::FunctionDiscovery::PKEY_Device_FriendlyName, - Media::Audio::{eAll, IMMDeviceEnumerator, MMDeviceEnumerator, DEVICE_STATE_ACTIVE}, - System::Com::{self, CLSCTX_ALL, COINIT_MULTITHREADED, STGM_READ}, - }; - - let device_name = device.inner.name().map_err(err!())?; - - unsafe { - // This will fail the second time is called, ignore the error - Com::CoInitializeEx(None, COINIT_MULTITHREADED).ok(); - - let imm_device_enumerator: IMMDeviceEnumerator = - Com::CoCreateInstance(&MMDeviceEnumerator, None, CLSCTX_ALL).map_err(err!())?; - - let imm_device_collection = imm_device_enumerator - .EnumAudioEndpoints(eAll, DEVICE_STATE_ACTIVE) - .map_err(err!())?; - - let count = imm_device_collection.GetCount().map_err(err!())?; - - for i in 0..count { - let imm_device = imm_device_collection.Item(i).map_err(err!())?; - - let property_store = imm_device.OpenPropertyStore(STGM_READ).map_err(err!())?; - - let mut prop_variant = property_store - 
.GetValue(&PKEY_Device_FriendlyName) - .map_err(err!())?; - let utf16_name = - U16CStr::from_ptr_str(prop_variant.Anonymous.Anonymous.Anonymous.pwszVal.0); - Com::StructuredStorage::PropVariantClear(&mut prop_variant).map_err(err!())?; - - let imm_device_name = utf16_name.to_string().map_err(err!())?; - if imm_device_name == device_name { - return Ok(imm_device); - } - } - - fmt_e!("No device found with specified name") - } -} - -#[cfg(windows)] -pub fn get_windows_device_id(device: &AudioDevice) -> StrResult { - use widestring::U16CStr; - use windows::Win32::System::Com; - - unsafe { - let imm_device = get_windows_device(device)?; - - let id_str_ptr = imm_device.GetId().map_err(err!())?; - let id_str = U16CStr::from_ptr_str(id_str_ptr.0) - .to_string() - .map_err(err!())?; - Com::CoTaskMemFree(Some(id_str_ptr.0 as _)); - - Ok(id_str) - } -} - -// device must be an output device -#[cfg(windows)] -fn set_mute_windows_device(device: &AudioDevice, mute: bool) -> StrResult { - use windows::{ - core::GUID, - Win32::{Media::Audio::Endpoints::IAudioEndpointVolume, System::Com::CLSCTX_ALL}, - }; - - unsafe { - let imm_device = get_windows_device(device)?; - - let endpoint_volume = imm_device - .Activate::(CLSCTX_ALL, None) - .map_err(err!())?; - - endpoint_volume - .SetMute(mute, &GUID::zeroed()) - .map_err(err!())?; - } - - Ok(()) -} - #[cfg_attr(not(windows), allow(unused_variables))] pub async fn record_audio_loop( device: AudioDevice, @@ -330,7 +249,7 @@ pub async fn record_audio_loop( move || { #[cfg(windows)] if mute && device.is_output { - set_mute_windows_device(&device, true).ok(); + crate::windows::set_mute_windows_device(&device, true).ok(); } let stream = device diff --git a/alvr/audio/src/windows.rs b/alvr/audio/src/windows.rs new file mode 100644 index 0000000000..452b84f42b --- /dev/null +++ b/alvr/audio/src/windows.rs @@ -0,0 +1,87 @@ +use crate::AudioDevice; +use alvr_common::prelude::*; +use rodio::DeviceTrait; + +fn get_windows_device(device: &AudioDevice) -> StrResult { + use widestring::U16CStr; + use windows::Win32::{ + Devices::FunctionDiscovery::PKEY_Device_FriendlyName, + Media::Audio::{eAll, IMMDeviceEnumerator, MMDeviceEnumerator, DEVICE_STATE_ACTIVE}, + System::Com::{self, CLSCTX_ALL, COINIT_MULTITHREADED, STGM_READ}, + }; + + let device_name = device.inner.name().map_err(err!())?; + + unsafe { + // This will fail the second time is called, ignore the error + Com::CoInitializeEx(None, COINIT_MULTITHREADED).ok(); + + let imm_device_enumerator: IMMDeviceEnumerator = + Com::CoCreateInstance(&MMDeviceEnumerator, None, CLSCTX_ALL).map_err(err!())?; + + let imm_device_collection = imm_device_enumerator + .EnumAudioEndpoints(eAll, DEVICE_STATE_ACTIVE) + .map_err(err!())?; + + let count = imm_device_collection.GetCount().map_err(err!())?; + + for i in 0..count { + let imm_device = imm_device_collection.Item(i).map_err(err!())?; + + let property_store = imm_device.OpenPropertyStore(STGM_READ).map_err(err!())?; + + let mut prop_variant = property_store + .GetValue(&PKEY_Device_FriendlyName) + .map_err(err!())?; + let utf16_name = + U16CStr::from_ptr_str(prop_variant.Anonymous.Anonymous.Anonymous.pwszVal.0); + Com::StructuredStorage::PropVariantClear(&mut prop_variant).map_err(err!())?; + + let imm_device_name = utf16_name.to_string().map_err(err!())?; + if imm_device_name == device_name { + return Ok(imm_device); + } + } + + fmt_e!("No device found with specified name") + } +} + +pub fn get_windows_device_id(device: &AudioDevice) -> StrResult { + use widestring::U16CStr; + use 
windows::Win32::System::Com; + + unsafe { + let imm_device = get_windows_device(device)?; + + let id_str_ptr = imm_device.GetId().map_err(err!())?; + let id_str = U16CStr::from_ptr_str(id_str_ptr.0) + .to_string() + .map_err(err!())?; + Com::CoTaskMemFree(Some(id_str_ptr.0 as _)); + + Ok(id_str) + } +} + +// device must be an output device +pub fn set_mute_windows_device(device: &AudioDevice, mute: bool) -> StrResult { + use windows::{ + core::GUID, + Win32::{Media::Audio::Endpoints::IAudioEndpointVolume, System::Com::CLSCTX_ALL}, + }; + + unsafe { + let imm_device = get_windows_device(device)?; + + let endpoint_volume = imm_device + .Activate::(CLSCTX_ALL, None) + .map_err(err!())?; + + endpoint_volume + .SetMute(mute, &GUID::zeroed()) + .map_err(err!())?; + } + + Ok(()) +} From 915f961f0cac78a17dadafb5f4f288327de2f82d Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Wed, 12 Jul 2023 10:54:19 +0800 Subject: [PATCH 12/28] Progress on sync sockets (18) --- alvr/audio/src/lib.rs | 164 ++++++++++++++--------------- alvr/client_core/src/audio.rs | 100 +++++++++--------- alvr/client_core/src/connection.rs | 55 +++++----- alvr/server/src/connection.rs | 105 +++++++++--------- 4 files changed, 211 insertions(+), 213 deletions(-) diff --git a/alvr/audio/src/lib.rs b/alvr/audio/src/lib.rs index 01873f416a..9b00e3bdca 100644 --- a/alvr/audio/src/lib.rs +++ b/alvr/audio/src/lib.rs @@ -20,11 +20,11 @@ use cpal::{ use rodio::{OutputStream, Source}; use std::{ collections::{HashMap, VecDeque}, - sync::{mpsc as smpsc, Arc}, + sync::Arc, thread, time::Duration, }; -use tokio::{runtime::Runtime, sync::mpsc as tmpsc, time}; +use tokio::{runtime::Runtime, time}; static VIRTUAL_MICROPHONE_PAIRS: Lazy> = Lazy::new(|| { [ @@ -212,12 +212,20 @@ pub fn is_same_device(device1: &AudioDevice, device2: &AudioDevice) -> bool { } } -#[cfg_attr(not(windows), allow(unused_variables))] -pub async fn record_audio_loop( - device: AudioDevice, +#[derive(Clone)] +pub enum AudioRecordState { + Recording, + ShouldStop, + Err(String), +} + +#[allow(unused_variables)] +pub fn record_audio_blocking( + runtime: Arc>>, + mut sender: StreamSender<()>, + device: &AudioDevice, channels_count: u16, mute: bool, - mut sender: StreamSender<()>, ) -> StrResult { let config = device .inner @@ -240,95 +248,81 @@ pub async fn record_audio_loop( buffer_size: BufferSize::Default, }; - // data_sender/receiver is the bridge between tokio and std thread - let (data_sender, mut data_receiver) = tmpsc::unbounded_channel::>>(); - let (_shutdown_notifier, shutdown_receiver) = smpsc::channel::<()>(); - - let thread_callback = { - let data_sender = data_sender.clone(); - move || { - #[cfg(windows)] - if mute && device.is_output { - crate::windows::set_mute_windows_device(&device, true).ok(); - } - - let stream = device - .inner - .build_input_stream_raw( - &stream_config, - config.sample_format(), - { - let data_sender = data_sender.clone(); - move |data, _| { - let data = if config.sample_format() == SampleFormat::F32 { - data.bytes() - .chunks_exact(4) - .flat_map(|b| { - f32::from_ne_bytes([b[0], b[1], b[2], b[3]]) - .to_sample::() - .to_ne_bytes() - .to_vec() - }) - .collect() - } else { - data.bytes().to_vec() - }; - - let data = if config.channels() == 1 && channels_count == 2 { - data.chunks_exact(2) - .flat_map(|c| vec![c[0], c[1], c[0], c[1]]) - .collect() - } else if config.channels() == 2 && channels_count == 1 { - data.chunks_exact(4) - .flat_map(|c| vec![c[0], c[1]]) - .collect() - } else { - data - }; - - 
data_sender.send(Ok(data)).ok(); - } - }, - { - let data_sender = data_sender.clone(); - move |e| { - data_sender - .send(fmt_e!("Error while recording audio: {e}")) - .ok(); - } - }, - None, - ) - .map_err(err!())?; + let state = Arc::new(Mutex::new(AudioRecordState::Recording)); - stream.play().map_err(err!())?; + let stream = device + .inner + .build_input_stream_raw( + &stream_config, + config.sample_format(), + { + let state = Arc::clone(&state); + let runtime = Arc::clone(&runtime); + move |data, _| { + let data = if config.sample_format() == SampleFormat::F32 { + data.bytes() + .chunks_exact(4) + .flat_map(|b| { + f32::from_ne_bytes([b[0], b[1], b[2], b[3]]) + .to_sample::() + .to_ne_bytes() + .to_vec() + }) + .collect() + } else { + data.bytes().to_vec() + }; + + let data = if config.channels() == 1 && channels_count == 2 { + data.chunks_exact(2) + .flat_map(|c| vec![c[0], c[1], c[0], c[1]]) + .collect() + } else if config.channels() == 2 && channels_count == 1 { + data.chunks_exact(4) + .flat_map(|c| vec![c[0], c[1]]) + .collect() + } else { + data + }; + + if let Some(runtime) = &*runtime.read() { + runtime.block_on(sender.send(&(), data)).ok(); + } else { + *state.lock() = AudioRecordState::ShouldStop; + } + } + }, + { + let state = Arc::clone(&state); + move |e| *state.lock() = AudioRecordState::Err(e.to_string()) + }, + None, + ) + .map_err(err!())?; - shutdown_receiver.recv().ok(); + #[cfg(windows)] + if mute && device.is_output { + crate::windows::set_mute_windows_device(&device, true).ok(); + } - #[cfg(windows)] - if mute && device.is_output { - set_mute_windows_device(&device, false).ok(); - } + let mut res = stream.play().map_err(err!()); - Ok(vec![]) + if res.is_ok() { + while matches!(*state.lock(), AudioRecordState::Recording) && runtime.read().is_some() { + thread::sleep(Duration::from_millis(500)) } - }; - // use a std thread to store the stream object. The stream object must be destroyed on the same - // thread of creation. - thread::spawn(move || { - let res = thread_callback(); - if res.is_err() { - data_sender.send(res).ok(); + if let AudioRecordState::Err(e) = state.lock().clone() { + res = Err(e); } - }); + } - // todo: reuse buffers also in the audio callback - while let Some(maybe_data) = data_receiver.recv().await { - sender.send(&(), maybe_data?).await.ok(); + #[cfg(windows)] + if mute && device.is_output { + set_mute_windows_device(device, false).ok(); } - Ok(()) + res } // Audio callback. This is designed to be as less complex as possible. 
Still, when needed, this diff --git a/alvr/client_core/src/audio.rs b/alvr/client_core/src/audio.rs index 60d34ef6a6..1f697aa56a 100644 --- a/alvr/client_core/src/audio.rs +++ b/alvr/client_core/src/audio.rs @@ -1,4 +1,4 @@ -use alvr_audio::AudioDevice; +use alvr_audio::{AudioDevice, AudioRecordState}; use alvr_common::{ parking_lot::{Mutex, RwLock}, prelude::*, @@ -10,16 +10,13 @@ use oboe::{ AudioStream, AudioStreamBuilder, DataCallbackResult, InputPreset, Mono, PerformanceMode, SampleRateConversionQuality, Stereo, Usage, }; -use std::{ - collections::VecDeque, - mem, - sync::{mpsc as smpsc, Arc}, - thread, -}; -use tokio::{runtime::Runtime, sync::mpsc as tmpsc}; +use std::{collections::VecDeque, mem, sync::Arc, thread, time::Duration}; +use tokio::runtime::Runtime; struct RecorderCallback { - sender: tmpsc::UnboundedSender>, + runtime: Arc>>, + sender: StreamSender<()>, + state: Arc>, } impl AudioInputCallback for RecorderCallback { @@ -36,56 +33,65 @@ impl AudioInputCallback for RecorderCallback { sample_buffer.extend(&frame.to_ne_bytes()); } - self.sender.send(sample_buffer).ok(); + if let Some(runtime) = &*self.runtime.read() { + runtime.block_on(self.sender.send(&(), sample_buffer)).ok(); - DataCallbackResult::Continue + DataCallbackResult::Continue + } else { + *self.state.lock() = AudioRecordState::ShouldStop; + + DataCallbackResult::Stop + } + } + + fn on_error_before_close(&mut self, _: &mut dyn AudioInputStreamSafe, error: oboe::Error) { + *self.state.lock() = AudioRecordState::Err(error.to_string()); } } #[allow(unused_variables)] -pub async fn record_audio_loop( - device: AudioDevice, +pub fn record_audio_blocking( + runtime: Arc>>, + sender: StreamSender<()>, + device: &AudioDevice, channels_count: u16, mute: bool, - mut sender: StreamSender<()>, ) -> StrResult { let sample_rate = device.input_sample_rate()?; - let (_shutdown_notifier, shutdown_receiver) = smpsc::channel::<()>(); - let (data_sender, mut data_receiver) = tmpsc::unbounded_channel(); - - thread::spawn(move || -> StrResult { - let mut stream = AudioStreamBuilder::default() - .set_shared() - .set_performance_mode(PerformanceMode::LowLatency) - .set_sample_rate(sample_rate as _) - .set_sample_rate_conversion_quality(SampleRateConversionQuality::Fastest) - .set_mono() - .set_i16() - .set_input() - .set_usage(Usage::VoiceCommunication) - .set_input_preset(InputPreset::VoiceCommunication) - .set_callback(RecorderCallback { - sender: data_sender, - }) - .open_stream() - .map_err(err!())?; - - stream.start().map_err(err!())?; - - shutdown_receiver.recv().ok(); - - // This call gets stuck if the headset goes to sleep, but finishes when the headset wakes up - stream.stop_with_timeout(0).ok(); - - Ok(()) - }); - - while let Some(data) = data_receiver.recv().await { - sender.send(&(), data).await.ok(); + let state = Arc::new(Mutex::new(AudioRecordState::Recording)); + + let mut stream = AudioStreamBuilder::default() + .set_shared() + .set_performance_mode(PerformanceMode::LowLatency) + .set_sample_rate(sample_rate as _) + .set_sample_rate_conversion_quality(SampleRateConversionQuality::Fastest) + .set_mono() + .set_i16() + .set_input() + .set_usage(Usage::VoiceCommunication) + .set_input_preset(InputPreset::VoiceCommunication) + .set_callback(RecorderCallback { + runtime: Arc::clone(&runtime), + sender, + state: Arc::clone(&state), + }) + .open_stream() + .map_err(err!())?; + + let mut res = stream.start().map_err(err!()); + + if res.is_ok() { + while matches!(*state.lock(), AudioRecordState::Recording) && 
runtime.read().is_some() { + thread::sleep(Duration::from_millis(500)) + } + + if let AudioRecordState::Err(e) = state.lock().clone() { + res = Err(e); + } } - Ok(()) + res } struct PlayerCallback { diff --git a/alvr/client_core/src/connection.rs b/alvr/client_core/src/connection.rs index 8190e28462..32d3acf511 100644 --- a/alvr/client_core/src/connection.rs +++ b/alvr/client_core/src/connection.rs @@ -26,11 +26,9 @@ use alvr_sockets::{ spawn_cancelable, PeerType, ProtoControlSocket, ReceiverBuffer, StreamSender, StreamSocketBuilder, }; -use futures::future::BoxFuture; use serde_json as json; use std::{ collections::HashMap, - future, sync::{mpsc, Arc}, thread, time::{Duration, Instant}, @@ -65,7 +63,8 @@ const CONNECTION_RETRY_INTERVAL: Duration = Duration::from_secs(1); static DISCONNECT_SERVER_NOTIFIER: Lazy = Lazy::new(Notify::new); -pub static CONNECTION_RUNTIME: Lazy>> = Lazy::new(|| RwLock::new(None)); +pub static CONNECTION_RUNTIME: Lazy>>> = + Lazy::new(|| Arc::new(RwLock::new(None))); pub static TRACKING_SENDER: Lazy>>> = Lazy::new(|| Mutex::new(None)); pub static STATISTICS_SENDER: Lazy>>> = @@ -309,20 +308,6 @@ fn connection_pipeline( runtime.block_on(stream_socket.subscribe_to_stream::(HAPTICS)); let statistics_sender = stream_socket.request_stream(STATISTICS); - let microphone_loop: BoxFuture<_> = if matches!(settings.audio.microphone, Switch::Enabled(_)) { - let device = AudioDevice::new_input(None).map_err(to_int_e!())?; - - let microphone_sender = stream_socket.request_stream(AUDIO); - Box::pin(audio::record_audio_loop( - device, - 1, - false, - microphone_sender, - )) - } else { - Box::pin(future::pending()) - }; - // Important: To make sure this is successfully unset when stopping streaming, the rest of the // function MUST be infallible IS_STREAMING.set(true); @@ -405,6 +390,31 @@ fn connection_pipeline( thread::spawn(|| ()) }; + let microphone_thread = if matches!(settings.audio.microphone, Switch::Enabled(_)) { + let device = AudioDevice::new_input(None).map_err(to_int_e!())?; + + let microphone_sender = stream_socket.request_stream(AUDIO); + + thread::spawn(move || loop { + match audio::record_audio_blocking( + Arc::clone(&CONNECTION_RUNTIME), + microphone_sender.clone(), + &device, + 1, + false, + ) { + Ok(()) => break, + Err(e) => { + error!("Audio record error: {e}"); + + continue; + } + } + }) + } else { + thread::spawn(|| ()) + }; + let haptics_receive_thread = thread::spawn(move || loop { let haptics = if let Some(runtime) = &*CONNECTION_RUNTIME.read() { let res = runtime.block_on(async { @@ -537,7 +547,7 @@ fn connection_pipeline( DISCONNECT_SERVER_NOTIFIER.notify_waiters(); }); - let res = CONNECTION_RUNTIME.read().as_ref().unwrap().block_on(async { + CONNECTION_RUNTIME.read().as_ref().unwrap().block_on(async { let receive_loop = async move { stream_socket.receive_loop().await }; // Run many tasks concurrently. Threading is managed by the runtime, for best performance. tokio::select! 
{ @@ -548,12 +558,8 @@ fn connection_pipeline( set_hud_message( SERVER_DISCONNECTED_MESSAGE ); - - Ok(()) }, - res = spawn_cancelable(microphone_loop) => res, - - _ = DISCONNECT_SERVER_NOTIFIER.notified() => Ok(()), + _ = DISCONNECT_SERVER_NOTIFIER.notified() => (), } }); @@ -575,11 +581,12 @@ fn connection_pipeline( video_receive_thread.join().ok(); game_audio_thread.join().ok(); + microphone_thread.join().ok(); haptics_receive_thread.join().ok(); control_receive_thread.join().ok(); control_send_thread.join().ok(); keepalive_sender_thread.join().ok(); lifecycle_check_thread.join().ok(); - res.map_err(to_int_e!()) + Ok(()) } diff --git a/alvr/server/src/connection.rs b/alvr/server/src/connection.rs index 400233f3fa..d8f561e548 100644 --- a/alvr/server/src/connection.rs +++ b/alvr/server/src/connection.rs @@ -27,13 +27,10 @@ use alvr_packets::{ }; use alvr_session::{CodecType, ConnectionState, ControllersEmulationMode, FrameSize, OpenvrConfig}; use alvr_sockets::{ - spawn_cancelable, PeerType, ProtoControlSocket, StreamSender, StreamSocketBuilder, - KEEPALIVE_INTERVAL, + PeerType, ProtoControlSocket, StreamSender, StreamSocketBuilder, KEEPALIVE_INTERVAL, }; -use futures::future::BoxFuture; use std::{ collections::HashMap, - future, io::Write, net::IpAddr, process::Command, @@ -52,7 +49,8 @@ const RETRY_CONNECT_MIN_INTERVAL: Duration = Duration::from_secs(1); pub static SHOULD_CONNECT_TO_CLIENTS: Lazy> = Lazy::new(|| Arc::new(RelaxedAtomic::new(false))); -static CONNECTION_RUNTIME: Lazy>> = Lazy::new(|| RwLock::new(None)); +static CONNECTION_RUNTIME: Lazy>>> = + Lazy::new(|| Arc::new(RwLock::new(None))); static VIDEO_CHANNEL_SENDER: Lazy>>> = Lazy::new(|| Mutex::new(None)); static HAPTICS_SENDER: Lazy>>> = Lazy::new(|| Mutex::new(None)); @@ -557,6 +555,7 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { let stream_socket = Arc::new(stream_socket); let mut video_sender = stream_socket.request_stream(VIDEO); + let game_audio_sender = stream_socket.request_stream(AUDIO); let microphone_receiver = runtime.block_on(stream_socket.subscribe_to_stream(AUDIO)); let mut tracking_receiver = runtime.block_on(stream_socket.subscribe_to_stream::(TRACKING)); @@ -564,10 +563,35 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { let mut statics_receiver = runtime.block_on(stream_socket.subscribe_to_stream::(STATISTICS)); - let game_audio_loop: BoxFuture<_> = if let Switch::Enabled(config) = settings.audio.game_audio { - let sender = stream_socket.request_stream(AUDIO); - Box::pin(async move { - loop { + // Note: here we create CONNECTION_RUNTIME. The rest of the function MUST be infallible, as + // CONNECTION_RUNTIME must be destroyed in the thread defined at the end of the function. + // Failure to respect this might leave a lingering runtime. + *CONNECTION_RUNTIME.write() = Some(runtime); + + let (video_channel_sender, video_channel_receiver) = + std::sync::mpsc::sync_channel(settings.connection.max_queued_server_video_frames); + *VIDEO_CHANNEL_SENDER.lock() = Some(video_channel_sender); + *HAPTICS_SENDER.lock() = Some(haptics_sender); + + let video_send_thread = thread::spawn(move || loop { + let VideoPacket { header, payload } = + match video_channel_receiver.recv_timeout(Duration::from_millis(500)) { + Ok(packet) => packet, + Err(RecvTimeoutError::Timeout) => continue, + Err(RecvTimeoutError::Disconnected) => return, + }; + + if let Some(runtime) = &*CONNECTION_RUNTIME.read() { + // IMPORTANT: The only error that can happen here is socket closed. 
For this reason it's + // acceptable to call .ok() and ignore the error. The connection would already be + // closing so no corruption handling is necessary + runtime.block_on(video_sender.send(&header, payload)).ok(); + } + }); + + let game_audio_thread = if let Switch::Enabled(config) = settings.audio.game_audio { + thread::spawn(move || { + while CONNECTION_RUNTIME.read().is_some() { let device = match AudioDevice::new_output( Some(settings.audio.linux_backend), config.device.as_ref(), @@ -575,11 +599,10 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { Ok(data) => data, Err(e) => { warn!("New audio device failed: {e}"); - time::sleep(RETRY_CONNECT_MIN_INTERVAL).await; + thread::sleep(RETRY_CONNECT_MIN_INTERVAL); continue; } }; - let mute_when_streaming = config.mute_when_streaming; #[cfg(windows)] if let Ok(id) = alvr_audio::get_windows_device_id(&device) { @@ -596,16 +619,19 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { continue; }; - let new_sender = sender.clone(); - if let Err(e) = - alvr_audio::record_audio_loop(device, 2, mute_when_streaming, new_sender).await - { - warn!("Audio task exit with error : {e}") + if let Err(e) = alvr_audio::record_audio_blocking( + Arc::clone(&CONNECTION_RUNTIME), + game_audio_sender.clone(), + &device, + 2, + config.mute_when_streaming, + ) { + error!("Audio record error: {e}"); } #[cfg(windows)] - if let Ok(id) = - alvr_audio::get_windows_device_id(&AudioDevice::new_output(None, None)?) + if let Ok(id) = AudioDevice::new_output(None, None) + .and_then(|d| alvr_audio::get_windows_device_id(&d)) { unsafe { crate::SetOpenvrProperty( @@ -620,35 +646,9 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { } }) } else { - Box::pin(future::pending()) + thread::spawn(|| ()) }; - // Note: here we create CONNECTION_RUNTIME. The rest of the function MUST be infallible, as - // CONNECTION_RUNTIME must be destroyed in the thread defined at the end of the function. - // Failure to respect this might leave a lingering runtime. - *CONNECTION_RUNTIME.write() = Some(runtime); - - let (video_channel_sender, video_channel_receiver) = - std::sync::mpsc::sync_channel(settings.connection.max_queued_server_video_frames); - *VIDEO_CHANNEL_SENDER.lock() = Some(video_channel_sender); - *HAPTICS_SENDER.lock() = Some(haptics_sender); - - let video_send_thread = thread::spawn(move || loop { - let VideoPacket { header, payload } = - match video_channel_receiver.recv_timeout(Duration::from_millis(500)) { - Ok(packet) => packet, - Err(RecvTimeoutError::Timeout) => continue, - Err(RecvTimeoutError::Disconnected) => return, - }; - - if let Some(runtime) = &*CONNECTION_RUNTIME.read() { - // IMPORTANT: The only error that can happen here is socket closed. For this reason it's - // acceptable to call .ok() and ignore the error. The connection would already be - // closing so no corruption handling is necessary - runtime.block_on(video_sender.send(&header, payload)).ok(); - } - }); - let microphone_thread = if let Switch::Enabled(config) = settings.audio.microphone { #[allow(unused_variables)] let (sink, source) = AudioDevice::new_virtual_microphone_pair( @@ -1071,7 +1071,7 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { thread::spawn(move || { let _connection_drop_guard = _connection_drop_guard; - let res = CONNECTION_RUNTIME + CONNECTION_RUNTIME .read() .as_ref() .unwrap() @@ -1081,13 +1081,8 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { if let Err(e) = res { info!("Client disconnected. 
Cause: {e}" ); } - - Ok(()) }, - // Spawn new tasks and let the runtime manage threading - res = spawn_cancelable(game_audio_loop) => res, - _ = RESTART_NOTIFIER.notified() => { control_sender .lock() @@ -1095,15 +1090,10 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { .send(&ServerControlPacket::Restarting) .await .ok(); - - Ok(()) } - _ = DISCONNECT_CLIENT_NOTIFIER.notified() => Ok(()), + _ = DISCONNECT_CLIENT_NOTIFIER.notified() => (), } }); - if let Err(e) = res { - warn!("Connection interrupted: {e:?}"); - } // This requests shutdown from threads *CONNECTION_RUNTIME.write() = None; @@ -1132,6 +1122,7 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { // ensure shutdown of threads video_send_thread.join().ok(); + game_audio_thread.join().ok(); microphone_thread.join().ok(); tracking_receive_thread.join().ok(); statistics_thread.join().ok(); From c636bd5f53bbbe2ecf3cd629abfefee60cb5c6e1 Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Wed, 12 Jul 2023 11:25:23 +0800 Subject: [PATCH 13/28] Fix prop error message on SteamVR shutdown --- alvr/server/cpp/alvr_server/TrackedDevice.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/alvr/server/cpp/alvr_server/TrackedDevice.cpp b/alvr/server/cpp/alvr_server/TrackedDevice.cpp index 2332e3d8e9..cb817a9114 100644 --- a/alvr/server/cpp/alvr_server/TrackedDevice.cpp +++ b/alvr/server/cpp/alvr_server/TrackedDevice.cpp @@ -11,6 +11,10 @@ std::string TrackedDevice::get_serial_number() { } void TrackedDevice::set_prop(FfiOpenvrProperty prop) { + if (this->object_id == vr::k_unTrackedDeviceIndexInvalid) { + return; + } + auto key = (vr::ETrackedDeviceProperty)prop.key; auto props = vr::VRProperties(); @@ -51,4 +55,4 @@ void TrackedDevice::set_prop(FfiOpenvrProperty prop) { event_data.property.prop = key; vr::VRServerDriverHost()->VendorSpecificEvent( this->object_id, vr::VREvent_PropertyChanged, event_data, 0.); -} \ No newline at end of file +} From 30349d7f8c7816082f8d381c239b781de947b565 Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Wed, 12 Jul 2023 12:10:14 +0800 Subject: [PATCH 14/28] Progress on sync sockets (19) --- alvr/client_core/src/connection.rs | 38 +++++++++++++++--------- alvr/server/src/connection.rs | 42 ++++++++++++++++++++++----- alvr/sockets/src/lib.rs | 25 ---------------- alvr/sockets/src/stream_socket/mod.rs | 8 ++--- alvr/sockets/src/stream_socket/tcp.rs | 12 ++++---- alvr/sockets/src/stream_socket/udp.rs | 15 ++++++---- 6 files changed, 79 insertions(+), 61 deletions(-) diff --git a/alvr/client_core/src/connection.rs b/alvr/client_core/src/connection.rs index 32d3acf511..84aa8716ed 100644 --- a/alvr/client_core/src/connection.rs +++ b/alvr/client_core/src/connection.rs @@ -23,8 +23,7 @@ use alvr_packets::{ }; use alvr_session::{settings_schema::Switch, SessionConfig}; use alvr_sockets::{ - spawn_cancelable, PeerType, ProtoControlSocket, ReceiverBuffer, StreamSender, - StreamSocketBuilder, + PeerType, ProtoControlSocket, ReceiverBuffer, StreamSender, StreamSocketBuilder, }; use serde_json as json; use std::{ @@ -539,6 +538,28 @@ fn connection_pipeline( } }); + let stream_receive_thread = thread::spawn(move || { + while let Some(runtime) = &*CONNECTION_RUNTIME.read() { + let res = runtime.block_on(async { + tokio::select! { + res = stream_socket.recv() => Some(res), + _ = time::sleep(Duration::from_millis(500)) => None, + } + }); + match res { + Some(Ok(())) => (), + Some(Err(e)) => { + info!("Client disconnected. 
Cause: {e}"); + set_hud_message(SERVER_DISCONNECTED_MESSAGE); + DISCONNECT_SERVER_NOTIFIER.notify_waiters(); + + return; + } + None => continue, + } + } + }); + let lifecycle_check_thread = thread::spawn(|| { while IS_STREAMING.value() && IS_RESUMED.value() && IS_ALIVE.value() { thread::sleep(Duration::from_millis(500)); @@ -548,17 +569,7 @@ fn connection_pipeline( }); CONNECTION_RUNTIME.read().as_ref().unwrap().block_on(async { - let receive_loop = async move { stream_socket.receive_loop().await }; - // Run many tasks concurrently. Threading is managed by the runtime, for best performance. tokio::select! { - res = spawn_cancelable(receive_loop) => { - if let Err(e) = res { - info!("Server disconnected. Cause: {e}"); - } - set_hud_message( - SERVER_DISCONNECTED_MESSAGE - ); - }, _ = DISCONNECT_SERVER_NOTIFIER.notified() => (), } }); @@ -583,8 +594,9 @@ fn connection_pipeline( game_audio_thread.join().ok(); microphone_thread.join().ok(); haptics_receive_thread.join().ok(); - control_receive_thread.join().ok(); control_send_thread.join().ok(); + control_receive_thread.join().ok(); + stream_receive_thread.join().ok(); keepalive_sender_thread.join().ok(); lifecycle_check_thread.join().ok(); diff --git a/alvr/server/src/connection.rs b/alvr/server/src/connection.rs index d8f561e548..20b6bd3db1 100644 --- a/alvr/server/src/connection.rs +++ b/alvr/server/src/connection.rs @@ -887,7 +887,7 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { } }); - let control_thread = thread::spawn({ + let control_receive_thread = thread::spawn({ let control_sender = Arc::clone(&control_sender); let client_hostname = client_hostname.clone(); move || loop { @@ -1035,6 +1035,37 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { } }); + let stream_receive_thread = thread::spawn({ + let client_hostname = client_hostname.clone(); + move || { + while let Some(runtime) = &*CONNECTION_RUNTIME.read() { + let res = runtime.block_on(async { + tokio::select! { + res = stream_socket.recv() => Some(res), + _ = time::sleep(Duration::from_millis(500)) => None, + } + }); + match res { + Some(Ok(())) => (), + Some(Err(e)) => { + info!("Client disconnected. Cause: {e}"); + + SERVER_DATA_MANAGER.write().update_client_list( + client_hostname, + ClientListAction::SetConnectionState(ConnectionState::Disconnecting { + should_be_removed: false, + }), + ); + DISCONNECT_CLIENT_NOTIFIER.notify_waiters(); + + return; + } + None => continue, + } + } + } + }); + let lifecycle_check_thread = thread::spawn(|| { while SHOULD_CONNECT_TO_CLIENTS.value() && CONNECTION_RUNTIME.read().is_some() { thread::sleep(Duration::from_millis(500)); @@ -1077,12 +1108,6 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { .unwrap() .block_on(async move { tokio::select! { - res = stream_socket.receive_loop() => { - if let Err(e) = res { - info!("Client disconnected. 
Cause: {e}" ); - } - }, - _ = RESTART_NOTIFIER.notified() => { control_sender .lock() @@ -1126,7 +1151,8 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { microphone_thread.join().ok(); tracking_receive_thread.join().ok(); statistics_thread.join().ok(); - control_thread.join().ok(); + control_receive_thread.join().ok(); + stream_receive_thread.join().ok(); keepalive_thread.join().ok(); lifecycle_check_thread.join().ok(); }); diff --git a/alvr/sockets/src/lib.rs b/alvr/sockets/src/lib.rs index 4555d556bd..7136ece9ae 100644 --- a/alvr/sockets/src/lib.rs +++ b/alvr/sockets/src/lib.rs @@ -15,28 +15,3 @@ pub const HANDSHAKE_PACKET_SIZE_BYTES: usize = 56; // this may change in future pub const KEEPALIVE_INTERVAL: Duration = Duration::from_millis(500); type Ldc = tokio_util::codec::LengthDelimitedCodec; - -mod util { - use alvr_common::prelude::*; - use std::future::Future; - use tokio::{sync::oneshot, task}; - - // Tokio tasks are not cancelable. This function awaits a cancelable task. - pub async fn spawn_cancelable( - future: impl Future + Send + 'static, - ) -> StrResult { - // this channel is actually never used. cancel_receiver will be notified when _cancel_sender - // is dropped - let (_cancel_sender, cancel_receiver) = oneshot::channel::<()>(); - - task::spawn(async { - tokio::select! { - res = future => res, - _ = cancel_receiver => Ok(()), - } - }) - .await - .map_err(err!())? - } -} -pub use util::*; diff --git a/alvr/sockets/src/stream_socket/mod.rs b/alvr/sockets/src/stream_socket/mod.rs index c2bd4e5887..76b4ddbb65 100644 --- a/alvr/sockets/src/stream_socket/mod.rs +++ b/alvr/sockets/src/stream_socket/mod.rs @@ -430,13 +430,13 @@ impl StreamSocket { } } - pub async fn receive_loop(&self) -> StrResult { - match self.receive_socket.lock().await.take().unwrap() { + pub async fn recv(&self) -> StrResult { + match self.receive_socket.lock().await.as_mut().unwrap() { StreamReceiveSocket::Udp(socket) => { - udp::receive_loop(socket, Arc::clone(&self.packet_queues)).await + udp::recv(socket, Arc::clone(&self.packet_queues)).await } StreamReceiveSocket::Tcp(socket) => { - tcp::receive_loop(socket, Arc::clone(&self.packet_queues)).await + tcp::recv(socket, Arc::clone(&self.packet_queues)).await } } } diff --git a/alvr/sockets/src/stream_socket/tcp.rs b/alvr/sockets/src/stream_socket/tcp.rs index b78b27b470..6233b03b60 100644 --- a/alvr/sockets/src/stream_socket/tcp.rs +++ b/alvr/sockets/src/stream_socket/tcp.rs @@ -67,18 +67,20 @@ pub async fn connect_to_client( Ok((Arc::new(Mutex::new(send_socket)), receive_socket)) } -pub async fn receive_loop( - mut socket: TcpStreamReceiveSocket, +pub async fn recv( + socket: &mut TcpStreamReceiveSocket, packet_enqueuers: Arc>>>, ) -> StrResult { - while let Some(maybe_packet) = socket.next().await { + if let Some(maybe_packet) = socket.next().await { let mut packet = maybe_packet.map_err(err!())?; let stream_id = packet.get_u16(); if let Some(enqueuer) = packet_enqueuers.lock().await.get_mut(&stream_id) { enqueuer.send(packet).map_err(err!())?; } - } - Ok(()) + Ok(()) + } else { + fmt_e!("Socket closed") + } } diff --git a/alvr/sockets/src/stream_socket/udp.rs b/alvr/sockets/src/stream_socket/udp.rs index ece5b4981e..057b2759a6 100644 --- a/alvr/sockets/src/stream_socket/udp.rs +++ b/alvr/sockets/src/stream_socket/udp.rs @@ -67,22 +67,25 @@ pub async fn connect( )) } -pub async fn receive_loop( - mut socket: UdpStreamReceiveSocket, +pub async fn recv( + socket: &mut UdpStreamReceiveSocket, packet_enqueuers: Arc>>>, ) -> StrResult { - while let 
Some(maybe_packet) = socket.inner.next().await { + if let Some(maybe_packet) = socket.inner.next().await { let (mut packet_bytes, address) = maybe_packet.map_err(err!())?; if address != socket.peer_addr { - continue; + // Non fatal + return Ok(()); } let stream_id = packet_bytes.get_u16(); if let Some(enqueuer) = packet_enqueuers.lock().await.get_mut(&stream_id) { enqueuer.send(packet_bytes).map_err(err!())?; } - } - Ok(()) + Ok(()) + } else { + fmt_e!("Socket closed") + } } From 3216ad76ffee1b6062b6f80235e5fef1b65b53ff Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Wed, 12 Jul 2023 12:57:21 +0800 Subject: [PATCH 15/28] Progress on sync sockets (20) --- alvr/server/src/connection.rs | 57 +++++++++++++++++++++++------------ alvr/server/src/lib.rs | 19 ++++++------ alvr/server/src/web_server.rs | 8 +++-- 3 files changed, 53 insertions(+), 31 deletions(-) diff --git a/alvr/server/src/connection.rs b/alvr/server/src/connection.rs index 20b6bd3db1..9fc33a6892 100644 --- a/alvr/server/src/connection.rs +++ b/alvr/server/src/connection.rs @@ -7,8 +7,7 @@ use crate::{ statistics::StatisticsManager, tracking::{self, TrackingManager}, FfiButtonValue, FfiFov, FfiViewsConfig, VideoPacket, BITRATE_MANAGER, DECODER_CONFIG, - DISCONNECT_CLIENT_NOTIFIER, RESTART_NOTIFIER, SERVER_DATA_MANAGER, STATISTICS_MANAGER, - VIDEO_MIRROR_SENDER, VIDEO_RECORDING_FILE, + SERVER_DATA_MANAGER, STATISTICS_MANAGER, VIDEO_MIRROR_SENDER, VIDEO_RECORDING_FILE, }; use alvr_audio::AudioDevice; use alvr_common::{ @@ -37,7 +36,7 @@ use std::{ ptr, sync::{ atomic::{AtomicBool, Ordering}, - mpsc::{RecvTimeoutError, SyncSender, TrySendError}, + mpsc::{self, RecvTimeoutError, SyncSender, TrySendError}, Arc, }, thread, @@ -55,6 +54,15 @@ static VIDEO_CHANNEL_SENDER: Lazy>>> = Lazy::new(|| Mutex::new(None)); static HAPTICS_SENDER: Lazy>>> = Lazy::new(|| Mutex::new(None)); +pub enum ClientDisconnectRequest { + Disconnect, + ServerShutdown, + ServerRestart, +} + +pub static DISCONNECT_CLIENT_NOTIFIER: Lazy>>> = + Lazy::new(|| Mutex::new(None)); + fn align32(value: f32) -> u32 { ((value / 32.).floor() * 32.) 
as u32 } @@ -309,6 +317,9 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { }) .map_err(to_int_e!())?; + let (disconnect_sender, disconnect_receiver) = mpsc::channel(); + *DISCONNECT_CLIENT_NOTIFIER.lock() = Some(disconnect_sender); + // Safety: this never panics because client_ip is picked from client_ips keys let client_hostname = client_ips.remove(&client_ip).unwrap(); @@ -335,6 +346,8 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { self.hostname.clone(), ClientListAction::SetConnectionState(ConnectionState::Disconnected), ); + + *DISCONNECT_CLIENT_NOTIFIER.lock() = None; } } let _connection_drop_guard = DropGuard { @@ -875,7 +888,9 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { should_be_removed: false, }), ); - DISCONNECT_CLIENT_NOTIFIER.notify_waiters(); + if let Some(notifier) = &*DISCONNECT_CLIENT_NOTIFIER.lock() { + notifier.send(ClientDisconnectRequest::Disconnect).ok(); + } return; } @@ -909,7 +924,9 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { should_be_removed: false, }), ); - DISCONNECT_CLIENT_NOTIFIER.notify_waiters(); + if let Some(notifier) = &*DISCONNECT_CLIENT_NOTIFIER.lock() { + notifier.send(ClientDisconnectRequest::Disconnect).ok(); + } return; } @@ -1056,7 +1073,10 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { should_be_removed: false, }), ); - DISCONNECT_CLIENT_NOTIFIER.notify_waiters(); + + if let Some(notifier) = &*DISCONNECT_CLIENT_NOTIFIER.lock() { + notifier.send(ClientDisconnectRequest::Disconnect).ok(); + } return; } @@ -1071,7 +1091,9 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { thread::sleep(Duration::from_millis(500)); } - DISCONNECT_CLIENT_NOTIFIER.notify_waiters(); + if let Some(notifier) = &*DISCONNECT_CLIENT_NOTIFIER.lock() { + notifier.send(ClientDisconnectRequest::Disconnect).ok(); + } }); { @@ -1102,23 +1124,20 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { thread::spawn(move || { let _connection_drop_guard = _connection_drop_guard; - CONNECTION_RUNTIME - .read() - .as_ref() - .unwrap() - .block_on(async move { - tokio::select! 
{ - _ = RESTART_NOTIFIER.notified() => { + let res = disconnect_receiver.recv(); + if matches!(res, Ok(ClientDisconnectRequest::ServerRestart)) { + if let Some(runtime) = &*CONNECTION_RUNTIME.read() { + runtime + .block_on(async { control_sender .lock() .await .send(&ServerControlPacket::Restarting) .await - .ok(); - } - _ = DISCONNECT_CLIENT_NOTIFIER.notified() => (), - } - }); + }) + .ok(); + } + } // This requests shutdown from threads *CONNECTION_RUNTIME.write() = None; diff --git a/alvr/server/src/lib.rs b/alvr/server/src/lib.rs index 17d60fb8e1..eecfa0768b 100644 --- a/alvr/server/src/lib.rs +++ b/alvr/server/src/lib.rs @@ -35,7 +35,7 @@ use alvr_packets::{ClientListAction, DecoderInitializationConfig, VideoPacketHea use alvr_server_io::ServerDataManager; use alvr_session::{CodecType, ConnectionState}; use bitrate::BitrateManager; -use connection::SHOULD_CONNECT_TO_CLIENTS; +use connection::{ClientDisconnectRequest, DISCONNECT_CLIENT_NOTIFIER, SHOULD_CONNECT_TO_CLIENTS}; use statistics::StatisticsManager; use std::{ collections::HashMap, @@ -48,10 +48,7 @@ use std::{ time::{Duration, Instant}, }; use sysinfo::{ProcessRefreshKind, RefreshKind, SystemExt}; -use tokio::{ - runtime::Runtime, - sync::{broadcast, Notify}, -}; +use tokio::{runtime::Runtime, sync::broadcast}; static FILESYSTEM_LAYOUT: Lazy = Lazy::new(|| { afs::filesystem_layout_from_openvr_driver_root_dir(&alvr_server_io::get_driver_dir().unwrap()) @@ -80,8 +77,8 @@ static VIDEO_MIRROR_SENDER: Lazy>>>> = Lazy::new(|| Mutex::new(None)); static VIDEO_RECORDING_FILE: Lazy>> = Lazy::new(|| Mutex::new(None)); -static DISCONNECT_CLIENT_NOTIFIER: Lazy = Lazy::new(Notify::new); -static RESTART_NOTIFIER: Lazy = Lazy::new(Notify::new); +// static DISCONNECT_CLIENT_NOTIFIER: Lazy = Lazy::new(Notify::new); +// static RESTART_NOTIFIER: Lazy = Lazy::new(Notify::new); static FRAME_RENDER_VS_CSO: &[u8] = include_bytes!("../cpp/platform/win32/FrameRenderVS.cso"); static FRAME_RENDER_PS_CSO: &[u8] = include_bytes!("../cpp/platform/win32/FrameRenderPS.cso"); @@ -166,7 +163,9 @@ pub extern "C" fn shutdown_driver() { } } - DISCONNECT_CLIENT_NOTIFIER.notify_waiters(); + if let Some(notifier) = &*DISCONNECT_CLIENT_NOTIFIER.lock() { + notifier.send(ClientDisconnectRequest::ServerShutdown).ok(); + } // apply openvr config for the next launch SERVER_DATA_MANAGER.write().session_mut().openvr_config = connection::contruct_openvr_config(); @@ -215,7 +214,9 @@ pub fn notify_restart_driver() { // This call is blocking pub fn restart_driver() { SHOULD_CONNECT_TO_CLIENTS.set(false); - RESTART_NOTIFIER.notify_waiters(); + if let Some(notifier) = &*DISCONNECT_CLIENT_NOTIFIER.lock() { + notifier.send(ClientDisconnectRequest::ServerRestart).ok(); + } shutdown_driver(); } diff --git a/alvr/server/src/web_server.rs b/alvr/server/src/web_server.rs index 201f4a975f..e34d59038c 100644 --- a/alvr/server/src/web_server.rs +++ b/alvr/server/src/web_server.rs @@ -1,6 +1,6 @@ use crate::{ - DECODER_CONFIG, DISCONNECT_CLIENT_NOTIFIER, FILESYSTEM_LAYOUT, SERVER_DATA_MANAGER, - VIDEO_MIRROR_SENDER, VIDEO_RECORDING_FILE, + connection::ClientDisconnectRequest, DECODER_CONFIG, DISCONNECT_CLIENT_NOTIFIER, + FILESYSTEM_LAYOUT, SERVER_DATA_MANAGER, VIDEO_MIRROR_SENDER, VIDEO_RECORDING_FILE, }; use alvr_common::{log, prelude::*}; use alvr_events::{Event, EventType}; @@ -138,7 +138,9 @@ async fn http_api( data_manager.update_client_list(hostname, action); } - DISCONNECT_CLIENT_NOTIFIER.notify_waiters(); + if let Some(notifier) = &*DISCONNECT_CLIENT_NOTIFIER.lock() { + 
notifier.send(ClientDisconnectRequest::Disconnect).ok(); + } } ServerRequest::GetAudioDevices => { if let Ok(list) = SERVER_DATA_MANAGER.read().get_audio_devices_list() { From 47864645f789f44a0decd648b1085a122ecc4555 Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Wed, 12 Jul 2023 14:32:13 +0800 Subject: [PATCH 16/28] Progress on sync sockets (21) --- alvr/client_core/src/connection.rs | 43 +++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 12 deletions(-) diff --git a/alvr/client_core/src/connection.rs b/alvr/client_core/src/connection.rs index 84aa8716ed..d3f67e0f7c 100644 --- a/alvr/client_core/src/connection.rs +++ b/alvr/client_core/src/connection.rs @@ -32,7 +32,7 @@ use std::{ thread, time::{Duration, Instant}, }; -use tokio::{runtime::Runtime, sync::Notify, time}; +use tokio::{runtime::Runtime, time}; #[cfg(target_os = "android")] use crate::audio; @@ -60,7 +60,8 @@ const RETRY_CONNECT_MIN_INTERVAL: Duration = Duration::from_secs(1); const NETWORK_KEEPALIVE_INTERVAL: Duration = Duration::from_secs(1); const CONNECTION_RETRY_INTERVAL: Duration = Duration::from_secs(1); -static DISCONNECT_SERVER_NOTIFIER: Lazy = Lazy::new(Notify::new); +static DISCONNECT_SERVER_NOTIFIER: Lazy>>> = + Lazy::new(|| Mutex::new(None)); pub static CONNECTION_RUNTIME: Lazy>>> = Lazy::new(|| Arc::new(RwLock::new(None))); @@ -161,6 +162,17 @@ fn connection_pipeline( } }; + let (disconnect_sender, disconnect_receiver) = mpsc::channel(); + *DISCONNECT_SERVER_NOTIFIER.lock() = Some(disconnect_sender); + + struct DropGuard; + impl Drop for DropGuard { + fn drop(&mut self) { + *DISCONNECT_SERVER_NOTIFIER.lock() = None; + } + } + let _connection_drop_guard = DropGuard; + let microphone_sample_rate = AudioDevice::new_input(None) .unwrap() .input_sample_rate() @@ -495,7 +507,9 @@ fn connection_pipeline( if let Err(e) = runtime.block_on(control_sender.send(&packet)) { info!("Server disconnected. Cause: {e}"); set_hud_message(SERVER_DISCONNECTED_MESSAGE); - DISCONNECT_SERVER_NOTIFIER.notify_waiters(); + if let Some(notifier) = &*DISCONNECT_SERVER_NOTIFIER.lock() { + notifier.send(()).ok(); + } return; } @@ -522,7 +536,9 @@ fn connection_pipeline( Some(Ok(ServerControlPacket::Restarting)) => { info!("{SERVER_RESTART_MESSAGE}"); set_hud_message(SERVER_RESTART_MESSAGE); - DISCONNECT_SERVER_NOTIFIER.notify_waiters(); + if let Some(notifier) = &*DISCONNECT_SERVER_NOTIFIER.lock() { + notifier.send(()).ok(); + } return; } @@ -530,7 +546,9 @@ fn connection_pipeline( Some(Err(e)) => { info!("{SERVER_DISCONNECTED_MESSAGE} Cause: {e}"); set_hud_message(SERVER_DISCONNECTED_MESSAGE); - DISCONNECT_SERVER_NOTIFIER.notify_waiters(); + if let Some(notifier) = &*DISCONNECT_SERVER_NOTIFIER.lock() { + notifier.send(()).ok(); + } return; } @@ -551,7 +569,9 @@ fn connection_pipeline( Some(Err(e)) => { info!("Client disconnected. Cause: {e}"); set_hud_message(SERVER_DISCONNECTED_MESSAGE); - DISCONNECT_SERVER_NOTIFIER.notify_waiters(); + if let Some(notifier) = &*DISCONNECT_SERVER_NOTIFIER.lock() { + notifier.send(()).ok(); + } return; } @@ -565,15 +585,14 @@ fn connection_pipeline( thread::sleep(Duration::from_millis(500)); } - DISCONNECT_SERVER_NOTIFIER.notify_waiters(); - }); - - CONNECTION_RUNTIME.read().as_ref().unwrap().block_on(async { - tokio::select! 
{ - _ = DISCONNECT_SERVER_NOTIFIER.notified() => (), + if let Some(notifier) = &*DISCONNECT_SERVER_NOTIFIER.lock() { + notifier.send(()).ok(); } }); + // Block here + disconnect_receiver.recv().ok(); + IS_STREAMING.set(false); *CONNECTION_RUNTIME.write() = None; *TRACKING_SENDER.lock() = None; From e24fd3c3bd120d02ec4657afa0187c4a805c4cf9 Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Wed, 12 Jul 2023 17:24:06 +0800 Subject: [PATCH 17/28] Progress on sync sockets (22) Convert control socket interface to sync. Refactor IntResult to ConResult and associated utilities --- alvr/client_core/src/connection.rs | 113 +++++++++------------- alvr/client_core/src/lib.rs | 1 - alvr/common/src/lib.rs | 30 ++---- alvr/common/src/logging.rs | 16 ++-- alvr/server/src/connection.rs | 146 +++++++++++------------------ alvr/server/src/lib.rs | 8 +- alvr/server/src/sockets.rs | 29 +++--- alvr/sockets/src/control_socket.rs | 93 +++++++++++------- 8 files changed, 196 insertions(+), 240 deletions(-) diff --git a/alvr/client_core/src/connection.rs b/alvr/client_core/src/connection.rs index d3f67e0f7c..b45e3c07c8 100644 --- a/alvr/client_core/src/connection.rs +++ b/alvr/client_core/src/connection.rs @@ -91,25 +91,17 @@ fn set_hud_message(message: &str) { pub fn connection_lifecycle_loop( recommended_view_resolution: UVec2, supported_refresh_rates: Vec, -) -> IntResult { +) { set_hud_message(INITIAL_MESSAGE); - loop { - check_interrupt!(IS_ALIVE.value()); - + while IS_ALIVE.value() { if IS_RESUMED.value() { if let Err(e) = connection_pipeline(recommended_view_resolution, supported_refresh_rates.clone()) { - match e { - InterruptibleError::Interrupted => return Ok(()), - InterruptibleError::Other(_) => { - let message = - format!("Connection error:\n{e}\nCheck the PC for more details"); - error!("{message}"); - set_hud_message(&message); - } - } + let message = format!("Connection error:\n{e}\nCheck the PC for more details"); + error!("Connection error: {message}"); + set_hud_message(&message); } } else { debug!("Skip try connection because the device is sleeping"); @@ -122,18 +114,18 @@ pub fn connection_lifecycle_loop( fn connection_pipeline( recommended_view_resolution: UVec2, supported_refresh_rates: Vec, -) -> IntResult { - let runtime = Runtime::new().map_err(to_int_e!())?; +) -> ConResult { + let runtime = Runtime::new().map_err(to_con_e!())?; let (mut proto_control_socket, server_ip) = { let config = Config::load(); - let announcer_socket = AnnouncerSocket::new(&config.hostname).map_err(to_int_e!())?; - let listener_socket = runtime - .block_on(alvr_sockets::get_server_listener()) - .map_err(to_int_e!())?; + let announcer_socket = AnnouncerSocket::new(&config.hostname).map_err(to_con_e!())?; + let listener_socket = alvr_sockets::get_server_listener(&runtime).map_err(to_con_e!())?; loop { - check_interrupt!(IS_ALIVE.value()); + if !IS_ALIVE.value() { + return Ok(()); + } if let Err(e) = announcer_socket.broadcast() { warn!("Broadcast error: {e}"); @@ -147,16 +139,11 @@ fn connection_pipeline( return Ok(()); } - let maybe_pair = runtime.block_on(async { - tokio::select! 
{ - maybe_pair = ProtoControlSocket::connect_to(PeerType::Server(&listener_socket)) => { - maybe_pair.map_err(to_int_e!()) - }, - _ = time::sleep(DISCOVERY_RETRY_PAUSE) => Err(InterruptibleError::Interrupted) - } - }); - - if let Ok(pair) = maybe_pair { + if let Ok(pair) = ProtoControlSocket::connect_to( + &runtime, + DISCOVERY_RETRY_PAUSE, + PeerType::Server(&listener_socket), + ) { break pair; } } @@ -178,9 +165,10 @@ fn connection_pipeline( .input_sample_rate() .unwrap(); - runtime - .block_on( - proto_control_socket.send(&ClientConnectionResult::ConnectionAccepted { + proto_control_socket + .send( + &runtime, + &ClientConnectionResult::ConnectionAccepted { client_protocol_id: alvr_common::protocol_id(), display_name: platform::device_model(), server_ip, @@ -189,27 +177,24 @@ fn connection_pipeline( supported_refresh_rates, microphone_sample_rate, }), - }), + }, ) - .map_err(to_int_e!())?; - let config_packet = runtime.block_on(async { - tokio::select! { - res = proto_control_socket.recv::() => res.map_err(to_int_e!()), - _ = time::sleep(Duration::from_secs(1)) => int_fmt_e!("Timeout waiting for stream config"), - } - })?; + .map_err(to_con_e!())?; + let config_packet = proto_control_socket + .recv::(&runtime, Duration::from_secs(1)) + .map_err(to_con_e!())?; let settings = { let mut session_desc = SessionConfig::default(); session_desc - .merge_from_json(&json::from_str(&config_packet.session).map_err(to_int_e!())?) - .map_err(to_int_e!())?; + .merge_from_json(&json::from_str(&config_packet.session).map_err(to_con_e!())?) + .map_err(to_con_e!())?; session_desc.to_settings() }; let negotiated_config = json::from_str::>(&config_packet.negotiated) - .map_err(to_int_e!())?; + .map_err(to_con_e!())?; let view_resolution = negotiated_config .get("view_resolution") @@ -242,12 +227,7 @@ fn connection_pipeline( let (mut control_sender, mut control_receiver) = proto_control_socket.split(); - match runtime.block_on(async { - tokio::select! { - res = control_receiver.recv() => res, - _ = time::sleep(Duration::from_secs(1)) => fmt_e!("Timeout"), - } - }) { + match control_receiver.recv(&runtime, Duration::from_secs(1)) { Ok(ServerControlPacket::StartStream) => { info!("Stream starting"); set_hud_message(STREAM_STARTING_MESSAGE); @@ -277,12 +257,12 @@ fn connection_pipeline( ); let stream_socket_builder = runtime.block_on(async { tokio::select! { - res = listen_for_server_future => res.map_err(to_int_e!()), - _ = time::sleep(Duration::from_secs(1)) => int_fmt_e!("Timeout while binding stream socket"), + res = listen_for_server_future => res.map_err(to_con_e!()), + _ = time::sleep(Duration::from_secs(1)) => con_fmt_e!("Timeout while binding stream socket"), } })?; - if let Err(e) = runtime.block_on(control_sender.send(&ClientControlPacket::StreamReady)) { + if let Err(e) = control_sender.send(&runtime, &ClientControlPacket::StreamReady) { info!("Server disconnected. Cause: {e}"); set_hud_message(SERVER_DISCONNECTED_MESSAGE); return Ok(()); @@ -295,8 +275,8 @@ fn connection_pipeline( ); let stream_socket = runtime.block_on(async { tokio::select! 
{ - res = accept_from_server_future => res.map_err(to_int_e!()), - _ = time::sleep(Duration::from_secs(2)) => int_fmt_e!("Timeout while setting up streams") + res = accept_from_server_future => res.map_err(to_con_e!()), + _ = time::sleep(Duration::from_secs(2)) => con_fmt_e!("Timeout while setting up streams") } })?; let stream_socket = Arc::new(stream_socket); @@ -385,7 +365,7 @@ fn connection_pipeline( }); let game_audio_thread = if let Switch::Enabled(config) = settings.audio.game_audio { - let device = AudioDevice::new_output(None, None).map_err(to_int_e!())?; + let device = AudioDevice::new_output(None, None).map_err(to_con_e!())?; thread::spawn(move || { alvr_common::show_err(audio::play_audio_loop( @@ -402,7 +382,7 @@ fn connection_pipeline( }; let microphone_thread = if matches!(settings.audio.microphone, Switch::Enabled(_)) { - let device = AudioDevice::new_input(None).map_err(to_int_e!())?; + let device = AudioDevice::new_input(None).map_err(to_con_e!())?; let microphone_sender = stream_socket.request_stream(AUDIO); @@ -504,7 +484,7 @@ fn connection_pipeline( let control_send_thread = thread::spawn(move || { while let Ok(packet) = control_channel_receiver.recv() { if let Some(runtime) = &*CONNECTION_RUNTIME.read() { - if let Err(e) = runtime.block_on(control_sender.send(&packet)) { + if let Err(e) = control_sender.send(runtime, &packet) { info!("Server disconnected. Cause: {e}"); set_hud_message(SERVER_DISCONNECTED_MESSAGE); if let Some(notifier) = &*DISCONNECT_SERVER_NOTIFIER.lock() { @@ -519,21 +499,16 @@ fn connection_pipeline( let control_receive_thread = thread::spawn(move || loop { let maybe_packet = if let Some(runtime) = &*CONNECTION_RUNTIME.read() { - runtime.block_on(async { - tokio::select! { - res = control_receiver.recv() => Some(res), - _ = time::sleep(Duration::from_millis(500)) => None, - } - }) + control_receiver.recv(runtime, Duration::from_millis(500)) } else { return; }; match maybe_packet { - Some(Ok(ServerControlPacket::InitializeDecoder(config))) => { + Ok(ServerControlPacket::InitializeDecoder(config)) => { decoder::create_decoder(config); } - Some(Ok(ServerControlPacket::Restarting)) => { + Ok(ServerControlPacket::Restarting) => { info!("{SERVER_RESTART_MESSAGE}"); set_hud_message(SERVER_RESTART_MESSAGE); if let Some(notifier) = &*DISCONNECT_SERVER_NOTIFIER.lock() { @@ -542,8 +517,9 @@ fn connection_pipeline( return; } - Some(Ok(_)) => (), - Some(Err(e)) => { + Ok(_) => (), + Err(ConnectionError::Timeout) => (), + Err(e) => { info!("{SERVER_DISCONNECTED_MESSAGE} Cause: {e}"); set_hud_message(SERVER_DISCONNECTED_MESSAGE); if let Some(notifier) = &*DISCONNECT_SERVER_NOTIFIER.lock() { @@ -552,7 +528,6 @@ fn connection_pipeline( return; } - None => (), } }); diff --git a/alvr/client_core/src/lib.rs b/alvr/client_core/src/lib.rs index 6341594206..1951e491b8 100644 --- a/alvr/client_core/src/lib.rs +++ b/alvr/client_core/src/lib.rs @@ -106,7 +106,6 @@ pub fn initialize( *CONNECTION_THREAD.lock() = Some(thread::spawn(move || { connection::connection_lifecycle_loop(recommended_view_resolution, supported_refresh_rates) - .ok(); })); } diff --git a/alvr/common/src/lib.rs b/alvr/common/src/lib.rs index 667b414e71..ebaf858f02 100644 --- a/alvr/common/src/lib.rs +++ b/alvr/common/src/lib.rs @@ -11,8 +11,8 @@ use std::{ pub mod prelude { pub use crate::{ - check_interrupt, enone, err, err_dbg, fmt_e, int_e, int_fmt_e, interrupt, logging::*, - to_int_e, IntResult, InterruptibleError, StrResult, + con_e, con_fmt_e, enone, err, err_dbg, fmt_e, logging::*, timeout, 
to_con_e, ConResult, + ConnectionError, StrResult, }; pub use log::{debug, error, info, warn}; } @@ -33,32 +33,22 @@ pub const ALVR_NAME: &str = "ALVR"; pub type StrResult = Result; -pub enum InterruptibleError { - Interrupted, +pub enum ConnectionError { + Timeout, Other(String), } -impl Display for InterruptibleError { +impl Display for ConnectionError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - InterruptibleError::Interrupted => write!(f, "Action interrupted"), - InterruptibleError::Other(s) => write!(f, "{}", s), + ConnectionError::Timeout => write!(f, "Timeout"), + ConnectionError::Other(s) => write!(f, "{}", s), } } } -pub type IntResult = Result; +pub type ConResult = Result; -pub fn interrupt() -> IntResult { - Err(InterruptibleError::Interrupted) -} - -/// Bail out if interrupted -#[macro_export] -macro_rules! check_interrupt { - ($running:expr) => { - if !$running { - return interrupt(); - } - }; +pub fn timeout() -> ConResult { + Err(ConnectionError::Timeout) } // Simple wrapper for AtomicBool when using Ordering::Relaxed. Deref cannot be implemented (cannot diff --git a/alvr/common/src/logging.rs b/alvr/common/src/logging.rs index b17ff87d28..4ec4fe81fa 100644 --- a/alvr/common/src/logging.rs +++ b/alvr/common/src/logging.rs @@ -170,27 +170,27 @@ macro_rules! enone { } #[macro_export] -macro_rules! int_fmt_e { +macro_rules! con_fmt_e { ($($args:tt)+) => { - Err(InterruptibleError::Other(format!($($args)+))) + Err(ConnectionError::Other(format!($($args)+))) }; } #[macro_export] -macro_rules! int_e { +macro_rules! con_e { () => { |e| match e { - InterruptibleError::Interrupted => InterruptibleError::Interrupted, - InterruptibleError::Other(e) => { - InterruptibleError::Other(format!("At {}:{}: {e}", file!(), line!())) + ConnectionError::Timeout => ConnectionError::Timeout, + ConnectionError::Other(e) => { + ConnectionError::Other(format!("At {}:{}: {e}", file!(), line!())) } } }; } #[macro_export] -macro_rules! to_int_e { +macro_rules! 
to_con_e { () => { - |e| InterruptibleError::Other(format!("At {}:{}: {e}", file!(), line!())) + |e| ConnectionError::Other(format!("At {}:{}: {e}", file!(), line!())) }; } diff --git a/alvr/server/src/connection.rs b/alvr/server/src/connection.rs index 9fc33a6892..252f7c326c 100644 --- a/alvr/server/src/connection.rs +++ b/alvr/server/src/connection.rs @@ -42,7 +42,7 @@ use std::{ thread, time::Duration, }; -use tokio::{runtime::Runtime, sync::Mutex as TMutex, time}; +use tokio::{runtime::Runtime, time}; const RETRY_CONNECT_MIN_INTERVAL: Duration = Duration::from_secs(1); @@ -208,12 +208,16 @@ pub fn contruct_openvr_config() -> OpenvrConfig { } // Alternate connection trials with manual IPs and clients discovered on the local network -pub fn handshake_loop() -> IntResult { - let mut welcome_socket = WelcomeSocket::new().map_err(to_int_e!())?; - - loop { - check_interrupt!(SHOULD_CONNECT_TO_CLIENTS.value()); +pub fn handshake_loop() { + let mut welcome_socket = match WelcomeSocket::new(RETRY_CONNECT_MIN_INTERVAL) { + Ok(socket) => socket, + Err(e) => { + error!("Failed to create discovery socket: {e}"); + return; + } + }; + while SHOULD_CONNECT_TO_CLIENTS.value() { let available_manual_client_ips = { let mut manual_client_ips = HashMap::new(); for (hostname, connection_info) in SERVER_DATA_MANAGER @@ -243,14 +247,13 @@ pub fn handshake_loop() -> IntResult { .client_discovery .clone(); if let Switch::Enabled(config) = discovery_config { - let (client_hostname, client_ip) = match welcome_socket.recv_non_blocking() { + let (client_hostname, client_ip) = match welcome_socket.recv() { Ok(pair) => pair, Err(e) => { - if let InterruptibleError::Other(e) = e { + if let ConnectionError::Other(e) = e { warn!("UDP handshake listening error: {e}"); } - thread::sleep(RETRY_CONNECT_MIN_INTERVAL); continue; } }; @@ -300,22 +303,14 @@ pub fn handshake_loop() -> IntResult { } } -fn try_connect(mut client_ips: HashMap) -> IntResult { - let runtime = Runtime::new().map_err(to_int_e!())?; +fn try_connect(mut client_ips: HashMap) -> ConResult { + let runtime = Runtime::new().map_err(to_con_e!())?; - let (mut proto_socket, client_ip) = runtime - .block_on(async { - let get_proto_socket = ProtoControlSocket::connect_to(PeerType::AnyClient( - client_ips.keys().cloned().collect(), - )); - tokio::select! { - proto_socket = get_proto_socket => proto_socket, - _ = time::sleep(Duration::from_secs(1)) => { - fmt_e!("Control socket failed to connect") - } - } - }) - .map_err(to_int_e!())?; + let (mut proto_socket, client_ip) = ProtoControlSocket::connect_to( + &runtime, + Duration::from_secs(1), + PeerType::AnyClient(client_ips.keys().cloned().collect()), + )?; let (disconnect_sender, disconnect_receiver) = mpsc::channel(); *DISCONNECT_CLIENT_NOTIFIER.lock() = Some(disconnect_sender); @@ -369,14 +364,10 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { display_name, streaming_capabilities, .. - } = runtime.block_on(async { - tokio::select! { - res = proto_socket.recv() => res.map_err(to_int_e!()), - _ = time::sleep(Duration::from_secs(1)) => { - int_fmt_e!("Timeout while waiting on client response") - } - } - })? { + } = proto_socket + .recv(&runtime, Duration::from_secs(1)) + .map_err(to_con_e!())? 
+ { SERVER_DATA_MANAGER.write().update_client_list( client_hostname.clone(), ClientListAction::SetDisplayName(display_name), @@ -401,7 +392,7 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { let streaming_caps = if let Some(streaming_caps) = maybe_streaming_caps { streaming_caps } else { - return int_fmt_e!("Only streaming clients are supported for now"); + return con_fmt_e!("Only streaming clients are supported for now"); }; let settings = SERVER_DATA_MANAGER.read().settings().clone(); @@ -461,7 +452,7 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { Some(settings.audio.linux_backend), game_audio_config.device.as_ref(), ) - .map_err(to_int_e!())?; + .map_err(to_con_e!())?; #[cfg(not(target_os = "linux"))] if let Switch::Enabled(microphone_desc) = &settings.audio.microphone { @@ -469,15 +460,15 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { Some(settings.audio.linux_backend), microphone_desc.devices.clone(), ) - .map_err(to_int_e!())?; + .map_err(to_con_e!())?; if alvr_audio::is_same_device(&game_audio_device, &sink) || alvr_audio::is_same_device(&game_audio_device, &source) { - return int_fmt_e!("Game audio and microphone cannot point to the same device!"); + return con_fmt_e!("Game audio and microphone cannot point to the same device!"); } } - game_audio_device.input_sample_rate().map_err(to_int_e!())? + game_audio_device.input_sample_rate().map_err(to_con_e!())? } else { 0 }; @@ -485,7 +476,7 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { let client_config = StreamConfigPacket { session: { let session = SERVER_DATA_MANAGER.read().session().clone(); - serde_json::to_string(&session).map_err(to_int_e!())? + serde_json::to_string(&session).map_err(to_con_e!())? }, negotiated: serde_json::json!({ "view_resolution": stream_view_resolution, @@ -494,9 +485,9 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { }) .to_string(), }; - runtime - .block_on(proto_socket.send(&client_config)) - .map_err(to_int_e!())?; + proto_socket + .send(&runtime, &client_config) + .map_err(to_con_e!())?; let (mut control_sender, mut control_receiver) = proto_socket.split(); @@ -510,29 +501,24 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { if SERVER_DATA_MANAGER.read().session().openvr_config != new_openvr_config { SERVER_DATA_MANAGER.write().session_mut().openvr_config = new_openvr_config; - runtime - .block_on(control_sender.send(&ServerControlPacket::Restarting)) + control_sender + .send(&runtime, &ServerControlPacket::Restarting) .ok(); crate::notify_restart_driver(); } - runtime - .block_on(control_sender.send(&ServerControlPacket::StartStream)) - .map_err(to_int_e!())?; + control_sender + .send(&runtime, &ServerControlPacket::StartStream) + .map_err(to_con_e!())?; - match runtime.block_on(async { - tokio::select! 
{ - res = control_receiver.recv() => res.map_err(to_int_e!()), - _ = time::sleep(Duration::from_secs(1)) => int_fmt_e!("Timeout"), - } - }) { + match control_receiver.recv(&runtime, Duration::from_secs(1)) { Ok(ClientControlPacket::StreamReady) => (), Ok(_) => { - return int_fmt_e!("Got unexpected packet waiting for stream ack"); + return con_fmt_e!("Got unexpected packet waiting for stream ack"); } Err(e) => { - return int_fmt_e!("Error while waiting for stream ack: {e}"); + return con_fmt_e!("Error while waiting for stream ack: {e}"); } } @@ -564,7 +550,7 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { } } }) - .map_err(to_int_e!())?; + .map_err(to_con_e!())?; let stream_socket = Arc::new(stream_socket); let mut video_sender = stream_socket.request_stream(VIDEO); @@ -668,7 +654,7 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { Some(settings.audio.linux_backend), config.devices, ) - .map_err(to_int_e!())?; + .map_err(to_con_e!())?; #[cfg(windows)] if let Ok(id) = alvr_audio::get_windows_device_id(&source) { @@ -865,20 +851,16 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { } }); - let control_sender = Arc::new(TMutex::new(control_sender)); + let control_sender = Arc::new(Mutex::new(control_sender)); let keepalive_thread = thread::spawn({ let control_sender = Arc::clone(&control_sender); let client_hostname = client_hostname.clone(); move || loop { if let Some(runtime) = &*CONNECTION_RUNTIME.read() { - let res = runtime.block_on(async { - control_sender - .lock() - .await - .send(&ServerControlPacket::KeepAlive) - .await - }); + let res = control_sender + .lock() + .send(runtime, &ServerControlPacket::KeepAlive); if let Err(e) = res { info!("Client disconnected. Cause: {e}"); @@ -907,15 +889,10 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { let client_hostname = client_hostname.clone(); move || loop { let packet = if let Some(runtime) = &*CONNECTION_RUNTIME.read() { - let maybe_packet = runtime.block_on(async { - tokio::select! { - res = control_receiver.recv() => Some(res), - _ = time::sleep(Duration::from_millis(500)) => None, - } - }); - match maybe_packet { - Some(Ok(packet)) => packet, - Some(Err(e)) => { + match control_receiver.recv(runtime, Duration::from_millis(500)) { + Ok(packet) => packet, + Err(ConnectionError::Timeout) => continue, + Err(ConnectionError::Other(e)) => { info!("Client disconnected. 
Cause: {e}"); SERVER_DATA_MANAGER.write().update_client_list( @@ -930,7 +907,6 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { return; } - None => continue, } } else { return; @@ -955,14 +931,9 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { if let (Some(runtime), Some(config)) = (&*CONNECTION_RUNTIME.read(), maybe_config) { - runtime - .block_on(async { - control_sender - .lock() - .await - .send(&ServerControlPacket::InitializeDecoder(config)) - .await - }) + control_sender + .lock() + .send(runtime, &ServerControlPacket::InitializeDecoder(config)) .ok(); } unsafe { crate::RequestIDR() } @@ -1127,14 +1098,9 @@ fn try_connect(mut client_ips: HashMap) -> IntResult { let res = disconnect_receiver.recv(); if matches!(res, Ok(ClientDisconnectRequest::ServerRestart)) { if let Some(runtime) = &*CONNECTION_RUNTIME.read() { - runtime - .block_on(async { - control_sender - .lock() - .await - .send(&ServerControlPacket::Restarting) - .await - }) + control_sender + .lock() + .send(runtime, &ServerControlPacket::Restarting) .ok(); } } diff --git a/alvr/server/src/lib.rs b/alvr/server/src/lib.rs index eecfa0768b..1dad86a393 100644 --- a/alvr/server/src/lib.rs +++ b/alvr/server/src/lib.rs @@ -346,14 +346,12 @@ pub unsafe extern "C" fn HmdDriverFactory( thread::spawn(move || { if set_default_chap { - // call this when inside a new tokio thread. Calling this on the parent thread will - // crash SteamVR + // call this when inside a new thread. Calling this on the parent thread will crash + // SteamVR unsafe { SetChaperone(2.0, 2.0) }; } - if let Err(InterruptibleError::Other(e)) = connection::handshake_loop() { - warn!("Connection thread closed: {e}"); - } + connection::handshake_loop(); }); } diff --git a/alvr/server/src/sockets.rs b/alvr/server/src/sockets.rs index e704ec8189..2955b95bc2 100644 --- a/alvr/server/src/sockets.rs +++ b/alvr/server/src/sockets.rs @@ -1,8 +1,9 @@ -use alvr_common::{prelude::*, StrResult, *}; +use alvr_common::{prelude::*, StrResult, ALVR_NAME}; use alvr_sockets::{CONTROL_PORT, HANDSHAKE_PACKET_SIZE_BYTES, LOCAL_IP}; use std::{ io::ErrorKind, net::{IpAddr, UdpSocket}, + time::Duration, }; pub struct WelcomeSocket { @@ -11,9 +12,11 @@ pub struct WelcomeSocket { } impl WelcomeSocket { - pub fn new() -> StrResult { + pub fn new(read_timeout: Duration) -> StrResult { let socket = UdpSocket::bind((LOCAL_IP, CONTROL_PORT)).map_err(err!())?; - socket.set_nonblocking(true).map_err(err!())?; + socket + .set_read_timeout(Some(read_timeout)) + .map_err(err!())?; Ok(Self { socket, @@ -22,14 +25,14 @@ impl WelcomeSocket { } // Returns: client IP, client hostname - pub fn recv_non_blocking(&mut self) -> IntResult<(String, IpAddr)> { + pub fn recv(&mut self) -> ConResult<(String, IpAddr)> { let (size, address) = match self.socket.recv_from(&mut self.buffer) { Ok(pair) => pair, Err(e) => { - if e.kind() == ErrorKind::WouldBlock || e.kind() == ErrorKind::Interrupted { - return interrupt(); + if e.kind() == ErrorKind::TimedOut { + return timeout(); } else { - return int_fmt_e!("{e}"); + return con_fmt_e!("{e}"); } } }; @@ -43,16 +46,14 @@ impl WelcomeSocket { let received_protocol_id = u64::from_le_bytes(protocol_id_bytes); if received_protocol_id != alvr_common::protocol_id() { - warn!("Found incompatible client! Upgrade or downgrade\nExpected protocol ID {}, Found {received_protocol_id}", + return con_fmt_e!("Found incompatible client! 
Upgrade or downgrade\nExpected protocol ID {}, Found {received_protocol_id}", alvr_common::protocol_id()); - - return interrupt(); } let mut hostname_bytes = [0; 32]; hostname_bytes.copy_from_slice(&self.buffer[24..56]); let hostname = std::str::from_utf8(&hostname_bytes) - .map_err(to_int_e!())? + .map_err(to_con_e!())? .trim_end_matches('\x00') .to_owned(); @@ -60,13 +61,11 @@ impl WelcomeSocket { } else if &self.buffer[..16] == b"\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00ALVR" || &self.buffer[..5] == b"\x01ALVR" { - warn!("Found old client. Upgrade"); - - interrupt() + con_fmt_e!("Found old client. Please upgrade") } else { // Unexpected packet. // Note: no need to check for v12 and v13, not found in the wild anymore - interrupt() + con_fmt_e!("Found unrelated packet during discovery") } } } diff --git a/alvr/sockets/src/control_socket.rs b/alvr/sockets/src/control_socket.rs index 71a73acba0..1dde6effbb 100644 --- a/alvr/sockets/src/control_socket.rs +++ b/alvr/sockets/src/control_socket.rs @@ -6,8 +6,12 @@ use futures::{ SinkExt, StreamExt, }; use serde::{de::DeserializeOwned, Serialize}; -use std::{marker::PhantomData, net::IpAddr}; -use tokio::net::{TcpListener, TcpStream}; +use std::{marker::PhantomData, net::IpAddr, time::Duration}; +use tokio::{ + net::{TcpListener, TcpStream}, + runtime::Runtime, + time, +}; use tokio_util::codec::Framed; pub struct ControlSocketSender { @@ -16,9 +20,11 @@ pub struct ControlSocketSender { } impl ControlSocketSender { - pub async fn send(&mut self, packet: &S) -> StrResult { + pub fn send(&mut self, runtime: &Runtime, packet: &S) -> StrResult { let packet_bytes = bincode::serialize(packet).map_err(err!())?; - self.inner.send(packet_bytes.into()).await.map_err(err!()) + runtime + .block_on(self.inner.send(packet_bytes.into())) + .map_err(err!()) } } @@ -28,20 +34,22 @@ pub struct ControlSocketReceiver { } impl ControlSocketReceiver { - pub async fn recv(&mut self) -> StrResult { - let packet_bytes = self - .inner - .next() - .await - .ok_or_else(enone!())? - .map_err(err!())?; - bincode::deserialize(&packet_bytes).map_err(err!()) + pub fn recv(&mut self, runtime: &Runtime, timeout: Duration) -> ConResult { + let packet_bytes = runtime.block_on(async { + tokio::select! { + res = self.inner.next() => { + res.map(|p| p.map_err(to_con_e!())).ok_or_else(enone!()).map_err(to_con_e!()) + } + _ = time::sleep(timeout) => alvr_common::timeout(), + } + })??; + bincode::deserialize(&packet_bytes).map_err(to_con_e!()) } } -pub async fn get_server_listener() -> StrResult { - TcpListener::bind((LOCAL_IP, CONTROL_PORT)) - .await +pub fn get_server_listener(runtime: &Runtime) -> StrResult { + runtime + .block_on(TcpListener::bind((LOCAL_IP, CONTROL_PORT))) .map_err(err!()) } @@ -57,43 +65,64 @@ pub enum PeerType<'a> { } impl ProtoControlSocket { - pub async fn connect_to(peer: PeerType<'_>) -> StrResult<(Self, IpAddr)> { + pub fn connect_to( + runtime: &Runtime, + timeout: Duration, + peer: PeerType<'_>, + ) -> ConResult<(Self, IpAddr)> { let socket = match peer { PeerType::AnyClient(ips) => { let client_addresses = ips .iter() .map(|&ip| (ip, CONTROL_PORT).into()) .collect::>(); - TcpStream::connect(client_addresses.as_slice()) - .await - .map_err(err!())? + runtime.block_on(async { + tokio::select! { + res = TcpStream::connect(client_addresses.as_slice()) => res.map_err(to_con_e!()), + _ = time::sleep(timeout) => alvr_common::timeout(), + } + })? 
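// Editor's note (illustrative sketch, not part of this patch): every former .await in this series
// is rewritten into the same shape seen just above: block_on an async block that races the real
// future against time::sleep inside tokio::select!, and surface the sleep branch as a timeout
// error. A hypothetical stand-alone helper showing that shape; the patch inlines the select! at
// each call site rather than using a helper like this, and PollError here simply mirrors the
// Timeout/Other split of ConnectionError.
use std::{future::Future, time::Duration};
use tokio::{runtime::Runtime, time};

enum PollError {
    // The deadline elapsed before the future completed; callers usually retry.
    Timeout,
    // The future itself failed; callers usually treat this as a disconnect.
    Other(String),
}

fn block_on_with_timeout<T>(
    runtime: &Runtime,
    deadline: Duration,
    future: impl Future<Output = Result<T, String>>,
) -> Result<T, PollError> {
    runtime.block_on(async {
        tokio::select! {
            // The wrapped future finished first: forward its result.
            res = future => res.map_err(PollError::Other),
            // The sleep finished first: report a timeout instead of blocking forever.
            _ = time::sleep(deadline) => Err(PollError::Timeout),
        }
    })
}
// The payoff of this shape is that callers stay on plain threads and can use short deadlines to
// poll shutdown flags between attempts, which is what the rest of this series relies on.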
} PeerType::Server(listener) => { - let (socket, _) = listener.accept().await.map_err(err!())?; + let (socket, _) = runtime.block_on(async { + tokio::select! { + res = listener.accept() => res.map_err(to_con_e!()), + _ = time::sleep(timeout) => alvr_common::timeout(), + } + })?; socket } }; - socket.set_nodelay(true).map_err(err!())?; - let peer_ip = socket.peer_addr().map_err(err!())?.ip(); + socket.set_nodelay(true).map_err(to_con_e!())?; + let peer_ip = socket.peer_addr().map_err(to_con_e!())?.ip(); let socket = Framed::new(socket, Ldc::new()); Ok((Self { inner: socket }, peer_ip)) } - pub async fn send(&mut self, packet: &S) -> StrResult { + pub fn send(&mut self, runtime: &Runtime, packet: &S) -> StrResult { let packet_bytes = bincode::serialize(packet).map_err(err!())?; - self.inner.send(packet_bytes.into()).await.map_err(err!()) + runtime + .block_on(self.inner.send(packet_bytes.into())) + .map_err(err!()) } - pub async fn recv(&mut self) -> StrResult { - let packet_bytes = self - .inner - .next() - .await - .ok_or_else(enone!())? - .map_err(err!())?; - bincode::deserialize(&packet_bytes).map_err(err!()) + pub fn recv( + &mut self, + runtime: &Runtime, + timeout: Duration, + ) -> ConResult { + let packet_bytes = runtime.block_on(async { + tokio::select! { + res = self.inner.next() => { + res.map(|p| p.map_err(to_con_e!())).ok_or_else(enone!()).map_err(to_con_e!()) + } + _ = time::sleep(timeout) => Ok(alvr_common::timeout()), + } + })??; + + bincode::deserialize(&packet_bytes).map_err(to_con_e!()) } pub fn split( From a2f09de41ba913b6a1a41938e052b00f65168127 Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Thu, 13 Jul 2023 10:38:30 +0800 Subject: [PATCH 18/28] Progress on sync sockets (23) --- alvr/audio/src/lib.rs | 2 +- alvr/client_core/src/audio.rs | 2 +- alvr/client_core/src/connection.rs | 44 +++------ alvr/client_core/src/lib.rs | 4 +- alvr/server/src/connection.rs | 54 +++++------ alvr/sockets/src/control_socket.rs | 15 +-- alvr/sockets/src/stream_socket/mod.rs | 131 +++++++++++++++++--------- alvr/sockets/src/stream_socket/udp.rs | 4 +- 8 files changed, 138 insertions(+), 118 deletions(-) diff --git a/alvr/audio/src/lib.rs b/alvr/audio/src/lib.rs index 9b00e3bdca..b9f51a5f21 100644 --- a/alvr/audio/src/lib.rs +++ b/alvr/audio/src/lib.rs @@ -286,7 +286,7 @@ pub fn record_audio_blocking( }; if let Some(runtime) = &*runtime.read() { - runtime.block_on(sender.send(&(), data)).ok(); + sender.send(runtime, &(), data).ok(); } else { *state.lock() = AudioRecordState::ShouldStop; } diff --git a/alvr/client_core/src/audio.rs b/alvr/client_core/src/audio.rs index 1f697aa56a..35513d7a33 100644 --- a/alvr/client_core/src/audio.rs +++ b/alvr/client_core/src/audio.rs @@ -34,7 +34,7 @@ impl AudioInputCallback for RecorderCallback { } if let Some(runtime) = &*self.runtime.read() { - runtime.block_on(self.sender.send(&(), sample_buffer)).ok(); + self.sender.send(runtime, &(), sample_buffer).ok(); DataCallbackResult::Continue } else { diff --git a/alvr/client_core/src/connection.rs b/alvr/client_core/src/connection.rs index b45e3c07c8..299410b382 100644 --- a/alvr/client_core/src/connection.rs +++ b/alvr/client_core/src/connection.rs @@ -249,18 +249,14 @@ fn connection_pipeline( } } - let listen_for_server_future = StreamSocketBuilder::listen_for_server( + let stream_socket_builder = StreamSocketBuilder::listen_for_server( + &runtime, settings.connection.stream_port, settings.connection.stream_protocol, settings.connection.client_send_buffer_bytes, 
settings.connection.client_recv_buffer_bytes, - ); - let stream_socket_builder = runtime.block_on(async { - tokio::select! { - res = listen_for_server_future => res.map_err(to_con_e!()), - _ = time::sleep(Duration::from_secs(1)) => con_fmt_e!("Timeout while binding stream socket"), - } - })?; + ) + .map_err(to_con_e!())?; if let Err(e) = control_sender.send(&runtime, &ClientControlPacket::StreamReady) { info!("Server disconnected. Cause: {e}"); @@ -268,17 +264,13 @@ fn connection_pipeline( return Ok(()); } - let accept_from_server_future = stream_socket_builder.accept_from_server( + let stream_socket = stream_socket_builder.accept_from_server( + &runtime, + Duration::from_secs(2), server_ip, settings.connection.stream_port, settings.connection.packet_size as _, - ); - let stream_socket = runtime.block_on(async { - tokio::select! { - res = accept_from_server_future => res.map_err(to_con_e!()), - _ = time::sleep(Duration::from_secs(2)) => con_fmt_e!("Timeout while setting up streams") - } - })?; + )?; let stream_socket = Arc::new(stream_socket); info!("Connected to server"); @@ -292,11 +284,10 @@ fn connection_pipeline( } let mut video_receiver = - runtime.block_on(stream_socket.subscribe_to_stream::(VIDEO)); - let game_audio_receiver = runtime.block_on(stream_socket.subscribe_to_stream(AUDIO)); + stream_socket.subscribe_to_stream::(&runtime, VIDEO); + let game_audio_receiver = stream_socket.subscribe_to_stream(&runtime, AUDIO); let tracking_sender = stream_socket.request_stream(TRACKING); - let mut haptics_receiver = - runtime.block_on(stream_socket.subscribe_to_stream::(HAPTICS)); + let mut haptics_receiver = stream_socket.subscribe_to_stream::(&runtime, HAPTICS); let statistics_sender = stream_socket.request_stream(STATISTICS); // Important: To make sure this is successfully unset when stopping streaming, the rest of the @@ -533,15 +524,11 @@ fn connection_pipeline( let stream_receive_thread = thread::spawn(move || { while let Some(runtime) = &*CONNECTION_RUNTIME.read() { - let res = runtime.block_on(async { - tokio::select! { - res = stream_socket.recv() => Some(res), - _ = time::sleep(Duration::from_millis(500)) => None, - } - }); + let res = stream_socket.recv(runtime, Duration::from_millis(500)); match res { - Some(Ok(())) => (), - Some(Err(e)) => { + Ok(()) => (), + Err(ConnectionError::Timeout) => continue, + Err(ConnectionError::Other(e)) => { info!("Client disconnected. 
Cause: {e}"); set_hud_message(SERVER_DISCONNECTED_MESSAGE); if let Some(notifier) = &*DISCONNECT_SERVER_NOTIFIER.lock() { @@ -550,7 +537,6 @@ fn connection_pipeline( return; } - None => continue, } } }); diff --git a/alvr/client_core/src/lib.rs b/alvr/client_core/src/lib.rs index 1951e491b8..769df70a5c 100644 --- a/alvr/client_core/src/lib.rs +++ b/alvr/client_core/src/lib.rs @@ -168,7 +168,7 @@ pub fn send_tracking(tracking: Tracking) { if let (Some(runtime), Some(sender)) = (&*CONNECTION_RUNTIME.read(), &mut *TRACKING_SENDER.lock()) { - runtime.block_on(sender.send(&tracking, vec![])).ok(); + sender.send(runtime, &tracking, vec![]).ok(); if let Some(stats) = &mut *STATISTICS_MANAGER.lock() { stats.report_input_acquired(tracking.target_timestamp); @@ -200,7 +200,7 @@ pub fn report_submit(target_timestamp: Duration, vsync_queue: Duration) { (&*CONNECTION_RUNTIME.read(), &mut *STATISTICS_SENDER.lock()) { if let Some(stats) = stats.summary(target_timestamp) { - runtime.block_on(sender.send(&stats, vec![])).ok(); + sender.send(runtime, &stats, vec![]).ok(); } else { error!("Statistics summary not ready!"); } diff --git a/alvr/server/src/connection.rs b/alvr/server/src/connection.rs index 252f7c326c..9dd888c7a8 100644 --- a/alvr/server/src/connection.rs +++ b/alvr/server/src/connection.rs @@ -534,33 +534,26 @@ fn try_connect(mut client_ips: HashMap) -> ConResult { *BITRATE_MANAGER.lock() = BitrateManager::new(settings.video.bitrate.history_size, fps); - let stream_socket = runtime - .block_on(async { - tokio::select! { - res = StreamSocketBuilder::connect_to_client( - client_ip, - settings.connection.stream_port, - settings.connection.stream_protocol, - settings.connection.server_send_buffer_bytes, - settings.connection.server_recv_buffer_bytes, - settings.connection.packet_size as _, - ) => res, - _ = time::sleep(Duration::from_secs(1)) => { - fmt_e!("Timeout while setting up streams") - } - } - }) - .map_err(to_con_e!())?; + let stream_socket = StreamSocketBuilder::connect_to_client( + &runtime, + Duration::from_secs(1), + client_ip, + settings.connection.stream_port, + settings.connection.stream_protocol, + settings.connection.server_send_buffer_bytes, + settings.connection.server_recv_buffer_bytes, + settings.connection.packet_size as _, + ) + .map_err(to_con_e!())?; let stream_socket = Arc::new(stream_socket); let mut video_sender = stream_socket.request_stream(VIDEO); let game_audio_sender = stream_socket.request_stream(AUDIO); - let microphone_receiver = runtime.block_on(stream_socket.subscribe_to_stream(AUDIO)); - let mut tracking_receiver = - runtime.block_on(stream_socket.subscribe_to_stream::(TRACKING)); + let microphone_receiver = stream_socket.subscribe_to_stream(&runtime, AUDIO); + let mut tracking_receiver = stream_socket.subscribe_to_stream::(&runtime, TRACKING); let haptics_sender = stream_socket.request_stream(HAPTICS); let mut statics_receiver = - runtime.block_on(stream_socket.subscribe_to_stream::(STATISTICS)); + stream_socket.subscribe_to_stream::(&runtime, STATISTICS); // Note: here we create CONNECTION_RUNTIME. The rest of the function MUST be infallible, as // CONNECTION_RUNTIME must be destroyed in the thread defined at the end of the function. @@ -584,7 +577,7 @@ fn try_connect(mut client_ips: HashMap) -> ConResult { // IMPORTANT: The only error that can happen here is socket closed. For this reason it's // acceptable to call .ok() and ignore the error. 
The connection would already be // closing so no corruption handling is necessary - runtime.block_on(video_sender.send(&header, payload)).ok(); + video_sender.send(runtime, &header, payload).ok(); } }); @@ -1027,15 +1020,11 @@ fn try_connect(mut client_ips: HashMap) -> ConResult { let client_hostname = client_hostname.clone(); move || { while let Some(runtime) = &*CONNECTION_RUNTIME.read() { - let res = runtime.block_on(async { - tokio::select! { - res = stream_socket.recv() => Some(res), - _ = time::sleep(Duration::from_millis(500)) => None, - } - }); + let res = stream_socket.recv(runtime, Duration::from_millis(500)); match res { - Some(Ok(())) => (), - Some(Err(e)) => { + Ok(()) => (), + Err(ConnectionError::Timeout) => continue, + Err(ConnectionError::Other(e)) => { info!("Client disconnected. Cause: {e}"); SERVER_DATA_MANAGER.write().update_client_list( @@ -1051,7 +1040,6 @@ fn try_connect(mut client_ips: HashMap) -> ConResult { return; } - None => continue, } } } @@ -1242,8 +1230,8 @@ pub extern "C" fn send_haptics(device_id: u64, duration_s: f32, frequency: f32, &*CONNECTION_RUNTIME.read(), &mut *HAPTICS_SENDER.lock(), ) { - runtime - .block_on(sender.send(&haptics::map_haptics(&config, haptics), vec![])) + sender + .send(runtime, &haptics::map_haptics(&config, haptics), vec![]) .ok(); } } diff --git a/alvr/sockets/src/control_socket.rs b/alvr/sockets/src/control_socket.rs index 1dde6effbb..1aeaed8c09 100644 --- a/alvr/sockets/src/control_socket.rs +++ b/alvr/sockets/src/control_socket.rs @@ -113,14 +113,15 @@ impl ProtoControlSocket { runtime: &Runtime, timeout: Duration, ) -> ConResult { - let packet_bytes = runtime.block_on(async { - tokio::select! { - res = self.inner.next() => { - res.map(|p| p.map_err(to_con_e!())).ok_or_else(enone!()).map_err(to_con_e!()) + let packet_bytes = runtime + .block_on(async { + tokio::select! 
{ + res = self.inner.next() => res.map(|p| p.map_err(to_con_e!())), + _ = time::sleep(timeout) => Some(alvr_common::timeout()), } - _ = time::sleep(timeout) => Ok(alvr_common::timeout()), - } - })??; + }) + .ok_or_else(enone!()) + .map_err(to_con_e!())??; bincode::deserialize(&packet_bytes).map_err(to_con_e!()) } diff --git a/alvr/sockets/src/stream_socket/mod.rs b/alvr/sockets/src/stream_socket/mod.rs index 76b4ddbb65..03ee314c0d 100644 --- a/alvr/sockets/src/stream_socket/mod.rs +++ b/alvr/sockets/src/stream_socket/mod.rs @@ -19,10 +19,14 @@ use std::{ net::IpAddr, ops::{Deref, DerefMut}, sync::Arc, + time::Duration, }; use tcp::{TcpStreamReceiveSocket, TcpStreamSendSocket}; -use tokio::net; -use tokio::sync::{mpsc, Mutex}; +use tokio::{net, runtime::Runtime}; +use tokio::{ + sync::{mpsc, Mutex}, + time, +}; use udp::{UdpStreamReceiveSocket, UdpStreamSendSocket}; pub fn set_socket_buffers( @@ -125,7 +129,7 @@ pub struct StreamSender { } impl StreamSender { - async fn send_buffer(&self, buffer: BytesMut) { + async fn send_buffer(&self, buffer: BytesMut) -> StrResult { match &self.socket { StreamSendSocket::Udp(socket) => socket .inner @@ -133,19 +137,17 @@ impl StreamSender { .await .feed((buffer.freeze(), socket.peer_addr)) .await - .map_err(err!()) - .ok(), + .map_err(err!()), StreamSendSocket::Tcp(socket) => socket .lock() .await .feed(buffer.freeze()) .await - .map_err(err!()) - .ok(), - }; + .map_err(err!()), + } } - pub async fn send(&mut self, header: &T, payload_buffer: Vec) -> StrResult { + pub fn send(&mut self, runtime: &Runtime, header: &T, payload_buffer: Vec) -> StrResult { // packet layout: // [ 2B (stream ID) | 4B (packet index) | 4B (packet shard count) | 4B (shard index)] // this escluses length delimited coding, which is handled by the TCP backend @@ -176,14 +178,17 @@ impl StreamSender { shards_buffer.put_u32(total_shards_count as _); shards_buffer.put_u32(shard_index as u32); shards_buffer.put_slice(shard); - self.send_buffer(shards_buffer.split()).await; + runtime.block_on(self.send_buffer(shards_buffer.split()))?; } match &self.socket { - StreamSendSocket::Udp(socket) => { - socket.inner.lock().await.flush().await.map_err(err!())?; - } - StreamSendSocket::Tcp(socket) => socket.lock().await.flush().await.map_err(err!())?, + StreamSendSocket::Udp(socket) => runtime + .block_on(async { socket.inner.lock().await.flush().await }) + .map_err(err!())?, + + StreamSendSocket::Tcp(socket) => runtime + .block_on(async { socket.lock().await.flush().await }) + .map_err(err!())?, } self.next_packet_index += 1; @@ -313,39 +318,55 @@ pub enum StreamSocketBuilder { } impl StreamSocketBuilder { - pub async fn listen_for_server( + pub fn listen_for_server( + runtime: &Runtime, port: u16, stream_socket_config: SocketProtocol, send_buffer_bytes: SocketBufferSize, recv_buffer_bytes: SocketBufferSize, ) -> StrResult { Ok(match stream_socket_config { - SocketProtocol::Udp => StreamSocketBuilder::Udp( - udp::bind(port, send_buffer_bytes, recv_buffer_bytes).await?, - ), - SocketProtocol::Tcp => StreamSocketBuilder::Tcp( - tcp::bind(port, send_buffer_bytes, recv_buffer_bytes).await?, - ), + SocketProtocol::Udp => StreamSocketBuilder::Udp(runtime.block_on(udp::bind( + port, + send_buffer_bytes, + recv_buffer_bytes, + ))?), + SocketProtocol::Tcp => StreamSocketBuilder::Tcp(runtime.block_on(tcp::bind( + port, + send_buffer_bytes, + recv_buffer_bytes, + ))?), }) } - pub async fn accept_from_server( + pub fn accept_from_server( self, + runtime: &Runtime, + timeout: Duration, server_ip: IpAddr, 
port: u16, max_packet_size: usize, - ) -> StrResult { + ) -> ConResult { let (send_socket, receive_socket) = match self { StreamSocketBuilder::Udp(socket) => { - let (send_socket, receive_socket) = udp::connect(socket, server_ip, port).await?; + let (send_socket, receive_socket) = + udp::connect(socket, server_ip, port).map_err(to_con_e!())?; + ( StreamSendSocket::Udp(send_socket), StreamReceiveSocket::Udp(receive_socket), ) } StreamSocketBuilder::Tcp(listener) => { - let (send_socket, receive_socket) = - tcp::accept_from_server(listener, server_ip).await?; + let (send_socket, receive_socket) = runtime.block_on(async { + tokio::select! { + res = tcp::accept_from_server(listener, server_ip) => { + res.map_err(to_con_e!()) + }, + _ = time::sleep(timeout) => alvr_common::timeout(), + } + })?; + ( StreamSendSocket::Tcp(send_socket), StreamReceiveSocket::Tcp(receive_socket), @@ -361,27 +382,39 @@ impl StreamSocketBuilder { }) } - pub async fn connect_to_client( + #[allow(clippy::too_many_arguments)] + pub fn connect_to_client( + runtime: &Runtime, + timeout: Duration, client_ip: IpAddr, port: u16, protocol: SocketProtocol, send_buffer_bytes: SocketBufferSize, recv_buffer_bytes: SocketBufferSize, max_packet_size: usize, - ) -> StrResult { + ) -> ConResult { let (send_socket, receive_socket) = match protocol { SocketProtocol::Udp => { - let socket = udp::bind(port, send_buffer_bytes, recv_buffer_bytes).await?; - let (send_socket, receive_socket) = udp::connect(socket, client_ip, port).await?; + let socket = runtime + .block_on(udp::bind(port, send_buffer_bytes, recv_buffer_bytes)) + .map_err(to_con_e!())?; + let (send_socket, receive_socket) = + udp::connect(socket, client_ip, port).map_err(to_con_e!())?; ( StreamSendSocket::Udp(send_socket), StreamReceiveSocket::Udp(receive_socket), ) } SocketProtocol::Tcp => { - let (send_socket, receive_socket) = - tcp::connect_to_client(client_ip, port, send_buffer_bytes, recv_buffer_bytes) - .await?; + let (send_socket, receive_socket) = runtime.block_on(async { + tokio::select! { + res = tcp::connect_to_client(client_ip, port, send_buffer_bytes, recv_buffer_bytes) => { + res.map_err(to_con_e!()) + }, + _ = time::sleep(timeout) => alvr_common::timeout(), + } + })?; + ( StreamSendSocket::Tcp(send_socket), StreamReceiveSocket::Tcp(receive_socket), @@ -417,9 +450,11 @@ impl StreamSocket { } } - pub async fn subscribe_to_stream(&self, stream_id: u16) -> StreamReceiver { + pub fn subscribe_to_stream(&self, runtime: &Runtime, stream_id: u16) -> StreamReceiver { let (sender, receiver) = mpsc::unbounded_channel(); - self.packet_queues.lock().await.insert(stream_id, sender); + runtime + .block_on(self.packet_queues.lock()) + .insert(stream_id, sender); StreamReceiver { receiver, @@ -430,14 +465,24 @@ impl StreamSocket { } } - pub async fn recv(&self) -> StrResult { - match self.receive_socket.lock().await.as_mut().unwrap() { - StreamReceiveSocket::Udp(socket) => { - udp::recv(socket, Arc::clone(&self.packet_queues)).await - } - StreamReceiveSocket::Tcp(socket) => { - tcp::recv(socket, Arc::clone(&self.packet_queues)).await - } + pub fn recv(&self, runtime: &Runtime, timeout: Duration) -> ConResult { + match runtime + .block_on(self.receive_socket.lock()) + .as_mut() + .unwrap() + { + StreamReceiveSocket::Udp(socket) => runtime.block_on(async { + tokio::select! 
{ + res = udp::recv(socket, &self.packet_queues) => res.map_err(to_con_e!()), + _ = time::sleep(timeout) => alvr_common::timeout(), + } + }), + StreamReceiveSocket::Tcp(socket) => runtime.block_on(async { + tokio::select! { + res = tcp::recv(socket, Arc::clone(&self.packet_queues)) => res.map_err(to_con_e!()), + _ = time::sleep(timeout) => alvr_common::timeout(), + } + }), } } } diff --git a/alvr/sockets/src/stream_socket/udp.rs b/alvr/sockets/src/stream_socket/udp.rs index 057b2759a6..3f907632aa 100644 --- a/alvr/sockets/src/stream_socket/udp.rs +++ b/alvr/sockets/src/stream_socket/udp.rs @@ -46,7 +46,7 @@ pub async fn bind( UdpSocket::from_std(socket.into()).map_err(err!()) } -pub async fn connect( +pub fn connect( socket: UdpSocket, peer_ip: IpAddr, port: u16, @@ -69,7 +69,7 @@ pub async fn connect( pub async fn recv( socket: &mut UdpStreamReceiveSocket, - packet_enqueuers: Arc>>>, + packet_enqueuers: &Mutex>>, ) -> StrResult { if let Some(maybe_packet) = socket.inner.next().await { let (mut packet_bytes, address) = maybe_packet.map_err(err!())?; From a4cb1a34005337fce1684bc6a6aaf88197c02dd2 Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Thu, 13 Jul 2023 14:18:56 +0800 Subject: [PATCH 19/28] Progress on sync sockets (24) Rewrite packet reconstruction code to suit the timeout patten --- alvr/audio/src/lib.rs | 18 ++-- alvr/client_core/src/connection.rs | 36 +++---- alvr/server/src/connection.rs | 30 ++---- alvr/sockets/src/stream_socket/mod.rs | 139 +++++++++++++------------- 4 files changed, 98 insertions(+), 125 deletions(-) diff --git a/alvr/audio/src/lib.rs b/alvr/audio/src/lib.rs index b9f51a5f21..4584e9cfe7 100644 --- a/alvr/audio/src/lib.rs +++ b/alvr/audio/src/lib.rs @@ -24,7 +24,7 @@ use std::{ thread, time::Duration, }; -use tokio::{runtime::Runtime, time}; +use tokio::runtime::Runtime; static VIRTUAL_MICROPHONE_PAIRS: Lazy> = Lazy::new(|| { [ @@ -302,7 +302,7 @@ pub fn record_audio_blocking( #[cfg(windows)] if mute && device.is_output { - crate::windows::set_mute_windows_device(&device, true).ok(); + crate::windows::set_mute_windows_device(device, true).ok(); } let mut res = stream.play().map_err(err!()); @@ -371,16 +371,10 @@ pub fn receive_samples_loop( let mut recovery_sample_buffer = vec![]; loop { if let Some(runtime) = &*runtime.read() { - let res = runtime.block_on(async { - tokio::select! { - res = receiver.recv_buffer(&mut receiver_buffer) => Some(res), - _ = time::sleep(Duration::from_millis(500)) => None, - } - }); - match res { - Some(Ok(())) => (), - Some(err_res) => return err_res.map_err(err!()), - None => continue, + match receiver.recv_buffer(runtime, Duration::from_millis(500), &mut receiver_buffer) { + Ok(true) => (), + Ok(false) | Err(ConnectionError::Timeout) => continue, + Err(ConnectionError::Other(e)) => return fmt_e!("{e}"), } } else { return Ok(()); diff --git a/alvr/client_core/src/connection.rs b/alvr/client_core/src/connection.rs index 299410b382..243c3b0d31 100644 --- a/alvr/client_core/src/connection.rs +++ b/alvr/client_core/src/connection.rs @@ -32,7 +32,7 @@ use std::{ thread, time::{Duration, Instant}, }; -use tokio::{runtime::Runtime, time}; +use tokio::runtime::Runtime; #[cfg(target_os = "android")] use crate::audio; @@ -307,17 +307,14 @@ fn connection_pipeline( let mut stream_corrupted = false; loop { if let Some(runtime) = &*CONNECTION_RUNTIME.read() { - let res = runtime.block_on(async { - tokio::select! 
{ - res = video_receiver.recv_buffer(&mut receiver_buffer) => Some(res), - _ = time::sleep(Duration::from_millis(500)) => None, - } - }); - - match res { - Some(Ok(())) => (), - Some(Err(_)) => return, - None => continue, + match video_receiver.recv_buffer( + runtime, + Duration::from_millis(500), + &mut receiver_buffer, + ) { + Ok(true) => (), + Ok(false) | Err(ConnectionError::Timeout) => continue, + Err(ConnectionError::Other(_)) => return, } } else { return; @@ -399,17 +396,10 @@ fn connection_pipeline( let haptics_receive_thread = thread::spawn(move || loop { let haptics = if let Some(runtime) = &*CONNECTION_RUNTIME.read() { - let res = runtime.block_on(async { - tokio::select! { - res = haptics_receiver.recv_header_only() => Some(res), - _ = time::sleep(Duration::from_millis(500)) => None, - } - }); - - match res { - Some(Ok(packet)) => packet, - Some(Err(_)) => return, - None => continue, + match haptics_receiver.recv_header_only(runtime, Duration::from_millis(500)) { + Ok(packet) => packet, + Err(ConnectionError::Timeout) => continue, + Err(ConnectionError::Other(_)) => return, } } else { return; diff --git a/alvr/server/src/connection.rs b/alvr/server/src/connection.rs index 9dd888c7a8..dd22cfa36f 100644 --- a/alvr/server/src/connection.rs +++ b/alvr/server/src/connection.rs @@ -42,7 +42,7 @@ use std::{ thread, time::Duration, }; -use tokio::{runtime::Runtime, time}; +use tokio::runtime::Runtime; const RETRY_CONNECT_MIN_INTERVAL: Duration = Duration::from_secs(1); @@ -697,16 +697,10 @@ fn try_connect(mut client_ips: HashMap) -> ConResult { loop { let tracking = if let Some(runtime) = &*CONNECTION_RUNTIME.read() { - let maybe_tracking = runtime.block_on(async { - tokio::select! { - res = tracking_receiver.recv_header_only() => Some(res), - _ = time::sleep(Duration::from_millis(500)) => None, - } - }); - match maybe_tracking { - Some(Ok(tracking)) => tracking, - Some(Err(_)) => return, - None => continue, + match tracking_receiver.recv_header_only(runtime, Duration::from_millis(500)) { + Ok(tracking) => tracking, + Err(ConnectionError::Timeout) => continue, + Err(ConnectionError::Other(_)) => return, } } else { return; @@ -815,16 +809,10 @@ fn try_connect(mut client_ips: HashMap) -> ConResult { let statistics_thread = thread::spawn(move || loop { let client_stats = if let Some(runtime) = &*CONNECTION_RUNTIME.read() { - let maybe_client_stats = runtime.block_on(async { - tokio::select! 
{ - res = statics_receiver.recv_header_only() => Some(res), - _ = time::sleep(Duration::from_millis(500)) => None, - } - }); - match maybe_client_stats { - Some(Ok(stats)) => stats, - Some(Err(_)) => return, - None => continue, + match statics_receiver.recv_header_only(runtime, Duration::from_millis(500)) { + Ok(stats) => stats, + Err(ConnectionError::Timeout) => continue, + Err(ConnectionError::Other(_)) => return, } } else { return; diff --git a/alvr/sockets/src/stream_socket/mod.rs b/alvr/sockets/src/stream_socket/mod.rs index 03ee314c0d..cb0c9415fb 100644 --- a/alvr/sockets/src/stream_socket/mod.rs +++ b/alvr/sockets/src/stream_socket/mod.rs @@ -13,9 +13,8 @@ use bytes::{Buf, BufMut, BytesMut}; use futures::SinkExt; use serde::{de::DeserializeOwned, Serialize}; use std::{ - collections::HashMap, + collections::{BTreeMap, HashMap}, marker::PhantomData, - mem, net::IpAddr, ops::{Deref, DerefMut}, sync::Arc, @@ -229,86 +228,88 @@ impl ReceiverBuffer { pub struct StreamReceiver { receiver: mpsc::UnboundedReceiver, - next_packet_shards: HashMap, - next_packet_shards_count: Option, - next_packet_index: u32, + last_reconstructed_packet_index: u32, + packet_shards: BTreeMap>, + empty_shard_maps: Vec>, _phantom: PhantomData, } -/// Get next packet reconstructing from shards. It can store at max shards from two packets; if the -/// reordering entropy is too high, packets will never be successfully reconstructed. +/// Get next packet reconstructing from shards. +/// Returns true if a packet has been recontructed and copied into the buffer. impl StreamReceiver { - pub async fn recv_buffer(&mut self, buffer: &mut ReceiverBuffer) -> StrResult { - buffer.had_packet_loss = false; - - loop { - let current_packet_index = self.next_packet_index; - self.next_packet_index += 1; - - let mut current_packet_shards = - HashMap::with_capacity(self.next_packet_shards.capacity()); - mem::swap(&mut current_packet_shards, &mut self.next_packet_shards); - - let mut current_packet_shards_count = self.next_packet_shards_count.take(); - - loop { - if let Some(shards_count) = current_packet_shards_count { - if current_packet_shards.len() >= shards_count { - buffer.inner.clear(); - - for i in 0..shards_count { - if let Some(shard) = current_packet_shards.get(&i) { - buffer.inner.put_slice(shard); - } else { - error!("Cannot find shard with given index!"); - buffer.had_packet_loss = true; - - self.next_packet_shards.clear(); - - break; - } - } + pub fn recv_buffer( + &mut self, + runtime: &Runtime, + timeout: Duration, + buffer: &mut ReceiverBuffer, + ) -> ConResult { + // Get shard + let mut shard = runtime.block_on(async { + tokio::select! { + res = self.receiver.recv() => res.ok_or_else(enone!()).map_err(to_con_e!()), + _ = time::sleep(timeout) => alvr_common::timeout(), + } + })?; + let shard_packet_index = shard.get_u32(); + let shards_count = shard.get_u32() as usize; + let shard_index = shard.get_u32() as usize; + + // Discard shard if too old + if shard_packet_index <= self.last_reconstructed_packet_index { + debug!("Received old shard!"); + return Ok(false); + } - return Ok(()); - } + // Insert shards into map + let shard_map = self + .packet_shards + .entry(shard_packet_index) + .or_insert_with(|| self.empty_shard_maps.pop().unwrap_or_default()); + shard_map.insert(shard_index, shard); + + // If the shard map is (probably) complete: + if shard_map.len() == shards_count { + buffer.inner.clear(); + + // Copy shards into final buffer. Fail if there are missing shards. 
This is impossibly + // rare (if the shards_count value got corrupted) but should be handled. + for idx in 0..shards_count { + if let Some(shard) = shard_map.get(&idx) { + buffer.inner.put_slice(shard); + } else { + error!("Cannot find shard with given index!"); + return Ok(false); } + } - let mut shard = self.receiver.recv().await.ok_or_else(enone!())?; - - let shard_packet_index = shard.get_u32(); - let shards_count = shard.get_u32() as usize; - let shard_index = shard.get_u32() as usize; - - if shard_packet_index == current_packet_index { - current_packet_shards.insert(shard_index, shard); - current_packet_shards_count = Some(shards_count); - } else if shard_packet_index >= self.next_packet_index { - if shard_packet_index > self.next_packet_index { - self.next_packet_shards.clear(); - } + // Check if current packet index is one up the last successful reconstucted packet. + buffer.had_packet_loss = shard_packet_index != self.last_reconstructed_packet_index + 1; + self.last_reconstructed_packet_index = shard_packet_index; - self.next_packet_shards.insert(shard_index, shard); - self.next_packet_shards_count = Some(shards_count); - self.next_packet_index = shard_packet_index; + // Pop old shards and recycle containers + while let Some((packet_index, mut shards)) = self.packet_shards.pop_first() { + shards.clear(); + self.empty_shard_maps.push(shards); - if shard_packet_index > self.next_packet_index - || self.next_packet_shards.len() == shards_count - { - debug!("Skipping to next packet. Signaling packet loss."); - buffer.had_packet_loss = true; - break; - } + if packet_index == shard_packet_index { + break; } - // else: ignore old shard } + + Ok(true) + } else { + Ok(false) } } - pub async fn recv_header_only(&mut self) -> StrResult { + pub fn recv_header_only(&mut self, runtime: &Runtime, timeout: Duration) -> ConResult { let mut buffer = ReceiverBuffer::new(); - self.recv_buffer(&mut buffer).await?; - Ok(buffer.get()?.0) + loop { + if self.recv_buffer(runtime, timeout, &mut buffer)? 
{ + return Ok(buffer.get().map_err(to_con_e!())?.0); + } + } } } @@ -458,9 +459,9 @@ impl StreamSocket { StreamReceiver { receiver, - next_packet_shards: HashMap::new(), - next_packet_shards_count: None, - next_packet_index: 0, + last_reconstructed_packet_index: 0, + packet_shards: BTreeMap::new(), + empty_shard_maps: vec![], _phantom: PhantomData, } } From 9f3308faa0cf528d6e7c5bf312a3d383ac173b72 Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Thu, 13 Jul 2023 16:17:06 +0800 Subject: [PATCH 20/28] Progress on sync sockets (25) --- alvr/audio/src/lib.rs | 25 ++++--- alvr/client_core/src/audio.rs | 5 +- alvr/client_core/src/connection.rs | 77 ++++++++++----------- alvr/client_core/src/lib.rs | 3 +- alvr/server/src/connection.rs | 67 +++++++++--------- alvr/sockets/src/stream_socket/mod.rs | 97 +++++++++++---------------- alvr/sockets/src/stream_socket/tcp.rs | 35 +++++++--- alvr/sockets/src/stream_socket/udp.rs | 33 +++++---- 8 files changed, 169 insertions(+), 173 deletions(-) diff --git a/alvr/audio/src/lib.rs b/alvr/audio/src/lib.rs index 4584e9cfe7..04a2888397 100644 --- a/alvr/audio/src/lib.rs +++ b/alvr/audio/src/lib.rs @@ -8,6 +8,7 @@ use alvr_common::{ once_cell::sync::Lazy, parking_lot::{Mutex, RwLock}, prelude::*, + RelaxedAtomic, }; use alvr_session::{ AudioBufferingConfig, CustomAudioDeviceConfig, LinuxAudioBackend, MicrophoneDevicesConfig, @@ -360,7 +361,7 @@ pub fn get_next_frame_batch( // callback will gracefully handle an interruption, and the callback timing and sound wave // continuity will not be affected. pub fn receive_samples_loop( - runtime: &RwLock>, + running: Arc, mut receiver: StreamReceiver<()>, sample_buffer: Arc>>, channels_count: usize, @@ -369,16 +370,12 @@ pub fn receive_samples_loop( ) -> StrResult { let mut receiver_buffer = ReceiverBuffer::new(); let mut recovery_sample_buffer = vec![]; - loop { - if let Some(runtime) = &*runtime.read() { - match receiver.recv_buffer(runtime, Duration::from_millis(500), &mut receiver_buffer) { - Ok(true) => (), - Ok(false) | Err(ConnectionError::Timeout) => continue, - Err(ConnectionError::Other(e)) => return fmt_e!("{e}"), - } - } else { - return Ok(()); - } + while running.value() { + match receiver.recv_buffer(Duration::from_millis(500), &mut receiver_buffer) { + Ok(true) => (), + Ok(false) | Err(ConnectionError::Timeout) => continue, + Err(ConnectionError::Other(e)) => return fmt_e!("{e}"), + }; let (_, packet) = receiver_buffer.get()?; @@ -462,6 +459,8 @@ pub fn receive_samples_loop( } } } + + Ok(()) } struct StreamingSource { @@ -514,7 +513,7 @@ impl Iterator for StreamingSource { } pub fn play_audio_loop( - runtime: &RwLock>, + running: Arc, device: AudioDevice, channels_count: u16, sample_rate: u32, @@ -544,7 +543,7 @@ pub fn play_audio_loop( .map_err(err!())?; receive_samples_loop( - runtime, + running, receiver, sample_buffer, channels_count as _, diff --git a/alvr/client_core/src/audio.rs b/alvr/client_core/src/audio.rs index 35513d7a33..ffce9d8e78 100644 --- a/alvr/client_core/src/audio.rs +++ b/alvr/client_core/src/audio.rs @@ -2,6 +2,7 @@ use alvr_audio::{AudioDevice, AudioRecordState}; use alvr_common::{ parking_lot::{Mutex, RwLock}, prelude::*, + RelaxedAtomic, }; use alvr_session::AudioBufferingConfig; use alvr_sockets::{StreamReceiver, StreamSender}; @@ -125,7 +126,7 @@ impl AudioOutputCallback for PlayerCallback { #[allow(unused_variables)] pub fn play_audio_loop( - runtime: &RwLock>, + running: Arc, device: AudioDevice, channels_count: u16, sample_rate: u32, @@ -164,7 +165,7 @@ pub fn 
play_audio_loop( stream.start().map_err(err!())?; alvr_audio::receive_samples_loop( - runtime, + running, receiver, sample_buffer, 2, diff --git a/alvr/client_core/src/connection.rs b/alvr/client_core/src/connection.rs index 243c3b0d31..c7255102ef 100644 --- a/alvr/client_core/src/connection.rs +++ b/alvr/client_core/src/connection.rs @@ -283,11 +283,10 @@ fn connection_pipeline( config.options = settings.video.mediacodec_extra_options; } - let mut video_receiver = - stream_socket.subscribe_to_stream::(&runtime, VIDEO); - let game_audio_receiver = stream_socket.subscribe_to_stream(&runtime, AUDIO); + let mut video_receiver = stream_socket.subscribe_to_stream::(VIDEO); + let game_audio_receiver = stream_socket.subscribe_to_stream(AUDIO); let tracking_sender = stream_socket.request_stream(TRACKING); - let mut haptics_receiver = stream_socket.subscribe_to_stream::(&runtime, HAPTICS); + let mut haptics_receiver = stream_socket.subscribe_to_stream::(HAPTICS); let statistics_sender = stream_socket.request_stream(STATISTICS); // Important: To make sure this is successfully unset when stopping streaming, the rest of the @@ -305,19 +304,11 @@ fn connection_pipeline( let video_receive_thread = thread::spawn(move || { let mut receiver_buffer = ReceiverBuffer::new(); let mut stream_corrupted = false; - loop { - if let Some(runtime) = &*CONNECTION_RUNTIME.read() { - match video_receiver.recv_buffer( - runtime, - Duration::from_millis(500), - &mut receiver_buffer, - ) { - Ok(true) => (), - Ok(false) | Err(ConnectionError::Timeout) => continue, - Err(ConnectionError::Other(_)) => return, - } - } else { - return; + while IS_STREAMING.value() { + match video_receiver.recv_buffer(Duration::from_millis(500), &mut receiver_buffer) { + Ok(true) => (), + Ok(false) | Err(ConnectionError::Timeout) => continue, + Err(ConnectionError::Other(_)) => return, } let Ok((header, nal)) = receiver_buffer.get() else { @@ -357,7 +348,7 @@ fn connection_pipeline( thread::spawn(move || { alvr_common::show_err(audio::play_audio_loop( - &CONNECTION_RUNTIME, + Arc::clone(&IS_STREAMING), device, 2, game_audio_sample_rate, @@ -374,19 +365,21 @@ fn connection_pipeline( let microphone_sender = stream_socket.request_stream(AUDIO); - thread::spawn(move || loop { - match audio::record_audio_blocking( - Arc::clone(&CONNECTION_RUNTIME), - microphone_sender.clone(), - &device, - 1, - false, - ) { - Ok(()) => break, - Err(e) => { - error!("Audio record error: {e}"); + thread::spawn(move || { + while IS_STREAMING.value() { + match audio::record_audio_blocking( + Arc::clone(&CONNECTION_RUNTIME), + microphone_sender.clone(), + &device, + 1, + false, + ) { + Ok(()) => break, + Err(e) => { + error!("Audio record error: {e}"); - continue; + continue; + } } } }) @@ -394,23 +387,21 @@ fn connection_pipeline( thread::spawn(|| ()) }; - let haptics_receive_thread = thread::spawn(move || loop { - let haptics = if let Some(runtime) = &*CONNECTION_RUNTIME.read() { - match haptics_receiver.recv_header_only(runtime, Duration::from_millis(500)) { + let haptics_receive_thread = thread::spawn(move || { + while IS_STREAMING.value() { + let haptics = match haptics_receiver.recv_header_only(Duration::from_millis(500)) { Ok(packet) => packet, Err(ConnectionError::Timeout) => continue, Err(ConnectionError::Other(_)) => return, - } - } else { - return; - }; + }; - EVENT_QUEUE.lock().push_back(ClientCoreEvent::Haptics { - device_id: haptics.device_id, - duration: haptics.duration, - frequency: haptics.frequency, - amplitude: haptics.amplitude, - }); + 
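// recv_header_only returned a fully reconstructed haptics packet, so forward it to the event
// queue; the Timeout arm above keeps polling, and any other receive error ends the thread
// instead of reaching this point.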
EVENT_QUEUE.lock().push_back(ClientCoreEvent::Haptics { + device_id: haptics.device_id, + duration: haptics.duration, + frequency: haptics.frequency, + amplitude: haptics.amplitude, + }); + } }); // Poll for events that need a constant thread (mainly for the JNI env) diff --git a/alvr/client_core/src/lib.rs b/alvr/client_core/src/lib.rs index 769df70a5c..dafc397efd 100644 --- a/alvr/client_core/src/lib.rs +++ b/alvr/client_core/src/lib.rs @@ -39,6 +39,7 @@ use serde::{Deserialize, Serialize}; use statistics::StatisticsManager; use std::{ collections::VecDeque, + sync::Arc, thread::{self, JoinHandle}, time::Duration, }; @@ -51,7 +52,7 @@ static EVENT_QUEUE: Lazy>> = static IS_ALIVE: RelaxedAtomic = RelaxedAtomic::new(true); static IS_RESUMED: RelaxedAtomic = RelaxedAtomic::new(false); -static IS_STREAMING: RelaxedAtomic = RelaxedAtomic::new(false); +static IS_STREAMING: Lazy> = Lazy::new(|| Arc::new(RelaxedAtomic::new(false))); static CONNECTION_THREAD: Lazy>>> = Lazy::new(|| Mutex::new(None)); diff --git a/alvr/server/src/connection.rs b/alvr/server/src/connection.rs index dd22cfa36f..44bb097590 100644 --- a/alvr/server/src/connection.rs +++ b/alvr/server/src/connection.rs @@ -48,6 +48,8 @@ const RETRY_CONNECT_MIN_INTERVAL: Duration = Duration::from_secs(1); pub static SHOULD_CONNECT_TO_CLIENTS: Lazy> = Lazy::new(|| Arc::new(RelaxedAtomic::new(false))); +pub static IS_STREAMING: Lazy> = + Lazy::new(|| Arc::new(RelaxedAtomic::new(false))); static CONNECTION_RUNTIME: Lazy>>> = Lazy::new(|| Arc::new(RwLock::new(None))); static VIDEO_CHANNEL_SENDER: Lazy>>> = @@ -549,16 +551,16 @@ fn try_connect(mut client_ips: HashMap) -> ConResult { let mut video_sender = stream_socket.request_stream(VIDEO); let game_audio_sender = stream_socket.request_stream(AUDIO); - let microphone_receiver = stream_socket.subscribe_to_stream(&runtime, AUDIO); - let mut tracking_receiver = stream_socket.subscribe_to_stream::(&runtime, TRACKING); + let microphone_receiver = stream_socket.subscribe_to_stream(AUDIO); + let mut tracking_receiver = stream_socket.subscribe_to_stream::(TRACKING); let haptics_sender = stream_socket.request_stream(HAPTICS); - let mut statics_receiver = - stream_socket.subscribe_to_stream::(&runtime, STATISTICS); + let mut statics_receiver = stream_socket.subscribe_to_stream::(STATISTICS); // Note: here we create CONNECTION_RUNTIME. The rest of the function MUST be infallible, as // CONNECTION_RUNTIME must be destroyed in the thread defined at the end of the function. // Failure to respect this might leave a lingering runtime. 
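// This commit also raises IS_STREAMING right after the runtime is stored and clears it again in
// the shutdown sequence further down ("This requests shutdown from threads"), so the worker loops
// that now spin on IS_STREAMING.value() observe teardown on their next short recv timeout instead
// of only when CONNECTION_RUNTIME is cleared.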
*CONNECTION_RUNTIME.write() = Some(runtime); + IS_STREAMING.set(true); let (video_channel_sender, video_channel_receiver) = std::sync::mpsc::sync_channel(settings.connection.max_queued_server_video_frames); @@ -664,7 +666,7 @@ fn try_connect(mut client_ips: HashMap) -> ConResult { thread::spawn(move || { alvr_common::show_err(alvr_audio::play_audio_loop( - &CONNECTION_RUNTIME, + Arc::clone(&IS_STREAMING), sink, 1, streaming_caps.microphone_sample_rate, @@ -695,15 +697,12 @@ fn try_connect(mut client_ips: HashMap) -> ConResult { track_controllers = config.tracked.into(); } - loop { - let tracking = if let Some(runtime) = &*CONNECTION_RUNTIME.read() { - match tracking_receiver.recv_header_only(runtime, Duration::from_millis(500)) { - Ok(tracking) => tracking, - Err(ConnectionError::Timeout) => continue, - Err(ConnectionError::Other(_)) => return, - } - } else { - return; + while IS_STREAMING.value() { + let tracking = match tracking_receiver.recv_header_only(Duration::from_millis(500)) + { + Ok(tracking) => tracking, + Err(ConnectionError::Timeout) => continue, + Err(ConnectionError::Other(_)) => return, }; let mut tracking_manager_lock = tracking_manager.lock(); @@ -807,28 +806,26 @@ fn try_connect(mut client_ips: HashMap) -> ConResult { } }); - let statistics_thread = thread::spawn(move || loop { - let client_stats = if let Some(runtime) = &*CONNECTION_RUNTIME.read() { - match statics_receiver.recv_header_only(runtime, Duration::from_millis(500)) { + let statistics_thread = thread::spawn(move || { + while IS_STREAMING.value() { + let client_stats = match statics_receiver.recv_header_only(Duration::from_millis(500)) { Ok(stats) => stats, Err(ConnectionError::Timeout) => continue, Err(ConnectionError::Other(_)) => return, - } - } else { - return; - }; + }; - if let Some(stats) = &mut *STATISTICS_MANAGER.lock() { - let timestamp = client_stats.target_timestamp; - let decoder_latency = client_stats.video_decode; - let network_latency = stats.report_statistics(client_stats); - - BITRATE_MANAGER.lock().report_frame_latencies( - &SERVER_DATA_MANAGER.read().settings().video.bitrate.mode, - timestamp, - network_latency, - decoder_latency, - ); + if let Some(stats) = &mut *STATISTICS_MANAGER.lock() { + let timestamp = client_stats.target_timestamp; + let decoder_latency = client_stats.video_decode; + let network_latency = stats.report_statistics(client_stats); + + BITRATE_MANAGER.lock().report_frame_latencies( + &SERVER_DATA_MANAGER.read().settings().video.bitrate.mode, + timestamp, + network_latency, + decoder_latency, + ); + } } }); @@ -1034,7 +1031,10 @@ fn try_connect(mut client_ips: HashMap) -> ConResult { }); let lifecycle_check_thread = thread::spawn(|| { - while SHOULD_CONNECT_TO_CLIENTS.value() && CONNECTION_RUNTIME.read().is_some() { + while IS_STREAMING.value() + && SHOULD_CONNECT_TO_CLIENTS.value() + && CONNECTION_RUNTIME.read().is_some() + { thread::sleep(Duration::from_millis(500)); } @@ -1082,6 +1082,7 @@ fn try_connect(mut client_ips: HashMap) -> ConResult { } // This requests shutdown from threads + IS_STREAMING.set(false); *CONNECTION_RUNTIME.write() = None; *VIDEO_CHANNEL_SENDER.lock() = None; *HAPTICS_SENDER.lock() = None; diff --git a/alvr/sockets/src/stream_socket/mod.rs b/alvr/sockets/src/stream_socket/mod.rs index cb0c9415fb..01b12c1282 100644 --- a/alvr/sockets/src/stream_socket/mod.rs +++ b/alvr/sockets/src/stream_socket/mod.rs @@ -7,7 +7,7 @@ mod tcp; mod udp; -use alvr_common::prelude::*; +use alvr_common::{parking_lot::Mutex, prelude::*}; use 
alvr_session::{SocketBufferSize, SocketProtocol}; use bytes::{Buf, BufMut, BytesMut}; use futures::SinkExt; @@ -17,15 +17,15 @@ use std::{ marker::PhantomData, net::IpAddr, ops::{Deref, DerefMut}, - sync::Arc, + sync::{ + mpsc::{self, RecvTimeoutError}, + Arc, + }, time::Duration, }; use tcp::{TcpStreamReceiveSocket, TcpStreamSendSocket}; +use tokio::time; use tokio::{net, runtime::Runtime}; -use tokio::{ - sync::{mpsc, Mutex}, - time, -}; use udp::{UdpStreamReceiveSocket, UdpStreamSendSocket}; pub fn set_socket_buffers( @@ -128,20 +128,18 @@ pub struct StreamSender { } impl StreamSender { - async fn send_buffer(&self, buffer: BytesMut) -> StrResult { + fn send_buffer(&self, runtime: &Runtime, buffer: BytesMut) -> StrResult { match &self.socket { - StreamSendSocket::Udp(socket) => socket - .inner - .lock() - .await - .feed((buffer.freeze(), socket.peer_addr)) - .await + StreamSendSocket::Udp(socket) => runtime + .block_on( + socket + .inner + .lock() + .feed((buffer.freeze(), socket.peer_addr)), + ) .map_err(err!()), - StreamSendSocket::Tcp(socket) => socket - .lock() - .await - .feed(buffer.freeze()) - .await + StreamSendSocket::Tcp(socket) => runtime + .block_on(socket.lock().feed(buffer.freeze())) .map_err(err!()), } } @@ -177,17 +175,17 @@ impl StreamSender { shards_buffer.put_u32(total_shards_count as _); shards_buffer.put_u32(shard_index as u32); shards_buffer.put_slice(shard); - runtime.block_on(self.send_buffer(shards_buffer.split()))?; + self.send_buffer(runtime, shards_buffer.split())?; } match &self.socket { StreamSendSocket::Udp(socket) => runtime - .block_on(async { socket.inner.lock().await.flush().await }) + .block_on(socket.inner.lock().flush()) .map_err(err!())?, - StreamSendSocket::Tcp(socket) => runtime - .block_on(async { socket.lock().await.flush().await }) - .map_err(err!())?, + StreamSendSocket::Tcp(socket) => { + runtime.block_on(socket.lock().flush()).map_err(err!())? + } } self.next_packet_index += 1; @@ -227,7 +225,7 @@ impl ReceiverBuffer { } pub struct StreamReceiver { - receiver: mpsc::UnboundedReceiver, + receiver: mpsc::Receiver, last_reconstructed_packet_index: u32, packet_shards: BTreeMap>, empty_shard_maps: Vec>, @@ -239,17 +237,15 @@ pub struct StreamReceiver { impl StreamReceiver { pub fn recv_buffer( &mut self, - runtime: &Runtime, timeout: Duration, buffer: &mut ReceiverBuffer, ) -> ConResult { // Get shard - let mut shard = runtime.block_on(async { - tokio::select! { - res = self.receiver.recv() => res.ok_or_else(enone!()).map_err(to_con_e!()), - _ = time::sleep(timeout) => alvr_common::timeout(), - } - })?; + let mut shard = match self.receiver.recv_timeout(timeout) { + Ok(shard) => Ok(shard), + Err(RecvTimeoutError::Timeout) => alvr_common::timeout(), + Err(RecvTimeoutError::Disconnected) => con_fmt_e!("Disconnected"), + }?; let shard_packet_index = shard.get_u32(); let shards_count = shard.get_u32() as usize; let shard_index = shard.get_u32() as usize; @@ -302,11 +298,11 @@ impl StreamReceiver { } } - pub fn recv_header_only(&mut self, runtime: &Runtime, timeout: Duration) -> ConResult { + pub fn recv_header_only(&mut self, timeout: Duration) -> ConResult { let mut buffer = ReceiverBuffer::new(); loop { - if self.recv_buffer(runtime, timeout, &mut buffer)? { + if self.recv_buffer(timeout, &mut buffer)? 
{ return Ok(buffer.get().map_err(to_con_e!())?.0); } } @@ -436,7 +432,7 @@ pub struct StreamSocket { max_packet_size: usize, send_socket: StreamSendSocket, receive_socket: Arc>>, - packet_queues: Arc>>>, + packet_queues: Arc>>>, } impl StreamSocket { @@ -451,11 +447,10 @@ impl StreamSocket { } } - pub fn subscribe_to_stream(&self, runtime: &Runtime, stream_id: u16) -> StreamReceiver { - let (sender, receiver) = mpsc::unbounded_channel(); - runtime - .block_on(self.packet_queues.lock()) - .insert(stream_id, sender); + pub fn subscribe_to_stream(&self, stream_id: u16) -> StreamReceiver { + let (sender, receiver) = mpsc::channel(); + + self.packet_queues.lock().insert(stream_id, sender); StreamReceiver { receiver, @@ -467,23 +462,13 @@ impl StreamSocket { } pub fn recv(&self, runtime: &Runtime, timeout: Duration) -> ConResult { - match runtime - .block_on(self.receive_socket.lock()) - .as_mut() - .unwrap() - { - StreamReceiveSocket::Udp(socket) => runtime.block_on(async { - tokio::select! { - res = udp::recv(socket, &self.packet_queues) => res.map_err(to_con_e!()), - _ = time::sleep(timeout) => alvr_common::timeout(), - } - }), - StreamReceiveSocket::Tcp(socket) => runtime.block_on(async { - tokio::select! { - res = tcp::recv(socket, Arc::clone(&self.packet_queues)) => res.map_err(to_con_e!()), - _ = time::sleep(timeout) => alvr_common::timeout(), - } - }), + match self.receive_socket.lock().as_mut().unwrap() { + StreamReceiveSocket::Udp(socket) => { + udp::recv(runtime, timeout, socket, &self.packet_queues) + } + StreamReceiveSocket::Tcp(socket) => { + tcp::recv(runtime, timeout, socket, &self.packet_queues) + } } } } diff --git a/alvr/sockets/src/stream_socket/tcp.rs b/alvr/sockets/src/stream_socket/tcp.rs index 6233b03b60..2537e1cb73 100644 --- a/alvr/sockets/src/stream_socket/tcp.rs +++ b/alvr/sockets/src/stream_socket/tcp.rs @@ -1,15 +1,21 @@ use crate::{Ldc, LOCAL_IP}; -use alvr_common::prelude::*; +use alvr_common::{parking_lot::Mutex, prelude::*}; use alvr_session::SocketBufferSize; use bytes::{Buf, Bytes, BytesMut}; use futures::{ stream::{SplitSink, SplitStream}, StreamExt, }; -use std::{collections::HashMap, net::IpAddr, sync::Arc}; +use std::{ + collections::HashMap, + net::IpAddr, + sync::{mpsc, Arc}, + time::Duration, +}; use tokio::{ net::{TcpListener, TcpStream}, - sync::{mpsc, Mutex}, + runtime::Runtime, + time, }; use tokio_util::codec::Framed; @@ -67,20 +73,27 @@ pub async fn connect_to_client( Ok((Arc::new(Mutex::new(send_socket)), receive_socket)) } -pub async fn recv( +pub fn recv( + runtime: &Runtime, + timeout: Duration, socket: &mut TcpStreamReceiveSocket, - packet_enqueuers: Arc>>>, -) -> StrResult { - if let Some(maybe_packet) = socket.next().await { - let mut packet = maybe_packet.map_err(err!())?; + packet_enqueuers: &Mutex>>, +) -> ConResult { + if let Some(maybe_packet) = runtime.block_on(async { + tokio::select! 
{ + res = socket.next() => res.map(|p| p.map_err(to_con_e!())), + _ = time::sleep(timeout) => Some(alvr_common::timeout()), + } + }) { + let mut packet = maybe_packet?; let stream_id = packet.get_u16(); - if let Some(enqueuer) = packet_enqueuers.lock().await.get_mut(&stream_id) { - enqueuer.send(packet).map_err(err!())?; + if let Some(enqueuer) = packet_enqueuers.lock().get_mut(&stream_id) { + enqueuer.send(packet).map_err(to_con_e!())?; } Ok(()) } else { - fmt_e!("Socket closed") + con_fmt_e!("Socket closed") } } diff --git a/alvr/sockets/src/stream_socket/udp.rs b/alvr/sockets/src/stream_socket/udp.rs index 3f907632aa..2b8ac3aca7 100644 --- a/alvr/sockets/src/stream_socket/udp.rs +++ b/alvr/sockets/src/stream_socket/udp.rs @@ -1,5 +1,5 @@ use crate::{Ldc, LOCAL_IP}; -use alvr_common::prelude::*; +use alvr_common::{parking_lot::Mutex, prelude::*}; use alvr_session::SocketBufferSize; use bytes::{Buf, Bytes, BytesMut}; use futures::{ @@ -9,12 +9,10 @@ use futures::{ use std::{ collections::HashMap, net::{IpAddr, SocketAddr}, - sync::Arc, -}; -use tokio::{ - net::UdpSocket, - sync::{mpsc, Mutex}, + sync::{mpsc, Arc}, + time::Duration, }; +use tokio::{net::UdpSocket, runtime::Runtime, time}; use tokio_util::udp::UdpFramed; #[allow(clippy::type_complexity)] @@ -67,12 +65,19 @@ pub fn connect( )) } -pub async fn recv( +pub fn recv( + runtime: &Runtime, + timeout: Duration, socket: &mut UdpStreamReceiveSocket, - packet_enqueuers: &Mutex>>, -) -> StrResult { - if let Some(maybe_packet) = socket.inner.next().await { - let (mut packet_bytes, address) = maybe_packet.map_err(err!())?; + packet_enqueuers: &Mutex>>, +) -> ConResult { + if let Some(maybe_packet) = runtime.block_on(async { + tokio::select! { + res = socket.inner.next() => res.map(|p| p.map_err(to_con_e!())), + _ = time::sleep(timeout) => Some(alvr_common::timeout()), + } + }) { + let (mut packet_bytes, address) = maybe_packet.map_err(to_con_e!())?; if address != socket.peer_addr { // Non fatal @@ -80,12 +85,12 @@ pub async fn recv( } let stream_id = packet_bytes.get_u16(); - if let Some(enqueuer) = packet_enqueuers.lock().await.get_mut(&stream_id) { - enqueuer.send(packet_bytes).map_err(err!())?; + if let Some(enqueuer) = packet_enqueuers.lock().get_mut(&stream_id) { + enqueuer.send(packet_bytes).map_err(to_con_e!())?; } Ok(()) } else { - fmt_e!("Socket closed") + con_fmt_e!("Socket closed") } } From e440137dcc88d0d548abb45aa2c6c81300e8067a Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Thu, 13 Jul 2023 17:32:49 +0800 Subject: [PATCH 21/28] Progress on sync sockets (26) --- alvr/sockets/src/stream_socket/mod.rs | 47 ++++++++++++--------------- alvr/sockets/src/stream_socket/tcp.rs | 46 +++++++++++++++++--------- alvr/sockets/src/stream_socket/udp.rs | 13 +++++--- 3 files changed, 59 insertions(+), 47 deletions(-) diff --git a/alvr/sockets/src/stream_socket/mod.rs b/alvr/sockets/src/stream_socket/mod.rs index 01b12c1282..f862b47220 100644 --- a/alvr/sockets/src/stream_socket/mod.rs +++ b/alvr/sockets/src/stream_socket/mod.rs @@ -24,7 +24,6 @@ use std::{ time::Duration, }; use tcp::{TcpStreamReceiveSocket, TcpStreamSendSocket}; -use tokio::time; use tokio::{net, runtime::Runtime}; use udp::{UdpStreamReceiveSocket, UdpStreamSendSocket}; @@ -323,16 +322,18 @@ impl StreamSocketBuilder { recv_buffer_bytes: SocketBufferSize, ) -> StrResult { Ok(match stream_socket_config { - SocketProtocol::Udp => StreamSocketBuilder::Udp(runtime.block_on(udp::bind( + SocketProtocol::Udp => StreamSocketBuilder::Udp(udp::bind( + runtime, port, 
send_buffer_bytes, recv_buffer_bytes, - ))?), - SocketProtocol::Tcp => StreamSocketBuilder::Tcp(runtime.block_on(tcp::bind( + )?), + SocketProtocol::Tcp => StreamSocketBuilder::Tcp(tcp::bind( + runtime, port, send_buffer_bytes, recv_buffer_bytes, - ))?), + )?), }) } @@ -346,8 +347,7 @@ impl StreamSocketBuilder { ) -> ConResult { let (send_socket, receive_socket) = match self { StreamSocketBuilder::Udp(socket) => { - let (send_socket, receive_socket) = - udp::connect(socket, server_ip, port).map_err(to_con_e!())?; + let (send_socket, receive_socket) = udp::connect(socket, server_ip, port); ( StreamSendSocket::Udp(send_socket), @@ -355,14 +355,8 @@ impl StreamSocketBuilder { ) } StreamSocketBuilder::Tcp(listener) => { - let (send_socket, receive_socket) = runtime.block_on(async { - tokio::select! { - res = tcp::accept_from_server(listener, server_ip) => { - res.map_err(to_con_e!()) - }, - _ = time::sleep(timeout) => alvr_common::timeout(), - } - })?; + let (send_socket, receive_socket) = + tcp::accept_from_server(runtime, timeout, listener, server_ip)?; ( StreamSendSocket::Tcp(send_socket), @@ -392,25 +386,24 @@ impl StreamSocketBuilder { ) -> ConResult { let (send_socket, receive_socket) = match protocol { SocketProtocol::Udp => { - let socket = runtime - .block_on(udp::bind(port, send_buffer_bytes, recv_buffer_bytes)) + let socket = udp::bind(runtime, port, send_buffer_bytes, recv_buffer_bytes) .map_err(to_con_e!())?; - let (send_socket, receive_socket) = - udp::connect(socket, client_ip, port).map_err(to_con_e!())?; + let (send_socket, receive_socket) = udp::connect(socket, client_ip, port); + ( StreamSendSocket::Udp(send_socket), StreamReceiveSocket::Udp(receive_socket), ) } SocketProtocol::Tcp => { - let (send_socket, receive_socket) = runtime.block_on(async { - tokio::select! { - res = tcp::connect_to_client(client_ip, port, send_buffer_bytes, recv_buffer_bytes) => { - res.map_err(to_con_e!()) - }, - _ = time::sleep(timeout) => alvr_common::timeout(), - } - })?; + let (send_socket, receive_socket) = tcp::connect_to_client( + runtime, + timeout, + client_ip, + port, + send_buffer_bytes, + recv_buffer_bytes, + )?; ( StreamSendSocket::Tcp(send_socket), diff --git a/alvr/sockets/src/stream_socket/tcp.rs b/alvr/sockets/src/stream_socket/tcp.rs index 2537e1cb73..a3f840b0d0 100644 --- a/alvr/sockets/src/stream_socket/tcp.rs +++ b/alvr/sockets/src/stream_socket/tcp.rs @@ -22,12 +22,15 @@ use tokio_util::codec::Framed; pub type TcpStreamSendSocket = Arc, Bytes>>>; pub type TcpStreamReceiveSocket = SplitStream>; -pub async fn bind( +pub fn bind( + runtime: &Runtime, port: u16, send_buffer_bytes: SocketBufferSize, recv_buffer_bytes: SocketBufferSize, ) -> StrResult { - let socket = TcpListener::bind((LOCAL_IP, port)).await.map_err(err!())?; + let socket = runtime + .block_on(TcpListener::bind((LOCAL_IP, port))) + .map_err(err!())?; let socket = socket2::Socket::from(socket.into_std().map_err(err!())?); super::set_socket_buffers(&socket, send_buffer_bytes, recv_buffer_bytes).ok(); @@ -35,38 +38,51 @@ pub async fn bind( TcpListener::from_std(socket.into()).map_err(err!()) } -pub async fn accept_from_server( +pub fn accept_from_server( + runtime: &Runtime, + timeout: Duration, listener: TcpListener, server_ip: IpAddr, -) -> StrResult<(TcpStreamSendSocket, TcpStreamReceiveSocket)> { - let (socket, server_address) = listener.accept().await.map_err(err!())?; +) -> ConResult<(TcpStreamSendSocket, TcpStreamReceiveSocket)> { + let (socket, server_address) = runtime.block_on(async { + tokio::select! 
{ + res = listener.accept() => res.map_err(to_con_e!()), + _ = time::sleep(timeout) => alvr_common::timeout(), + } + })?; if server_address.ip() != server_ip { - return fmt_e!("Connected to wrong client: {server_address} != {server_ip}"); + return con_fmt_e!("Connected to wrong client: {server_address} != {server_ip}"); } - socket.set_nodelay(true).map_err(err!())?; + socket.set_nodelay(true).map_err(to_con_e!())?; let socket = Framed::new(socket, Ldc::new()); let (send_socket, receive_socket) = socket.split(); Ok((Arc::new(Mutex::new(send_socket)), receive_socket)) } -pub async fn connect_to_client( +pub fn connect_to_client( + runtime: &Runtime, + timeout: Duration, client_ip: IpAddr, port: u16, send_buffer_bytes: SocketBufferSize, recv_buffer_bytes: SocketBufferSize, -) -> StrResult<(TcpStreamSendSocket, TcpStreamReceiveSocket)> { - let socket = TcpStream::connect((client_ip, port)) - .await - .map_err(err!())?; - let socket = socket2::Socket::from(socket.into_std().map_err(err!())?); +) -> ConResult<(TcpStreamSendSocket, TcpStreamReceiveSocket)> { + let socket = runtime.block_on(async { + tokio::select! { + res = TcpStream::connect((client_ip, port)) => res.map_err(to_con_e!()), + _ = time::sleep(timeout) => alvr_common::timeout(), + } + })?; + + let socket = socket2::Socket::from(socket.into_std().map_err(to_con_e!())?); super::set_socket_buffers(&socket, send_buffer_bytes, recv_buffer_bytes).ok(); - let socket = TcpStream::from_std(socket.into()).map_err(err!())?; - socket.set_nodelay(true).map_err(err!())?; + let socket = TcpStream::from_std(socket.into()).map_err(to_con_e!())?; + socket.set_nodelay(true).map_err(to_con_e!())?; let socket = Framed::new(socket, Ldc::new()); let (send_socket, receive_socket) = socket.split(); diff --git a/alvr/sockets/src/stream_socket/udp.rs b/alvr/sockets/src/stream_socket/udp.rs index 2b8ac3aca7..bb39c9e3a0 100644 --- a/alvr/sockets/src/stream_socket/udp.rs +++ b/alvr/sockets/src/stream_socket/udp.rs @@ -31,12 +31,15 @@ pub struct UdpStreamReceiveSocket { // Create tokio socket, convert to socket2, apply settings, convert back to tokio. This is done to // let tokio set all the internal parameters it needs from the start. 
-pub async fn bind( +pub fn bind( + runtime: &Runtime, port: u16, send_buffer_bytes: SocketBufferSize, recv_buffer_bytes: SocketBufferSize, ) -> StrResult { - let socket = UdpSocket::bind((LOCAL_IP, port)).await.map_err(err!())?; + let socket = runtime + .block_on(UdpSocket::bind((LOCAL_IP, port))) + .map_err(err!())?; let socket = socket2::Socket::from(socket.into_std().map_err(err!())?); super::set_socket_buffers(&socket, send_buffer_bytes, recv_buffer_bytes).ok(); @@ -48,12 +51,12 @@ pub fn connect( socket: UdpSocket, peer_ip: IpAddr, port: u16, -) -> StrResult<(UdpStreamSendSocket, UdpStreamReceiveSocket)> { +) -> (UdpStreamSendSocket, UdpStreamReceiveSocket) { let peer_addr = (peer_ip, port).into(); let socket = UdpFramed::new(socket, Ldc::new()); let (send_socket, receive_socket) = socket.split(); - Ok(( + ( UdpStreamSendSocket { peer_addr, inner: Arc::new(Mutex::new(send_socket)), @@ -62,7 +65,7 @@ pub fn connect( peer_addr, inner: receive_socket, }, - )) + ) } pub fn recv( From 5ae730c9b1cb9e6efa47ba4dd23133665668b496 Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Fri, 14 Jul 2023 11:57:27 +0800 Subject: [PATCH 22/28] Remove some mutexes inside StreamSocket --- alvr/client_core/src/connection.rs | 3 +-- alvr/server/src/connection.rs | 3 +-- alvr/sockets/src/stream_socket/mod.rs | 31 ++++++++++++--------------- alvr/sockets/src/stream_socket/tcp.rs | 4 ++-- alvr/sockets/src/stream_socket/udp.rs | 4 ++-- 5 files changed, 20 insertions(+), 25 deletions(-) diff --git a/alvr/client_core/src/connection.rs b/alvr/client_core/src/connection.rs index c7255102ef..ff516a09d7 100644 --- a/alvr/client_core/src/connection.rs +++ b/alvr/client_core/src/connection.rs @@ -264,14 +264,13 @@ fn connection_pipeline( return Ok(()); } - let stream_socket = stream_socket_builder.accept_from_server( + let mut stream_socket = stream_socket_builder.accept_from_server( &runtime, Duration::from_secs(2), server_ip, settings.connection.stream_port, settings.connection.packet_size as _, )?; - let stream_socket = Arc::new(stream_socket); info!("Connected to server"); diff --git a/alvr/server/src/connection.rs b/alvr/server/src/connection.rs index 44bb097590..b24b1cf552 100644 --- a/alvr/server/src/connection.rs +++ b/alvr/server/src/connection.rs @@ -536,7 +536,7 @@ fn try_connect(mut client_ips: HashMap) -> ConResult { *BITRATE_MANAGER.lock() = BitrateManager::new(settings.video.bitrate.history_size, fps); - let stream_socket = StreamSocketBuilder::connect_to_client( + let mut stream_socket = StreamSocketBuilder::connect_to_client( &runtime, Duration::from_secs(1), client_ip, @@ -547,7 +547,6 @@ fn try_connect(mut client_ips: HashMap) -> ConResult { settings.connection.packet_size as _, ) .map_err(to_con_e!())?; - let stream_socket = Arc::new(stream_socket); let mut video_sender = stream_socket.request_stream(VIDEO); let game_audio_sender = stream_socket.request_stream(AUDIO); diff --git a/alvr/sockets/src/stream_socket/mod.rs b/alvr/sockets/src/stream_socket/mod.rs index f862b47220..b24d2de840 100644 --- a/alvr/sockets/src/stream_socket/mod.rs +++ b/alvr/sockets/src/stream_socket/mod.rs @@ -7,7 +7,7 @@ mod tcp; mod udp; -use alvr_common::{parking_lot::Mutex, prelude::*}; +use alvr_common::prelude::*; use alvr_session::{SocketBufferSize, SocketProtocol}; use bytes::{Buf, BufMut, BytesMut}; use futures::SinkExt; @@ -17,10 +17,7 @@ use std::{ marker::PhantomData, net::IpAddr, ops::{Deref, DerefMut}, - sync::{ - mpsc::{self, RecvTimeoutError}, - Arc, - }, + sync::mpsc::{self, RecvTimeoutError}, 
time::Duration, }; use tcp::{TcpStreamReceiveSocket, TcpStreamSendSocket}; @@ -368,8 +365,8 @@ impl StreamSocketBuilder { Ok(StreamSocket { max_packet_size, send_socket, - receive_socket: Arc::new(Mutex::new(Some(receive_socket))), - packet_queues: Arc::new(Mutex::new(HashMap::new())), + receive_socket, + packet_queues: HashMap::new(), }) } @@ -415,8 +412,8 @@ impl StreamSocketBuilder { Ok(StreamSocket { max_packet_size, send_socket, - receive_socket: Arc::new(Mutex::new(Some(receive_socket))), - packet_queues: Arc::new(Mutex::new(HashMap::new())), + receive_socket, + packet_queues: HashMap::new(), }) } } @@ -424,8 +421,8 @@ impl StreamSocketBuilder { pub struct StreamSocket { max_packet_size: usize, send_socket: StreamSendSocket, - receive_socket: Arc>>, - packet_queues: Arc>>>, + receive_socket: StreamReceiveSocket, + packet_queues: HashMap>, } impl StreamSocket { @@ -440,10 +437,10 @@ impl StreamSocket { } } - pub fn subscribe_to_stream(&self, stream_id: u16) -> StreamReceiver { + pub fn subscribe_to_stream(&mut self, stream_id: u16) -> StreamReceiver { let (sender, receiver) = mpsc::channel(); - self.packet_queues.lock().insert(stream_id, sender); + self.packet_queues.insert(stream_id, sender); StreamReceiver { receiver, @@ -454,13 +451,13 @@ impl StreamSocket { } } - pub fn recv(&self, runtime: &Runtime, timeout: Duration) -> ConResult { - match self.receive_socket.lock().as_mut().unwrap() { + pub fn recv(&mut self, runtime: &Runtime, timeout: Duration) -> ConResult { + match &mut self.receive_socket { StreamReceiveSocket::Udp(socket) => { - udp::recv(runtime, timeout, socket, &self.packet_queues) + udp::recv(runtime, timeout, socket, &mut self.packet_queues) } StreamReceiveSocket::Tcp(socket) => { - tcp::recv(runtime, timeout, socket, &self.packet_queues) + tcp::recv(runtime, timeout, socket, &mut self.packet_queues) } } } diff --git a/alvr/sockets/src/stream_socket/tcp.rs b/alvr/sockets/src/stream_socket/tcp.rs index a3f840b0d0..b5aa36d3da 100644 --- a/alvr/sockets/src/stream_socket/tcp.rs +++ b/alvr/sockets/src/stream_socket/tcp.rs @@ -93,7 +93,7 @@ pub fn recv( runtime: &Runtime, timeout: Duration, socket: &mut TcpStreamReceiveSocket, - packet_enqueuers: &Mutex>>, + packet_enqueuers: &mut HashMap>, ) -> ConResult { if let Some(maybe_packet) = runtime.block_on(async { tokio::select! { @@ -104,7 +104,7 @@ pub fn recv( let mut packet = maybe_packet?; let stream_id = packet.get_u16(); - if let Some(enqueuer) = packet_enqueuers.lock().get_mut(&stream_id) { + if let Some(enqueuer) = packet_enqueuers.get_mut(&stream_id) { enqueuer.send(packet).map_err(to_con_e!())?; } diff --git a/alvr/sockets/src/stream_socket/udp.rs b/alvr/sockets/src/stream_socket/udp.rs index bb39c9e3a0..43f3d42bfa 100644 --- a/alvr/sockets/src/stream_socket/udp.rs +++ b/alvr/sockets/src/stream_socket/udp.rs @@ -72,7 +72,7 @@ pub fn recv( runtime: &Runtime, timeout: Duration, socket: &mut UdpStreamReceiveSocket, - packet_enqueuers: &Mutex>>, + packet_enqueuers: &mut HashMap>, ) -> ConResult { if let Some(maybe_packet) = runtime.block_on(async { tokio::select! 
{ @@ -88,7 +88,7 @@ pub fn recv( } let stream_id = packet_bytes.get_u16(); - if let Some(enqueuer) = packet_enqueuers.lock().get_mut(&stream_id) { + if let Some(enqueuer) = packet_enqueuers.get_mut(&stream_id) { enqueuer.send(packet_bytes).map_err(to_con_e!())?; } From 2488346643977beff6121a509bea26afb54dfaac Mon Sep 17 00:00:00 2001 From: Charlie Le <20309750+CharlieQLe@users.noreply.github.com> Date: Fri, 14 Jul 2023 18:51:59 -0400 Subject: [PATCH 23/28] Add Flatpak job to generate bundle (#1735) * Add flatpak job * Fix build_linux_flatpak * Run flatpak build commands instead of using the flatpak-builder action * Sudo for remote add * Fix flatpak job * Use distro flatpak builder * Place flatpak file in build * Update Flatpak wiki page --- .github/workflows/prepare-release.yml | 19 ++++++++++++++++++- wiki/Flatpak.md | 24 +++++++++++++++++++----- 2 files changed, 37 insertions(+), 6 deletions(-) diff --git a/.github/workflows/prepare-release.yml b/.github/workflows/prepare-release.yml index 48779c005d..d023ae5d4c 100644 --- a/.github/workflows/prepare-release.yml +++ b/.github/workflows/prepare-release.yml @@ -172,8 +172,9 @@ jobs: RUST_BACKTRACE: 1 run: | sudo apt-get update - sudo apt-get install libfuse2 build-essential pkg-config nasm libva-dev libdrm-dev libvulkan-dev libx264-dev libx265-dev cmake libasound2-dev libjack-jackd2-dev libxrandr-dev libunwind-dev libffmpeg-nvenc-dev nvidia-cuda-toolkit libgtk-3-dev + sudo apt-get install libfuse2 build-essential pkg-config nasm libva-dev libdrm-dev libvulkan-dev libx264-dev libx265-dev cmake libasound2-dev libjack-jackd2-dev libxrandr-dev libunwind-dev libffmpeg-nvenc-dev nvidia-cuda-toolkit libgtk-3-dev flatpak flatpak-builder cp alvr/xtask/deb/cuda.pc /usr/share/pkgconfig + sudo flatpak remote-add --if-not-exists flathub https://flathub.org/repo/flathub.flatpakrepo cargo xtask prepare-deps --platform linux - name: Build and package ALVR (.tar.gz) @@ -217,6 +218,22 @@ jobs: asset_name: ALVR-x86_64.AppImage.zsync asset_content_type: application/octet-stream + - name: Build and package ALVR flatpak (.flatpak) + id: build_flatpak + run: | + sudo flatpak-builder --repo=.flatpak-repo --install-deps-from=flathub --force-clean --default-branch=stable --arch=x86_64 .flatpak-build-dir alvr/xtask/flatpak/com.valvesoftware.Steam.Utility.alvr.json + flatpak build-bundle .flatpak-repo ./build/com.valvesoftware.Steam.Utility.alvr.flatpak com.valvesoftware.Steam.Utility.alvr stable --runtime + + - name: Upload flatpak streamer for Linux + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ needs.prepare_release.outputs.upload_url }} + asset_path: ./build/com.valvesoftware.Steam.Utility.alvr.flatpak + asset_name: com.valvesoftware.Steam.Utility.alvr.flatpak + asset_content_type: application/octet-stream + build_linux_launcher: runs-on: ubuntu-latest needs: [prepare_release] diff --git a/wiki/Flatpak.md b/wiki/Flatpak.md index f7d1c5a1ef..669ef95abf 100644 --- a/wiki/Flatpak.md +++ b/wiki/Flatpak.md @@ -28,7 +28,6 @@ First, flatpak must be installed from your distro's repositories. Refer to [this Once Flatpak is installed, the flatpak dependencies must also be installed. They are: -* Flatpak Builder * Rust * LLVM * Freedesktop SDK @@ -37,8 +36,7 @@ Once Flatpak is installed, the flatpak dependencies must also be installed. 
They These can be installed like so: ``` -flatpak install flathub org.flatpak.Builder \ - org.freedesktop.Sdk//22.08 \ +flatpak install flathub org.freedesktop.Sdk//22.08 \ org.freedesktop.Sdk.Extension.llvm16//22.08 \ org.freedesktop.Sdk.Extension.rust-stable//22.08 \ com.valvesoftware.Steam @@ -51,8 +49,24 @@ flatpak install flathub org.freedesktop.Platform.GL.default//22.08-extra \ org.freedesktop.Platform.GL32.default//22.08-extra ``` +## Install + +Once the dependencies are fulfilled, download `com.valvesoftware.Steam.Utility.alvr.flatpak` file from the latest release and install like so: + +``` +flatpak install --bundle com.valvesoftware.Steam.Utility.alvr.flatpak +``` + ## Build and Install +Alternatively, if the file is not available or a newer version is needed, the flatpak can be built from source and installed. + +First, the dependencies from above must be fulfilled. Then, install `flatpak-builder` like so: + +``` +flatpak install flathub org.flatpak.Builder +``` + Once the dependencies are fulfilled, clone and enter the repository. ``` @@ -63,13 +77,13 @@ cd ALVR Once inside the repository, simply run the following command to build and install the Flatpak. ``` -flatpak run org.flatpak.Builder --user --install --force-clean .flatpak-build-dir alvr/xtask/flatpak/com.valvesoftware.Steam.Utility.alvr.json +flatpak run org.flatpak.Builder --install --force-clean .flatpak-build-dir alvr/xtask/flatpak/com.valvesoftware.Steam.Utility.alvr.json ``` If ALVR is not cloned under the home directory, permission to access the directory may need to be given to the build command. An example of this is given below. ``` -flatpak run --filesystem="$(pwd)" org.flatpak.Builder --user --install --force-clean .flatpak-build-dir alvr/xtask/flatpak/com.valvesoftware.Steam.Utility.alvr.json +flatpak run --filesystem="$(pwd)" org.flatpak.Builder --install --force-clean .flatpak-build-dir alvr/xtask/flatpak/com.valvesoftware.Steam.Utility.alvr.json ``` ## Notes From 0c6e37c1fb5030094d98c0b5a05102ff27b94d63 Mon Sep 17 00:00:00 2001 From: Charlie Le <20309750+CharlieQLe@users.noreply.github.com> Date: Sat, 15 Jul 2023 22:25:54 -0400 Subject: [PATCH 24/28] Add SteamVR Flatpak info (#1742) --- wiki/Flatpak.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/wiki/Flatpak.md b/wiki/Flatpak.md index 669ef95abf..723a8aae1d 100644 --- a/wiki/Flatpak.md +++ b/wiki/Flatpak.md @@ -49,9 +49,19 @@ flatpak install flathub org.freedesktop.Platform.GL.default//22.08-extra \ org.freedesktop.Platform.GL32.default//22.08-extra ``` +## Setup + +Install SteamVR via the Steam Flatpak. After installing SteamVR, run the following command: + +``` +sudo setcap CAP_SYS_NICE+ep ~/.var/app/com.valvesoftware.Steam/data/Steam/steamapps/common/SteamVR/bin/linux64/vrcompositor-launcher +``` + +This command is normally run by SteamVR, but due to the lack of sudo access within the Flatpak sandbox, it must be run outside of the Flatpak sandbox. After running the command, run SteamVR once then close it. 
+ ## Install -Once the dependencies are fulfilled, download `com.valvesoftware.Steam.Utility.alvr.flatpak` file from the latest release and install like so: +Download `com.valvesoftware.Steam.Utility.alvr.flatpak` file from the latest release and install like so: ``` flatpak install --bundle com.valvesoftware.Steam.Utility.alvr.flatpak From d56efb88e6f6bf7bff16bde6e42dd49b5bfca94c Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Mon, 17 Jul 2023 12:50:48 +0800 Subject: [PATCH 25/28] Fix tokio crash Thanks to @Vixea for the extensive testing --- alvr/sockets/src/stream_socket/tcp.rs | 6 +++++- alvr/sockets/src/stream_socket/udp.rs | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/alvr/sockets/src/stream_socket/tcp.rs b/alvr/sockets/src/stream_socket/tcp.rs index b5aa36d3da..08a7e8229e 100644 --- a/alvr/sockets/src/stream_socket/tcp.rs +++ b/alvr/sockets/src/stream_socket/tcp.rs @@ -35,6 +35,7 @@ pub fn bind( super::set_socket_buffers(&socket, send_buffer_bytes, recv_buffer_bytes).ok(); + let _tokio_guard = runtime.enter(); TcpListener::from_std(socket.into()).map_err(err!()) } @@ -81,7 +82,10 @@ pub fn connect_to_client( super::set_socket_buffers(&socket, send_buffer_bytes, recv_buffer_bytes).ok(); - let socket = TcpStream::from_std(socket.into()).map_err(to_con_e!())?; + let socket = { + let _tokio_guard = runtime.enter(); + TcpStream::from_std(socket.into()).map_err(to_con_e!())? + }; socket.set_nodelay(true).map_err(to_con_e!())?; let socket = Framed::new(socket, Ldc::new()); let (send_socket, receive_socket) = socket.split(); diff --git a/alvr/sockets/src/stream_socket/udp.rs b/alvr/sockets/src/stream_socket/udp.rs index 43f3d42bfa..437c1d31b7 100644 --- a/alvr/sockets/src/stream_socket/udp.rs +++ b/alvr/sockets/src/stream_socket/udp.rs @@ -44,6 +44,7 @@ pub fn bind( super::set_socket_buffers(&socket, send_buffer_bytes, recv_buffer_bytes).ok(); + let _tokio_guard = runtime.enter(); UdpSocket::from_std(socket.into()).map_err(err!()) } From cefd92eed7ec80a6c5efe83b94bfdd97ad01a2c3 Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Mon, 17 Jul 2023 23:51:01 +0800 Subject: [PATCH 26/28] Try fix handshake warning --- alvr/server/src/sockets.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/alvr/server/src/sockets.rs b/alvr/server/src/sockets.rs index 2955b95bc2..c9cf52382c 100644 --- a/alvr/server/src/sockets.rs +++ b/alvr/server/src/sockets.rs @@ -29,7 +29,7 @@ impl WelcomeSocket { let (size, address) = match self.socket.recv_from(&mut self.buffer) { Ok(pair) => pair, Err(e) => { - if e.kind() == ErrorKind::TimedOut { + if matches!(e.kind(), ErrorKind::TimedOut | ErrorKind::WouldBlock) { return timeout(); } else { return con_fmt_e!("{e}"); From d5882503de384be8119ade254fa5b72005e7bf25 Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Tue, 18 Jul 2023 16:25:52 +0800 Subject: [PATCH 27/28] Reduce tokio threads to 2 on client --- alvr/client_core/src/connection.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/alvr/client_core/src/connection.rs b/alvr/client_core/src/connection.rs index ff516a09d7..cd7e2e85da 100644 --- a/alvr/client_core/src/connection.rs +++ b/alvr/client_core/src/connection.rs @@ -115,7 +115,11 @@ fn connection_pipeline( recommended_view_resolution: UVec2, supported_refresh_rates: Vec, ) -> ConResult { - let runtime = Runtime::new().map_err(to_con_e!())?; + let runtime = tokio::runtime::Builder::new_multi_thread() + .worker_threads(2) + .enable_all() + .build() + .map_err(to_con_e!())?; let (mut 
proto_control_socket, server_ip) = { let config = Config::load(); From f0b756405209a22b7f3688e9c52579ad875196c0 Mon Sep 17 00:00:00 2001 From: Riccardo Zaglia Date: Tue, 18 Jul 2023 18:06:52 +0800 Subject: [PATCH 28/28] Combine 4 threads in 1 on the client --- alvr/client_core/src/connection.rs | 100 +++++++++++------------------ 1 file changed, 39 insertions(+), 61 deletions(-) diff --git a/alvr/client_core/src/connection.rs b/alvr/client_core/src/connection.rs index cd7e2e85da..be5046fcde 100644 --- a/alvr/client_core/src/connection.rs +++ b/alvr/client_core/src/connection.rs @@ -24,6 +24,7 @@ use alvr_packets::{ use alvr_session::{settings_schema::Switch, SessionConfig}; use alvr_sockets::{ PeerType, ProtoControlSocket, ReceiverBuffer, StreamSender, StreamSocketBuilder, + KEEPALIVE_INTERVAL, }; use serde_json as json; use std::{ @@ -57,7 +58,6 @@ const SERVER_DISCONNECTED_MESSAGE: &str = "The streamer has disconnected."; const DISCOVERY_RETRY_PAUSE: Duration = Duration::from_millis(500); const RETRY_CONNECT_MIN_INTERVAL: Duration = Duration::from_secs(1); -const NETWORK_KEEPALIVE_INTERVAL: Duration = Duration::from_secs(1); const CONNECTION_RETRY_INTERVAL: Duration = Duration::from_secs(1); static DISCONNECT_SERVER_NOTIFIER: Lazy>>> = @@ -407,68 +407,58 @@ fn connection_pipeline( } }); - // Poll for events that need a constant thread (mainly for the JNI env) - #[cfg(target_os = "android")] - thread::spawn(|| { - const BATTERY_POLL_INTERVAL: Duration = Duration::from_secs(5); - - let mut previous_hmd_battery_status = (0.0, false); - let mut battery_poll_deadline = Instant::now(); + let control_send_thread = thread::spawn(move || { + let mut keepalive_deadline = Instant::now(); + #[cfg(target_os = "android")] let battery_manager = platform::android::BatteryManager::new(); + #[cfg(target_os = "android")] + let mut battery_deadline = Instant::now(); - while IS_STREAMING.value() { - if battery_poll_deadline < Instant::now() { - let new_hmd_battery_status = battery_manager.status(); - - if new_hmd_battery_status != previous_hmd_battery_status { - if let Some(sender) = &*CONTROL_CHANNEL_SENDER.lock() { - sender - .send(ClientControlPacket::Battery(crate::BatteryPacket { - device_id: *alvr_common::HEAD_ID, - gauge_value: new_hmd_battery_status.0, - is_plugged: new_hmd_battery_status.1, - })) - .ok(); + while IS_STREAMING.value() && IS_RESUMED.value() && IS_ALIVE.value() { + if let Ok(packet) = control_channel_receiver.recv_timeout(Duration::from_millis(500)) { + if let Some(runtime) = &*CONNECTION_RUNTIME.read() { + if let Err(e) = control_sender.send(runtime, &packet) { + info!("Server disconnected. 
Cause: {e}"); + set_hud_message(SERVER_DISCONNECTED_MESSAGE); - previous_hmd_battery_status = new_hmd_battery_status; + break; } } - - battery_poll_deadline += BATTERY_POLL_INTERVAL; } - thread::sleep(Duration::from_millis(500)); - } - }); + if Instant::now() > keepalive_deadline { + if let Some(runtime) = &*CONNECTION_RUNTIME.read() { + control_sender + .send(runtime, &ClientControlPacket::KeepAlive) + .ok(); - let keepalive_sender_thread = thread::spawn(move || { - let mut deadline = Instant::now(); - while IS_STREAMING.value() { - if let Some(sender) = &*CONTROL_CHANNEL_SENDER.lock() { - sender.send(ClientControlPacket::KeepAlive).ok(); + keepalive_deadline = Instant::now() + KEEPALIVE_INTERVAL; + } } - deadline += NETWORK_KEEPALIVE_INTERVAL; - while Instant::now() < deadline && IS_STREAMING.value() { - thread::sleep(Duration::from_millis(500)); + #[cfg(target_os = "android")] + if Instant::now() > battery_deadline { + if let Some(runtime) = &*CONNECTION_RUNTIME.read() { + let (gauge_value, is_plugged) = battery_manager.status(); + control_sender + .send( + runtime, + &ClientControlPacket::Battery(crate::BatteryPacket { + device_id: *alvr_common::HEAD_ID, + gauge_value, + is_plugged, + }), + ) + .ok(); + } + + battery_deadline = Instant::now() + Duration::from_secs(5); } } - }); - - let control_send_thread = thread::spawn(move || { - while let Ok(packet) = control_channel_receiver.recv() { - if let Some(runtime) = &*CONNECTION_RUNTIME.read() { - if let Err(e) = control_sender.send(runtime, &packet) { - info!("Server disconnected. Cause: {e}"); - set_hud_message(SERVER_DISCONNECTED_MESSAGE); - if let Some(notifier) = &*DISCONNECT_SERVER_NOTIFIER.lock() { - notifier.send(()).ok(); - } - return; - } - } + if let Some(notifier) = &*DISCONNECT_SERVER_NOTIFIER.lock() { + notifier.send(()).ok(); } }); @@ -525,16 +515,6 @@ fn connection_pipeline( } }); - let lifecycle_check_thread = thread::spawn(|| { - while IS_STREAMING.value() && IS_RESUMED.value() && IS_ALIVE.value() { - thread::sleep(Duration::from_millis(500)); - } - - if let Some(notifier) = &*DISCONNECT_SERVER_NOTIFIER.lock() { - notifier.send(()).ok(); - } - }); - // Block here disconnect_receiver.recv().ok(); @@ -561,8 +541,6 @@ fn connection_pipeline( control_send_thread.join().ok(); control_receive_thread.join().ok(); stream_receive_thread.join().ok(); - keepalive_sender_thread.join().ok(); - lifecycle_check_thread.join().ok(); Ok(()) }