diff --git a/CITATION.bib b/CITATION.bib
index 80e033a10..12cd32adc 100644
--- a/CITATION.bib
+++ b/CITATION.bib
@@ -1,7 +1,7 @@
-@online{Caesarjl2022,
- author = {{C}ontributors, {E}cosystem, and {N}av{A}bility},
- title = {Caesar.jl Solver, v0.13.5},
- year = {2022Q4},
- doi= {Solver DOI: 10.5281/zenodo.7498643},
+@online{Caesarjl2024,
+ author = {{C}ontributors, {E}cosystem, and {N}av{A}bility(TM)},
+ title = {Caesar.jl Solver, v0.16.2},
+ year = {2024},
+ doi= {Solver DOI: 10.5281/zenodo.5146221},
  note = {\url{https://github.com/JuliaRobotics/Caesar.jl}}
 }
diff --git a/NEWS.md b/NEWS.md
index 9a173aa47..6e85c16c6 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -10,6 +10,10 @@ Major changes and news in Caesar.jl.
 - Updates for IncrementalInference upgrades relating to StaticArray variable values.
 - Manifold updates to factors.
 - Downstreamed std ROS handlers to PyCaesar.
+- Fix `saveLAS` to use `Int32`.
+- Several compat updates for dependencies.
+- Restore Docs build, and update links for NavAbility at WhereWhen.ai Technologies Inc.
+- Introduce `FolderDict` as data structure for lower memory consumption, also as potential BlobStore.
 
 ## Changes in v0.13
 
diff --git a/README.md b/README.md
index d78e5b0a9..420e9b3f6 100644
--- a/README.md
+++ b/README.md
@@ -2,9 +2,9 @@

-A multimodal/non-Gaussian robotic toolkit for localization and mapping -- reducing the barrier of entry for sensor/data fusion tasks, including Simultaneous Localization and Mapping (SLAM).
+A multimodal/non-Gaussian robotic toolkit for localization and mapping -- reducing the barrier of entry for sensor/data fusion tasks, including Simultaneous Localization and Mapping (SLAM). Focus areas are mapping, localization, calibration, synthesis, planning, and digital twins.
 
-[NavAbility.io](http://www.navability.io) is administrating and supporting the ongoing development of Caesar.jl with and to help grow the community, please reach out for any additional information at info@navability.io or via the Slack badge-link below.
+[NavAbility(TM) by WhereWhen.ai Technologies Inc.](https://www.wherewhen.ai) administers and supports the ongoing development of Caesar.jl and helps grow the community; please reach out for any additional information at info@wherewhen.ai or via the Slack badge-link below.
 
 # Weblink Info
@@ -53,8 +53,9 @@ This project adheres to the [JuliaRobotics code of conduct](https://github.com/J
 
 [![Stargazers over time](https://starchart.cc/JuliaRobotics/Caesar.jl.svg)](https://starchart.cc/JuliaRobotics/Caesar.jl)
 
-[doi-img]: https://zenodo.org/badge/55802838.svg
-[doi-url]: https://zenodo.org/badge/latestdoi/55802838
+[doi-img]: https://zenodo.org/badge/DOI/10.5281/zenodo.5146221.svg
+[doi-url]: https://doi.org/10.5281/zenodo.5146221
+
 
 [colp-badge]: https://img.shields.io/badge/ColPrac-Contributor's%20Guide-blueviolet
 [colprac]: https://github.com/SciML/ColPrac
diff --git a/docs/src/concepts/arena_visualizations.md b/docs/src/concepts/arena_visualizations.md
index acf8ccbef..e3e9a28b1 100644
--- a/docs/src/concepts/arena_visualizations.md
+++ b/docs/src/concepts/arena_visualizations.md
@@ -2,7 +2,62 @@
 
 ## Introduction
 
-Over time, Caesar.jl/Arena.jl has used at various different 3D visualization technologies. Currently work is underway to better standardize within the Julia ecosystem, with the 4th generation of Arena.jl -- note that this is work in progress. Information about legacy generations is included below.
+Over time, Caesar.jl/Arena.jl has used various 3D visualization technologies.
+
+### Arena.jl Visualization
+
+#### [Plotting a PointCloud](@id viz_pointcloud)
+
+Visualization support for point clouds is available through Arena and Caesar. The following example shows some of the basics:
+
+```julia
+using Arena
+using Caesar
+using Downloads
+using DelimitedFiles
+using LasIO
+using Test
+
+##
+
+function downloadTestData(datafile, url)
+  if 0 === Base.filesize(datafile)
+    Base.mkpath(dirname(datafile))
+    @info "Downloading $url"
+    Downloads.download(url, datafile)
+  end
+  return datafile
+end
+
+testdatafolder = joinpath(tempdir(), "caesar", "testdata") # "/tmp/caesar/testdata/"
+
+lidar_terr1_file = joinpath(testdatafolder,"lidar","simpleICP","terrestrial_lidar1.xyz")
+if !isfile(lidar_terr1_file)
+  lidar_terr1_url = "https://github.com/JuliaRobotics/CaesarTestData.jl/raw/main/data/lidar/simpleICP/terrestrial_lidar1.xyz"
+  downloadTestData(lidar_terr1_file,lidar_terr1_url)
+end
+
+# load the data to memory
+X_fix = readdlm(lidar_terr1_file, Float32)
+# convert data to PCL types
+pc_fix = Caesar._PCL.PointCloud(X_fix);
+
+pl = Arena.plotPointCloud(pc_fix)
+```
+
+This should result in a plot similar to:
+
+```@raw html

+<!-- image: example point cloud plot output -->

+``` + + +!!! note + 24Q1: Currently work is underway to better standardize within the Julia ecosystem, with the 4th generation of Arena.jl -- note that this is work in progress. Information about legacy generations is included below. For more formal visualization support, contact [www.NavAbility.io](http://www.navability.io) via email or slack. ## 4th Generation Dev Scripts using Makie.jl diff --git a/docs/src/install_viz.md b/docs/src/install_viz.md index 8c297cd31..eec649b5e 100644 --- a/docs/src/install_viz.md +++ b/docs/src/install_viz.md @@ -1,11 +1,19 @@ # Install Visualization Tools +## 2D/3D Plotting, Arena.jl + +```julia +pkg> add Arena +``` ## 2D Plotting, RoMEPlotting.jl -RoMEPlotting.jl (2D) and Arena.jl (3D) as optional visualization packages: +!!! note + 24Q1: Plotting is being consolidated into Arena.jl and RoMEPlotting.jl will become obsolete. + +RoMEPlotting.jl (2D) and Arena.jl (3D) are optional visualization packages: ```julia -(v1.6) pkg> add RoMEPlotting +pkg> add RoMEPlotting ``` diff --git a/docs/src/installation_environment.md b/docs/src/installation_environment.md index 8302f7ef4..5b19ff48d 100644 --- a/docs/src/installation_environment.md +++ b/docs/src/installation_environment.md @@ -2,88 +2,82 @@ Caesar.jl is one of the packages within the [JuliaRobotics](http://www.juliarobotics.org) community, and adheres to the [code-of-conduct](https://github.com/JuliaRobotics/administration/blob/master/code_of_conduct.md). -## Possible System Dependencies +## New to Julia + +### Installing the Julia Binary + +Although [Julia](https://julialang.org/) (or [JuliaPro](https://juliacomputing.com/)) can be installed on a Linux/Mac/Windows via a package manager, we prefer a highly reproducible and self contained (local environment) install. + +The easiest method is---via the terminal---[as described on the JuliaLang.org downloads page](https://julialang.org/downloads/). +!!! note + Feel free to modify this setup as you see fit. + +## VSCode IDE Environment + +[VSCode IDE](https://www.julia-vscode.org/) allows for interactive development of Julia code using the Julia Extension. After installing and running VSCode, install the Julia Language Support Extension: -The following (Linux) system packages are used by Caesar.jl: +```@raw html +

+<!-- image: installing the Julia Language Support extension in VSCode -->

``` -# Likely dependencies -sudo apt-get install hdf5-tools imagemagick -# optional packages -sudo apt-get install graphviz xdot +In VSCode, open the command pallette by pressing `Ctrl + Shift + p`. There are a wealth of tips and tricks on how to use VSCode. See [this JuliaCon presentation for as a general introduction into 'piece-by-piece' code execution and much much more](https://www.youtube.com/watch?v=IdhnP00Y1Ks). Working in one of the Julia IDEs like VS Code or Juno should feel something like this (Gif borrowed from [DiffEqFlux.jl](https://github.com/SciML/DiffEqFlux.jl)): +```@raw html +

+<!-- animation: interactive Julia development in VSCode (gif borrowed from DiffEqFlux.jl) -->

``` -For [ROS.org](https://www.ros.org/) users, see at least one usage example at [the ROS Direct page](@ref ros_direct). +There are a variety of useful packages in VSCode, such as `GitLens`, `LiveShare`, and `Todo Browser` as just a few highlights. These *VSCode Extensions* are independent of the already vast JuliaLang Package Ecosystem (see [JuliaObserver.com](https://juliaobserver.com/)). + +!!! note + For [ROS.org](https://www.ros.org/) users, see at least one usage example at [the ROS Direct page](@ref ros_direct). ## Installing Julia Packages +### Vanilla Install + The philosophy around Julia packages are discussed at length in the [Julia core documentation](https://docs.julialang.org/en/stable/manual/packages/), where each Julia package relates to a git repository likely found on [Github.com](http://www.github.com). Also see [JuliaHub.com](https://juliahub.com/ui/Packages/Caesar/BNbRm) for dashboard-style representation of the broader Julia package ecosystem. -To install a Julia package, simply open a `julia` REPL (equally the Julia REPL in VSCode) and type: +To install a Julia package, simply start a `julia` REPL (equally the Julia REPL in VSCode) and then type: ```julia -] # activate Pkg manager -(v1.6) pkg> add Caesar +julia> ] # activate Pkg manager +(v___) pkg> add Caesar ``` +### Version Control, Branches + These are [registered packages](https://pkg.julialang.org/) maintained by [JuliaRegistries/General](http://github.com/JuliaRegistries/General). Unregistered latest packages can also be installed with using only the `Pkg.develop` function: ```julia # Caesar is registered on JuliaRegistries/General julia> ] -(v1.6) pkg> add Caesar -(v1.6) pkg> add Caesar#janes-awesome-fix-branch -(v1.6) pkg> add Caesar@v0.10.0 +(v___) pkg> add Caesar +(v___) pkg> add Caesar#janes-awesome-fix-branch +(v___) pkg> add Caesar@v0.16 # or alternatively your own local fork (just using old link as example) -(v1.6) pkg> add https://github.com/dehann/Caesar.jl +(v___) pkg> add https://github.com/dehann/Caesar.jl ``` -See [Pkg.jl](https://github.com/JuliaLang/Pkg.jl) for details and features regarding package management, development, version control, virtual environments and much more. - -## Next Steps +### Virtual Environments -The sections hereafter describe [Building](@ref building_graphs), [Interacting], and [Solving](@ref solving_graphs) factor graphs. We also recommend reviewing the various examples available in the [Examples section](@ref examples_section). - -## New to Julia - -### Installing the Julia Binary - -Although [Julia](https://julialang.org/) (or [JuliaPro](https://juliacomputing.com/)) can be installed on a Linux computer using the `apt` package manager, we are striving for a fully local installation environment which is highly reproducible on a variety of platforms. - -The easiest method is---via the terminal---to [download the desired](https://julialang.org/downloads/) version of Julia as a binary, extract, setup a symbolic link, and run: - -```bash -cd ~ -mkdir -p .julia -cd .julia -wget https://julialang-s3.julialang.org/bin/linux/x64/1.6/julia-1.6.7-linux-x86_64.tar.gz -tar -xvf julia-1.6.7-linux-x86_64.tar.gz -rm julia-1.6.7-linux-x86_64.tar.gz -cd /usr/local/bin -sudo ln -s ~/.julia/julia-1.6.7/bin/julia julia -``` !!! note - Feel free to modify this setup as you see fit. + Julia has native support for virtual environments and exact package manifests. See [Pkg.jl Docs](https://pkgdocs.julialang.org/v1/environments/) for more info. 
More details and features regarding package management, development, version control, virtual environments are available there. -This should allow any terminal or process on the computer to run the Julia REPL by type `julia` and testing with: +## Next Steps -## VSCode IDE Environment +The sections hereafter describe [Building](@ref building_graphs), [Interacting], and [Solving](@ref solving_graphs) factor graphs. We also recommend reviewing the various examples available in the [Examples section](@ref examples_section). -[VSCode IDE](https://www.julia-vscode.org/) allows for interactive development of Julia code using the Julia Extension. After installing and running VSCode, install the Julia Language Support Extension: +### Possible System Dependencies -```@raw html -

- -

+The following (Linux) system packages have been required on some systems in the past, but likely does not have to be installed system wide on newer versions of Julia: ``` +# Likely dependencies +sudo apt-get install hdf5-tools imagemagick -In VSCode, open the command pallette by pressing `Ctrl + Shift + p`. There are a wealth of tips and tricks on how to use VSCode. See [this JuliaCon presentation for as a general introduction into 'piece-by-piece' code execution and much much more](https://www.youtube.com/watch?v=IdhnP00Y1Ks). Working in one of the Julia IDEs like VS Code or Juno should feel something like this (Gif borrowed from [DiffEqFlux.jl](https://github.com/SciML/DiffEqFlux.jl)): -```@raw html -

- -

+# optional packages +sudo apt-get install graphviz xdot ``` - -There are a variety of useful packages in VSCode, such as `GitLens`, `LiveShare`, and `Todo Browser` as just a few highlights. These *VSCode Extensions* are independent of the already vast JuliaLang Package Ecosystem (see [JuliaObserver.com](https://juliaobserver.com/)). - diff --git a/ext/CaesarImagesExt.jl b/ext/CaesarImagesExt.jl index 839830e78..81175f9a1 100644 --- a/ext/CaesarImagesExt.jl +++ b/ext/CaesarImagesExt.jl @@ -38,7 +38,7 @@ import Caesar: PackedScatterAlignPose2, PackedScatterAlignPose3 import Caesar: ImageTracks, FEATURE_VIA, FeatTrackValue, FeaturesDict, FeatureTracks, FeatureMountain, PIXELTRACK, MANYTRACKS import Caesar: addFeatureTracks_Frame1_Q!, addFeatureTracks_Frame2_PfwdQ!, addFeatureTracks_Frame2_QbckR! import Caesar: addFeatureTracks, consolidateFeatureTracks!, summarizeFeatureTracks!, buildFeatureMountain, buildFeatureMountainDistributed -import Caesar: unionFeatureMountain, sortKeysMinSighting +import Caesar: unionFeatureMountain, unionFeatureMountain!, sortKeysMinSighting # NOTE usage requires ImageFeatures.jl import Caesar: curateFeatureTracks diff --git a/ext/Images/FeatureMountain.jl b/ext/Images/FeatureMountain.jl index 6c1f126ec..cbfe50ad6 100644 --- a/ext/Images/FeatureMountain.jl +++ b/ext/Images/FeatureMountain.jl @@ -2,7 +2,11 @@ +""" + $SIGNATURES +Add image features to FeatureMountain dict, using an image stored in a blob of a DFG object. +""" function addFeatureTracks_Frame1_Q!( # mountain::FeatureMountain, featToMany, @@ -280,7 +284,14 @@ end # addFeatureTracks_Frame2_QbckR!(featToMany_, fg, pair) +""" + $SIGNATURES + +Starting from basic short tracks between neighboring images, explore to find longer +tracks between many images. +See also: [`summarizeFeatureTracks!`](@ref) +""" function consolidateFeatureTracks!( featToMany_::Dict{Tuple{Symbol,Int},MANYTRACKS}, ) @@ -334,7 +345,14 @@ function consolidateFeatureTracks!( end +""" + $SIGNATURES + +Consolidate many short tracks into longer tracks. A track is the +attempt to follow the same real world feature through a sequence of images. 
+See also: [`consolidateFeatureTracks`](@ref) +""" function summarizeFeatureTracks!( featToMany_::Dict{Tuple{Symbol,Int},MANYTRACKS}, ) @@ -393,8 +411,6 @@ end ## union features - - function unionFeatureMountain( fMa::Dict{Tuple{Symbol,Int},MANYTRACKS}, fMb::Dict{Tuple{Symbol,Int},MANYTRACKS}, @@ -419,7 +435,27 @@ function unionFeatureMountain( return rM end - +function unionFeatureMountain!( + fMa::Dict{Tuple{Symbol,Int},MANYTRACKS}, + fMb::Dict{Tuple{Symbol,Int},MANYTRACKS}, +) + # Modify fMa by adding everything from fMb + for (ka,va) in fMb + # @info ka + # union if already exists + if haskey(fMa, ka) + # @info "CHECK TYPES" typeof(fMb[ka]) typeof(fMa[ka]) + for (kr,vr) in va + if !haskey(fMa[ka], kr) + fMa[ka][kr] = vr # union(fMb[ka], fMa[ka]) + end + end + else + fMa[ka] = va + end + end + return fMa +end function sortKeysMinSighting( featM::Dict{Tuple{Symbol,Int},<:Any}; @@ -474,10 +510,10 @@ function buildFeatureMountainDistributed( featM = deepcopy(featM_1[1]) # union other tracks into featM for fM in featM_1[2:end] - featM = Caesar.unionFeatureMountain(featM, fM) + featM = Caesar.unionFeatureMountain!(featM, fM) end for fM in featM_2 - featM = Caesar.unionFeatureMountain(featM, fM) + featM = Caesar.unionFeatureMountain!(featM, fM) end return featM diff --git a/ext/WeakdepsPrototypes.jl b/ext/WeakdepsPrototypes.jl index 8fd9df17a..3c23b0876 100644 --- a/ext/WeakdepsPrototypes.jl +++ b/ext/WeakdepsPrototypes.jl @@ -96,6 +96,7 @@ function buildFeatureMountain end function buildFeatureMountainDistributed end function unionFeatureMountain end +function unionFeatureMountain! end function sortKeysMinSighting end ## ============================================== diff --git a/src/Caesar.jl b/src/Caesar.jl index 92165a048..17c825eb3 100644 --- a/src/Caesar.jl +++ b/src/Caesar.jl @@ -91,6 +91,9 @@ include("objects/ObjectAffordanceSubcloud.jl") # ImageDraw functionality, used by many extensions and therefore a regular (but minimum able) dependency include("images/imagedraw.jl") +# experimentals +include("dev/FolderDict.jl") + # weakdeps include("../ext/factors/Pose2AprilTag4Corners.jl") include("../ext/factors/ScanMatcherPose2.jl") @@ -103,7 +106,7 @@ include("Deprecated.jl") @compile_workload begin # In here put "toy workloads" that exercise the code you want to precompile - warmUpSolverJIT() + # warmUpSolverJIT() end end diff --git a/src/dev/FolderDict.jl b/src/dev/FolderDict.jl new file mode 100644 index 000000000..78e286c66 --- /dev/null +++ b/src/dev/FolderDict.jl @@ -0,0 +1,277 @@ +# large dict using some store to hold values for reducing RAM utilization + + +using DataStructures +using UUIDs +using DocStringExtensions +using Serialization + +import Base: getindex, setindex!, delete!, keys, haskey, deepcopy, show + +## + +""" + $TYPEDEF + +Walks and talks like a Dict but actually maps most of the data volume to a folder. +Includes semi-intelligent cache management for faster operations. + +Special Features: +- User can set cache_size +- User can set working folder for storage +- User can set serialization and deserialization functions, e.g. 
use JSON3 or Serialization +- User can set how Dict keys map to stored id's (see DFG) +- EXPERIMENTAL: thread safe + +Developer Notes +- all keys must always be in `.keydict`, regardless of cache or priority +- pqueue is arbitor, so assumed that .cache will mirror happenings of pqueue + +WIP Constraints: +- FIXME, had trouble inheriting from `Base.AbstractDict` +- TODO, better use of thread-safe locks/mutexes +- TODO, allow mapping to existing directory of elementals + - will only work for `key_to_id = (k::UUID) -> k` +""" +@kwdef struct FolderDict{K,V} + """ regular dict elements kept in memory for rapid access """ + cache::Dict{K,V} = Dict{K,V}() + """ priority queue for managing most important cache elements """ + pqueue::PriorityQueue{K, Int} = PriorityQueue{K,Int}() + """ cache size """ + cache_size::Int = 100 + """ unique labels for dict elements sent to the store """ + keydict::Dict{K, UUID} = Dict{K, UUID}() + """ mapping keys and ids for different use cases, default is always new uuid. + overwrite with `(k) -> k` to make keys and ids identical """ + key_to_id::Function = (k) -> uuid4() + """ read lock via Tasks """ + readtasks::Dict{K, Task} = Dict{K, Task}() + """ write lock via Tasks """ + writetasks::Dict{K, Task} = Dict{K, Task}() + """ event signal for deepcopy synchronization. Blocks new setindex! during a deepcopy """ + copyevent::Base.Event = begin + _e = Base.Event() + notify(_e) # dont start with blocking event, requires a reset for use + _e + end + """ working directory where elemental files are stored """ + wdir::String = begin + wdir_ = joinpath(tempdir(), "$(uuid4())") + mkpath(wdir_) + wdir_ + end + """ serialization function with default """ + serialize::Function = Serialization.serialize + """ deserialization function with default """ + deserialize::Function = Serialization.deserialize +end + +## + + +function show( + io::IO, + sd::FolderDict{K,V} +) where {K,V} + println(io, "FolderDict{$K,$V} at $(sd.wdir)") + println(io, " with $(length(sd.pqueue)) of $(length(sd.keydict)) entries cached, e.g.:") + ks = collect(keys(sd.cache)) + for i in 1:minimum((5,length(sd.cache))) + tk = ks[i] + println(io, " ",tk," => ", sd.cache[tk]) + end +end +Base.show(io::IO, ::MIME"text/plain", fd::FolderDict) = show(io, fd) + +function Base.getindex( + sd::FolderDict, + f +) + # first check if there is an ongoing reader on this key + if haskey(sd.readtasks, f) + # NOTE super remote possibility that a task is deleted before this dict lookup and wait starts + wait(sd.readtasks[f]) + # values should now be cached for multithreaded reads + end + # also check if there is an ongoing writetask on this key + if haskey(sd.writetasks, f) + wait(sd.writetasks[f]) + # now it is safe to proceed in reading newly written value to this key + # TODO slightly excessive lock, since only unlocks once storage write is done, but cache was available sooner. 
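+    # (a finer-grained lock could release readers as soon as the cache entry is populated, before the blob write finishes)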
+ end + + # if already cached, and assuming a write task has not deleted the cache element yet (MUST delete from pqueue first) + if haskey(sd.pqueue, f) + # increase this priority, but be wary of over emphasis for newcomer keys + sd.pqueue[f] += 1 + # Assume key must be in the cache + return sd.cache[f] + end + # get id associated with this key (also throw KeyError if not present) + flb = sd.keydict[f] + # performance trick, start async load from IO resources while doing some housekeeping + sd.readtasks[f] = @async begin + # All keys must always be present in keydict + toload = joinpath(sd.wdir, "$flb") + # fetch from cold storage + sd.deserialize(toload) + end + + # you've hit a cache miss, so start making space or new elements + _typemin(::PriorityQueue{P,T}) where {P,T} = typemin(T) + maxpriority = _typemin(sd.pqueue) + for k in keys(sd.pqueue) + # decrement all priorities by one - to help newcomer keys compete for priority + sd.pqueue[k] -= 1 + # preemptively find the maxpriority value for later use + maxpriority = maxpriority < sd.pqueue[k] ? sd.pqueue[k] : maxpriority + end + + # remove excess cache if necessary + if sd.cache_size <= length(sd.pqueue) + # by dropping lowest priority cache element + dropkey = first(sd.pqueue)[1] + delete!(sd.pqueue, dropkey) + delete!(sd.cache, dropkey) + end + + # assume middle of the pack priority for this cache-miss + sd.pqueue[f] = round(Int, maxpriority/2) # pqueue is arbitor, hence populated last + # block on IO resources fetching data + # add to previously missed data to cache and pqueue + sd.cache[f] = fetch(sd.readtasks[f]) + # TODO, possible race condition in slight delay betweem writing to cache[f] after fetching data unblocks. + delete!(sd.readtasks, f) + + # return data to user + return sd.cache[f] +end + + +function setindex!( + sd::FolderDict, + v, + k +) + # don't start a new write if a copy is in progress + wait(sd.copyevent) + # first check if there is an ongoing reader on this key + if haskey(sd.readtasks, k) + # NOTE super remote possibility that a task is deleted before this dict lookup and wait starts + wait(sd.readtasks[k]) + end + # also check if there is an ongoing writetask on this key + if haskey(sd.writetasks, k) + wait(sd.writetasks[k]) + # now it is safe to proceed in reading newly written value to this key + # TODO slightly excessive lock, since only unlocks once storage write is done, but cache was available sooner. + end + + # immediately/always insert new data into folder store with a unique id + id = sd.key_to_id(k) + flb = joinpath(sd.wdir, "$id") + sd.writetasks[k] = @async sd.serialize(flb, v) # for sluggish IO + + # should any obsolete files be deleted from the filesystem? 
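+  # if this key already maps to a stored blob, the old file is removed asynchronously below so the folder store does not accumulate stale entries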
+ dtsk = if haskey(sd.keydict, k) + # delete this store location + dlb = sd.keydict[k] + delete!(sd.keydict, k) + @async Base.Filesystem.rm(joinpath(sd.wdir, "$dlb")) # for sluggish IO + else + # dummy task for type stability + @async nothing + end + # set new uuid in keydict only after potential overwrite delete + sd.keydict[k] = id + + # ensure pqueue has correct value in all cases + prk = collect(keys(sd.pqueue)) + maxpriority = 0 + # truncate for cache_size, and also allow for cache_size==0 + if 0 < sd.cache_size <= length(sd.pqueue) + maxpriority = sd.pqueue[prk[end]] + rmk = prk[1] + delete!(sd.pqueue,rmk) + delete!(sd.cache, rmk) + end + # assume middle of the pack priority for this cache-miss + sd.pqueue[k] = round(Int, maxpriority/2) + # Reminder, pqueue is arbitor over cache + sd.cache[k] = v + + # wait for any disk mutations to finish + wait(dtsk) + # last thing is to wait and free write task locks, assuming waiting readers + wait(sd.writetasks[k]) + delete!(sd.writetasks, k) + + # return the value + return v +end + + +# # if multiple access occurs (i.e. shared memory) +# if haskey(sd.writetask, f) +# # wait until the underlying task is complete +# wait(sd.writetask[f]) # COUNTER MUST USE FETCH +# end + + +function delete!( + sd::FolderDict, + k +) + dlb = sd.keydict[k] + delete!(sd.keydict, k) + dtsk = @async Base.Filesystem.rm(joinpath(sd.wdir, "$dlb")) # for sluggish IO + + if haskey(sd.pqueue, k) + delete!(sd.pqueue,k) + delete!(sd.cache, k) + end + + wait(dtsk) + + # TBD unusual Julia return of full collection + return sd +end + + +keys(sd::FolderDict) = keys(sd.keydict) +haskey(sd::FolderDict, k) = haskey(sd.keydict, k) + +function deepcopy( + sd::FolderDict{K,V} +) where {K,V} + # block any new writes that want to start + reset(sd.copyevent) + # wait for any remaining write tasks to finish + for (k,t) in sd.writetasks + wait(t) + end + # actually make a full copy of the working folder + tsk = @async Base.Filesystem.cp(sd.wdir, sd_.wdir; force=true) + + # copy or duplicate all but pqueue and cache, which must be newly cached in new copy of FolderDict (to ensure pqueue and cache remain in lock step) + sd_ = FolderDict{K,V}(; + keydict = deepcopy(sd.keydict), + cache_size = sd.cache_size, + key_to_id = sd.key_to_id, + serialize = sd.serialize, + deserialize = sd.deserialize, + ) + + # wait for storage copy to complete + wait(tsk) + + # notify any pending writes + notify(sd.copyevent) + + # return new deepcopy of FolderDict + return sd_ +end + + +## diff --git a/test/pcl/testPointCloud2.jl b/test/pcl/testPointCloud2.jl index 1a35bb2c7..ccf4f2401 100644 --- a/test/pcl/testPointCloud2.jl +++ b/test/pcl/testPointCloud2.jl @@ -38,13 +38,16 @@ end testdatafolder = joinpath(tempdir(), "caesar", "testdata") # "/tmp/caesar/testdata/" radarpclfile = joinpath( testdatafolder,"radar", "convertedRadar", "_PCLPointCloud2_15776.dat") -radarpcl_url = "https://github.com/JuliaRobotics/CaesarTestData.jl/raw/main/data/radar/convertedRadar/_PCLPointCloud2_15776.dat" -downloadTestData(radarpclfile,radarpcl_url) +if !isfile(radarpclfile) + radarpcl_url = "https://github.com/JuliaRobotics/CaesarTestData.jl/raw/main/data/radar/convertedRadar/_PCLPointCloud2_15776.dat" + downloadTestData(radarpclfile,radarpcl_url) +end pandarfile = joinpath(testdatafolder,"lidar","simpleICP","_pandar_PCLPointCloud2.jldat") -pandar_url = "https://github.com/JuliaRobotics/CaesarTestData.jl/raw/main/data/lidar/pandar/_pandar_PCLPointCloud2.jldat" -downloadTestData(pandarfile,pandar_url) - +if !isfile(pandarfile) + 
pandar_url = "https://github.com/JuliaRobotics/CaesarTestData.jl/raw/main/data/lidar/pandar/_pandar_PCLPointCloud2.jldat" + downloadTestData(pandarfile,pandar_url) +end ## @testset "test Caesar._PCL.PCLPointCloud2 to Caesar._PCL.PointCloud converter." begin @@ -252,13 +255,16 @@ end ## lidar_terr1_file = joinpath(testdatafolder,"lidar","simpleICP","terrestrial_lidar1.xyz") -lidar_terr1_url = "https://github.com/JuliaRobotics/CaesarTestData.jl/raw/main/data/lidar/simpleICP/terrestrial_lidar1.xyz" -downloadTestData(lidar_terr1_file,lidar_terr1_url) +if !isfile(lidar_terr1_file) + lidar_terr1_url = "https://github.com/JuliaRobotics/CaesarTestData.jl/raw/main/data/lidar/simpleICP/terrestrial_lidar1.xyz" + downloadTestData(lidar_terr1_file,lidar_terr1_url) +end lidar_terr2_file = joinpath(testdatafolder,"lidar","simpleICP","terrestrial_lidar2.xyz") -lidar_terr2_url = "https://github.com/JuliaRobotics/CaesarTestData.jl/raw/main/data/lidar/simpleICP/terrestrial_lidar2.xyz" -downloadTestData(lidar_terr2_file,lidar_terr2_url) - +if !isfile(lidar_terr2_file) + lidar_terr2_url = "https://github.com/JuliaRobotics/CaesarTestData.jl/raw/main/data/lidar/simpleICP/terrestrial_lidar2.xyz" + downloadTestData(lidar_terr2_file,lidar_terr2_url) +end # load the data to memory X_fix = readdlm(lidar_terr1_file, Float32) diff --git a/test/runtests.jl b/test/runtests.jl index 981c48c16..28b39d1c3 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -13,6 +13,7 @@ TEST_GROUP = get(ENV, "IIF_TEST_GROUP", "all") if TEST_GROUP in ["all", "basic_functional_group"] println("Starting tests...") # highly multipackage tests that don't fit well in specific library dependencies. + include("testFolderDict.jl") include("testScatterAlignParched.jl") include("testScatterAlignPose2.jl") include("testScatterAlignPose3.jl") diff --git a/test/testFolderDict.jl b/test/testFolderDict.jl new file mode 100644 index 000000000..d5feb6520 --- /dev/null +++ b/test/testFolderDict.jl @@ -0,0 +1,88 @@ + +## test +using Test +using UUIDs +using Caesar +import Caesar: FolderDict + +## + +@testset "Basic functional tests of FolderDict" begin +## + +fd = FolderDict{Symbol, Int}(;cache_size=2) + +@show fd.wdir + +fd[:a] = 1 + +@test haskey(fd, :a) +@test 1 == length(fd.keydict) +@test fd.keydict[:a] isa UUID +@test 1 == length(fd.pqueue) +@test 1 == length(fd.cache) +@test 1 == fd.cache[:a] +@test 1 == fd[:a] # all up test for getindex when key in cache + +fd[:b] = 2 + +@test haskey(fd, :b) +@test 2 == length(fd.keydict) +@test fd.keydict[:a] != fd.keydict[:b] +@test 2 == length(fd.pqueue) +@test 2 == length(fd.cache) +@test 2 == fd.cache[:b] +@test 2 == fd[:b] # all up test for getindex when key in cache + +fd[:c] = 3 + +@test haskey(fd, :c) +@test 3 == length(fd.keydict) +@test fd.keydict[:a] != fd.keydict[:c] +@test 2 == length(fd.pqueue) +@test 2 == length(fd.cache) +@test 3 == fd.cache[:c] +@test 3 == fd[:c] # all up test for getindex when key in cache + +# make sure folder recovery works by fetching from all three keys, with cache_size set to 2 +@test fd[:a] != fd[:b] +@test fd[:b] != fd[:c] + +@show fd; + +delete!(fd, :b) + +# TODO check that the actual folder stored was deleted from permanent storage after `delete!( ,:b)` + +@test 2 == length(fd.keydict) +@test fd.keydict[:a] != fd.keydict[:c] +@test 2 == length(fd.pqueue) +@test 2 == length(fd.cache) +@test 3 == fd.cache[:c] +@test 3 == fd[:c] # all up test for getindex when key in cache + +@test_throws KeyError fd[:b] + +@test 2 == length(intersect([:a; :c], collect(keys(fd)))) +@test 
!haskey(fd, :b) +@test haskey(fd, :a) +@test haskey(fd, :c) + + +fd_copy = deepcopy(fd) + +@show fd_copy; + +@test !haskey(fd_copy, :b) +@test haskey(fd_copy, :a) +@test haskey(fd_copy, :c) + +# make sure folder recovery works by fetching from all three keys, with cache_size set to 2 +@test fd_copy[:a] != fd_copy[:c] + +# make sure folder recovery works by fetching from all three keys, with cache_size set to 2 +@test fd[:a] == fd_copy[:a] +@test fd[:c] == fd_copy[:c] + +## +end \ No newline at end of file
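
A minimal usage sketch of the new `FolderDict` for reviewers (illustrative only, not part of the patch; the key/value types, sizes, and `randn` payloads are arbitrary choices, mirroring the constructor and operations exercised in `test/testFolderDict.jl`):

```julia
using Caesar
import Caesar: FolderDict

# keep at most 2 values in RAM; everything else lives as serialized blobs under fd.wdir
fd = FolderDict{Symbol, Vector{Float64}}(; cache_size = 2)

fd[:x1] = randn(3)   # cached and written to the folder store
fd[:x2] = randn(3)
fd[:x3] = randn(3)   # one cached entry is evicted to respect cache_size, but its blob stays on disk

fd[:x1]              # if :x1 was evicted, it is transparently deserialized back from fd.wdir
haskey(fd, :x2)      # true -- keydict always tracks every key, cached or not
delete!(fd, :x3)     # removes both the cache entry and the blob file

# the working folder and (de)serializers are configurable keyword fields, e.g. a custom store location:
fd2 = FolderDict{Symbol, Int}(; wdir = mktempdir(), cache_size = 10)
fd2[:a] = 1
```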