Skip to content
2 changes: 1 addition & 1 deletion nvblox/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ if(BUILD_REDISTRIBUTABLE)

# For the benchmarking executable. This is not deployed so we're fine to build it with this
# dynamically-linked library.
find_package(benchmark REQUIRED)
# find_package(benchmark REQUIRED)
else()
# By default, use system dependencies for these.
# In the case of ROS builds, glog will likely be found at a higher level.
Expand Down
14 changes: 7 additions & 7 deletions nvblox/executables/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -43,10 +43,10 @@ target_link_libraries(fuse_redwood
set_target_properties(fuse_redwood PROPERTIES CUDA_SEPARABLE_COMPILATION OFF)

# Benchmarking executable
add_executable(benchmark
src/benchmark.cpp
)
target_link_libraries(benchmark
nvblox_lib nvblox_datasets nvblox_test_utils benchmark::benchmark
)
set_target_properties(benchmark PROPERTIES CUDA_SEPARABLE_COMPILATION OFF)
# NOTE: The benchmark executable is disabled because its dependency
# (find_package(benchmark) in nvblox/CMakeLists.txt) is commented out;
# re-enable both together.
#add_executable(benchmark
# src/benchmark.cpp
#)
#target_link_libraries(benchmark
# nvblox_lib nvblox_datasets nvblox_test_utils benchmark::benchmark
#)
#set_target_properties(benchmark PROPERTIES CUDA_SEPARABLE_COMPILATION OFF)
10 changes: 5 additions & 5 deletions nvblox/include/nvblox/core/internal/impl/unified_vector_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ template <typename OtherVectorType>
void unified_vector<T>::copyFromAsync(const OtherVectorType& other,
const CudaStream cuda_stream) {
resizeAsync(other.size(), cuda_stream);
if (other.data() != nullptr) {
if (other.data() != nullptr && buffer_capacity_ > 0) {
checkCudaErrors(cudaMemcpyAsync(buffer_, other.data(),
sizeof(T) * other.size(), cudaMemcpyDefault,
cuda_stream));
Expand Down Expand Up @@ -149,7 +149,7 @@ inline std::vector<bool> unified_vector<bool>::toVectorAsync(
const CudaStream cuda_stream) const {
// The memory layout of std::vector<bool> is different so we have to first
// copy to an intermediate buffer.
CHECK(buffer_ != nullptr);
CHECK(buffer_ != nullptr && buffer_capacity_ > 0);
std::unique_ptr<bool[]> bool_buffer(new bool[buffer_size_]);
checkCudaErrors(cudaMemcpyAsync(bool_buffer.get(), buffer_,
sizeof(bool) * buffer_size_,
Expand Down Expand Up @@ -234,7 +234,7 @@ void unified_vector<T>::reserveAsync(size_t capacity,
checkCudaErrors(cudaMallocHost(&new_buffer, sizeof(T) * capacity));
}

if (buffer_ != nullptr) {
if (buffer_ != nullptr && buffer_capacity_ > 0) {
// Copy the old values to the new buffer.
CHECK(capacity >= buffer_size_);
checkCudaErrors(cudaMemcpyAsync(new_buffer, buffer_,
Expand Down Expand Up @@ -290,7 +290,7 @@ void unified_vector<T>::clearNoDealloc() {

template <typename T>
void unified_vector<T>::clear() {
if (buffer_ != nullptr) {
if (buffer_ != nullptr && buffer_capacity_ > 0) {
if (memory_type_ == MemoryType::kHost) {
checkCudaErrors(cudaFreeHost(reinterpret_cast<void*>(buffer_)));
} else {
Expand Down Expand Up @@ -346,7 +346,7 @@ template <typename T>
void unified_vector<T>::setZeroAsync(const CudaStream cuda_stream) {
// It is safe to use cudaMemset since the memory is ALWAYS allocated with
// cudaMalloc.
CHECK(buffer_ != nullptr);
CHECK(buffer_ != nullptr && buffer_capacity_ > 0);
checkCudaErrors(
cudaMemsetAsync(buffer_, 0, buffer_size_ * sizeof(T), cuda_stream));
}
Expand Down
1 change: 1 addition & 0 deletions nvblox/include/nvblox/integrators/esdf_integrator.h
Original file line number Diff line number Diff line change
Expand Up @@ -279,6 +279,7 @@ class EsdfIntegrator {

/// @brief TsdfLayer related parameter
/// Maximum (TSDF) distance at which a voxel is considered a site
// Previous name/value of the member below (kept for reference during the
// rename): //float tsdf_max_site_distance_vox_ = 1.7321;
float max_tsdf_site_distance_vox_ = kDefaultMaxTsdfSiteDistanceVox;

/// @brief TsdfLayer related parameter
Expand Down
6 changes: 6 additions & 0 deletions nvblox/include/nvblox/map/internal/impl/layer_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -175,6 +175,12 @@ bool BlockLayer<BlockType>::isBlockAllocated(const Index3D& index) const {
return (it != blocks_.end());
}

// Clears the layer: deallocates all blocks and marks the cached GPU layer
// view as out of date so it is rebuilt on next use.
// NOTE(review): presumably the GPU layer view holds references/pointers to
// the blocks being freed here, which is why the flag must be reset before
// (or together with) the deallocation — confirm against GPU view code.
template <typename BlockType>
void BlockLayer<BlockType>::clear() {
// Invalidate the cached GPU-side view first; the block map it mirrors is
// about to be emptied.
gpu_layer_view_up_to_date_ = false;
blocks_.clear();
}

template <typename BlockType>
bool BlockLayer<BlockType>::clearBlock(const Index3D& index) {
if (blocks_.erase(index)) {
Expand Down
2 changes: 1 addition & 1 deletion nvblox/include/nvblox/map/layer.h
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,7 @@ class BlockLayer : public BaseLayer {
size_t size() const { return blocks_.size(); }

/// Clear the layer of all data
void clear() { blocks_.clear(); }
void clear();

// Clear (deallocate) a single block
bool clearBlock(const Index3D& index);
Expand Down