diff --git a/.Doxyfile b/.Doxyfile index 432c9c62f1c..a652adf69ea 100644 --- a/.Doxyfile +++ b/.Doxyfile @@ -194,7 +194,7 @@ QT_AUTOBRIEF = NO # tag to YES if you prefer the old behavior instead. # # Note that setting this tag to YES also means that rational rose comments are -# not recognized any more. +# not recognized anymore. # The default value is: NO. MULTILINE_CPP_IS_BRIEF = NO @@ -268,7 +268,7 @@ OPTIMIZE_OUTPUT_VHDL = NO # parses. With this tag you can assign which parser to use for a given # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, # C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: # FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: # Fortran. In the later case the parser tries to guess whether the code is fixed @@ -494,7 +494,7 @@ INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file # names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows +# in case and if your file system supports case-sensitive file names. Windows # and Mac users are advised to set this option to NO. # The default value is: system dependent. @@ -654,7 +654,7 @@ SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). 
Doxygen will invoke the program by executing (via -# popen()) the command command input-file, where command is the value of the +# popen()) the command command input-file, where command is the value of the # FILE_VERSION_FILTER tag, and input-file is the name of an input file provided # by doxygen. Whatever the program writes to standard output is used as the file # version. For an example see the documentation. @@ -1402,7 +1402,7 @@ EXT_LINKS_IN_WINDOW = NO FORMULA_FONTSIZE = 10 -# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are not # supported properly for IE 6.0, but are supported on all modern browsers. # @@ -1414,7 +1414,7 @@ FORMULA_FONTSIZE = 10 FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# http://www.mathjax.org) which uses client side Javascript for the rendering +# http://www.mathjax.org) which uses client side JavaScript for the rendering # instead of using prerendered bitmaps. Use this if you do not have LaTeX # installed or if you want to formulas look prettier in the HTML output. When # enabled you may also need to install MathJax separately and configure the path @@ -1484,7 +1484,7 @@ MATHJAX_CODEFILE = SEARCHENGINE = YES # When the SERVER_BASED_SEARCH tag is enabled the search engine will be -# implemented using a web server instead of a web client using Javascript. There +# implemented using a web server instead of a web client using JavaScript. There # are two flavors of web server based searching depending on the EXTERNAL_SEARCH # setting. When disabled, doxygen will generate a PHP script for searching and # an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing @@ -1620,7 +1620,7 @@ EXTRA_PACKAGES = amsmath \ # Note: Only use a user-defined header if you know what you are doing! 
The # following commands have a special meaning inside the header: $title, # $datetime, $date, $doxygenversion, $projectname, $projectnumber, -# $projectbrief, $projectlogo. Doxygen will replace $title with the empy string, +# $projectbrief, $projectlogo. Doxygen will replace $title with the empty string, # for the replacement values of the other commands the user is refered to # HTML_HEADER. # This tag requires that the tag GENERATE_LATEX is set to YES. diff --git a/CMakeLists.txt b/CMakeLists.txt index 27d172f900b..00a2ef70b8f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -73,12 +73,12 @@ include_directories(${PROJECT_BINARY_DIR}) # ---[ Includes & defines for CUDA -# cuda_compile() does not have per-call dependencies or include pathes +# cuda_compile() does not have per-call dependencies or include paths # (cuda_compile() has per-call flags, but we set them here too for clarity) # -# list(REMOVE_ITEM ...) invocations remove PRIVATE and PUBLIC keywords from collected definitions and include pathes +# list(REMOVE_ITEM ...) invocations remove PRIVATE and PUBLIC keywords from collected definitions and include paths if(HAVE_CUDA) - # pass include pathes to cuda_include_directories() + # pass include paths to cuda_include_directories() set(Caffe_ALL_INCLUDE_DIRS ${Caffe_INCLUDE_DIRS}) list(REMOVE_ITEM Caffe_ALL_INCLUDE_DIRS PRIVATE PUBLIC) cuda_include_directories(${Caffe_INCLUDE_DIR} ${Caffe_SRC_DIR} ${Caffe_ALL_INCLUDE_DIRS}) diff --git a/Makefile b/Makefile index b7660e852d6..de8cc7a502c 100644 --- a/Makefile +++ b/Makefile @@ -262,7 +262,7 @@ endif ifeq ($(LINUX), 1) CXX ?= /usr/bin/g++ GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.) 
- # older versions of gcc are too dumb to build boost with -Wuninitalized + # older versions of gcc are too dumb to build boost with -Wuninitialized ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1) WARNINGS += -Wno-uninitialized endif @@ -423,7 +423,7 @@ CXXFLAGS += -MMD -MP COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS) -# mex may invoke an older gcc that is too liberal with -Wuninitalized +# mex may invoke an older gcc that is too liberal with -Wuninitialized MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) diff --git a/cmake/Cuda.cmake b/cmake/Cuda.cmake index e03feabffcb..50c55e16810 100644 --- a/cmake/Cuda.cmake +++ b/cmake/Cuda.cmake @@ -2,7 +2,7 @@ if(CPU_ONLY) return() endif() -# Known NVIDIA GPU achitectures Caffe can be compiled for. +# Known NVIDIA GPU architectures Caffe can be compiled for. 
# This list will be used for CUDA_ARCH_NAME = All option set(Caffe_known_gpu_archs "20 21(20) 30 35 50 60 61") @@ -37,7 +37,7 @@ function(caffe_detect_installed_gpus out_variable) if(__nvcc_res EQUAL 0) string(REPLACE "2.1" "2.1(2.0)" __nvcc_out "${__nvcc_out}") - set(CUDA_gpu_detect_output ${__nvcc_out} CACHE INTERNAL "Returned GPU architetures from caffe_detect_gpus tool" FORCE) + set(CUDA_gpu_detect_output ${__nvcc_out} CACHE INTERNAL "Returned GPU architectures from caffe_detect_gpus tool" FORCE) endif() endif() @@ -64,14 +64,14 @@ function(caffe_select_nvcc_arch_flags out_variable) endif() # set CUDA_ARCH_NAME strings (so it will be seen as dropbox in CMake-Gui) - set(CUDA_ARCH_NAME ${__archs_name_default} CACHE STRING "Select target NVIDIA GPU achitecture.") + set(CUDA_ARCH_NAME ${__archs_name_default} CACHE STRING "Select target NVIDIA GPU architecture.") set_property( CACHE CUDA_ARCH_NAME PROPERTY STRINGS "" ${__archs_names} ) mark_as_advanced(CUDA_ARCH_NAME) # verify CUDA_ARCH_NAME value if(NOT ";${__archs_names};" MATCHES ";${CUDA_ARCH_NAME};") string(REPLACE ";" ", " __archs_names "${__archs_names}") - message(FATAL_ERROR "Only ${__archs_names} architeture names are supported.") + message(FATAL_ERROR "Only ${__archs_names} architecture names are supported.") endif() if(${CUDA_ARCH_NAME} STREQUAL "Manual") @@ -273,7 +273,7 @@ if(Boost_VERSION EQUAL 105500) set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} \"-DBOOST_NOINLINE=__attribute__((noinline))\" ") endif() -# disable some nvcc diagnostic that apears in boost, glog, glags, opencv, etc. +# disable some nvcc diagnostic that appears in boost, glog, gflags, opencv, etc. 
foreach(diag cc_clobber_ignored integer_sign_change useless_using_declaration set_but_not_used) list(APPEND CUDA_NVCC_FLAGS -Xcudafe --diag_suppress=${diag}) endforeach() diff --git a/cmake/Misc.cmake b/cmake/Misc.cmake index fcb246472f0..6f2981183bb 100644 --- a/cmake/Misc.cmake +++ b/cmake/Misc.cmake @@ -33,8 +33,8 @@ set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE CACHE BOOLEAN "Use link paths for sha set(CMAKE_MACOSX_RPATH TRUE) list(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES - ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR} __is_systtem_dir) -if(${__is_systtem_dir} STREQUAL -1) + ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR} __is_system_dir) +if(${__is_system_dir} STREQUAL -1) set(CMAKE_INSTALL_RPATH ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}) endif() diff --git a/cmake/Modules/FindAtlas.cmake b/cmake/Modules/FindAtlas.cmake index 7ffa6393bbc..708e2770810 100644 --- a/cmake/Modules/FindAtlas.cmake +++ b/cmake/Modules/FindAtlas.cmake @@ -7,7 +7,7 @@ # Atlas_FOUND # Atlas_INCLUDE_DIRS # Atlas_LIBRARIES -# Atlas_LIBRARYRARY_DIRS +# Atlas_LIBRARY_DIRS set(Atlas_INCLUDE_SEARCH_PATHS /usr/include/atlas diff --git a/cmake/Modules/FindGFlags.cmake b/cmake/Modules/FindGFlags.cmake index 29b60f05037..90655f10f13 100644 --- a/cmake/Modules/FindGFlags.cmake +++ b/cmake/Modules/FindGFlags.cmake @@ -7,7 +7,7 @@ # GFLAGS_FOUND # GFLAGS_INCLUDE_DIRS # GFLAGS_LIBRARIES -# GFLAGS_LIBRARYRARY_DIRS +# GFLAGS_LIBRARY_DIRS include(FindPackageHandleStandardArgs) diff --git a/cmake/Modules/FindGlog.cmake b/cmake/Modules/FindGlog.cmake index 99abbe478a0..eb32fb4ec82 100644 --- a/cmake/Modules/FindGlog.cmake +++ b/cmake/Modules/FindGlog.cmake @@ -7,7 +7,7 @@ # GLOG_FOUND # GLOG_INCLUDE_DIRS # GLOG_LIBRARIES -# GLOG_LIBRARYRARY_DIRS +# GLOG_LIBRARY_DIRS include(FindPackageHandleStandardArgs) diff --git a/cmake/Modules/FindNumPy.cmake b/cmake/Modules/FindNumPy.cmake index a671494caba..12591585b34 100644 --- a/cmake/Modules/FindNumPy.cmake +++ b/cmake/Modules/FindNumPy.cmake @@ -43,7 
+43,7 @@ if(PYTHONINTERP_FOUND) endif() endif() else() - message(STATUS "To find NumPy Python interpretator is required to be found.") + message(STATUS "To find NumPy Python interpreter is required to be found.") endif() include(FindPackageHandleStandardArgs) diff --git a/cmake/Modules/FindSnappy.cmake b/cmake/Modules/FindSnappy.cmake index eff2a864a7b..ece42e92f8c 100644 --- a/cmake/Modules/FindSnappy.cmake +++ b/cmake/Modules/FindSnappy.cmake @@ -22,7 +22,7 @@ if(SNAPPY_FOUND) mark_as_advanced(Snappy_INCLUDE_DIR Snappy_LIBRARIES) caffe_parse_header(${Snappy_INCLUDE_DIR}/snappy-stubs-public.h - SNAPPY_VERION_LINES SNAPPY_MAJOR SNAPPY_MINOR SNAPPY_PATCHLEVEL) + SNAPPY_VERSION_LINES SNAPPY_MAJOR SNAPPY_MINOR SNAPPY_PATCHLEVEL) set(Snappy_VERSION "${SNAPPY_MAJOR}.${SNAPPY_MINOR}.${SNAPPY_PATCHLEVEL}") endif() diff --git a/cmake/Modules/FindvecLib.cmake b/cmake/Modules/FindvecLib.cmake index 4d44e613a00..fc18f0b92ca 100644 --- a/cmake/Modules/FindvecLib.cmake +++ b/cmake/Modules/FindvecLib.cmake @@ -1,4 +1,4 @@ -# Find the vecLib libraries as part of Accelerate.framework or as standalon framework +# Find the vecLib libraries as part of Accelerate.framework or as standalone framework # # The following are set after configuration is done: # VECLIB_FOUND diff --git a/cmake/ProtoBuf.cmake b/cmake/ProtoBuf.cmake index 72ea3230c50..84468ec7da5 100644 --- a/cmake/ProtoBuf.cmake +++ b/cmake/ProtoBuf.cmake @@ -15,7 +15,7 @@ endif() if(PROTOBUF_FOUND) # fetches protobuf version - caffe_parse_header(${PROTOBUF_INCLUDE_DIR}/google/protobuf/stubs/common.h VERION_LINE GOOGLE_PROTOBUF_VERSION) + caffe_parse_header(${PROTOBUF_INCLUDE_DIR}/google/protobuf/stubs/common.h VERSION_LINE GOOGLE_PROTOBUF_VERSION) string(REGEX MATCH "([0-9])00([0-9])00([0-9])" PROTOBUF_VERSION ${GOOGLE_PROTOBUF_VERSION}) set(PROTOBUF_VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}") unset(GOOGLE_PROTOBUF_VERSION) diff --git a/cmake/Summary.cmake b/cmake/Summary.cmake index 
40b8c2f2966..9fb9748c04f 100644 --- a/cmake/Summary.cmake +++ b/cmake/Summary.cmake @@ -168,7 +168,7 @@ function(caffe_print_configuration_summary) caffe_status("") endif() if(BUILD_docs) - caffe_status("Documentaion:") + caffe_status("Documentation:") caffe_status(" Doxygen :" DOXYGEN_FOUND THEN "${DOXYGEN_EXECUTABLE} (${DOXYGEN_VERSION})" ELSE "No") caffe_status(" config_file : ${DOXYGEN_config_file}") diff --git a/cmake/Templates/CaffeConfig.cmake.in b/cmake/Templates/CaffeConfig.cmake.in index 77c4059e560..6062d0dda14 100644 --- a/cmake/Templates/CaffeConfig.cmake.in +++ b/cmake/Templates/CaffeConfig.cmake.in @@ -46,7 +46,7 @@ if(NOT TARGET caffe AND NOT caffe_BINARY_DIR) endif() # List of IMPORTED libs created by CaffeTargets.cmake -# These targets already specify all needed definitions and include pathes +# These targets already specify all needed definitions and include paths set(Caffe_LIBRARIES caffe) # Cuda support variables diff --git a/cmake/lint.cmake b/cmake/lint.cmake index 70a006572bb..58b4bbdaa46 100644 --- a/cmake/lint.cmake +++ b/cmake/lint.cmake @@ -2,7 +2,7 @@ set(CMAKE_SOURCE_DIR ..) 
set(LINT_COMMAND ${CMAKE_SOURCE_DIR}/scripts/cpp_lint.py) set(SRC_FILE_EXTENSIONS h hpp hu c cpp cu cc) -set(EXCLUDE_FILE_EXTENSTIONS pb.h pb.cc) +set(EXCLUDE_FILE_EXTENSIONS pb.h pb.cc) set(LINT_DIRS include src/caffe examples tools python matlab) cmake_policy(SET CMP0009 NEW) # suppress cmake warning @@ -16,7 +16,7 @@ foreach(ext ${SRC_FILE_EXTENSIONS}) endforeach() # find all files that should be excluded -foreach(ext ${EXCLUDE_FILE_EXTENSTIONS}) +foreach(ext ${EXCLUDE_FILE_EXTENSIONS}) file(GLOB_RECURSE FOUND_FILES ${CMAKE_SOURCE_DIR}/*.${ext}) set(EXCLUDED_FILES ${EXCLUDED_FILES} ${FOUND_FILES}) endforeach() diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index ae47e461736..4134d81cebf 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -40,7 +40,7 @@ function(gather_notebooks_as_prebuild_cmd target gathered_dir root) set(full_gathered_dir ${root}/${gathered_dir}) if(NOT PYTHON_EXECUTABLE) - message(STATUS "Python interpeter is not found. Can't include *.ipynb files in docs. Skipping...") + message(STATUS "Python interpreter is not found. Can't include *.ipynb files in docs. Skipping...") return() endif() diff --git a/docs/development.md b/docs/development.md index 36cd399512e..a81f0fe1ea9 100644 --- a/docs/development.md +++ b/docs/development.md @@ -45,7 +45,7 @@ Post [Issues](https://github.com/BVLC/caffe/issues) to propose features, report Large-scale development work is guided by [milestones], which are sets of Issues selected for bundling as releases. Please note that since the core developers are largely researchers, we may work on a feature in isolation for some time before releasing it to the community, so as to claim honest academic contribution. -We do release things as soon as a reasonable technical report may be written, and we still aim to inform the community of ongoing development through Github Issues. 
+We do release things as soon as a reasonable technical report may be written, and we still aim to inform the community of ongoing development through GitHub Issues. **When you are ready to develop a feature or fixing a bug, follow this protocol**: diff --git a/docs/index.md b/docs/index.md index b633f7cfddc..a85ac326343 100644 --- a/docs/index.md +++ b/docs/index.md @@ -27,7 +27,7 @@ That's 1 ms/image for inference and 4 ms/image for learning and more recent libr We believe that Caffe is among the fastest convnet implementations available. **Community**: Caffe already powers academic research projects, startup prototypes, and even large-scale industrial applications in vision, speech, and multimedia. -Join our community of brewers on the [caffe-users group](https://groups.google.com/forum/#!forum/caffe-users) and [Github](https://github.com/BVLC/caffe/). +Join our community of brewers on the [caffe-users group](https://groups.google.com/forum/#!forum/caffe-users) and [GitHub](https://github.com/BVLC/caffe/).

\* With the ILSVRC2012-winning [SuperVision](http://www.image-net.org/challenges/LSVRC/2012/supervision.pdf) model and prefetching IO. @@ -93,7 +93,7 @@ The BAIR members who have contributed to Caffe are (alphabetical by first name): [Carl Doersch](http://www.carldoersch.com/), [Eric Tzeng](https://github.com/erictzeng), [Evan Shelhamer](http://imaginarynumber.net/), [Jeff Donahue](http://jeffdonahue.com/), [Jon Long](https://github.com/longjon), [Philipp Krähenbühl](http://www.philkr.net/), [Ronghang Hu](http://ronghanghu.com/), [Ross Girshick](http://www.cs.berkeley.edu/~rbg/), [Sergey Karayev](http://sergeykarayev.com/), [Sergio Guadarrama](http://www.eecs.berkeley.edu/~sguada/), [Takuya Narihira](https://github.com/tnarihi), and [Yangqing Jia](http://daggerfs.com/). The open-source community plays an important and growing role in Caffe's development. -Check out the Github [project pulse](https://github.com/BVLC/caffe/pulse) for recent activity and the [contributors](https://github.com/BVLC/caffe/graphs/contributors) for the full list. +Check out the GitHub [project pulse](https://github.com/BVLC/caffe/pulse) for recent activity and the [contributors](https://github.com/BVLC/caffe/graphs/contributors) for the full list. We sincerely appreciate your interest and contributions! If you'd like to contribute, please read the [developing & contributing](development.html) guide. diff --git a/docs/install_apt_debian.md b/docs/install_apt_debian.md index 0a6a3b962e5..9eb61973642 100644 --- a/docs/install_apt_debian.md +++ b/docs/install_apt_debian.md @@ -98,7 +98,7 @@ Note, this requires a `deb-src` entry in your `/etc/apt/sources.list`. #### Compiler Combinations -Some users may find their favorate compiler doesn't work with CUDA. +Some users may find their favorite compiler doesn't work with CUDA. 
``` CXX compiler | CUDA 7.5 | CUDA 8.0 | CUDA 9.0 | diff --git a/docs/model_zoo.md b/docs/model_zoo.md index 3f77e82572c..02be2c6be0f 100644 --- a/docs/model_zoo.md +++ b/docs/model_zoo.md @@ -9,7 +9,7 @@ These models are learned and applied for problems ranging from simple regression To help share these models, we introduce the model zoo framework: - A standard format for packaging Caffe model info. -- Tools to upload/download model info to/from Github Gists, and to download trained `.caffemodel` binaries. +- Tools to upload/download model info to/from GitHub Gists, and to download trained `.caffemodel` binaries. - A central wiki page for sharing model info Gists. ## Where to get trained models @@ -46,9 +46,9 @@ This simple format can be handled through bundled scripts or manually if need be ### Hosting model info -Github Gist is a good format for model info distribution because it can contain multiple files, is versionable, and has in-browser syntax highlighting and markdown rendering. +GitHub Gist is a good format for model info distribution because it can contain multiple files, is versionable, and has in-browser syntax highlighting and markdown rendering. -`scripts/upload_model_to_gist.sh ` uploads non-binary files in the model directory as a Github Gist and prints the Gist ID. If `gist_id` is already part of the `/readme.md` frontmatter, then updates existing Gist. +`scripts/upload_model_to_gist.sh ` uploads non-binary files in the model directory as a GitHub Gist and prints the Gist ID. If `gist_id` is already part of the `/readme.md` frontmatter, then updates existing Gist. Try doing `scripts/upload_model_to_gist.sh models/bvlc_alexnet` to test the uploading (don't forget to delete the uploaded gist afterward). 
diff --git a/docs/tutorial/interfaces.md b/docs/tutorial/interfaces.md index 2578af5d4de..5ea3a8672cf 100644 --- a/docs/tutorial/interfaces.md +++ b/docs/tutorial/interfaces.md @@ -32,7 +32,7 @@ For a full example of fine-tuning, see examples/finetuning_on_flickr_style, but **Testing**: `caffe test` scores models by running them in the test phase and reports the net output as its score. The net architecture must be properly defined to output an accuracy measure or loss as its output. The per-batch score is reported and then the grand average is reported last. # score the learned LeNet model on the validation set as defined in the - # model architeture lenet_train_test.prototxt + # model architecture lenet_train_test.prototxt caffe test -model examples/mnist/lenet_train_test.prototxt -weights examples/mnist/lenet_iter_10000.caffemodel -gpu 0 -iterations 100 **Benchmarking**: `caffe time` benchmarks model execution layer-by-layer through timing and synchronization. This is useful to check system performance and measure relative execution times for models. diff --git a/docs/tutorial/solver.md b/docs/tutorial/solver.md index 81c626386a2..35bb801c471 100644 --- a/docs/tutorial/solver.md +++ b/docs/tutorial/solver.md @@ -173,7 +173,7 @@ $$ (W_t)_i - \alpha \frac{\sqrt{1-(\beta_2)_i^t}}{1-(\beta_1)_i^t}\frac{(m_t)_i}{\sqrt{(v_t)_i}+\varepsilon}. $$ -Kingma et al. [1] proposed to use $$\beta_1 = 0.9, \beta_2 = 0.999, \varepsilon = 10^{-8}$$ as default values. Caffe uses the values of `momemtum, momentum2, delta` for $$\beta_1, \beta_2, \varepsilon$$, respectively. +Kingma et al. [1] proposed to use $$\beta_1 = 0.9, \beta_2 = 0.999, \varepsilon = 10^{-8}$$ as default values. Caffe uses the values of `momentum, momentum2, delta` for $$\beta_1, \beta_2, \varepsilon$$, respectively. [1] D. Kingma, J. Ba. [Adam: A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980). 
diff --git a/examples/pycaffe/layers/pascal_multilabel_datalayers.py b/examples/pycaffe/layers/pascal_multilabel_datalayers.py index 9420cb328ce..5de74558613 100644 --- a/examples/pycaffe/layers/pascal_multilabel_datalayers.py +++ b/examples/pycaffe/layers/pascal_multilabel_datalayers.py @@ -83,7 +83,7 @@ class BatchLoader(object): """ This class abstracts away the loading of images. Images can either be loaded singly, or in a batch. The latter is used for - the asyncronous data layer to preload batches while other processing is + the asynchronous data layer to preload batches while other processing is performed. """ diff --git a/include/caffe/data_transformer.hpp b/include/caffe/data_transformer.hpp index 97b4ee6a8c4..01d1e97ba51 100644 --- a/include/caffe/data_transformer.hpp +++ b/include/caffe/data_transformer.hpp @@ -11,7 +11,7 @@ namespace caffe { /** * @brief Applies common transformations to the input data, such as - * scaling, mirroring, substracting the image mean... + * scaling, mirroring, subtracting the image mean... 
*/ template class DataTransformer { @@ -139,7 +139,7 @@ class DataTransformer { virtual int Rand(int n); void Transform(const Datum& datum, Dtype* transformed_data); - // Tranformation parameters + // Transformation parameters TransformationParameter param_; diff --git a/include/caffe/layers/batch_norm_layer.hpp b/include/caffe/layers/batch_norm_layer.hpp index 43f7b28be95..16fbec445fc 100644 --- a/include/caffe/layers/batch_norm_layer.hpp +++ b/include/caffe/layers/batch_norm_layer.hpp @@ -66,7 +66,7 @@ class BatchNormLayer : public Layer { int channels_; Dtype eps_; - // extra temporarary variables is used to carry out sums/broadcasting + // extra temporary variables is used to carry out sums/broadcasting // using BLAS Blob batch_sum_multiplier_; Blob num_by_chans_; diff --git a/include/caffe/layers/crop_layer.hpp b/include/caffe/layers/crop_layer.hpp index 5219fa5cb5f..b20cf56742f 100644 --- a/include/caffe/layers/crop_layer.hpp +++ b/include/caffe/layers/crop_layer.hpp @@ -59,7 +59,7 @@ class CropLayer : public Layer { // Recursive copy function: this is similar to crop_copy() but loops over all // but the last two dimensions to allow for ND cropping while still relying on // a CUDA kernel for the innermost two dimensions for performance reasons. An - // alterantive implementation could rely on the kernel more by passing + // alternative implementation could rely on the kernel more by passing // offsets, but this is problematic because of its variable length. // Since in the standard (N,C,W,H) case N,C are usually not cropped a speedup // could be achieved by not looping the application of the copy_kernel around diff --git a/include/caffe/layers/lstm_layer.hpp b/include/caffe/layers/lstm_layer.hpp index a0e67c9d432..eb659348917 100644 --- a/include/caffe/layers/lstm_layer.hpp +++ b/include/caffe/layers/lstm_layer.hpp @@ -122,7 +122,7 @@ class LSTMUnitLayer : public Layer { * @param propagate_down see Layer::Backward. 
* @param bottom input Blob vector (length 3), into which the error gradients * with respect to the LSTMUnit inputs @f$ c_{t-1} @f$ and the gate - * inputs are computed. Computatation of the error gradients w.r.t. + * inputs are computed. Computation of the error gradients w.r.t. * the sequence indicators is not implemented. * -# @f$ (1 \times N \times D) @f$ * the error gradient w.r.t. the previous timestep cell state diff --git a/include/caffe/layers/softmax_loss_layer.hpp b/include/caffe/layers/softmax_loss_layer.hpp index f07e8a02cf1..46d172c2ab3 100644 --- a/include/caffe/layers/softmax_loss_layer.hpp +++ b/include/caffe/layers/softmax_loss_layer.hpp @@ -38,7 +38,7 @@ namespace caffe { * -# @f$ (1 \times 1 \times 1 \times 1) @f$ * the computed cross-entropy classification loss: @f$ E = * \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n}) - * @f$, for softmax output class probabilites @f$ \hat{p} @f$ + * @f$, for softmax output class probabilities @f$ \hat{p} @f$ */ template class SoftmaxWithLossLayer : public LossLayer { diff --git a/include/caffe/util/benchmark.hpp b/include/caffe/util/benchmark.hpp index d63582776ee..ccebe641412 100644 --- a/include/caffe/util/benchmark.hpp +++ b/include/caffe/util/benchmark.hpp @@ -17,14 +17,14 @@ class Timer { virtual float MicroSeconds(); virtual float Seconds(); - inline bool initted() { return initted_; } + inline bool initted() { return initted_; } inline bool running() { return running_; } inline bool has_run_at_least_once() { return has_run_at_least_once_; } protected: void Init(); - bool initted_; + bool initted_; bool running_; bool has_run_at_least_once_; #ifndef CPU_ONLY diff --git a/matlab/CMakeLists.txt b/matlab/CMakeLists.txt index 987730d9b55..5aa5f451d89 100644 --- a/matlab/CMakeLists.txt +++ b/matlab/CMakeLists.txt @@ -1,5 +1,5 @@ # Builds Matlab (or Octave) interface. In case of Matlab caffe must be -# compield as shared library. Octave can link static or shared caffe library +# compiled as shared library. 
Octave can link static or shared caffe library # To install octave run: sudo apt-get install liboctave-dev if(NOT BUILD_matlab) @@ -17,7 +17,7 @@ else() endif() if(NOT BUILD_SHARED_LIBS AND build_using MATCHES Matlab) - message(FATAL_ERROR "Matlab MEX interface (with default mex options file) can only be built if caffe is compiled as shared library. Please enable 'BUILD_SHARED_LIBS' in CMake. Aternativelly you can switch to Octave compiler.") + message(FATAL_ERROR "Matlab MEX interface (with default mex options file) can only be built if caffe is compiled as shared library. Please enable 'BUILD_SHARED_LIBS' in CMake. Alternatively you can switch to Octave compiler.") endif() # helper function to set proper mex file extension @@ -43,7 +43,7 @@ string(REPLACE ";" ";-L" link_folders "-L${folders}") string(REPLACE ";" ":" rpath_folders "${folders}") if(build_using MATCHES "Matlab") - set(libflags -lcaffe${Caffe_POSTFIX} ${libflags}) # Matlab R2014a complans for -Wl,--whole-archive + set(libflags -lcaffe${Caffe_POSTFIX} ${libflags}) # Matlab R2014a complains for -Wl,--whole-archive caffe_fetch_and_set_proper_mexext(Matlab_caffe_mex) add_custom_command(OUTPUT ${Matlab_caffe_mex} COMMAND ${Matlab_mex} diff --git a/python/caffe/coord_map.py b/python/caffe/coord_map.py index a3413cfa855..20f294fc3a9 100644 --- a/python/caffe/coord_map.py +++ b/python/caffe/coord_map.py @@ -114,7 +114,7 @@ def inverse(coord_map): def coord_map_from_to(top_from, top_to): """ - Determine the coordinate mapping betweeen a top (from) and a top (to). + Determine the coordinate mapping between a top (from) and a top (to). Walk the graph to find a common ancestor while composing the coord maps for from and to until they meet. As a last step the from map is inverted. 
""" diff --git a/python/caffe/test/test_python_layer.py b/python/caffe/test/test_python_layer.py index 899514e90f1..bc15934ac2c 100644 --- a/python/caffe/test/test_python_layer.py +++ b/python/caffe/test/test_python_layer.py @@ -50,7 +50,7 @@ class PhaseLayer(caffe.Layer): def setup(self, bottom, top): pass - def reshape(self, bootom, top): + def reshape(self, bottom, top): top[0].reshape() def forward(self, bottom, top): diff --git a/python/classify.py b/python/classify.py index 4544c51b4c2..9967c0e4611 100755 --- a/python/classify.py +++ b/python/classify.py @@ -1,6 +1,6 @@ #!/usr/bin/env python """ -classify.py is an out-of-the-box image classifer callable from the command line. +classify.py is an out-of-the-box image classifier callable from the command line. By default it configures and runs the Caffe reference ImageNet model. """ diff --git a/scripts/caffe b/scripts/caffe index 8a0b22af6ac..64ba6f128f1 100644 --- a/scripts/caffe +++ b/scripts/caffe @@ -1,7 +1,7 @@ # bash completion for Caffe's command line utility -*- shell-script -*- # COPYRIGHT (C) 2015,2016 Zhou Mo # License: BSD-2-Clause -# Originally appeard at https://github.com/BVLC/caffe/issues/3149 +# Originally appeared at https://github.com/BVLC/caffe/issues/3149 # Updated for caffe (1.0.0~rc3+20160715-g42cd785) _caffe() diff --git a/scripts/cpp_lint.py b/scripts/cpp_lint.py index fb44026718e..ea9c7e9f475 100755 --- a/scripts/cpp_lint.py +++ b/scripts/cpp_lint.py @@ -118,7 +118,7 @@ ignored. Examples: - Assuing that src/.git exists, the header guard CPP variables for + Assuming that src/.git exists, the header guard CPP variables for src/chrome/browser/ui/browser.h are: No flag => CHROME_BROWSER_UI_BROWSER_H_ @@ -2821,7 +2821,7 @@ def CheckSpacing(filename, clean_lines, linenum, nesting_state, error): # Look for < that is not surrounded by spaces. 
This is only # triggered if both sides are missing spaces, even though - # technically should should flag if at least one side is missing a + # technically should flag if at least one side is missing a # space. This is done to avoid some false positives with shifts. match = Search(r'[^\s<]<([^\s=<].*)', reduced_line) if (match and @@ -3759,7 +3759,7 @@ def _GetTextInside(text, start_pattern): Given a string of lines and a regular expression string, retrieve all the text following the expression and between opening punctuation symbols like (, [, or {, and the matching close-punctuation symbol. This properly nested - occurrences of the punctuations, so for the text like + occurrences of the punctuation, so for the text like printf(a(), b(c())); a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'. start_pattern must match string having an open punctuation symbol at the end. @@ -3776,7 +3776,7 @@ def _GetTextInside(text, start_pattern): # TODO(sugawarayu): Audit cpplint.py to see what places could be profitably # rewritten to use _GetTextInside (and use inferior regexp matching today). - # Give opening punctuations to get the matching close-punctuations. + # Give opening punctuation to get the matching close-punctuation. matching_punctuation = {'(': ')', '{': '}', '[': ']'} closing_punctuation = set(itervalues(matching_punctuation)) @@ -3790,22 +3790,22 @@ def _GetTextInside(text, start_pattern): 'start_pattern must ends with an opening punctuation.') assert text[start_position - 1] in matching_punctuation, ( 'start_pattern must ends with an opening punctuation.') - # Stack of closing punctuations we expect to have in text after position. + # Stack of closing punctuation we expect to have in text after position. 
punctuation_stack = [matching_punctuation[text[start_position - 1]]] position = start_position while punctuation_stack and position < len(text): if text[position] == punctuation_stack[-1]: punctuation_stack.pop() elif text[position] in closing_punctuation: - # A closing punctuation without matching opening punctuations. + # A closing punctuation without matching opening punctuation. return None elif text[position] in matching_punctuation: punctuation_stack.append(matching_punctuation[text[position]]) position += 1 if punctuation_stack: - # Opening punctuations left without matching close-punctuations. + # Opening punctuation left without matching close-punctuation. return None - # punctuations match. + # punctuation matches. return text[start_position:position - 1] diff --git a/src/caffe/layers/cudnn_conv_layer.cpp b/src/caffe/layers/cudnn_conv_layer.cpp index efc9e04e8c0..bdca2525d9e 100644 --- a/src/caffe/layers/cudnn_conv_layer.cpp +++ b/src/caffe/layers/cudnn_conv_layer.cpp @@ -109,7 +109,7 @@ void CuDNNConvolutionLayer::Reshape( const int stride_w = stride_data[1]; // Specify workspace limit for kernels directly until we have a - // planning strategy and a rewrite of Caffe's GPU memory mangagement + // planning strategy and a rewrite of Caffe's GPU memory management size_t workspace_limit_bytes = 8*1024*1024; for (int i = 0; i < bottom.size(); i++) { diff --git a/src/caffe/layers/cudnn_deconv_layer.cpp b/src/caffe/layers/cudnn_deconv_layer.cpp index 260da5c1ee0..d519aeac356 100644 --- a/src/caffe/layers/cudnn_deconv_layer.cpp +++ b/src/caffe/layers/cudnn_deconv_layer.cpp @@ -111,7 +111,7 @@ void CuDNNDeconvolutionLayer::Reshape( const int stride_w = stride_data[1]; // Specify workspace limit for kernels directly until we have a - // planning strategy and a rewrite of Caffe's GPU memory mangagement + // planning strategy and a rewrite of Caffe's GPU memory management size_t workspace_limit_bytes = 8*1024*1024; for (int i = 0; i < bottom.size(); i++) { diff --git 
a/src/caffe/layers/infogain_loss_layer.cpp b/src/caffe/layers/infogain_loss_layer.cpp index 3c3f460ec34..1e3f4f3557b 100644 --- a/src/caffe/layers/infogain_loss_layer.cpp +++ b/src/caffe/layers/infogain_loss_layer.cpp @@ -3,7 +3,7 @@ #include #include "caffe/layers/infogain_loss_layer.hpp" -#include "caffe/util/io.hpp" // for bolb reading of matrix H +#include "caffe/util/io.hpp" // for blob reading of matrix H #include "caffe/util/math_functions.hpp" namespace caffe { diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp index 210525e20f3..aad2712ef02 100644 --- a/src/caffe/layers/lrn_layer.cpp +++ b/src/caffe/layers/lrn_layer.cpp @@ -53,7 +53,7 @@ void LRNLayer::LayerSetUp(const vector*>& bottom, power_layer_.reset(new PowerLayer(power_param)); power_layer_->SetUp(pool_top_vec_, power_top_vec_); // Set up a product_layer_ to compute outputs by multiplying inputs by the - // inverse demoninator computed by the power layer. + // inverse denominator computed by the power layer. product_bottom_vec_.clear(); product_bottom_vec_.push_back(&product_input_); product_bottom_vec_.push_back(&power_output_); diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp index 853181bd5a2..c3ec6e25c18 100644 --- a/src/caffe/layers/prelu_layer.cpp +++ b/src/caffe/layers/prelu_layer.cpp @@ -108,9 +108,9 @@ void PReLULayer::Backward_cpu(const vector*>& top, // always zero. const int div_factor = channel_shared_ ? channels : 1; - // Propagte to param + // Propagate to param // Since to write bottom diff will affect top diff if top and bottom blobs - // are identical (in-place computaion), we first compute param backward to + // are identical (in-place computation), we first compute param backward to // keep top_diff unchanged. 
if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_cpu_diff(); diff --git a/src/caffe/layers/prelu_layer.cu b/src/caffe/layers/prelu_layer.cu index aeb80eacd03..911f539f975 100644 --- a/src/caffe/layers/prelu_layer.cu +++ b/src/caffe/layers/prelu_layer.cu @@ -6,7 +6,7 @@ namespace caffe { -// CUDA kernele for forward +// CUDA kernel for forward template __global__ void PReLUForward(const int n, const int channels, const int dim, const Dtype* in, Dtype* out, const Dtype* slope_data, @@ -82,7 +82,7 @@ void PReLULayer::Backward_gpu(const vector*>& top, // Propagate to param // Since to write bottom diff will affect top diff if top and bottom blobs - // are identical (in-place computaion), we first compute param backward to + // are identical (in-place computation), we first compute param backward to // keep top_diff unchanged. if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 3dcad697f6d..aeab84e9da3 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -189,7 +189,7 @@ message SolverParameter { // The prefix for the snapshot. // If not set then is replaced by prototxt file path without extension. // If is set to directory then is augmented by prototxt file name - // without extention. + // without extension. optional string snapshot_prefix = 15; // whether to snapshot diff in the results or not. Snapshotting diff will help // debugging but the final protocol buffer size will be much larger. 
diff --git a/src/caffe/test/test_benchmark.cpp b/src/caffe/test/test_benchmark.cpp index b03fdf69a8a..0debef10d95 100644 --- a/src/caffe/test/test_benchmark.cpp +++ b/src/caffe/test/test_benchmark.cpp @@ -18,7 +18,7 @@ TYPED_TEST_CASE(BenchmarkTest, TestDtypesAndDevices); TYPED_TEST(BenchmarkTest, TestTimerConstructor) { Timer timer; - EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.inited()); EXPECT_FALSE(timer.running()); EXPECT_FALSE(timer.has_run_at_least_once()); } @@ -26,16 +26,16 @@ TYPED_TEST(BenchmarkTest, TestTimerConstructor) { TYPED_TEST(BenchmarkTest, TestTimerStart) { Timer timer; timer.Start(); - EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.inited()); EXPECT_TRUE(timer.running()); EXPECT_TRUE(timer.has_run_at_least_once()); timer.Start(); - EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.inited()); EXPECT_TRUE(timer.running()); EXPECT_TRUE(timer.has_run_at_least_once()); timer.Stop(); timer.Start(); - EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.inited()); EXPECT_TRUE(timer.running()); EXPECT_TRUE(timer.has_run_at_least_once()); } @@ -43,16 +43,16 @@ TYPED_TEST(BenchmarkTest, TestTimerStart) { TYPED_TEST(BenchmarkTest, TestTimerStop) { Timer timer; timer.Stop(); - EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.inited()); EXPECT_FALSE(timer.running()); EXPECT_FALSE(timer.has_run_at_least_once()); timer.Start(); timer.Stop(); - EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.inited()); EXPECT_FALSE(timer.running()); EXPECT_TRUE(timer.has_run_at_least_once()); timer.Stop(); - EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.inited()); EXPECT_FALSE(timer.running()); EXPECT_TRUE(timer.has_run_at_least_once()); } @@ -60,14 +60,14 @@ TYPED_TEST(BenchmarkTest, TestTimerStop) { TYPED_TEST(BenchmarkTest, TestTimerMilliSeconds) { Timer timer; EXPECT_EQ(timer.MilliSeconds(), 0); - EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.inited()); EXPECT_FALSE(timer.running()); EXPECT_FALSE(timer.has_run_at_least_once()); timer.Start(); 
boost::this_thread::sleep(boost::posix_time::milliseconds(300)); EXPECT_GE(timer.MilliSeconds(), 300 - kMillisecondsThreshold); EXPECT_LE(timer.MilliSeconds(), 300 + kMillisecondsThreshold); - EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.inited()); EXPECT_FALSE(timer.running()); EXPECT_TRUE(timer.has_run_at_least_once()); } @@ -75,14 +75,14 @@ TYPED_TEST(BenchmarkTest, TestTimerMilliSeconds) { TYPED_TEST(BenchmarkTest, TestTimerSeconds) { Timer timer; EXPECT_EQ(timer.Seconds(), 0); - EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.inited()); EXPECT_FALSE(timer.running()); EXPECT_FALSE(timer.has_run_at_least_once()); timer.Start(); boost::this_thread::sleep(boost::posix_time::milliseconds(300)); EXPECT_GE(timer.Seconds(), 0.3 - kMillisecondsThreshold / 1000.); EXPECT_LE(timer.Seconds(), 0.3 + kMillisecondsThreshold / 1000.); - EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.inited()); EXPECT_FALSE(timer.running()); EXPECT_TRUE(timer.has_run_at_least_once()); } diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 24b957f2acc..a6bf6343439 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -2453,7 +2453,7 @@ TYPED_TEST(NetTest, TestSkipPropagateDown) { for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { string layer_name = this->net_->layer_names()[layer_id]; if (layer_name == "loss") { - // access to bottom_need_backward coresponding to label's blob + // access to bottom_need_backward corresponding to label's blob bool need_back = this->net_->bottom_need_backward()[layer_id][1]; // if propagate_down is true, the loss layer will try to // backpropagate on labels @@ -2469,14 +2469,14 @@ TYPED_TEST(NetTest, TestSkipPropagateDown) { << "layer_need_backward for " << layer_name << " should be True"; } } - // check bottom_need_backward if propagat_down is false + // check bottom_need_backward if propagate_down is false this->InitSkipPropNet(true); vec_layer_need_backward.clear(); 
vec_layer_need_backward = this->net_->layer_need_backward(); for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { string layer_name = this->net_->layer_names()[layer_id]; if (layer_name == "loss") { - // access to bottom_need_backward coresponding to label's blob + // access to bottom_need_backward corresponding to label's blob bool need_back = this->net_->bottom_need_backward()[layer_id][1]; // if propagate_down is false, the loss layer will not try to // backpropagate on labels diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index bb95cae032d..52ad27f6da2 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -1044,7 +1044,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { pooling_param->set_kernel_h(kernel_h); pooling_param->set_kernel_w(kernel_w); pooling_param->set_stride(2); - // currenty, cuDNN pooling does not support padding + // currently, cuDNN pooling does not support padding pooling_param->set_pad(0); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); CuDNNPoolingLayer layer(layer_param); diff --git a/src/caffe/util/benchmark.cpp b/src/caffe/util/benchmark.cpp index d994225f97b..dae5060889b 100644 --- a/src/caffe/util/benchmark.cpp +++ b/src/caffe/util/benchmark.cpp @@ -6,7 +6,7 @@ namespace caffe { Timer::Timer() - : initted_(false), + : inited_(false), running_(false), has_run_at_least_once_(false) { Init(); @@ -106,7 +106,7 @@ float Timer::Seconds() { } void Timer::Init() { - if (!initted()) { + if (!inited()) { if (Caffe::mode() == Caffe::GPU) { #ifndef CPU_ONLY CUDA_CHECK(cudaEventCreate(&start_gpu_)); @@ -115,12 +115,12 @@ void Timer::Init() { NO_GPU; #endif } - initted_ = true; + inited_ = true; } } CPUTimer::CPUTimer() { - this->initted_ = true; + this->inited_ = true; this->running_ = false; this->has_run_at_least_once_ = false; } diff --git a/src/gtest/gtest-all.cpp b/src/gtest/gtest-all.cpp index 
81cdb578cd5..b969598ed0f 100644 --- a/src/gtest/gtest-all.cpp +++ b/src/gtest/gtest-all.cpp @@ -612,7 +612,7 @@ class GTestFlagSaver { // Converts a Unicode code point to a narrow string in UTF-8 encoding. // code_point parameter is of type UInt32 because wchar_t may not be // wide enough to contain a code point. -// The output buffer str must containt at least 32 characters. +// The output buffer str must contain at least 32 characters. // The function returns the address of the output buffer. // If the code_point is not a valid Unicode code point // (i.e. outside of Unicode range U+0 to U+10FFFF) it will be output @@ -897,9 +897,9 @@ class GTEST_API_ UnitTestImpl { virtual ~UnitTestImpl(); // There are two different ways to register your own TestPartResultReporter. - // You can register your own repoter to listen either only for test results + // You can register your own reporter to listen either only for test results // from the current thread or for results from all threads. - // By default, each per-thread test result repoter just passes a new + // By default, each per-thread test result reporter just passes a new // TestPartResult to the global test result reporter, which registers the // test part result for the currently running test. @@ -1201,7 +1201,7 @@ class GTEST_API_ UnitTestImpl { default_per_thread_test_part_result_reporter_; // Points to (but doesn't own) the global test part result reporter. - TestPartResultReporterInterface* global_test_part_result_repoter_; + TestPartResultReporterInterface* global_test_part_result_reporter_; // Protects read and write access to global_test_part_result_reporter_. internal::Mutex global_test_part_result_reporter_mutex_; @@ -1765,7 +1765,7 @@ bool UnitTestOptions::MatchesFilter(const String& name, const char* filter) { return false; } - // Skips the pattern separater (the ':' character). + // Skips the pattern separator (the ':' character). 
cur_pattern++; } } @@ -1981,14 +1981,14 @@ void DefaultPerThreadTestPartResultReporter::ReportTestPartResult( TestPartResultReporterInterface* UnitTestImpl::GetGlobalTestPartResultReporter() { internal::MutexLock lock(&global_test_part_result_reporter_mutex_); - return global_test_part_result_repoter_; + return global_test_part_result_reporter_; } // Sets the global test part result reporter. void UnitTestImpl::SetGlobalTestPartResultReporter( TestPartResultReporterInterface* reporter) { internal::MutexLock lock(&global_test_part_result_reporter_mutex_); - global_test_part_result_repoter_ = reporter; + global_test_part_result_reporter_ = reporter; } // Returns the test part result reporter for the current thread. @@ -2730,7 +2730,7 @@ inline UInt32 ChopLowBits(UInt32* bits, int n) { // Converts a Unicode code point to a narrow string in UTF-8 encoding. // code_point parameter is of type UInt32 because wchar_t may not be // wide enough to contain a code point. -// The output buffer str must containt at least 32 characters. +// The output buffer str must contain at least 32 characters. // The function returns the address of the output buffer. // If the code_point is not a valid Unicode code point // (i.e. outside of Unicode range U+0 to U+10FFFF) it will be output @@ -2768,7 +2768,7 @@ char* CodePointToUtf8(UInt32 code_point, char* str) { return str; } -// The following two functions only make sense if the the system +// The following two functions only make sense if the system // uses UTF-16 for wide string encoding. All supported systems // with 16 bit wchar_t (Windows, Cygwin, Symbian OS) do use UTF-16. @@ -2914,7 +2914,7 @@ bool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) { // On windows, this method uses _wcsicmp which compares according to LC_CTYPE // environment variable. On GNU platform this method uses wcscasecmp // which compares according to LC_CTYPE category of the current locale. 
- // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the + // On macOS, it uses towlower, which also uses LC_CTYPE category of the // current locale. bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs, const wchar_t* rhs) { @@ -3118,7 +3118,7 @@ void TestResult::RecordProperty(const TestProperty& test_property) { if (!ValidateTestProperty(test_property)) { return; } - internal::MutexLock lock(&test_properites_mutex_); + internal::MutexLock lock(&test_properties_mutex_); const std::vector::iterator property_with_matching_key = std::find_if(test_properties_.begin(), test_properties_.end(), internal::TestPropertyKeyIs(test_property.key())); @@ -5252,7 +5252,7 @@ UnitTestImpl::UnitTestImpl(UnitTest* parent) default_global_test_part_result_reporter_(this), default_per_thread_test_part_result_reporter_(this), #endif // _MSC_VER - global_test_part_result_repoter_( + global_test_part_result_reporter_( &default_global_test_part_result_reporter_), per_thread_test_part_result_reporter_( &default_per_thread_test_part_result_reporter_), @@ -7078,7 +7078,7 @@ struct ExecDeathTestArgs { # if GTEST_OS_MAC inline char** GetEnviron() { - // When Google Test is built as a framework on MacOS X, the environ variable + // When Google Test is built as a framework on macOS, the environ variable // is unavailable. Apple's documentation (man environ) recommends using // _NSGetEnviron() instead. return *_NSGetEnviron(); @@ -8672,7 +8672,7 @@ namespace internal { // Depending on the value of a char (or wchar_t), we print it in one // of three formats: // - as is if it's a printable ASCII (e.g. 'a', '2', ' '), -// - as a hexidecimal escape sequence (e.g. '\x7F'), or +// - as a hexadecimal escape sequence (e.g. '\x7F'), or // - as a special escape sequence (e.g. '\r', '\n'). 
enum CharFormat { kAsIs, @@ -8775,7 +8775,7 @@ void PrintCharAndCodeTo(Char c, ostream* os) { return; *os << " (" << String::Format("%d", c).c_str(); - // For more convenience, we print c's code again in hexidecimal, + // For more convenience, we print c's code again in hexadecimal, // unless c was already printed in the form '\x##' or the code is in // [1, 9]. if (format == kHexEscape || (1 <= c && c <= 9)) { diff --git a/src/gtest/gtest.h b/src/gtest/gtest.h index 124fb2321f9..20112d0ebed 100644 --- a/src/gtest/gtest.h +++ b/src/gtest/gtest.h @@ -1784,7 +1784,7 @@ struct CompileAssert { // expr is a compile-time constant. (Template arguments must be // determined at compile-time.) // -// - The outter parentheses in CompileAssert<(bool(expr))> are necessary +// - The outer parentheses in CompileAssert<(bool(expr))> are necessary // to work around a bug in gcc 3.4.4 and 4.0.1. If we had written // // CompileAssert @@ -3020,7 +3020,7 @@ class GTEST_API_ String { // On windows, this method uses _wcsicmp which compares according to LC_CTYPE // environment variable. On GNU platform this method uses wcscasecmp // which compares according to LC_CTYPE category of the current locale. - // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the + // On macOS, it uses towlower, which also uses LC_CTYPE category of the // current locale. static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs, const wchar_t* rhs); @@ -6879,7 +6879,7 @@ String StreamableToString(const T& streamable); // correct overload of FormatForComparisonFailureMessage (see below) // unless we pass the first argument by reference. If we do that, // however, Visual Age C++ 10.1 generates a compiler error. Therefore -// we only apply the work-around for Symbian. +// we only apply the workaround for Symbian. 
#if defined(__SYMBIAN32__) # define GTEST_CREF_WORKAROUND_ const& #else @@ -7201,7 +7201,7 @@ class TestFactoryBase { GTEST_DISALLOW_COPY_AND_ASSIGN_(TestFactoryBase); }; -// This class provides implementation of TeastFactoryBase interface. +// This class provides implementation of TestFactoryBase interface. // It is used in TEST and TEST_F macros. template class TestFactoryImpl : public TestFactoryBase { @@ -7852,7 +7852,7 @@ class NativeArray { // Implements Boolean test assertions such as EXPECT_TRUE. expression can be // either a boolean expression or an AssertionResult. text is a textual -// represenation of expression as it was passed into the EXPECT_TRUE. +// representation of expression as it was passed into the EXPECT_TRUE. #define GTEST_TEST_BOOLEAN_(expression, text, actual, expected, fail) \ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ if (const ::testing::AssertionResult gtest_ar_ = \ @@ -15453,7 +15453,7 @@ internal::ParamGenerator Range(T start, T end) { // each with C-string values of "foo", "bar", and "baz": // // const char* strings[] = {"foo", "bar", "baz"}; -// INSTANTIATE_TEST_CASE_P(StringSequence, SrtingTest, ValuesIn(strings)); +// INSTANTIATE_TEST_CASE_P(StringSequence, StringTest, ValuesIn(strings)); // // This instantiates tests from test case StlStringTest // each with STL strings with values "a" and "b": @@ -17621,7 +17621,7 @@ class GTEST_API_ TestResult { // Protects mutable state of the property vector and of owned // properties, whose values may be updated. - internal::Mutex test_properites_mutex_; + internal::Mutex test_properties_mutex_; // The vector of TestPartResults std::vector test_part_results_; @@ -18229,7 +18229,7 @@ class GTEST_API_ UnitTest { internal::UnitTestImpl* impl() { return impl_; } const internal::UnitTestImpl* impl() const { return impl_; } - // These classes and funcions are friends as they need to access private + // These classes and functions are friends as they need to access private // members of UnitTest. 
friend class Test; friend class internal::AssertHelper; diff --git a/tools/extra/launch_resize_and_crop_images.sh b/tools/extra/launch_resize_and_crop_images.sh index 84ca858cd84..dff5132cede 100755 --- a/tools/extra/launch_resize_and_crop_images.sh +++ b/tools/extra/launch_resize_and_crop_images.sh @@ -14,7 +14,7 @@ ## Launch your Mapreduce locally # num_clients: number of processes -# image_lib: OpenCV or PIL, case insensitive. The default value is the faster OpenCV. +# image_lib: OpenCV or PIL, case-insensitive. The default value is the faster OpenCV. # input: the file containing one image path relative to input_folder each line # input_folder: where are the original images # output_folder: where to save the resized and cropped images diff --git a/tools/extra/plot_training_log.py.example b/tools/extra/plot_training_log.py.example index 8caca6b8a67..a4ecb9cf5da 100755 --- a/tools/extra/plot_training_log.py.example +++ b/tools/extra/plot_training_log.py.example @@ -180,5 +180,5 @@ if __name__ == '__main__': if not path_to_log.endswith(get_log_file_suffix()): print 'Log file must end in %s.' % get_log_file_suffix() print_help() - ## plot_chart accpets multiple path_to_logs + ## plot_chart accepts multiple path_to_logs plot_chart(chart_type, path_to_png, path_to_logs) diff --git a/tools/extra/resize_and_crop_images.py b/tools/extra/resize_and_crop_images.py index fd2c3134edb..267ca718c24 100755 --- a/tools/extra/resize_and_crop_images.py +++ b/tools/extra/resize_and_crop_images.py @@ -7,7 +7,7 @@ # gflags gflags.DEFINE_string('image_lib', 'opencv', - 'OpenCV or PIL, case insensitive. The default value is the faster OpenCV.') + 'OpenCV or PIL, case-insensitive. The default value is the faster OpenCV.') gflags.DEFINE_string('input_folder', '', 'The folder that contains all input images, organized in synsets.') gflags.DEFINE_integer('output_side_length', 256,