diff --git a/.gitignore b/.gitignore index f2860f53e..8a0360199 100644 --- a/.gitignore +++ b/.gitignore @@ -1,19 +1,25 @@ # cmake stuff +build*/ Testing/ -cnn/Testing/ -cnn/tests.bin/ +dynet/Testing/ +dynet/tests.bin/ CTestTestfile.cmake config.h Makefile CMakeCache.txt CMakeFiles cmake_install.cmake +python/dynet.cpp +python/dist/ +python/dyNET.egg-info/ # binaries -examples/xor -examples/xor-xent -examples/rnnlm -examples/nlm + +#data +rnnlm/ptb-mikolov/ + +# Python temporary files +*.pyc # Compiled Object files *.slo @@ -44,5 +50,12 @@ examples/nlm *.out *.app +# Editor stuff +*.swp + +# Doc stuff +doc/doxygen/xml +doc/source/tutorials_notebooks + .RData .RHistory diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..1207f4738 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "external/easyloggingpp"] + path = external/easyloggingpp + url = https://github.com/easylogging/easyloggingpp.git diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..2760b0e0b --- /dev/null +++ b/.travis.yml @@ -0,0 +1,27 @@ +language: cpp +compiler: + - gcc + +install: +- if [ "$CXX" = "g++" ]; then export CXX="g++-4.8" CC="gcc-4.8"; fi +addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - gcc-4.8 + - g++-4.8 + +before_script: + - sudo add-apt-repository -y ppa:boost-latest/ppa + - sudo apt-get update -qq + - sudo apt-get install libboost-filesystem1.55-dev libboost-program-options1.55-dev libboost-serialization1.55-dev libboost-test1.55-dev libboost-regex1.55-dev + - hg clone https://bitbucket.org/eigen/eigen + - mkdir build + - cd build + - cmake .. 
-DEIGEN3_INCLUDE_DIR=eigen + +script: + - make + - make test + diff --git a/CMakeLists.txt b/CMakeLists.txt index f4ded5b31..fd40aa10f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,12 +1,83 @@ -project(cnn) +project(dynet) cmake_minimum_required(VERSION 2.8 FATAL_ERROR) set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) -set(CMAKE_CXX_FLAGS "-Wall -std=c++11 -O3 -g") + +# DYNET uses Eigen which exploits modern CPU architectures. To get the +# best possible performance, the following are recommended: +# 1. use very recent versions of gcc or Clang to build +# 2. use very recent versions of Eigen (ideally the dev version) +# 3. try compiler options like -march=native or other architecture +# flags (the compiler does not always make the best configuration +# decisions without help) + +# NOTE: This seems to be causing problems with linking before using +# make install. It is allegedly preferred, but probably doesn't +# suit our model of not installing the library most of the time. +set(CMAKE_MACOSX_RPATH 0) + +function(find_mkl) + set(MKL_ARCH intel64) + find_path(MKL_INCLUDE_DIR mkl.h + PATHS ${MKL_ROOT} ${MKL_ROOT}/include) + find_library(MKL_CORE_LIB NAMES mkl_intel_lp64 mkl_intel_thread mkl_core + PATHS ${MKL_ROOT} ${MKL_ROOT}/lib/${MKL_ARCH} + ${MKL_ROOT}/lib #OSX + DOC "MKL core library path") + + find_library(MKL_COMPILER_LIB NAMES iomp5 libiomp5md + PATHS ${MKL_ROOT} ${MKL_ROOT}/../compiler/lib/${MKL_ARCH} #Windows + ${MKL_ROOT}/../compilers_and_libraries/linux/lib/${MKL_ARCH}_lin #Linux + ${MKL_ROOT}/../compilers_and_libraries/mac/lib #OSX + DOC "MKL compiler lib (for threaded MKL)") + + if(MKL_INCLUDE_DIR AND MKL_CORE_LIB AND MKL_COMPILER_LIB) + get_filename_component(MKL_CORE_LIB_DIR ${MKL_CORE_LIB} DIRECTORY) + get_filename_component(MKL_COMPILER_LIB_DIR ${MKL_COMPILER_LIB} DIRECTORY) + get_filename_component(MKL_COMPILER_LIB_FILE ${MKL_COMPILER_LIB} NAME) + message(STATUS "Found MKL\n * include: ${MKL_INCLUDE_DIR},\n * core library dir: 
${MKL_CORE_LIB_DIR},\n * compiler library: ${MKL_COMPILER_LIB}") + + # Due to a conflict with /MT and /MD, MSVC needs mkl_intel_lp64 linked last, or we can change individual + # projects to use /MT (mkl_intel_lp64 linked with /MT, default MSVC projects use /MD), or we can instead + # link to the DLL versions. For now I'm opting for this solution which seems to work with projects still + # at their default /MD. Linux build requires the mkl_intel_lp64 to be linked first. So...: + if(MSVC) + set(LIBS ${LIBS} mkl_intel_thread mkl_core mkl_intel_lp64 ${MKL_COMPILER_LIB_FILE} PARENT_SCOPE) + else() + set(LIBS ${LIBS} mkl_intel_lp64 mkl_intel_thread mkl_core ${MKL_COMPILER_LIB_FILE} PARENT_SCOPE) + endif() + include_directories(${MKL_INCLUDE_DIR}) + link_directories(${MKL_CORE_LIB_DIR} ${MKL_COMPILER_LIB_DIR}) + set(MKL_LINK_DIRS ${MKL_CORE_LIB_DIR} ${MKL_COMPILER_LIB_DIR} PARENT_SCOPE) # Keeping this for python build + else() + message(FATAL_ERROR "Failed to find MKL in path: ${MKL_ROOT} (Did you set MKL_ROOT properly?)") + endif() +endfunction() + +######## Cross-compiler, cross-platform options +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DEIGEN_FAST_MATH") +if (MKL OR MKL_ROOT) + find_mkl() # sets include/lib directories and sets ${LIBS} needed for linking + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DEIGEN_USE_MKL_ALL") +endif() + + +######## Platform-specific options +if(WIN32) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DNOMINMAX") # Disable min/max macros in windef.h +endif() + +######## Compiler-specific options +if(MSVC) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -W1 -DEIGEN_HAS_C99_MATH /MP") # -Wall produces 20k warnings +else() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -funroll-loops -fno-finite-math-only -Wall -Wno-missing-braces -std=c++11 -Ofast -g -march=native") +endif() enable_testing() -include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +include_directories(${CMAKE_CURRENT_SOURCE_DIR} + ${PROJECT_SOURCE_DIR}/external/easyloggingpp/src) 
function(find_cudnn) set(CUDNN_ROOT "" CACHE PATH "CUDNN root path") @@ -29,32 +100,76 @@ function(find_cudnn) endfunction() # look for Boost -find_package(Boost COMPONENTS program_options serialization unit_test_framework REQUIRED) +if(DEFINED ENV{BOOST_ROOT}) + set(Boost_NO_SYSTEM_PATHS ON) + if(DEFINED ${Boost_INCLUDE_DIR}) + get_filename_component(Boost_INCLUDE_DIR "${Boost_INCLUDE_DIR}" REALPATH BASE_DIR "${CMAKE_BINARY_DIR}") + endif() +endif() +set(Boost_REALPATH ON) +find_package(Boost COMPONENTS program_options regex serialization REQUIRED) +message("-- Boost dir is " ${Boost_INCLUDE_DIR}) include_directories(${Boost_INCLUDE_DIR}) +if(MSVC) + # Boost does auto-linking when using a compiler like Microsoft Visual C++, we just need to help it find the libraries + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /LIBPATH:${Boost_LIBRARY_DIRS}") + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /LIBPATH:${Boost_LIBRARY_DIRS}") +else() + set(LIBS ${LIBS} ${Boost_LIBRARIES}) +endif() +# trouble shooting: +# if boost library cannot be found, in addition to install boost library +# check if environment variables are set +# +# to set boost root and its library root in environment variable, use +# for example +# echo "export BOOST_LIBRARYDIR=/usr/local/lib" >> ~/.bashrc +# echo "export BOOST_ROOT=/cygdrive/d/tools/boost_1_58_0/boost_1_58_0" >> ~/.bashrc +# then run source ~/.bashrc to have those environment variable effective immediately -# look for Minerva -find_package(Minerva) -if(MINERVA_FOUND) - include_directories(${MINERVA_INCLUDE_DIR}) - set(HAVE_MINERVA_H 1) - if (ENABLE_CUDA) - set(CUDA_TOOLKIT_ROOT_DIR ${CUDA_ROOT}) - find_package(CUDA REQUIRED) - include_directories(SYSTEM ${CUDA_INCLUDE_DIRS}) - add_definitions(-DHAS_CUDA) - find_cudnn() - include_directories(SYSTEM ${CUDNN_INCLUDE_DIRS}) - else () - message(STATUS "CUDA not enabled (use -DENABLE_CUDA=1)") - endif() -endif(MINERVA_FOUND) +if(BACKEND) + message("-- BACKEND: ${BACKEND}") 
+else() + message("-- BACKEND not specified, defaulting to eigen.") + set(BACKEND "eigen") +endif() + +if(BACKEND MATCHES "^eigen$") + set(WITH_EIGEN_BACKEND 1) +elseif(BACKEND MATCHES "^cuda$") + set(WITH_CUDA_BACKEND 1) +else() + message(SEND_ERROR "BACKEND must be eigen or cuda") +endif() + +if (WITH_CUDA_BACKEND) + find_package(CUDA REQUIRED) + set(CUDA_TOOLKIT_ROOT_DIR ${CUDA_ROOT}) + include_directories(SYSTEM ${CUDA_INCLUDE_DIRS}) + #list(APPEND CUDA_LIBRARIES /usr/lib64/libpthread.so) + MESSAGE("CUDA_LIBRARIES: ${CUDA_LIBRARIES}") + list(REMOVE_ITEM CUDA_LIBRARIES -lpthread) + set(LIBS ${LIBS} ${CUDA_LIBRARIES}) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DEIGEN_HAS_CUDA_FP16 -DEIGEN_USE_GPU") + #find_cudnn() + #include_directories(SYSTEM ${CUDNN_INCLUDE_DIRS}) +endif() # look for Eigen find_package(Eigen3 REQUIRED) +get_filename_component(EIGEN3_INCLUDE_DIR "${EIGEN3_INCLUDE_DIR}" REALPATH BASE_DIR "${CMAKE_BINARY_DIR}") +message("-- Eigen dir is " ${EIGEN3_INCLUDE_DIR}) include_directories(${EIGEN3_INCLUDE_DIR}) +FIND_PACKAGE(Threads REQUIRED) +set(LIBS ${LIBS} ${CMAKE_THREAD_LIBS_INIT}) + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/config.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/config.h) +include_directories(${CMAKE_CURRENT_BINARY_DIR}) -add_subdirectory(cnn) +add_subdirectory(dynet) +add_subdirectory(tests) add_subdirectory(examples) - +add_subdirectory(tutorial) +add_subdirectory(python) +enable_testing() diff --git a/LICENSE b/LICENSE index e06d20818..28ab34023 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ Apache License same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright 2015 Chris Dyer Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/MEM.notes b/MEM.notes new file mode 100644 index 000000000..80d197a35 --- /dev/null +++ b/MEM.notes @@ -0,0 +1,10 @@ +The code that computes the l2 norm of the gradient is going to need +scratch space on every device that DYNET is using that has a parameter. +1) devices should know whether they have parameters/gradients + +alignment code is hidden away. it's all hard coded, but it looks like +Intel at least is getting more forgiving about alignment problems so +we might not notice opportunities for speedups if something changes. +GPU memory is aligned mostly by CUDA + +the MP stuff needs to be tested by Austin. diff --git a/README.md b/README.md index b14187eb8..2f7802e09 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,18 @@ -# cnn -C++ neural network library +# DyNet +The Dynamic Neural Network Toolkit -#### Building +DyNet (formerly known as [cnn](http://github.com/clab/cnn-v1)) is a neural network library developed by Carnegie Mellon University and many others. It is written in C++ (with bindings in Python) and is designed to be efficient when run on either CPU or GPU, and to work well with networks that have dynamic structures that change for every training instance. For example, these kinds of networks are particularly important in natural language processing tasks, and DyNet has been used to build state-of-the-art systems for [syntactic parsing](https://github.com/clab/lstm-parser), [machine translation](https://github.com/neubig/lamtram), [morphological inflection](https://github.com/mfaruqui/morph-trans), and many other application areas. -In `src`, you need to first use [`cmake`](http://www.cmake.org/) to generate the makefiles +Read the [documentation](http://dynet.readthedocs.io/en/latest/) to get started, and feel free to contact the [dynet-users group](https://groups.google.com/forum/#!forum/dynet-users) with any questions (if you want to receive email make sure to select "all email" when you sign up). 
We greatly appreciate any bug reports and contributions, which can be made by filing an issue or making a pull request through the [github page](http://github.com/clab/dynet). - cmake -DEIGEN3_INCLUDE_DIR=/Users/cdyer/software/eigen-eigen-36fd1ba04c12 -G 'Unix Makefiles' +You can also read more technical details in our [technical report](https://arxiv.org/abs/1701.03980). If you use DyNet for research, please cite this report as follows: -Then to compile, run - - make -j 2 - make test - -If you want to see the compile commands that are used, you can run - - make VERBOSE=1 + @article{dynet, + title={DyNet: The Dynamic Neural Network Toolkit}, + author={Graham Neubig and Chris Dyer and Yoav Goldberg and Austin Matthews and Waleed Ammar and Antonios Anastasopoulos and Miguel Ballesteros and David Chiang and Daniel Clothiaux and Trevor Cohn and Kevin Duh and Manaal Faruqui and Cynthia Gan and Dan Garrette and Yangfeng Ji and Lingpeng Kong and Adhiguna Kuncoro and Gaurav Kumar and Chaitanya Malaviya and Paul Michel and Yusuke Oda and Matthew Richardson and Naomi Saphra and Swabha Swayamdipta and Pengcheng Yin}, + journal={arXiv preprint arXiv:1701.03980}, + year={2017} + } +[![Build Status](https://travis-ci.org/clab/dynet.svg?branch=master)](https://travis-ci.org/clab/dynet) +[![Doc build Status](https://readthedocs.org/projects/dynet/badge/?version=latest)](http://dynet.readthedocs.io/en/latest/) diff --git a/cmake/FindTH.cmake b/cmake/FindDyNet.cmake similarity index 100% rename from cmake/FindTH.cmake rename to cmake/FindDyNet.cmake diff --git a/cmake/FindEigen3.cmake b/cmake/FindEigen3.cmake index 9c546a05d..cea1afeab 100644 --- a/cmake/FindEigen3.cmake +++ b/cmake/FindEigen3.cmake @@ -9,6 +9,12 @@ # EIGEN3_FOUND - system has eigen lib with correct version # EIGEN3_INCLUDE_DIR - the eigen include directory # EIGEN3_VERSION - eigen version +# +# This module reads hints about search locations from +# the following environment variables: +# +# EIGEN3_ROOT +# 
EIGEN3_ROOT_DIR # Copyright (c) 2006, 2007 Montel Laurent, # Copyright (c) 2008, 2009 Gael Guennebaud, @@ -62,6 +68,9 @@ if (EIGEN3_INCLUDE_DIR) else (EIGEN3_INCLUDE_DIR) find_path(EIGEN3_INCLUDE_DIR NAMES signature_of_eigen3_matrix_library + HINTS + ENV EIGEN3_ROOT + ENV EIGEN3_ROOT_DIR PATHS ${CMAKE_INSTALL_PREFIX}/include ${KDE4_INCLUDE_DIR} diff --git a/cmake/FindMinerva.cmake b/cmake/FindMinerva.cmake deleted file mode 100644 index b02bc9228..000000000 --- a/cmake/FindMinerva.cmake +++ /dev/null @@ -1,17 +0,0 @@ -# -# This will define: -# MINERVA_FOUND -# MINERVA_LIBRARY -# MINERVA_INCLUDE_DIR - -find_path(MINERVA_INCLUDE_DIR - NAMES minerva.h - PATHS ${MINERVA_PREFIX_PATH}/minerva) - -find_library(MINERVA_LIBRARY - NAMES minerva - PATHS ${MINERVA_PREFIX_PATH}/lib) - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(Minerva DEFAULT_MSG MINERVA_LIBRARY MINERVA_INCLUDE_DIR) - diff --git a/cmake/FindTH++.cmake b/cmake/FindTH++.cmake deleted file mode 100644 index 8fdda38ec..000000000 --- a/cmake/FindTH++.cmake +++ /dev/null @@ -1,16 +0,0 @@ - -CMAKE_MINIMUM_REQUIRED(VERSION 2.8.7 FATAL_ERROR) - -INCLUDE(FindPackageHandleStandardArgs) - -FIND_LIBRARY(THPP_LIBRARY thpp PATHS "${THPP_PREFIX_PATH}/thpp/build") -FIND_PATH(THPP_INCLUDE_DIR "thpp/Tensor.h" PATHS "${THPP_PREFIX_PATH}") - -SET(THPP_LIBRARIES ${THPP_LIBRARY}) - -FIND_PACKAGE_HANDLE_STANDARD_ARGS( - TH++ - REQUIRED_ARGS - THPP_INCLUDE_DIR - THPP_LIBRARY) - diff --git a/cnn/CMakeLists.txt b/cnn/CMakeLists.txt deleted file mode 100644 index 55b066948..000000000 --- a/cnn/CMakeLists.txt +++ /dev/null @@ -1,64 +0,0 @@ -# ########## Project setup ########## -PROJECT(cnn CXX) - -# ########## cnn library ########## -# Sources: -set(cnn_library_SRCS - cnn.cc - dict.cc - edges.cc - lstm-fast.cc - model.cc - param-edges.cc - rnn.cc - saxe_init.cc - training.cc -) - -if(MINERVA_FOUND) - # for now - list(APPEND cnn_library_SRCS backends/eigen/eigen-backend.cc) -else(MINERVA_FOUND) - list(APPEND 
cnn_library_SRCS backends/eigen/eigen-backend.cc) -endif(MINERVA_FOUND) - -# Headers: -set(cnn_library_HDRS - cnn.h - edges.h - lstm-fast.h - model.h - param-edges.h - rnn.h - saxe_init.h - tensor.h - timing.h - training.h -) - -file(GLOB TEST_SRCS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} tests/*.cc) - -foreach(test_src ${TEST_SRCS}) - #Extract the filename without an extension (NAME_WE) - get_filename_component(testName ${test_src} NAME_WE) - - #Add compile target - add_executable(${testName} ${test_src}) - - #link to Boost libraries AND your targets and dependencies - target_link_libraries(${testName} ${Boost_LIBRARIES} cnn) - - set_target_properties(${testName} PROPERTIES - RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/tests.bin) - - #Finally add it to test execution - - #Notice the WORKING_DIRECTORY and COMMAND - add_test(NAME ${testName} - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/tests.bin - COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/tests.bin/${testName} ) -endforeach(test_src) - -# actual target: -add_library(cnn ${cnn_library_SRCS} ${cnn_library_HDRS}) -# target_compile_features(cnn PRIVATE cxx_range_for) - diff --git a/cnn/backends/eigen/eigen-backend.cc b/cnn/backends/eigen/eigen-backend.cc deleted file mode 100644 index 757037f97..000000000 --- a/cnn/backends/eigen/eigen-backend.cc +++ /dev/null @@ -1,116 +0,0 @@ -#include "cnn/backends/eigen/eigen-backend.h" - -#include -#include - -using namespace std; - -namespace cnn { - -std::mt19937* rndeng = nullptr; -void Initialize(int& argc, char**& argv) { - std::random_device rd; - rndeng = new mt19937(rd()); -} - -Eigen::MatrixXf Elewise::Ln(const Eigen::MatrixXf& x) { - return x.array().log(); -} - -Eigen::MatrixXf Elewise::Exp(const Eigen::MatrixXf& x) { - return x.array().exp(); -} - -Eigen::MatrixXf Elewise::SigmoidForward(const Eigen::MatrixXf& x) { - Eigen::MatrixXf fx = x; - for (unsigned i = 0; i < fx.rows(); ++i) - for (unsigned j = 0; j < fx.cols(); ++j) - fx(i,j) = 1.f / (1.f + expf(-x(i,j))); - 
return fx; -} - -Eigen::MatrixXf Elewise::SigmoidBackward(const Eigen::MatrixXf& diff, const Eigen::MatrixXf& top, const Eigen::MatrixXf& bottom) { - const unsigned rows = top.rows(); - const unsigned cols = top.cols(); - Eigen::MatrixXf dfdx(rows, cols); - for (unsigned i = 0; i < rows; ++i) - for (unsigned j = 0; j < cols; ++j) - dfdx(i,j) = (1.f - top(i,j)) * top(i,j); - return dfdx.cwiseProduct(diff); -} - -Eigen::MatrixXf Elewise::ReluForward(const Eigen::MatrixXf& x) { - Eigen::MatrixXf fx = x; - for (unsigned i = 0; i < fx.rows(); ++i) - for (unsigned j = 0; j < fx.cols(); ++j) - if (fx(i,j) < 0) fx(i,j) = 0; - return fx; -} - -Eigen::MatrixXf Elewise::ReluBackward(const Eigen::MatrixXf& diff, const Eigen::MatrixXf& top, const Eigen::MatrixXf& bottom) { - Eigen::MatrixXf dfdx = diff; - const unsigned rows = diff.rows(); - const unsigned cols = diff.cols(); - for (unsigned i = 0; i < rows; ++i) - for (unsigned j = 0; j < cols; ++j) - if (!top(i,j)) dfdx(i,j) = 0; - return dfdx; -} - -Eigen::MatrixXf Elewise::TanhForward(const Eigen::MatrixXf& x) { - Eigen::MatrixXf fx = x; - for (unsigned i = 0; i < fx.rows(); ++i) - for (unsigned j = 0; j < fx.cols(); ++j) - fx(i,j) = tanhf(fx(i,j)); - return fx; -} - -Eigen::MatrixXf Elewise::TanhBackward(const Eigen::MatrixXf& diff, const Eigen::MatrixXf& top, const Eigen::MatrixXf& bottom) { - const unsigned rows = top.rows(); - const unsigned cols = top.cols(); - Eigen::MatrixXf dfdx(rows, cols); - for (unsigned i = 0; i < rows; ++i) - for (unsigned j = 0; j < cols; ++j) - dfdx(i,j) = 1.f - top(i,j) * top(i,j); - return dfdx.cwiseProduct(diff); -} - -inline float logsumexp(const Eigen::MatrixXf& x) { - float m = x(0,0); - for (unsigned i = 1; i < x.rows(); ++i) { - float r = x(i,0); - if (r > m) m = r; - } - float z = 0; - for (unsigned i = 0; i < x.rows(); ++i) - z += expf(x(i,0) - m); - return m + logf(z); -} - -Eigen::MatrixXf Convolution::SoftmaxForward(const Eigen::MatrixXf& src, SoftmaxAlgorithm algorithm) { - 
const unsigned rows = src.rows(); - assert(src.cols() == 1); - const float logz = logsumexp(src); - Eigen::MatrixXf fx(rows, 1); - for (unsigned i = 0; i < rows; ++i) - fx(i,0) = expf(src(i,0) - logz); - return fx; -} - -Eigen::MatrixXf Convolution::SoftmaxBackward(const Eigen::MatrixXf& diff, const Eigen::MatrixXf& top, SoftmaxAlgorithm algorithm) { - // d softmax(x)_i / d x_j = softmax(x)_i * (1 - softmax(x)_i) if i == j - // d softmax(x)_i / d x_j = -softmax(x)_i * softmax(x)_j if i != j - const unsigned rows = top.rows(); - - float off_diag_sum = 0; - for (unsigned i = 0; i < rows; ++i) - off_diag_sum -= top(i, 0) * diff(i, 0); - - Eigen::MatrixXf dEdx = Eigen::MatrixXf::Zero(rows, 1); - for (unsigned i = 0; i < rows; ++i) - dEdx(i, 0) = (off_diag_sum + diff(i, 0)) * top(i, 0); - return dEdx; -} - -} // namespace cnn - diff --git a/cnn/backends/eigen/eigen-backend.h b/cnn/backends/eigen/eigen-backend.h deleted file mode 100644 index 7633a1ef1..000000000 --- a/cnn/backends/eigen/eigen-backend.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef CNN_EIGEN_BACKEND_H_ -#define CNN_EIGEN_BACKEND_H_ - -#include "Eigen/Eigen" - -namespace cnn { - -void Initialize(int& argc, char**& argv); - -// This is a class that makes some of the Minerva library calls available -// even with the Eigen backend (it will just be used in porting, and until -// Minerva supports everything it should on the CPU, I hope). 
-// -// Note about names in the Backward functions: -// Minerva's bottom = CNN's x (the input to the function) -// Minvera's top = CNN's fx (the output of the function) -// Minerva's diff = CNN's dEdf (the derivative of the loss with respect to fx) - -class Elewise { - public: - static Eigen::MatrixXf Ln(const Eigen::MatrixXf&); - static Eigen::MatrixXf Exp(const Eigen::MatrixXf&); - static Eigen::MatrixXf SigmoidForward(const Eigen::MatrixXf&); - static Eigen::MatrixXf SigmoidBackward(const Eigen::MatrixXf& diff, const Eigen::MatrixXf& top, const Eigen::MatrixXf& bottom); - static Eigen::MatrixXf ReluForward(const Eigen::MatrixXf&); - static Eigen::MatrixXf ReluBackward(const Eigen::MatrixXf& diff, const Eigen::MatrixXf& top, const Eigen::MatrixXf& bottom); - static Eigen::MatrixXf TanhForward(const Eigen::MatrixXf&); - static Eigen::MatrixXf TanhBackward(const Eigen::MatrixXf& diff, const Eigen::MatrixXf& top, const Eigen::MatrixXf& bottom); -}; - -typedef unsigned SoftmaxAlgorithm; - -class Convolution { - public: - static Eigen::MatrixXf SoftmaxForward(const Eigen::MatrixXf& src, SoftmaxAlgorithm algorithm); - static Eigen::MatrixXf SoftmaxBackward(const Eigen::MatrixXf& diff, const Eigen::MatrixXf& top, SoftmaxAlgorithm algorithm); -}; - -} // namespace cnn - -#endif diff --git a/cnn/backends/eigen/eigen-serialization.h b/cnn/backends/eigen/eigen-serialization.h deleted file mode 100644 index 41809f6d3..000000000 --- a/cnn/backends/eigen/eigen-serialization.h +++ /dev/null @@ -1,69 +0,0 @@ -#ifndef CNN_EIGEN_SERIALIZE_H_ -#define CNN_EIGEN_SERIALIZE_H_ - -#include -#include -#include - -namespace boost{ - namespace serialization{ - - template< class Archive, - class S, - int Rows_, - int Cols_, - int Ops_, - int MaxRows_, - int MaxCols_> - inline void save( - Archive & ar, - const Eigen::Matrix & g, - const unsigned int version) - { - int rows = g.rows(); - int cols = g.cols(); - - ar & rows; - ar & cols; - ar & boost::serialization::make_array(g.data(), rows 
* cols); - } - - template< class Archive, - class S, - int Rows_, - int Cols_, - int Ops_, - int MaxRows_, - int MaxCols_> - inline void load( - Archive & ar, - Eigen::Matrix & g, - const unsigned int version) - { - int rows, cols; - ar & rows; - ar & cols; - g.resize(rows, cols); - ar & boost::serialization::make_array(g.data(), rows * cols); - } - - template< class Archive, - class S, - int Rows_, - int Cols_, - int Ops_, - int MaxRows_, - int MaxCols_> - inline void serialize( - Archive & ar, - Eigen::Matrix & g, - const unsigned int version) - { - split_free(ar, g, version); - } - - - } // namespace serialization -} // namespace boost - -#endif diff --git a/cnn/backends/eigen/random.h b/cnn/backends/eigen/random.h deleted file mode 100644 index 3f37ad2d6..000000000 --- a/cnn/backends/eigen/random.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef CNN_EIGEN_RANDOM_H -#define CNN_EIGEN_RANDOM_H - -#include - -namespace cnn { - -extern std::mt19937* rndeng; - -} // namespace cnn - -#endif diff --git a/cnn/backends/eigen/tensor-eigen.h b/cnn/backends/eigen/tensor-eigen.h deleted file mode 100644 index e38b09118..000000000 --- a/cnn/backends/eigen/tensor-eigen.h +++ /dev/null @@ -1,139 +0,0 @@ -#ifndef CNN_TENSOR_EIGEN_H_ -#define CNN_TENSOR_EIGEN_H_ - -#include -#include -#include - -#include -#include "cnn/backends/eigen/eigen-serialization.h" -#include "cnn/backends/eigen/random.h" - -namespace cnn { - -#define EIGEN_BACKEND 1 - -typedef Eigen::MatrixXf Tensor; -typedef float real; - -inline real as_scalar(const Tensor& t) { - assert(t.cols() == 1); - assert(t.rows() == 1); - return t(0,0); -} - -inline std::vector as_vector(const Tensor& v) { - std::vector res(v.rows() * v.cols()); - std::memcpy(&res[0], v.data(), sizeof(real) * res.size()); - return res; -} - -// dummy function with Eigen backend -inline Tensor FromEigenMatrix(const Eigen::MatrixXf& src) { return src; } - -struct Dim { - Dim() : rows(1), cols(1) {} - explicit Dim(int m) : rows(m), cols(1) {} - Dim(int 
m, int n) : rows(m), cols(n) {} - inline int size() const { return rows * cols; } - int ndims() const { return (cols == 1 ? 1 : 2); } - inline unsigned Prod() const { return rows * cols; } - Dim(std::initializer_list x) : cols(1) { - unsigned c = 0; - for (auto v : x) { - if (c == 0) rows = v; - if (c == 1) cols = v; - ++c; - } - if (c > 2) { - std::cerr << "Dim class doesn't support more than two dimensions\n"; - abort(); - } - } - int operator[](unsigned i) const { - if (i == 0) return rows; - if (i == 1) return cols; - abort(); - } - int size(unsigned i) const { - return (*this)[i]; - } - unsigned short rows; - unsigned short cols; - Dim transpose() const { return Dim(cols,rows); } - private: - friend class boost::serialization::access; - template void serialize(Archive& ar, const unsigned int) { - ar & rows; - ar & cols; - } -}; - -inline Dim operator*(const Dim& a, const Dim& b) { - assert(a.cols == b.rows); - return Dim(a.rows, b.cols); -} - -inline bool operator==(const Dim& a, const Dim& b) { return (a.rows == b.rows && a.cols == b.cols); } -inline bool operator!=(const Dim& a, const Dim& b) { return !(a == b); } - -inline std::ostream& operator<<(std::ostream& os, const Dim& d) { - return os << '(' << d.rows << ',' << d.cols << ')'; -} - -inline Dim size(const Tensor& m) { return Dim(m.rows(), m.cols()); } - -inline Tensor FromRawData(const Dim& dim, const float* data) { - Tensor t(dim.size(0), dim.ndims() > 1 ? 
dim.size(1) : 1); - std::memcpy(t.data(), data, sizeof(float) * dim.size()); - return t; -} - -inline Tensor Constant(const Dim& d, real c) { - Tensor m(d.rows, d.cols); - m.fill(c); - return m; -} -inline Tensor Zero(const Dim& d) { return Eigen::MatrixXf::Zero(d.rows, d.cols); } -inline Tensor Ones(const Dim& d) { return Eigen::MatrixXf::Ones(d.rows, d.cols); } -inline Tensor Random(const Dim& d, real scale) { - std::uniform_real_distribution distribution(-scale,scale); - auto b = [&] (real) {return distribution(*rndeng);}; - return Eigen::MatrixXf::NullaryExpr(d.rows, d.cols, b); -} -inline Tensor Random(const Dim& d) { - return Random(d, sqrt(6) / sqrt(d.cols + d.rows)); -} -inline Tensor RandomBernoulli(const Dim& d, real p) { - std::bernoulli_distribution distribution(p); - auto b = [&] (real) {return distribution(*rndeng);}; - return Eigen::MatrixXf::NullaryExpr(d.rows, d.cols, b); -} -inline Tensor RandomNormal(const Dim& d, real mean, real stddev) { - std::normal_distribution distribution(mean, stddev); - auto b = [&] (real) {return distribution(*rndeng);}; - return Eigen::MatrixXf::NullaryExpr(d.rows, d.cols, b); -} -inline real rand01() { - std::uniform_real_distribution distribution(0, 1); - return distribution(*rndeng); -} - -// column major constructor -inline Tensor Ccm(const Dim&d, const std::initializer_list& v) { - Tensor m = Zero(d); - int cc = 0; - int cr = 0; - for (const auto& x : v) { - m(cr, cc) = x; - ++cr; - if (cr == d.rows) { cr = 0; ++cc; } - } - return m; -} - -} // namespace cnn - -#include "cnn/backends/eigen/eigen-backend.h" - -#endif diff --git a/cnn/backends/minerva/tensor-minerva.h b/cnn/backends/minerva/tensor-minerva.h deleted file mode 100644 index 8538485ac..000000000 --- a/cnn/backends/minerva/tensor-minerva.h +++ /dev/null @@ -1,50 +0,0 @@ -#ifndef CNN_TENSOR_MINERVA_H_ -#define CNN_TENSOR_MINERVA_H_ - -#include -#include -#include -#include - -#include -#include "minerva.h" - -using namespace minerva; - -namespace cnn { - 
-#define MINERVA_BACKEND 1 - -typedef minerva::NArray Tensor; -typedef float real; -typedef minerva::Scale Dim; - -inline Tensor Constant(const Dim& d, real c) { return minerva::NArray::Constant(d, c); } -inline Tensor Zero(const Dim& d) { return minerva::NArray::Zeros(d); } -inline Tensor Random(const Dim& d) { return minerva::NArray::Zeros(d); } -inline Dim size(const Tensor& m) { return m.Size(); } - -// avoid using this, because it's slow -inline Tensor FromEigenMatrix(const Eigen::MatrixXf& src) { - const Dim size = {src.rows(), src.cols()}; - std::shared_ptr data(new float[size.Prod()], [](float* ptr) { - delete[] ptr; - }); - auto p = src.data(); - auto d = data.get(); - std::memcpy(d, p, size.Prod() * sizeof(float)); - return NArray::MakeNArray(size, data); -} - -// in column-major order, consecutive elements of the columns are contiguous. -// in Minerva, matrices are stored in column-major (i.e., FORTRAN) order -inline Tensor Ccm(const Dim&d, const std::initializer_list& v) { - std::vector vv = v; - std::shared_ptr input_ptr(new float[d.Prod()], [](float* ptr) { delete[] ptr; }); - std::memcpy(input_ptr.get(), &vv[0], d.Prod() * sizeof(float)); - return minerva::NArray::MakeNArray(d, input_ptr); -} - -} // namespace cnn - -#endif diff --git a/cnn/cnn.cc b/cnn/cnn.cc deleted file mode 100644 index b15277721..000000000 --- a/cnn/cnn.cc +++ /dev/null @@ -1,175 +0,0 @@ -#include "cnn/cnn.h" -#include "cnn/edges.h" -#include "cnn/param-edges.h" - -using namespace std; - -namespace cnn { - -Edge::~Edge() {} - -bool Edge::has_parameters() const { return false; } - -Hypergraph::~Hypergraph() { - for (auto n : nodes) delete n; - for (auto e : edges) delete e; - // don't delete parameter_edges since they're a subset of edges -} - -VariableIndex Hypergraph::add_input(real s) { - VariableIndex new_node_index(nodes.size()); - nodes.push_back(new Node(edges.size(), new_node_index)); - ScalarInputEdge* e = new ScalarInputEdge(s); - edges.push_back(e); - 
edges.back()->head_node = new_node_index; - return new_node_index; -} - -VariableIndex Hypergraph::add_input(const real* ps) { - VariableIndex new_node_index(nodes.size()); - nodes.push_back(new Node(edges.size(), new_node_index)); - ScalarInputEdge* e = new ScalarInputEdge(ps); - edges.push_back(e); - edges.back()->head_node = new_node_index; - return new_node_index; -} - -VariableIndex Hypergraph::add_input(const Dim& d, const vector* pm) { - VariableIndex new_node_index(nodes.size()); - nodes.push_back(new Node(edges.size(), new_node_index)); - InputEdge* e = new InputEdge(d, pm); - edges.push_back(e); - edges.back()->head_node = new_node_index; - return new_node_index; -} - -VariableIndex Hypergraph::add_parameter(Parameters* p) { - VariableIndex new_node_index(nodes.size()); - nodes.push_back(new Node(edges.size(), new_node_index)); - ParameterEdge* new_edge = new ParameterEdge(p); - edges.push_back(new_edge); - parameter_edges.push_back(new_edge); - new_edge->head_node = new_node_index; - return new_node_index; -} - -VariableIndex Hypergraph::add_lookup(LookupParameters* p, const unsigned* pindex) { - VariableIndex new_node_index(nodes.size()); - nodes.push_back(new Node(edges.size(), new_node_index)); - LookupEdge* new_edge = new LookupEdge(p, pindex); - edges.push_back(new_edge); - parameter_edges.push_back(new_edge); - new_edge->head_node = new_node_index; - return new_node_index; -} - -VariableIndex Hypergraph::add_lookup(LookupParameters* p, unsigned index) { - VariableIndex new_node_index(nodes.size()); - nodes.push_back(new Node(edges.size(), new_node_index)); - LookupEdge* new_edge = new LookupEdge(p, index); - edges.push_back(new_edge); - parameter_edges.push_back(new_edge); - new_edge->head_node = new_node_index; - return new_node_index; -} - -VariableIndex Hypergraph::add_const_lookup(LookupParameters* p, unsigned* pindex) { - VariableIndex new_node_index(nodes.size()); - nodes.push_back(new Node(edges.size(), new_node_index)); - LookupEdge* 
new_edge = new LookupEdge(p, pindex); - new_edge->has_optimizable_parameters = false; - edges.push_back(new_edge); - new_edge->head_node = new_node_index; - return new_node_index; -} - -VariableIndex Hypergraph::add_const_lookup(LookupParameters* p, unsigned index) { - VariableIndex new_node_index(nodes.size()); - nodes.push_back(new Node(edges.size(), new_node_index)); - LookupEdge* new_edge = new LookupEdge(p, index); - new_edge->has_optimizable_parameters = false; - edges.push_back(new_edge); - new_edge->head_node = new_node_index; - return new_node_index; -} - -const Tensor& Hypergraph::incremental_forward() { - while (last_node_evaluated < nodes.size()) { - Node* node = nodes[last_node_evaluated]; - const Edge& in_edge = *edges[node->in_edge]; - vector xs(in_edge.arity()); - unsigned ti = 0; - for (VariableIndex tail_node_index : in_edge.tail) { - xs[ti] = &nodes[tail_node_index]->f; - ++ti; - } - node->f = in_edge.forward(xs); - node->dEdf = Zero(cnn::size(node->f)); - ++last_node_evaluated; - } - return nodes.back()->f; -} - -const Tensor& Hypergraph::forward() { - last_node_evaluated = 0; - return incremental_forward(); -} - -void Hypergraph::backward() { - // here we find constants to avoid doing extra work - vector needs_derivative(nodes.size(), false); - for (unsigned ni = 0; ni < nodes.size(); ++ni) { - const Node& node = *nodes[ni]; - const Edge& in_edge = *edges[node.in_edge]; - bool is_variable = in_edge.has_parameters(); - for (auto tail_node : in_edge.tail) - is_variable |= needs_derivative[tail_node]; - needs_derivative[ni] = is_variable; - } - - // initialize dE/dE = 1 - nodes.back()->dEdf = cnn::Constant({1,1}, 1); - - // loop in reverse topological order - for (int i = nodes.size() - 1; i >= 0; --i) { - const Node& node = *nodes[i]; - const Edge& in_edge = *edges[node.in_edge]; - vector xs(in_edge.arity()); - unsigned ti = 0; - for (unsigned tail_node_index : in_edge.tail) { - xs[ti] = &nodes[tail_node_index]->f; - ++ti; - } - for (unsigned ti 
= 0; ti < in_edge.tail.size(); ++ti) { - if (needs_derivative[in_edge.tail[ti]]) { - Node& tail_node = *nodes[in_edge.tail[ti]]; - tail_node.dEdf += in_edge.backward(xs, node.f, node.dEdf, ti); - } - } - } - - // accumulate gradients into parameters - for (auto pedge : parameter_edges) - pedge->accumulate_grad(nodes[pedge->head_node]->dEdf); -} - -void Hypergraph::PrintGraphviz() const { - cerr << "digraph G {\n rankdir=LR;\n nodesep=.05;\n"; - unsigned nc = 0; - for (auto node : nodes) { - vector var_names; - const Edge* in_edge = edges[node->in_edge]; - for (auto tail_node : in_edge->tail) - var_names.push_back(nodes[tail_node]->variable_name()); - cerr << " N" << nc << " [label=\"" << node->variable_name() << " = " - << in_edge->as_string(var_names) << "\"];\n"; - ++nc; - } - for (auto edge : edges) - for (auto ni : edge->tail) - cerr << " N" << ni << " -> N" << edge->head_node << ";\n"; - cerr << "}\n"; -} - -} // namespace cnn - diff --git a/cnn/cnn.h b/cnn/cnn.h deleted file mode 100644 index 82fab4a15..000000000 --- a/cnn/cnn.h +++ /dev/null @@ -1,200 +0,0 @@ -#ifndef CNN_CNN_H_ -#define CNN_CNN_H_ - -#include -#include -#include -#include -#include -#include - -#include "cnn/tensor.h" -#include "cnn/model.h" - -// Computation graph where nodes represent forward and backward intermediate -// values, and edges represent functions of multiple values. To represent the -// fact that a function may have multiple arguments, edges have a single head -// and 0, 1, 2, or more tails. (Constants, inputs, and parameters are -// represented as functions of 0 parameters.) -// Example: given the function z = f(x, y), z, x, and y are nodes, and there -// is an edge representing f with which points to the z node (i.e., its head), -// and x and y are the tails of the edge. 
- -namespace cnn { - -// TODO pull fx and dEdf out of the Node object and have them -// as local tables in forward/backward algorithms - -struct Edge; -struct ParameterEdgeBase; -struct Node; - -BOOST_STRONG_TYPEDEF(unsigned, VariableIndex) -inline void swap(VariableIndex& i1, VariableIndex& i2) { - VariableIndex t = i1; - i1 = i2; - i2 = t; -} - -struct Hypergraph { - Hypergraph() : last_node_evaluated() {} - ~Hypergraph(); - - // INPUTS - // the computational network will pull inputs in from the user's data - // structures and make them available to the computation - VariableIndex add_input(real s); // add scalar - VariableIndex add_input(const real* ps); // add pointer to scalar - VariableIndex add_input(const Dim& d, const std::vector* pdata); - - // PARAMETERS - // parameters are things that are optimized. in contrast to a system like - // Torch where computational modules may have their own parameters, in CNN - // parameters are just parameters - VariableIndex add_parameter(Parameters* p); - // use pindex to point to a memory location where the index will live - // that the caller owns - VariableIndex add_lookup(LookupParameters* p, const unsigned* pindex); - VariableIndex add_lookup(LookupParameters* p, unsigned index); - // just like add_lookup, but don't optimize the lookup parameters - VariableIndex add_const_lookup(LookupParameters* p, unsigned* pindex); - VariableIndex add_const_lookup(LookupParameters* p, unsigned index); - - // COMPUTATIONS - template inline VariableIndex add_function(const std::initializer_list& arguments); - template - inline VariableIndex add_function(const std::initializer_list& arguments, - Args&&... 
side_information); - template inline VariableIndex add_function(const T& arguments); - - // perform computations - const Tensor& forward(); - const Tensor& incremental_forward(); // if you want to add nodes and evaluate just the new parts - void backward(); - - // debugging - void PrintGraphviz() const; - - // data - std::vector nodes; // **stored in topological order** - std::vector edges; // all edges - std::vector parameter_edges; // edges that contain parameters that can be updated (subset of edges) - VariableIndex last_node_evaluated; // enables forward graphs to be evaluated incrementally -}; - -// represents an SSA variable -// * in_edge is the index of the function that computes the variable -// * out_edges are the list of functions that use this variable -// * f is the computed value of the variable (TODO: remove this, see note below) -// * dEdf is the derivative of the output with respect to the function -struct Node { - // name is currently just used for debugging- maybe eventually for code - // generation though - Node(unsigned in_edge_index, VariableIndex nid) : - in_edge(in_edge_index), - node_id(nid) {} - - // dependency structure - unsigned in_edge; - std::vector out_edges; - - // debugging - std::string variable_name() const { return "v" + std::to_string(node_id); } - VariableIndex node_id; // my id - - // computation - // TODO remove these from here, they should be local to the forward/backward - // algorithms - Tensor f; // f(x_1 , ... 
, x_n) - Tensor dEdf; // dE/df -}; - -inline void swap(Node& n1, Node& n2) { - using std::swap; - swap(n1.f, n2.f); - swap(n1.dEdf, n2.dEdf); - swap(n1.in_edge, n2.in_edge); - swap(n1.out_edges, n2.out_edges); - swap(n1.node_id, n2.node_id); -} - -// represents a function of zero or more input variables -// functions with zero inputs are constants or optimizeable parameters -struct Edge { - virtual ~Edge(); - // debugging - virtual std::string as_string(const std::vector& var_names) const = 0; - - // computation - virtual Tensor forward(const std::vector& xs) const = 0; - // computes the derivative of E with respect to the ith argument to f, that is, xs[i] - virtual Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const = 0; - virtual bool has_parameters() const; - - // number of arguments to the function - inline unsigned arity() const { return tail.size(); } - - // structure - VariableIndex head_node; // index of node to contain result of f - std::vector tail; // arguments of function -}; - -inline void swap(Edge& e1, Edge& e2) { - using std::swap; - swap(e1.tail, e2.tail); - swap(e1.head_node, e2.head_node); -} - -template -inline VariableIndex Hypergraph::add_function(const std::initializer_list& arguments) { - VariableIndex new_node_index(nodes.size()); - unsigned new_edge_index = edges.size(); - nodes.push_back(new Node(new_edge_index, new_node_index)); - Edge* new_edge = new Function; - edges.push_back(new_edge); - new_edge->head_node = new_node_index; - for (auto ni : arguments) { - new_edge->tail.push_back(ni); - nodes[ni]->out_edges.push_back(new_edge_index); - } - return new_node_index; -} - -// pass side information to the function. these are likely to be nondifferentiable arguments -template -inline VariableIndex Hypergraph::add_function(const std::initializer_list& arguments, - Args&&... 
side_information) { - VariableIndex new_node_index(nodes.size()); - unsigned new_edge_index = edges.size(); - nodes.push_back(new Node(new_edge_index, new_node_index)); - Edge* new_edge = new Function(std::forward(side_information)...); - edges.push_back(new_edge); - new_edge->head_node = new_node_index; - for (auto ni : arguments) { - new_edge->tail.push_back(ni); - nodes[ni]->out_edges.push_back(new_edge_index); - } - return new_node_index; -} - -template -inline VariableIndex Hypergraph::add_function(const T& arguments) { - VariableIndex new_node_index(nodes.size()); - unsigned new_edge_index = edges.size(); - nodes.push_back(new Node(new_edge_index, new_node_index)); - Edge* new_edge = new Function; - edges.push_back(new_edge); - new_edge->head_node = new_node_index; - for (auto ni : arguments) { - new_edge->tail.push_back(ni); - nodes[ni]->out_edges.push_back(new_edge_index); - } - return new_node_index; -} - -} // namespace cnn - -#endif diff --git a/cnn/dict.cc b/cnn/dict.cc deleted file mode 100644 index 4e1810eb5..000000000 --- a/cnn/dict.cc +++ /dev/null @@ -1,39 +0,0 @@ -#include "dict.h" - -#include -#include -#include - -using namespace std; - -namespace cnn { - -std::vector ReadSentence(const std::string& line, Dict* sd) { - std::istringstream in(line); - std::string word; - std::string sep = "|||"; - std::vector res; - while(in) { - in >> word; - if (word.empty()) break; - res.push_back(sd->Convert(word)); - } - return res; -} - -void ReadSentencePair(const std::string& line, std::vector* s, Dict* sd, std::vector* t, Dict* td) { - std::istringstream in(line); - std::string word; - std::string sep = "|||"; - Dict* d = sd; - std::vector* v = s; - while(in) { - in >> word; - if (word.empty()) break; - if (word == sep) { d = td; v = t; continue; } - v->push_back(d->Convert(word)); - } -} - -} // namespace cnn - diff --git a/cnn/dict.h b/cnn/dict.h deleted file mode 100644 index 6358c0f91..000000000 --- a/cnn/dict.h +++ /dev/null @@ -1,57 +0,0 @@ -#ifndef 
CNN_DICT_H_ -#define CNN_DICT_H_ - -#include -#include -#include -#include -#include - -namespace cnn { - -class Dict { - typedef std::unordered_map Map; - public: - Dict() : b0_(""), frozen(false) { - } - - inline unsigned size() const { return words_.size() + 1; } - - void Freeze() { frozen = true; } - - inline int Convert(const std::string& word) { - auto i = d_.find(word); - if (i == d_.end()) { - if (frozen) { - std::cerr << "Unknown word encountered: " << word << std::endl; - abort(); - } - words_.push_back(word); - d_[word] = words_.size(); - return words_.size(); - } else { - return i->second; - } - } - - inline const std::string& Convert(const int& id) const { - if (id == 0) return b0_; - assert(id <= (int)words_.size()); - return words_[id-1]; - } - - void clear() { words_.clear(); d_.clear(); } - - private: - std::string b0_; - bool frozen; - std::vector words_; - Map d_; -}; - -std::vector ReadSentence(const std::string& line, Dict* sd); -void ReadSentencePair(const std::string& line, std::vector* s, Dict* sd, std::vector* t, Dict* td); - -} // namespace cnn - -#endif diff --git a/cnn/edges.cc b/cnn/edges.cc deleted file mode 100644 index 90489795f..000000000 --- a/cnn/edges.cc +++ /dev/null @@ -1,723 +0,0 @@ -#include "cnn/edges.h" - -#include -#include -#include - -using namespace std; - -namespace cnn { - -string GaussianNoise::as_string(const vector& arg_names) const { - ostringstream s; - s << arg_names[0] << " + N(0," << stddev << ')'; - return s.str(); -} - -Tensor GaussianNoise::forward(const vector& xs) const { - assert(xs.size() == 1); - const Tensor& x = *xs[0]; - return x + RandomNormal(cnn::size(x), 0, stddev); -} - -Tensor GaussianNoise::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i == 0); - return dEdf; -}; - -string Dropout::as_string(const vector& arg_names) const { - ostringstream s; - s << "dropout(" << arg_names[0] << ",p=" << p << ')'; - return s.str(); -} - -Tensor 
Dropout::forward(const vector& xs) const { - assert(xs.size() == 1); - const Tensor& x = *xs[0]; - noise_mask = RandomBernoulli(cnn::size(x), p); - return x.cwiseProduct(noise_mask); -} - -Tensor Dropout::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i == 0); - return dEdf.cwiseProduct(noise_mask); -}; - -string OneMinusX::as_string(const vector& arg_names) const { - ostringstream s; - s << "1 - " << arg_names[0]; - return s.str(); -} - -Tensor OneMinusX::forward(const vector& xs) const { - assert(xs.size() == 1); - const Tensor& x = *xs[0]; - return Ones(cnn::size(x)) - x; -} - -Tensor OneMinusX::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - return -dEdf; -}; - -string Sum::as_string(const vector& arg_names) const { - ostringstream s; - s << arg_names[0]; - for (unsigned i = 1; i < tail.size(); ++i) - s << " + " << arg_names[i]; - return s.str(); -} - -Tensor Sum::forward(const vector& xs) const { - assert(xs.size() > 0); - Tensor res = *xs[0]; - for (unsigned i = 1; i < xs.size(); ++i) - res += *xs[i]; - return res; -} - -Tensor Sum::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - return dEdf; -}; - -string Tanh::as_string(const vector& arg_names) const { - ostringstream s; - s << "tanh(" << arg_names[0] << ')'; - return s.str(); -} - -Tensor Tanh::forward(const vector& xs) const { - assert(xs.size() == 1); - const Tensor& x = *xs.front(); - return Elewise::TanhForward(x); -} - -Tensor Tanh::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i == 0); - const Tensor& x = *xs.front(); - return Elewise::TanhBackward(dEdf, fx, x); -} - -string Square::as_string(const vector& arg_names) const { - ostringstream s; - s << "square(" << arg_names[0] << ')'; - return s.str(); -} - -Tensor Square::forward(const vector& xs) const { - assert(xs.size() == 1); // just a single input - 
const Tensor& x = *xs.front(); - return x.cwiseProduct(x); -} - -Tensor Square::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i == 0); - return dEdf.cwiseProduct(*xs.front()) * 2; -}; - -string Exp::as_string(const vector& arg_names) const { - ostringstream os; - os << "exp(" << arg_names[0] << ')'; - return os.str(); -} - -Tensor Exp::forward(const vector& xs) const { - assert(xs.size() == 1); - return Elewise::Exp(*xs.front()); -} - -Tensor Exp::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - return dEdf.array() * fx.array(); -} - -string Log::as_string(const vector& arg_names) const { - ostringstream os; - os << "log(" << arg_names[0] << ')'; - return os.str(); -} - -Tensor Log::forward(const vector& xs) const { - assert(xs.size() == 1); - return Elewise::Ln(*xs.front()); -} - -Tensor Log::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - return dEdf.array() / xs[0]->array(); -} - -string Concatenate::as_string(const vector& arg_names) const { - ostringstream os; - os << "concat(" << arg_names[0]; - for (unsigned i = 1; i < arg_names.size(); ++i) { - os << ',' << arg_names[i]; - } - os << ')'; - return os.str(); -} - -Tensor Concatenate::forward(const vector& xs) const { - assert(xs.size() > 0); - unsigned rows = 0; - for (auto x : xs) rows += x->rows(); - src_row_indices.resize(xs.size()); - Tensor fx(rows, 1); - unsigned i = 0; - unsigned k = 0; - for (auto x : xs) { - src_row_indices[k] = i; - ++k; - const Tensor& cx = *x; - assert(cx.cols() == 1); // this can be relaxed to the same everywhere - const unsigned crows = cx.rows(); - for (unsigned j = 0; j < crows; ++j) { - fx(i, 0) = cx(j, 0); - ++i; - } - } - return fx; -} - -Tensor Concatenate::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i < src_row_indices.size()); - Tensor dEdx = *xs[i]; - const unsigned rows = 
dEdx.rows(); - const unsigned begin = src_row_indices[i]; - assert(rows + begin <= dEdf.rows()); - for (unsigned i = 0; i < rows; ++i) - dEdx(i,0) = dEdf(i + begin, 0); - return dEdx; -} - -string ConcatenateColumns::as_string(const vector& arg_names) const { - ostringstream os; - os << "concat_cols(" << arg_names[0]; - for (unsigned i = 1; i < arg_names.size(); ++i) { - os << ',' << arg_names[i]; - } - os << ')'; - return os.str(); -} - -Tensor ConcatenateColumns::forward(const vector& xs) const { - assert(xs.size() > 0); - const unsigned rows = xs.front()->rows(); - Tensor fx(rows, xs.size()); - unsigned i = 0; - for (auto x : xs) { - assert(x->rows() == rows); - for (unsigned j = 0; j < rows; ++j) - fx(j, i) = (*x)(j, 0); - ++i; - } - return fx; -} - -Tensor ConcatenateColumns::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i < fx.cols()); - return dEdf.col(i); -} - -string Hinge::as_string(const vector& arg_names) const { - ostringstream os; - os << "hinge(" << arg_names[0] << ",m=" << margin << ")"; - return os.str(); -} - -Tensor Hinge::forward(const vector& xs) const { - assert(xs.size() == 1); - const Tensor& x = *xs.front(); - const unsigned rows = x.rows(); - if (u.rows() != rows) - u = Tensor(rows, 1); // local forward value - real y = 0; - const real mlystar = margin - x(*pelement, 0); - for (unsigned i = 0; i < rows; ++i) - if (*pelement != i) - y += u(i, 0) = max(real(0), mlystar + x(i,0)); - Tensor res(1,1); - res(0,0) = y; - return res; -} - -Tensor Hinge::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i == 0); - const Tensor& x = *xs.front(); - const unsigned rows = x.rows(); - Tensor dEdx = Zero(Dim(rows, 1)); - if (fx(0,0) == 0) return dEdx; - const real diff = dEdf(0,0); - unsigned tv = 0; - for (unsigned i = 0; i < rows; ++i) - if (*pelement != i && u(i, 0) > 0) { dEdx(i, 0) = diff; tv++; } - dEdx(*pelement, 0) = -diff * tv; - return dEdx; 
-} - -string Identity::as_string(const vector& arg_names) const { - return arg_names[0]; -} - -Tensor Identity::forward(const vector& xs) const { - assert(xs.size() == 1); - return *xs.front(); -} - -Tensor Identity::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i == 0); - return dEdf; -} - -string MaxPooling1D::as_string(const vector& arg_names) const { - ostringstream os; - os << "maxpool1d(" << arg_names.front() << ",w=" << width << ")"; - return os.str(); -} - -Tensor MaxPooling1D::forward(const vector& xs) const { - assert(xs.size() == 1); - const Tensor& x = *xs.front(); - const unsigned x_rows = x.rows(); - assert(x.cols() == 1); - const unsigned fx_rows = x_rows / width; - ind.resize(fx_rows); - Tensor fx = Zero(Dim(fx_rows, 1)); - for (unsigned i = 0; i < fx_rows; ++i) { - unsigned from = i * width; - unsigned to = from + width; - if (to > x_rows) to = x_rows; - real best = x(from, 0); - unsigned bestr = from; - for (unsigned r = from + 1; r < to; ++r) { - if (x(r, 0) > best) { - best = x(r,0); - bestr = r; - } - } - ind[i] = bestr; - fx(i, 0) = best; - } - return fx; -} - -Tensor MaxPooling1D::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - const Tensor& x = *xs.front(); - const unsigned x_rows = x.rows(); - Tensor dEdx = Zero(Dim(x_rows, 1)); - const unsigned fx_rows = x_rows / width; - assert(fx_rows == ind.size()); - assert(fx_rows == dEdf.rows()); - for (unsigned i = 0; i < fx_rows; ++i) - dEdx(ind[i], 0) = dEdf(i, 0); - return dEdx; -} - -string Softmax::as_string(const vector& arg_names) const { - ostringstream s; - s << "softmax(" << arg_names[0] << ')'; - return s.str(); -} - -Tensor Softmax::forward(const vector& xs) const { - assert(xs.size() == 1); - const Tensor& x = *xs.front(); - return Convolution::SoftmaxForward(x, 1); -} - -Tensor Softmax::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i 
== 0); - return Convolution::SoftmaxBackward(dEdf, fx, 1); -} - -string LogSoftmax::as_string(const vector& arg_names) const { - ostringstream s; - s << "log_softmax(" << arg_names[0] << ')'; - return s.str(); -} - -Tensor LogSoftmax::forward(const vector& xs) const { - assert(xs.size() == 1); - const Tensor& x = *xs.front(); - return Convolution::SoftmaxForward(x, 1).array().log(); -} - -Tensor LogSoftmax::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i == 0); - Tensor u = fx.array().exp(); - return Convolution::SoftmaxBackward(dEdf.cwiseQuotient(u), u, 1); -} - -string RestrictedLogSoftmax::as_string(const vector& arg_names) const { - ostringstream s; - s << "r_log_softmax(" << arg_names[0] << ')'; - return s.str(); -} - -inline real logsumexp(const Tensor& x, const vector& denom) { - real m = x(denom[0],0); - for (auto i : denom) { - real r = x(i,0); - if (r > m) m = r; - } - real z = 0; - for (auto i : denom) - z += expf(x(i,0) - m); - return m + logf(z); -} - -Tensor RestrictedLogSoftmax::forward(const vector& xs) const { - assert(xs.size() == 1); - assert(denom.size() > 0); - const Tensor& x = *xs.front(); - const unsigned rows = x.rows(); - assert(x.cols() == 1); - const real logz = logsumexp(x, denom); - Tensor fx(rows, 1); - for (unsigned i = 0; i < rows; ++i) - fx(i,0) = -numeric_limits::infinity(); - for (auto i : denom) - fx(i,0) = x(i,0) - logz; - if (denom.size() == 1) fx(denom.front(), 0) = 0; - return fx; -} - -Tensor RestrictedLogSoftmax::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i == 0); - const Tensor& x = *xs.front(); - const unsigned rows = x.rows(); - Tensor dEdx = Zero(Dim(rows, 1)); - double z = 0; - for (auto i : denom) - z += dEdf(i, 0); - for (auto i : denom) - dEdx(i, 0) = dEdf(i, 0) - exp(fx(i, 0)) * z; - return dEdx; -} - -// x_1 is a vector -// y = (x_1)_{*pval} -string PickElement::as_string(const vector& arg_names) const { 
- ostringstream s; - s << "pick(" << arg_names[0] << ',' << *pval << ')'; - return s.str(); -} - -Tensor PickElement::forward(const vector& xs) const { - assert(xs.size() == 1); - const Tensor& x = *xs.front(); - assert(x.cols() == 1); - assert(*pval < x.rows()); - Tensor fx(1,1); - fx(0,0) = x(*pval, 0); - return fx; -} - -// derivative is 0 in all dimensions except 1 for the selected element -Tensor PickElement::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i == 0); - assert(dEdf.rows() == 1); - assert(dEdf.cols() == 1); - const Tensor& x = *xs.front(); - - // TODO should be sparse - Tensor dEdx = Zero(Dim(x.rows(), 1)); - dEdx(*pval,0) = dEdf(0,0); - return dEdx; -} - -// x_1 is a vector -// y = (x_1)[start:end] -string PickRange::as_string(const vector& arg_names) const { - ostringstream s; - s << "slice(" << arg_names[0] << ',' << start << ':' << end << ')'; - return s.str(); -} - -// slice of vector from index start (inclusive) to index end (exclusive) -Tensor PickRange::forward(const vector& xs) const { - assert(xs.size() == 1); - const Tensor& x = *xs.front(); - assert(x.cols() == 1); - assert(start >= 0); - assert(end <= x.rows()); - assert(start < end); - Tensor fx = x.block(start, 0, end-start, 1); - assert(fx.rows() == end-start); - return fx; -} - -// derivative is 0 in all dimensions except the slice range -Tensor PickRange::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i == 0); - assert(dEdf.rows() == end-start); - assert(dEdf.cols() == 1); - const Tensor& x = *xs.front(); - - // TODO should be sparse - Tensor dEdx = Tensor::Zero(x.rows(), 1); - dEdx.block(start, 0, end-start, 1) = dEdf; - return dEdx; -} - -string MatrixMultiply::as_string(const vector& arg_names) const { - ostringstream s; - s << arg_names[0] << " * " << arg_names[1]; - return s.str(); -} - -Tensor MatrixMultiply::forward(const vector& xs) const { - assert(xs.size() == 2); - 
return (*xs[0]) * (*xs[1]); -} - -Tensor MatrixMultiply::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i < 2); - if (i == 0) { - return dEdf * xs[1]->transpose(); - } else { - return xs[0]->transpose() * dEdf; - } -} - -string CwiseMultiply::as_string(const vector& arg_names) const { - ostringstream s; - s << arg_names[0] << " \\cdot " << arg_names[1]; - return s.str(); -} - -Tensor CwiseMultiply::forward(const vector& xs) const { - assert(xs.size() == 2); - return xs[0]->cwiseProduct(*xs[1]); -} - -Tensor CwiseMultiply::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i < 2); - if (i == 0) { - return dEdf.cwiseProduct(*xs[1]); - } else { - return dEdf.cwiseProduct(*xs[0]); - } -} - -string Multilinear::as_string(const vector& arg_names) const { - ostringstream s; - s << arg_names[0]; - for (unsigned i = 1; i < arg_names.size(); i += 2) - s << " + " << arg_names[i] << " * " << arg_names[i+1]; - return s.str(); -} - -Tensor Multilinear::forward(const vector& xs) const { - assert(xs.size() % 2 == 1); - Tensor fx = *xs.front(); - //cerr << "Multilinear\n"; - //for (unsigned i = 0; i < xs.size(); i++) - // cerr << " (" << xs[i]->rows() << "," << xs[i]->cols() << ")\n"; - for (unsigned i = 1; i < xs.size(); i += 2) { - if (xs[i]->cols() == 1 && xs[i+1]->cols() == 1) - fx += xs[i]->cwiseProduct(*xs[i + 1]); - else - fx += (*xs[i]) * (*xs[i + 1]); - } - return fx; -} - -Tensor Multilinear::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i < xs.size()); - if (i == 0) return dEdf; - if (i % 2 == 1) { // is a matrix - if (xs[i]->cols() == 1) // diagonal matrix - return dEdf.cwiseProduct(*xs[i+1]); - else - return dEdf * xs[i+1]->transpose(); - } - // is a vector - if (xs[i-1]->cols() == 1) // xs[i-1] is a diagonal matrix - return xs[i-1]->cwiseProduct(dEdf); - return xs[i-1]->transpose() * dEdf; -} - -string 
Negate::as_string(const vector& arg_names) const { - ostringstream s; - s << '-' << arg_names[0]; - return s.str(); -} - -Tensor Negate::forward(const vector& xs) const { - assert(xs.size() == 1); - return -(*xs[0]); -} - -Tensor Negate::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i == 0); - return -dEdf; -} - -string Rectify::as_string(const vector& arg_names) const { - ostringstream s; - s << "ReLU(" << arg_names[0] << ')'; - return s.str(); -} - -Tensor Rectify::forward(const vector& xs) const { - assert(xs.size() == 1); - return Elewise::ReluForward(*xs.front()); -} - -Tensor Rectify::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - return Elewise::ReluBackward(dEdf, fx, *xs.front()); -} - -string SquaredEuclideanDistance::as_string(const vector& arg_names) const { - ostringstream s; - s << "|| " << arg_names[0] << " - " << arg_names[1] << " ||^2"; - return s.str(); -} - -Tensor SquaredEuclideanDistance::forward(const vector& xs) const { - assert(xs.size() == 2); - Tensor res(1,1); - res(0,0) = (*xs[0] - *xs[1]).squaredNorm(); - return res; -} - -Tensor SquaredEuclideanDistance::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i < 2); - real scale = dEdf(0,0) * 2; - if (i == 1) scale = -scale; - return scale * (*xs[0] - *xs[1]); -} - -string LogisticSigmoid::as_string(const vector& arg_names) const { - ostringstream s; - s << "\\sigma(" << arg_names[0] << ')'; - return s.str(); -} - -Tensor LogisticSigmoid::forward(const vector& xs) const { - assert(xs.size() == 1); - const Tensor& x = *xs.front(); - return Elewise::SigmoidForward(x); -} - -Tensor LogisticSigmoid::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - assert(i == 0); - const Tensor& x = *xs.front(); - return Elewise::SigmoidBackward(dEdf, fx, x); -} - -// you could do this with LogisticSigmoid, Softmax or a 
variety of other -// functions, but this is often useful. -// x_1 must be a scalar that is a value between 0 and 1 -// target_y is a value between 0 and 1 -// y = ty * log(x_1) + (1 - ty) * log(x_1) -string BinaryLogLoss::as_string(const vector& arg_names) const { - ostringstream os; - os << "binary_log_loss(" << arg_names[0] << ", " << *ptarget_y << ')'; - return os.str(); -} - -Tensor BinaryLogLoss::forward(const vector& xs) const { - assert(xs.size() == 1); - assert(xs.front()->cols() == 1); - assert(xs.front()->rows() == 1); - const real y_pred = (*xs.front())(0,0); - assert(y_pred >= 0.); - assert(y_pred <= 1.); - const real ty = *ptarget_y; - assert(ty >= 0.); - assert(ty <= 1.); - Tensor fx = *xs.front(); - real& res = fx(0,0); - res = 0; - if (ty > 0.) res -= ty * log(y_pred); - if ((1 - ty) > 0.) res -= (1 - ty) * log1p(-y_pred); - return fx; -} - -Tensor BinaryLogLoss::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - const real y_pred = (*xs.front())(0,0); - const real ty = *ptarget_y; - real scale = 0; - if (ty > 0.) scale -= ty / y_pred; - if ((1 - ty) >= 0.) 
scale += (1 - ty) / (1 - y_pred); - return dEdf * scale; -} - -} // namespace cnn diff --git a/cnn/edges.h b/cnn/edges.h deleted file mode 100644 index ec478cf74..000000000 --- a/cnn/edges.h +++ /dev/null @@ -1,320 +0,0 @@ -#ifndef CNN_EDGES_H_ -#define CNN_EDGES_H_ - -#include "cnn/cnn.h" - -namespace cnn { - -// n_{i,j} ~ N(0,stddev) -// y = x + n -struct GaussianNoise : public Edge { - explicit GaussianNoise(real stddev) : stddev(stddev) {} - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; - real stddev; -}; - -// y = dropout(x,p) where p specifies the dropout probability -struct Dropout : public Edge { - explicit Dropout(real p) : p(p) {} - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; - mutable Tensor noise_mask; - real p; -}; - -// y = 1 - x_1 -struct OneMinusX : public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; -}; - -// y = tanh x_1 -struct Tanh : public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; -}; - -// y = x_1 \odot x_1 -struct Square : public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const 
override; -}; - -// y = exp x_1 -struct Exp : public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; -}; - -// y = log x_1 (base e, i.e., natural log) -struct Log : public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; -}; - -// concatenate rows -struct Concatenate : public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; - // src_row_indices[i] says what row in fx the ith x vector was assigned to - // used to simplify backprop - mutable std::vector src_row_indices; -}; - -// concatenate column vectors into a matrix -// x_i must be a column vector in R^n -struct ConcatenateColumns : public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; -}; - -// Let x be a vector-valued input, x_i represents the score of the ith element, then -// y = \sum{i != element} max{0, margin - x_element + x_i} -struct Hinge : public Edge { - explicit Hinge(unsigned e, real m = 1.0) : element(e), pelement(&element), margin(m) {} - explicit Hinge(unsigned* pe, real m = 1.0) : element(), pelement(pe), margin(m) {} - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& 
dEdf, - unsigned i) const override; - unsigned element; - const unsigned* pelement; - real margin; - mutable Tensor u; // partial forward values -}; - -// y = x_1 -struct Identity : public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; -}; - -// hyperparameter: width > 1 -// x_1 is a vector in R^n, which we write x -// y is a vector in R^{n / width} -// y_i = max_{x_{i * width - width + 1}, ..., x_{i * width}} -struct MaxPooling1D : public Edge { - MaxPooling1D(unsigned w) : width(w) {} - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; - unsigned width; - mutable std::vector ind; -}; - -// y = x_1 * x_2 -struct MatrixMultiply : public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; -}; - -// y = x_1 \cdot x_2 (Hadamard product) -struct CwiseMultiply : public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; -}; - -// y = x_1 \sum_{i=2, 4 ...} A_i * x_{i+1} -// NOTE: if A_i is a vector then * computes the component-wise product -// this is an ugly hack to deal with diagonal matrices -struct Multilinear : public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& 
fx, - const Tensor& dEdf, - unsigned i) const override; -}; - -// y = -x_1 -struct Negate : public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; -}; - -// y = max(0,x) -struct Rectify : public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; -}; - -// you could do this with LogisticSigmoid, Softmax or a variety of other -// functions, but this is often useful. -// x_1 must be a scalar that is a value between 0 and 1 -// target_y is a value between 0 and 1 -// y = ty * log(x_1) + (1 - ty) * log(x_1) -struct BinaryLogLoss : public Edge { - BinaryLogLoss(real ty) : target_y(ty), ptarget_y(&target_y) {} - BinaryLogLoss(real* pty) : target_y(), ptarget_y(pty) {} - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; - real target_y; - real* ptarget_y; -}; - -// y = \sum_i x_i -struct Sum : public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; -}; - -// y = || x_1 - x_2 ||^2 -struct SquaredEuclideanDistance : public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; -}; - -// y = \sigma(x_1) -struct LogisticSigmoid 
: public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; -}; - -// z = \sum_j \exp (x_i)_j -// y_i = (x_1)_i / z -struct Softmax : public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; -}; - -// z = \sum_j \exp (x_i)_j -// y_i = (x_1)_i - \log z -struct LogSoftmax : public Edge { - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; -}; - -// z = \sum_{j \in denom} \exp (x_i)_j -// y_i = (x_1)_i - \log z -struct RestrictedLogSoftmax : public Edge { - explicit RestrictedLogSoftmax(const std::vector& d) : denom(d) {} - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; - std::vector denom; -}; - -// x_1 is a vector -// y = (x_1)_{*pval} -// this is used to implement cross-entropy training -struct PickElement : public Edge { - explicit PickElement(unsigned v) : val(v), pval(&val) {} - // use this constructor if you want to change the value after the graph is constructed - explicit PickElement(const unsigned* pv) : val(), pval(pv) {} - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; - unsigned val; - const unsigned* pval; -}; - -// x_1 is 
a vector -// y = x_1[start:end] -// (start inclusive, end exclusive) -struct PickRange : public Edge { - explicit PickRange(unsigned start, unsigned end) : start(start), end(end) {} - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; - unsigned start; - unsigned end; -}; - -} // namespace cnn - -#endif diff --git a/cnn/lstm-fast.cc b/cnn/lstm-fast.cc deleted file mode 100644 index e4e100d9e..000000000 --- a/cnn/lstm-fast.cc +++ /dev/null @@ -1,159 +0,0 @@ -#include "cnn/lstm-fast.h" - -#include -#include -#include -#include - -#include "cnn/training.h" - -using namespace std; - -namespace cnn { - -enum { X2I, H2I, C2I, BI, X2O, H2O, C2O, BO, X2C, H2C, BC }; - -LSTMBuilder_CIFG::LSTMBuilder_CIFG(unsigned layers, - unsigned input_dim, - unsigned hidden_dim, - Model* model) : hidden_dim(hidden_dim), layers(layers), zeros(hidden_dim, 0) { - builder_state = 0; // created - - unsigned layer_input_dim = input_dim; - for (unsigned i = 0; i < layers; ++i) { - // i - Parameters* p_x2i = model->add_parameters(Dim({hidden_dim, layer_input_dim})); - Parameters* p_h2i = model->add_parameters(Dim({hidden_dim, hidden_dim})); - Parameters* p_c2i = model->add_parameters(Dim({hidden_dim, hidden_dim})); - Parameters* p_bi = model->add_parameters(Dim({hidden_dim})); - - // o - Parameters* p_x2o = model->add_parameters(Dim({hidden_dim, layer_input_dim})); - Parameters* p_h2o = model->add_parameters(Dim({hidden_dim, hidden_dim})); - Parameters* p_c2o = model->add_parameters(Dim({hidden_dim, hidden_dim})); - Parameters* p_bo = model->add_parameters(Dim({hidden_dim})); - - // c - Parameters* p_x2c = model->add_parameters(Dim({hidden_dim, layer_input_dim})); - Parameters* p_h2c = model->add_parameters(Dim({hidden_dim, hidden_dim})); - Parameters* p_bc = model->add_parameters(Dim({hidden_dim})); - 
layer_input_dim = hidden_dim; // output (hidden) from 1st layer is input to next - - vector ps = {p_x2i, p_h2i, p_c2i, p_bi, p_x2o, p_h2o, p_c2o, p_bo, p_x2c, p_h2c, p_bc}; - params.push_back(ps); - } // layers -} - -void LSTMBuilder_CIFG::new_graph() { - param_vars.clear(); - h.clear(); - h0.clear(); - c0.clear(); - builder_state = 1; -} - -void LSTMBuilder_CIFG::add_parameter_edges(Hypergraph* hg) { - if (builder_state != 1) { - cerr << "Invalid state: " << builder_state << endl; - abort(); - } - builder_state = 2; - - param_vars.clear(); - h.clear(); - c.clear(); - - for (unsigned i = 0; i < layers; ++i) { - string layer = to_string(i); - auto& p = params[i]; - - // i - VariableIndex i_x2i = hg->add_parameter(p[X2I]); - VariableIndex i_h2i = hg->add_parameter(p[H2I]); - VariableIndex i_c2i = hg->add_parameter(p[C2I]); - VariableIndex i_bi = hg->add_parameter(p[BI]); - - // o - VariableIndex i_x2o = hg->add_parameter(p[X2O]); - VariableIndex i_h2o = hg->add_parameter(p[H2O]); - VariableIndex i_c2o = hg->add_parameter(p[C2O]); - VariableIndex i_bo = hg->add_parameter(p[BO]); - - // c - VariableIndex i_x2c = hg->add_parameter(p[X2C]); - VariableIndex i_h2c = hg->add_parameter(p[H2C]); - VariableIndex i_bc = hg->add_parameter(p[BC]); - - vector vars = {i_x2i, i_h2i, i_c2i, i_bi, i_x2o, i_h2o, i_c2o, i_bo, i_x2c, i_h2c, i_bc}; - param_vars.push_back(vars); - } -} - -void LSTMBuilder_CIFG::start_new_sequence(Hypergraph* hg, - vector c_0, - vector h_0){ - if (builder_state < 2) { - cerr << "Invalid state: " << builder_state << endl; - abort(); - } - builder_state = 3; - - h.clear(); - c.clear(); - h0 = h_0; - c0 = c_0; - if (h0.empty() || c0.empty()) { - VariableIndex zero_input = hg->add_input(Dim({hidden_dim}), &zeros); - if (c0.empty()) { c0 = vector(layers, zero_input); } - if (h0.empty()) { h0 = vector(layers, zero_input); } - } - assert (h0.size() == layers); - assert (c0.size() == layers); -} - -VariableIndex LSTMBuilder_CIFG::add_input(VariableIndex x, 
Hypergraph* hg) { - if (builder_state != 3) { - cerr << "Invalid state: " << builder_state << endl; - abort(); - } - const unsigned t = h.size(); - h.push_back(vector(layers)); - c.push_back(vector(layers)); - vector& ht = h.back(); - vector& ct = c.back(); - VariableIndex in = x; - for (unsigned i = 0; i < layers; ++i) { - const vector& vars = param_vars[i]; - VariableIndex i_h_tm1; - VariableIndex i_c_tm1; - if (t == 0) { - // intial value for h and c at timestep 0 in layer i - // defaults to zero matrix input if not set in add_parameter_edges - i_h_tm1 = h0[i]; - i_c_tm1 = c0[i]; - } else { // t > 0 - i_h_tm1 = h[t-1][i]; - i_c_tm1 = c[t-1][i]; - } - // input - VariableIndex i_ait = hg->add_function({vars[BI], vars[X2I], in, vars[H2I], i_h_tm1, vars[C2I], i_c_tm1}); - VariableIndex i_it = hg->add_function({i_ait}); - // forget - VariableIndex i_ft = hg->add_function({i_it}); - // write memory cell - VariableIndex i_awt = hg->add_function({vars[BC], vars[X2C], in, vars[H2C], i_h_tm1}); - VariableIndex i_wt = hg->add_function({i_awt}); - // output - VariableIndex i_nwt = hg->add_function({i_it, i_wt}); - VariableIndex i_crt = hg->add_function({i_ft, i_c_tm1}); - ct[i] = hg->add_function({i_crt, i_nwt}); // new memory cell at time t - - VariableIndex i_aot = hg->add_function({vars[BO], vars[X2O], in, vars[H2O], i_h_tm1, vars[C2O], ct[i]}); - VariableIndex i_ot = hg->add_function({i_aot}); - VariableIndex ph_t = hg->add_function({ct[i]}); - in = ht[i] = hg->add_function({i_ot, ph_t}); - } - return ht.back(); -} - -} // namespace cnn diff --git a/cnn/lstm-fast.h b/cnn/lstm-fast.h deleted file mode 100644 index 4617eb71a..000000000 --- a/cnn/lstm-fast.h +++ /dev/null @@ -1,72 +0,0 @@ -#ifndef CNN_LSTM_FAST_H_ -#define CNN_LSTM_FAST_H_ - -#include "cnn/cnn.h" -#include "cnn/edges.h" - -namespace cnn { - -class Model; - -struct LSTMBuilder_CIFG { - LSTMBuilder_CIFG() {} - explicit LSTMBuilder_CIFG(unsigned layers, - unsigned input_dim, - unsigned hidden_dim, - Model* 
model); - - // call this to reset the builder when you are going to create - // a new computation graph - void new_graph(); - - // call this before start_new_sequence - void add_parameter_edges(Hypergraph* hg); - - // Start new sequence in given Hypergraph with initial c0 and h0 - // call after add_parameter edges but before add input, - // as well as whenever a new sequence is to be added to the graph - void start_new_sequence(Hypergraph* hg, - std::vector c_0={}, - std::vector h_0={}); - - // add another timestep by reading in the variable x - // return the hidden representation of the deepest layer - VariableIndex add_input(VariableIndex x, Hypergraph* hg); - - // rewind the last timestep - this DOES NOT remove the variables - // from the computation graph, it just means the next time step will - // see a different previous state. You can remind as many times as - // you want. - void rewind_one_step() { - h.pop_back(); - c.pop_back(); - } - - // returns node index (variable) of most recent output - VariableIndex back() const { return h.back().back(); } - - // check to make sure parameters have been added before adding input - unsigned builder_state; - - // first index is layer, then ... - std::vector> params; - - // first index is layer, then ... 
- std::vector> param_vars; - - // first index is time, second is layer - std::vector> h, c; - - // initial values of h and c at each layer - // - both default to zero matrix input - std::vector h0; - std::vector c0; - - unsigned hidden_dim; - unsigned layers; - std::vector zeros; -}; - -} // namespace cnn - -#endif diff --git a/cnn/model.cc b/cnn/model.cc deleted file mode 100644 index cef223e70..000000000 --- a/cnn/model.cc +++ /dev/null @@ -1,96 +0,0 @@ -#include "cnn/model.h" -#include "cnn/tensor.h" - -#include - -using namespace std; - -namespace cnn { - -ParametersBase::~ParametersBase() {} - -size_t Parameters::size() const { return cnn::size(values).Prod(); } - -void Parameters::rescale_gradient(real scale) { g *= scale; } - -real Parameters::g_squared_l2norm() const { -#if MINERVA_BACKEND - Tensor r = g.Reshape({g.Size().Prod()}); - Tensor sq = Elewise::Mult(r, r); - return sq.Sum(0).Get().get()[0]; -#else - return g.squaredNorm(); -#endif -} - -void Parameters::accumulate_grad(const Tensor& d) { g += d; } - -void Parameters::clear() { -#if MINERVA_BACKEND - g = NArray::Zeros(g.Size()); -#else - g.setZero(); -#endif -} - -size_t LookupParameters::size() const { - return values.size() * dim.Prod(); -} - -real LookupParameters::g_squared_l2norm() const { - real a = 0; -#if MINERVA_BACKEND - cerr << "No impl yet\n"; abort(); -#else - for (auto& it : this->g) - a += it.second.squaredNorm(); -#endif - return a; -} - -void LookupParameters::accumulate_grad(unsigned index, const Tensor& d) { -#if MINERVA_BACKEND - cerr << "No impl yet\n"; abort(); -#else - auto it = this->g.find(index); - if (it == this->g.end()) { - g[index] = d; - } else { - it->second += d; - } -#endif -} - -void LookupParameters::rescale_gradient(real scale) { - for (auto& it : this->g) - it.second *= scale; -} - -void LookupParameters::clear() { g.clear(); } - -Model::~Model() { - for (auto p : all_params) delete p; -} - -Parameters* Model::add_parameters(const Dim& d) { - Parameters* p = 
new Parameters(d); - all_params.push_back(p); - params.push_back(p); - return p; -} - -Parameters* Model::add_parameters(const Tensor& m) { // initial value is m - Parameters* p = new Parameters(m); - all_params.push_back(p); - params.push_back(p); - return p; -} - -LookupParameters* Model::add_lookup_parameters(unsigned n, const Dim& d) { - LookupParameters* p = new LookupParameters(n,d); - all_params.push_back(p); - lookup_params.push_back(p); - return p; -} - -} // namespace cnn diff --git a/cnn/model.h b/cnn/model.h deleted file mode 100644 index 5120078d4..000000000 --- a/cnn/model.h +++ /dev/null @@ -1,135 +0,0 @@ -#ifndef CNN_PARAMS_H_ -#define CNN_PARAMS_H_ - -#include -#include - -#include -#include - -#include "cnn/tensor.h" - -namespace cnn { - -// to deal with sparse updates, there are two parameter classes: -// * Parameters represents a vector, matrix, (eventually higher order tensors) -// of parameters. These are densely updated. -// * LookupParameters represents a table of vectors that are used to embed a -// set of discrete objects. These are sparsely updated. 
- -struct ParametersBase { - friend class Model; - virtual void rescale_gradient(real scale) = 0; - virtual real g_squared_l2norm() const = 0; - virtual size_t size() const = 0; - virtual ~ParametersBase(); -}; - -// represents parameters (e.g., a weight matrix) -struct Parameters : public ParametersBase { - friend class Model; - void rescale_gradient(real scale) override; - real g_squared_l2norm() const override; - size_t size() const override; - - real& operator()(int i, int j) { return values(i,j); } - const real& operator()(int i, int j) const { return values(i,j); } - - void accumulate_grad(const Tensor& g); - void clear(); - - Dim dim; - Tensor values; - Tensor g; - private: - Parameters() {} - explicit Parameters(const Dim& d) : dim(d), values(Random(d)), g(Zero(d)) {} - explicit Parameters(const Tensor& v) : dim(v.rows(), v.cols()), values(v), g(Zero(dim)) {} - friend class boost::serialization::access; - template void serialize(Archive& ar, const unsigned int) { - ar & dim; - ar & values; - } -}; - -// represents a matrix/vector embedding of a discrete set -struct LookupParameters : public ParametersBase { - friend class Model; - void rescale_gradient(real scale) override; - real g_squared_l2norm() const override; - size_t size() const override; - - Tensor& operator[](unsigned i) { return values[i]; } - const Tensor& operator[](unsigned i) const { return values[i]; } - - void accumulate_grad(unsigned index, const Tensor& g); - void clear(); - - Dim dim; - std::vector values; - std::unordered_map g; - private: - LookupParameters() {} - LookupParameters(unsigned n, const Dim& d) : dim(d), values(n) { - for (auto& v : values) v = Random(d); - } - friend class boost::serialization::access; - template void serialize(Archive& ar, const unsigned int) { - ar & dim; - ar & values; - } -}; - -// this is a collection of parameters -// if you need a matrix of parameters, or a lookup table - ask an instance of this class -// this knows how to serialize itself -// 
parameters know how to track their gradients, but any extra information (like velocity) will live here -class Model { - public: - ~Model(); - Parameters* add_parameters(const Dim& d); // initialized randomly - Parameters* add_parameters(const Tensor& m); // initial value is m - LookupParameters* add_lookup_parameters(unsigned n, const Dim& d); - - const std::vector& all_parameters_list() const { return all_params; } - const std::vector& parameters_list() const { return params; } - const std::vector& lookup_parameters_list() const { return lookup_params; } - - private: - friend class boost::serialization::access; - template - void save(Archive& ar, const unsigned int) const { - int np = params.size(); - int nlp = lookup_params.size(); - ar & np; - ar & nlp; - for (unsigned i = 0; i < params.size(); ++i) - ar & *params[i]; - for (unsigned i = 0; i < lookup_params.size(); ++i) - ar & *lookup_params[i]; - } - template - void load(Archive& ar, const unsigned int) { - int np, nlp; - ar & np; - ar & nlp; - assert(np == (int)params.size()); - assert(nlp == (int)lookup_params.size()); - for (unsigned i = 0; i < params.size(); ++i) - ar & *params[i]; - for (unsigned i = 0; i < lookup_params.size(); ++i) - ar & *lookup_params[i]; - all_params.clear(); - for (auto p : params) all_params.push_back(p); - for (auto p : lookup_params) all_params.push_back(p); - } - BOOST_SERIALIZATION_SPLIT_MEMBER() - - std::vector all_params; - std::vector params; - std::vector lookup_params; -}; - -} // namespace cnn - -#endif diff --git a/cnn/param-edges.cc b/cnn/param-edges.cc deleted file mode 100644 index a604ebebe..000000000 --- a/cnn/param-edges.cc +++ /dev/null @@ -1,102 +0,0 @@ -#include "cnn/param-edges.h" -#include "cnn/tensor.h" - -#include - -using namespace std; - -namespace cnn { - -bool ParameterEdge::has_parameters() const { return true; } - -string ParameterEdge::as_string(const vector& arg_names) const { - ostringstream s; - s << "params(" << dim << ')'; - return s.str(); -} - 
-Tensor ParameterEdge::forward(const vector& xs) const { - assert(xs.size() == 0); - return params->values; -} - -Tensor ParameterEdge::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - cerr << "called backward() on arity 0 edge\n"; - abort(); -} - -void ParameterEdge::accumulate_grad(const Tensor& g) { - params->accumulate_grad(g); -} - -string InputEdge::as_string(const vector& arg_names) const { - ostringstream s; - s << "inputs(" << dim << ')'; - return s.str(); -} - -Tensor InputEdge::forward(const vector& xs) const { - assert(xs.size() == 0); - assert(dim.size() == pdata->size()); - return FromRawData(dim, &pdata->front()); -} - -Tensor InputEdge::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - cerr << "called backward() on arity 0 edge\n"; - abort(); -} - -string ScalarInputEdge::as_string(const vector& arg_names) const { - ostringstream s; - s << "scalar_inputs(" << data << ')'; - return s.str(); -} - -Tensor ScalarInputEdge::forward(const vector& xs) const { - assert(xs.size() == 0); - return FromRawData(Dim({1}), pdata); -} - -Tensor ScalarInputEdge::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - cerr << "called backward() on arity 0 edge\n"; - abort(); -} - -string LookupEdge::as_string(const vector& arg_names) const { - ostringstream s; - s << "lookup[|x|=" << params->values.size() << " --> " << dim << ']'; - return s.str(); -} - -Tensor LookupEdge::forward(const vector& xs) const { - assert(xs.size() == 0); - return params->values[*pindex]; -} - -Tensor LookupEdge::backward(const vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const { - cerr << "called backward() on arity 0 edge\n"; - abort(); -} - -bool LookupEdge::has_parameters() const { - return has_optimizable_parameters; -} - -void LookupEdge::accumulate_grad(const Tensor& g) { - assert(has_optimizable_parameters); - 
params->accumulate_grad(*pindex, g); -} - -} // namespace cnn diff --git a/cnn/param-edges.h b/cnn/param-edges.h deleted file mode 100644 index f596b928e..000000000 --- a/cnn/param-edges.h +++ /dev/null @@ -1,76 +0,0 @@ -#ifndef CNN_PARAM_EDGES_H_ -#define CNN_PARAM_EDGES_H_ - -#include "cnn/cnn.h" -#include "cnn/model.h" - -namespace cnn { - -struct ParameterEdgeBase : public Edge { - virtual void accumulate_grad(const Tensor& g) = 0; -}; - -// represents optimizable parameters -struct ParameterEdge : public ParameterEdgeBase { - explicit ParameterEdge(Parameters* p) : dim(p->dim), params(p) {} - bool has_parameters() const override; - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; - void accumulate_grad(const Tensor& g) override; - Dim dim; - Parameters* params; -}; - -// represents specified (not learned) inputs to the network -struct InputEdge : public Edge { - explicit InputEdge(const Dim& d, const std::vector* pd) : dim(d), pdata(pd) {} - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; - Dim dim; - const std::vector* pdata; -}; - -// represents specified (not learned) scalar inputs to the network -struct ScalarInputEdge : public Edge { - explicit ScalarInputEdge(real s) : data(s), pdata(&data) {} - explicit ScalarInputEdge(const real* ps) : data(), pdata(ps) {} - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; - const cnn::real data; - const cnn::real* pdata; -}; - -// represents a matrix/vector 
embedding of an item of a discrete set (1-hot coding) -struct LookupEdge : public ParameterEdgeBase { - LookupEdge(LookupParameters* p, unsigned ind) : dim(p->dim), index(ind), pindex(&index), params(p), has_optimizable_parameters(true) {} - LookupEdge(LookupParameters* p, const unsigned* pind) : dim(p->dim), index(), pindex(pind), params(p), has_optimizable_parameters(true) {} - bool has_parameters() const override; - std::string as_string(const std::vector& arg_names) const override; - Tensor forward(const std::vector& xs) const override; - Tensor backward(const std::vector& xs, - const Tensor& fx, - const Tensor& dEdf, - unsigned i) const override; - void accumulate_grad(const Tensor& g) override; - Dim dim; - unsigned index; - const unsigned* pindex; - LookupParameters* params; - bool has_optimizable_parameters; -}; - -} // namespace cnn - -#endif diff --git a/cnn/rnn.cc b/cnn/rnn.cc deleted file mode 100644 index 0409c8276..000000000 --- a/cnn/rnn.cc +++ /dev/null @@ -1,102 +0,0 @@ -#include "cnn/rnn.h" - -#include -#include -#include -#include - -#include "cnn/training.h" - -using namespace std; - -namespace cnn { - -RNNBuilder::RNNBuilder(unsigned layers, - unsigned input_dim, - unsigned hidden_dim, - Model* model) : hidden_dim(hidden_dim), layers(layers), zeros(hidden_dim, 0) { - builder_state = 0; // created - assert(layers < 10); - - unsigned layer_input_dim = input_dim; - for (unsigned i = 0; i < layers; ++i) { - Parameters* p_x2h = model->add_parameters(Dim({hidden_dim, layer_input_dim})); - Parameters* p_h2h = model->add_parameters(Dim({hidden_dim, hidden_dim})); - Parameters* p_hb = model->add_parameters(Dim({hidden_dim})); - vector ps = {p_x2h, p_h2h, p_hb}; - params.push_back(ps); - layer_input_dim = hidden_dim; - } -} - -void RNNBuilder::new_graph() { - param_vars.clear(); - h.clear(); - h0.clear(); - builder_state = 1; -} - -void RNNBuilder::add_parameter_edges(Hypergraph* hg) { - if (builder_state != 1) { - cerr << "Invalid state: " << 
builder_state << endl; - abort(); - } - builder_state = 2; - - for (unsigned i = 0; i < layers; ++i) { - Parameters* p_x2h = params[i][0]; - Parameters* p_h2h = params[i][1]; - Parameters* p_hb = params[i][2]; - const string ts = to_string(i); - VariableIndex i_x2h = hg->add_parameter(p_x2h); - VariableIndex i_h2h = hg->add_parameter(p_h2h); - VariableIndex i_hb = hg->add_parameter(p_hb); - vector vars = {i_x2h, i_h2h, i_hb}; - param_vars.push_back(vars); - } -} - -void RNNBuilder::start_new_sequence(Hypergraph* hg, vector h_0) { - if (builder_state < 2) { - cerr << "Invalid state: " << builder_state << endl; - abort(); - } - builder_state = 3; - - h.clear(); - h0 = h_0; - if (h0.empty()) { - VariableIndex zero_input = hg->add_input(Dim({hidden_dim}), &zeros); - h0 = vector(layers, zero_input); - } - assert (h0.size() == layers); -} - -VariableIndex RNNBuilder::add_input(VariableIndex x, Hypergraph* hg) { - if (builder_state != 3) { - cerr << "Invalid state: " << builder_state << endl; - abort(); - } - const unsigned t = h.size(); - string ts = to_string(t); - h.push_back(vector(layers)); - vector& ht = h.back(); - VariableIndex in = x; - for (unsigned i = 0; i < layers; ++i) { - const vector& vars = param_vars[i]; - VariableIndex i_h_tm1; - if (t == 0) { // first time step - // initial value of h for layer i at timestep 0 - // defaults to zero matrix if not set in add_parameter_edges - i_h_tm1 = h0[i]; - } else { // tth time step - i_h_tm1 = h[t-1][i]; - } - // h3 = hbias + h2h * h_{t-1} + x2h * in - VariableIndex i_h3 = hg->add_function({vars[2], vars[0], in, vars[1], i_h_tm1}); - in = ht[i] = hg->add_function({i_h3}); - } - return ht.back(); -} - -} // namespace cnn diff --git a/cnn/rnn.h b/cnn/rnn.h deleted file mode 100644 index 5394fa84b..000000000 --- a/cnn/rnn.h +++ /dev/null @@ -1,71 +0,0 @@ -#ifndef CNN_RNN_H_ -#define CNN_RNN_H_ - -#include "cnn/cnn.h" -#include "cnn/edges.h" - -namespace cnn { - -class Model; - -struct RNNBuilder { - RNNBuilder() {} - 
explicit RNNBuilder(unsigned layers, - unsigned input_dim, - unsigned hidden_dim, - Model* model); - - // call this to reset the builder when you are going to create - // a new computation graph - void new_graph(); - - // call this before add_input - void add_parameter_edges(Hypergraph* hg); - - // Reset for new sequence on hypergraph hg with shared parameters - // call this before add_input and after add_parameter_edges, or - // when starting a new sequence on the same hypergraph. - // h_0 is used to initialize hidden layers at timestep 0 to given values - void start_new_sequence(Hypergraph* hg, std::vector h_0={}); - - // add another timestep by reading in the variable x - // return the hidden representation of the deepest layer - VariableIndex add_input(VariableIndex x, Hypergraph* hg); - - // rewind the last timestep - this DOES NOT remove the variables - // from the computation graph, it just means the next time step will - // see a different previous state. You can remind as many times as - // you want. 
- void rewind_one_step() { - h.pop_back(); - } - - // returns node (index) of most recent output - VariableIndex back() const { return h.back().back(); } - - // check to make sure parameters have been added before adding input - unsigned builder_state; - - // first index is layer, then x2h h2h hb - std::vector> params; - - // first index is layer, then x2h h2h hb - std::vector> param_vars; - - // first index is time, second is layer - std::vector> h; - - // initial value of h - // defaults to zero matrix input - std::vector h0; - - Hypergraph* hg; - - unsigned hidden_dim; - unsigned layers; - std::vector zeros; -}; - -} // namespace cnn - -#endif diff --git a/cnn/saxe_init.cc b/cnn/saxe_init.cc deleted file mode 100644 index deeb34f6e..000000000 --- a/cnn/saxe_init.cc +++ /dev/null @@ -1,19 +0,0 @@ -#include "saxe_init.h" - -#include -#include - -#include - -using namespace std; - -namespace cnn { - -Tensor OrthonormalRandom(int dim, real g) { - Eigen::MatrixXf m = RandomNormal(Dim({dim, dim}), 0.0, 0.01); - Eigen::JacobiSVD svd(m, Eigen::ComputeFullU); - return FromEigenMatrix(svd.matrixU()); -} - -} - diff --git a/cnn/saxe_init.h b/cnn/saxe_init.h deleted file mode 100644 index 9cccebd27..000000000 --- a/cnn/saxe_init.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef SAXE_INIT_H_ -#define SAXE_INIT_H_ - -#include "cnn/tensor.h" - -namespace cnn { - -// returns a dim x dim matrix -Tensor OrthonormalRandom(int dim, real g); - -} - -#endif diff --git a/cnn/tensor.h b/cnn/tensor.h deleted file mode 100644 index f6a9c56d3..000000000 --- a/cnn/tensor.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef CNN_TENSOR_H_ -#define CNN_TENSOR_H_ - -#include "config.h" -#ifdef HAVE_MINERVA_H -//# include "backends/minerva/tensor-minerva.h" -# include "backends/eigen/tensor-eigen.h" -#else -# include "backends/eigen/tensor-eigen.h" -#endif - -#endif diff --git a/cnn/tests/test_edges.cc b/cnn/tests/test_edges.cc deleted file mode 100644 index de9754466..000000000 --- a/cnn/tests/test_edges.cc +++ 
/dev/null @@ -1,227 +0,0 @@ -#define BOOST_TEST_DYN_LINK -#define BOOST_TEST_MODULE "CNNEdges" -#include - -#include - -#include "cnn/tests/test_utils.h" -#include "cnn/tensor.h" -#include "cnn/edges.h" - -using namespace std; -using namespace cnn; - -BOOST_GLOBAL_FIXTURE(TestTensorSetup) - -BOOST_AUTO_TEST_CASE(ESqrL2) -{ - Tensor U = Ccm({2,1}, {4,5}); - Tensor V = Ccm({2,1}, {1,1}); - cerr << U << endl; - SquaredEuclideanDistance e; - vector xs = {&U, &V}; - Tensor W = e.forward(xs); - cerr << "Norm^2:" << W << endl; - double eps = 1e-5; - BOOST_CHECK_CLOSE(t(W,0,0),25., eps); - Tensor dEdf = Ccm({1,1}, {1}); - Tensor d1 = e.backward(xs, W, dEdf, 0); - Tensor d2 = e.backward(xs, W, dEdf, 1); - cerr << d1 << endl; - cerr << d2 << endl; - BOOST_CHECK_CLOSE(t(d1,0,0), 6., eps); - BOOST_CHECK_CLOSE(t(d1,1,0), 8., eps); - BOOST_CHECK_CLOSE(t(d2,0,0), -6., eps); - BOOST_CHECK_CLOSE(t(d2,1,0), -8., eps); -} - -BOOST_AUTO_TEST_CASE(EMatrixMultiply) { - Tensor U = Ccm({2,3}, {1,2,3,4,5,6}); - Tensor V = Ccm({3,2}, {7,8,9,10,11,12}); - MatrixMultiply mm; - vector xs = {&U, &V}; - Tensor W = mm.forward(xs); - BOOST_REQUIRE_EQUAL(Dim({2,2}),size(W)); - double eps = 1e-5; - BOOST_CHECK_CLOSE(t(W,0,0), 76., eps); - BOOST_CHECK_CLOSE(t(W,1,0), 100., eps); - BOOST_CHECK_CLOSE(t(W,0,1), 103., eps); - BOOST_CHECK_CLOSE(t(W,1,1), 136., eps); - cerr << U << endl; - cerr << V << endl; - cerr << W << endl; -} - -BOOST_AUTO_TEST_CASE(EColumnConcat) -{ - Tensor u1 = Ccm({2,1}, {1, 2}); - Tensor u2 = Ccm({2,1}, {3, 4}); - Tensor u3 = Ccm({2,1}, {5, 6}); - cerr << u1 << endl; - cerr << u2 << endl; - cerr << u3 << endl; - vector xs = {&u1, &u2, &u3}; - ConcatenateColumns cc; - Tensor U = cc.forward(xs); - cerr << U << endl; - Tensor V = Ccm({3,2}, {7,8,9,10,11,12}); - MatrixMultiply mm; - vector xs2 = {&U, &V}; - Tensor W = mm.forward(xs2); - cerr << W << endl; - BOOST_REQUIRE_EQUAL(Dim({2,2}),size(W)); - double eps = 1e-5; - BOOST_CHECK_CLOSE(t(W,0,0), 76., eps); - 
BOOST_CHECK_CLOSE(t(W,1,0), 100., eps); - BOOST_CHECK_CLOSE(t(W,0,1), 103., eps); - BOOST_CHECK_CLOSE(t(W,1,1), 136., eps); - Tensor b1 = cc.backward(xs, U, U, 0); - Tensor b2 = cc.backward(xs, U, U, 1); - Tensor b3 = cc.backward(xs, U, U, 2); - cerr << b1 << endl; - cerr << b2 << endl; - cerr << b3 << endl; - BOOST_CHECK_EQUAL(t(u1,0,0), t(b1,0,0)); - BOOST_CHECK_EQUAL(t(u1,1,0), t(b1,1,0)); - BOOST_CHECK_EQUAL(t(u2,0,0), t(b2,0,0)); - BOOST_CHECK_EQUAL(t(u2,1,0), t(b2,1,0)); - BOOST_CHECK_EQUAL(t(u3,0,0), t(b3,0,0)); - BOOST_CHECK_EQUAL(t(u3,1,0), t(b3,1,0)); -} - -BOOST_AUTO_TEST_CASE(ERowConcat) -{ - Tensor u1 = Ccm({2,1}, {1, 4}); - Tensor u2 = Ccm({2,1}, {2, 5}); - Tensor u3 = Ccm({2,1}, {3, 6}); - vector xs = {&u1, &u2, &u3}; - Concatenate cr; - Tensor U = cr.forward(xs); - cerr << U << endl; - BOOST_REQUIRE_EQUAL(Dim({6,1}),size(U)); - double eps = 1e-5; - BOOST_CHECK_CLOSE(t(U,0,0), 1., eps); - BOOST_CHECK_CLOSE(t(U,1,0), 4., eps); - BOOST_CHECK_CLOSE(t(U,2,0), 2., eps); - BOOST_CHECK_CLOSE(t(U,3,0), 5., eps); - BOOST_CHECK_CLOSE(t(U,4,0), 3., eps); - BOOST_CHECK_CLOSE(t(U,5,0), 6., eps); - - Tensor b1 = cr.backward(xs, U, U, 0); - Tensor b2 = cr.backward(xs, U, U, 1); - Tensor b3 = cr.backward(xs, U, U, 2); - cerr << b1 << endl; - cerr << b2 << endl; - cerr << b3 << endl; - BOOST_CHECK_EQUAL(t(u1,0,0), t(b1,0,0)); - BOOST_CHECK_EQUAL(t(u1,1,0), t(b1,1,0)); - BOOST_CHECK_EQUAL(t(u2,0,0), t(b2,0,0)); - BOOST_CHECK_EQUAL(t(u2,1,0), t(b2,1,0)); - BOOST_CHECK_EQUAL(t(u3,0,0), t(b3,0,0)); - BOOST_CHECK_EQUAL(t(u3,1,0), t(b3,1,0)); -} - -BOOST_AUTO_TEST_CASE(EMultilinear) { - Tensor b = Ccm({3,1},{1,2,3}); - Tensor W = Ccm({3,2},{2,4,6,3,5,7}); - Tensor x = Ccm({2,1},{-1,1}); - Multilinear ml; - vector mlxs = {&b, &W, &x}; - Tensor r1 = ml.forward(mlxs); - Sum se; - MatrixMultiply mm; - Tensor p = mm.forward(vector({&W, &x})); - Tensor r2 = se.forward(vector({&p, &b})); - BOOST_REQUIRE_EQUAL(size(r1), size(r2)); - double eps = 1e-5; - cerr << r1 << endl; - cerr 
<< r2 << endl; - BOOST_CHECK_CLOSE(t(r1,0,0), 2., eps); - BOOST_CHECK_CLOSE(t(r1,1,0), 3., eps); - BOOST_CHECK_CLOSE(t(r1,2,0), 4., eps); - BOOST_CHECK_CLOSE(t(r2,0,0), 2., eps); - BOOST_CHECK_CLOSE(t(r2,1,0), 3., eps); - BOOST_CHECK_CLOSE(t(r2,2,0), 4., eps); - Tensor dEdf = Ccm({3,1}, {1., 0.5, 0.25}); - Tensor dEdx = ml.backward(mlxs, r1, dEdf, 0); - BOOST_CHECK_EQUAL(size(dEdx), size(b)); - dEdx = ml.backward(mlxs, r1, dEdf, 1); - BOOST_CHECK_EQUAL(size(dEdx), size(W)); - cerr << dEdx << endl; - dEdx = ml.backward(mlxs, r1, dEdf, 2); - BOOST_CHECK_EQUAL(size(dEdx), size(x)); - cerr << r2 << endl; - cerr << r1 << endl; -} - -BOOST_AUTO_TEST_CASE(ELogisticSigmoid) { - Tensor x = Ccm({5,1},{-6.f,-logf(3),0.f,logf(3),6.f}); - LogisticSigmoid ls; - vector xs = {&x}; - Tensor r = ls.forward(xs); - BOOST_REQUIRE_EQUAL(size(r), size(x)); - double eps = 1e-2; - BOOST_CHECK_CLOSE(t(r,0,0), 1. /(1. + exp(6.)), eps); - BOOST_CHECK_CLOSE(t(r,1,0), 0.25, eps); - BOOST_CHECK_CLOSE(t(r,2,0), 0.5, eps); - BOOST_CHECK_CLOSE(t(r,3,0), 0.75, eps); - BOOST_CHECK_CLOSE(t(r,4,0), 1. 
- t(r,0,0), eps); - Tensor dEdf = Ccm({5,1},{1.,1.,1.,1.,1.}); - Tensor dEdx = ls.backward(xs, r, dEdf, 0); - BOOST_CHECK_CLOSE(t(dEdx,1,0), 0.1875, eps); - BOOST_CHECK_CLOSE(t(dEdx,2,0), 0.25, eps); - BOOST_CHECK_CLOSE(t(dEdx,3,0), t(dEdx,1,0), eps); - BOOST_CHECK_CLOSE(t(dEdx,4,0), t(dEdx,0,0), eps); -} - -BOOST_AUTO_TEST_CASE(ETanh) { - Tensor x = Ccm({5,1},{-6.f,-logf(3),0.f,logf(3),6.f}); - Tanh th; - vector xs = {&x}; - Tensor r = th.forward(xs); - BOOST_REQUIRE_EQUAL(size(r), size(x)); - double eps = 1e-2; - BOOST_CHECK_CLOSE(t(r,1,0), -0.8, eps); - BOOST_CHECK_CLOSE(t(r,2,0), 0, eps); - BOOST_CHECK_CLOSE(t(r,3,0), 0.8, eps); - BOOST_CHECK_CLOSE(t(r,4,0), -t(r,0,0), eps); - Tensor dEdf = Ccm({5,1},{1.,1.,1.,1.,1.}); - Tensor dEdx = th.backward(xs, r, dEdf, 0); - BOOST_CHECK_CLOSE(t(dEdx,1,0), 0.36, eps); - BOOST_CHECK_CLOSE(t(dEdx,2,0), 1.0, eps); - BOOST_CHECK_CLOSE(t(dEdx,3,0), t(dEdx,1,0), eps); - BOOST_CHECK_CLOSE(t(dEdx,4,0), t(dEdx,0,0), eps); -} - -BOOST_AUTO_TEST_CASE(ESoftmaxUnif) { - for (float v = -12.; v < 12.; v += 1.) 
{ - Tensor u = Ccm({4,1}, {v, v, v, v}); - Softmax sm; - vector xs = {&u}; - Tensor m = sm.forward(xs); - BOOST_REQUIRE_EQUAL(Dim({4,1}),size(m)); - double eps = 1e-5; - for (unsigned i = 0; i < 4; ++i) - BOOST_CHECK_CLOSE(t(m, i, 0), 0.25, eps); - Tensor dEdf = Ccm({4,1}, {1., 0., 0., 0.}); - Tensor d = sm.backward(xs, m, dEdf, 0); - BOOST_CHECK_CLOSE(t(d,0,0), 0.1875, eps); - BOOST_CHECK_CLOSE(t(d,1,0), -0.0625, eps); - BOOST_CHECK_CLOSE(t(d,2,0), -0.0625, eps); - BOOST_CHECK_CLOSE(t(d,3,0), -0.0625, eps); -// cerr << d << endl; - - LogSoftmax lsm; - Tensor lm = lsm.forward(xs); - BOOST_REQUIRE_EQUAL(Dim({4,1}),size(lm)); - for (unsigned i = 0; i < 4; ++i) - BOOST_CHECK_CLOSE(log(t(m, i, 0)), t(lm, i, 0), eps); - Tensor b = lsm.backward(xs, lm, dEdf, 0); - BOOST_CHECK_CLOSE(t(b, 0, 0), 0.75, eps); - BOOST_CHECK_CLOSE(t(b, 1, 0), -0.25, eps); - BOOST_CHECK_CLOSE(t(b, 2, 0), -0.25, eps); - BOOST_CHECK_CLOSE(t(b, 3, 0), -0.25, eps); - } -} - - diff --git a/cnn/tests/test_init.cc b/cnn/tests/test_init.cc deleted file mode 100644 index 15ebb4d67..000000000 --- a/cnn/tests/test_init.cc +++ /dev/null @@ -1,53 +0,0 @@ -#define BOOST_TEST_DYN_LINK -#define BOOST_TEST_MODULE "CNNInit" -#include - -#include - -#include "cnn/tests/test_utils.h" -#include "cnn/tensor.h" -#include "cnn/saxe_init.h" - -using namespace std; -using namespace cnn; - -BOOST_GLOBAL_FIXTURE(TestTensorSetup) - -BOOST_AUTO_TEST_CASE(EOrthonormalRandom) -{ - for (int d = 4; d < 128; d += 1) { - Tensor Q = OrthonormalRandom(d, 1.0); - BOOST_REQUIRE_EQUAL(size(Q), Dim({d,d})); - - // check that this is actually returning orthogonal matrices -#if MINERVA_BACKEND - Tensor I = Q.Trans() * Q; -#else - Tensor I = Q.transpose() * Q; -#endif - double eps = 1e-1; - for (int i = 0; i < d; ++i) - for (int j = 0; j < d; ++j) - BOOST_CHECK_CLOSE(t(I,i,j) + 1., (i == j ? 2. 
: 1.), eps); - } -} - -BOOST_AUTO_TEST_CASE(BernoulliInit) { - Tensor r = RandomBernoulli(Dim({1000,1000}), 0.5f); - int tot = 0; - for (int i = 0; i < 1000; ++i) - for (int j = 0; j < 1000; ++j) - if (t(r,i,j)) ++tot; - BOOST_CHECK_GT(tot, 490000); - BOOST_CHECK_LT(tot, 510000); -} - -BOOST_AUTO_TEST_CASE(Rand01) { - cnn::real tot = 0; - for (unsigned i = 0; i < 1000000; ++i) - tot += cnn::rand01(); - BOOST_CHECK_GT(tot, 490000.); - BOOST_CHECK_LT(tot, 510000.); -} - - diff --git a/cnn/tests/test_utils.h b/cnn/tests/test_utils.h deleted file mode 100644 index 42a45d601..000000000 --- a/cnn/tests/test_utils.h +++ /dev/null @@ -1,71 +0,0 @@ -#ifndef CNN_TEST_UTILS_H_ -#define CNN_TEST_UTILS_H_ - -#include "cnn/tensor.h" - -namespace cnn { - -#if MINERVA_BACKEND - -struct TestTensorSetup { - TestTensorSetup() { - int argc = 1; - char* foo = "foo"; - char** argv = {&foo}; - minerva::MinervaSystem::Initialize(&argc, &argv); -#if HAS_CUDA - minerva::MinervaSystem::Instance().device_manager().CreateGpuDevice(0); -#else - minerva::MinervaSystem::Instance().device_manager().CreateCpuDevice(); -#endif - } -}; - -double t(const Tensor& T, unsigned i, unsigned j) { - int m = T.Size(0); - return T.Get().get()[j * m + i]; -} - -std::ostream& operator<<(std::ostream& os, const Tensor& T) { - if (T.Size().NumDims() == 2) { - int m = T.Size(0); - int n = T.Size(1); - for (int i = 0; i < m; ++i) { - for (int j = 0; j < n; ++j) { - os << '\t' << t(T,i,j); - } - os << std::endl; - } - return os; - } else { - os << T.Size() << ": "; - minerva::FileFormat ff; ff.binary = false; - T.ToStream(os, ff); - return os; - } -} - -#else - -struct TestTensorSetup { - TestTensorSetup() { - int argc = 1; - char* p = "foo"; - char** argv = {&p}; - cnn::Initialize(argc, argv); - } -}; - -double t(const Tensor& T, unsigned i, unsigned j) { - return T(i, j); -} - -double t(const Tensor& T, unsigned i) { - return T(i, 0); -} - -#endif - -} // namespace cnn - -#endif diff --git a/cnn/training.cc 
b/cnn/training.cc deleted file mode 100644 index d93864ff5..000000000 --- a/cnn/training.cc +++ /dev/null @@ -1,105 +0,0 @@ -#include "cnn/training.h" - -namespace cnn { - -using namespace std; - -Trainer::~Trainer() {} - -void Trainer::clip_gradients() { - if (clipping_enabled) { - double gg = 0; - for (auto p : model->all_parameters_list()) - gg+=p->g_squared_l2norm(); - gg = sqrt(gg); - if (gg > clip_threshold) { - ++clips; - for (auto p : model->all_parameters_list()) - p->rescale_gradient(clip_threshold / gg); - } - } -} - -void SimpleSGDTrainer::update(real scale) { - clip_gradients(); - for (auto p : model->parameters_list()) { - const Tensor reg = p->values * lambda; - p->values -= (eta * scale) * p->g; - p->values -= reg; - p->clear(); - } - for (auto p : model->lookup_parameters_list()) { - for (auto& it : p->g) { - const Tensor reg = p->values[it.first] * lambda; - p->values[it.first] -= it.second * (eta * scale); - p->values[it.first] -= reg; - } - p->clear(); - } - ++updates; -} - -static inline Tensor& get_or_init(Tensor& x, const Tensor& t) { - if (x.rows() == 0) { - x = t; - x.setZero(); - } - return x; -} - -void MomentumSGDTrainer::update(real scale) { - clip_gradients(); - for (auto p : model->parameters_list()) { - Tensor& v = get_or_init(vp[p], p->values); - const Tensor reg = p->values * lambda; - v = momentum * v - (eta * scale) * p->g; - p->values += v; - p->values -= reg; - p->clear(); - } - for (auto p : model->lookup_parameters_list()) { - unordered_map& vx = vl[p]; - for (auto& it : p->g) { - Tensor& v = get_or_init(vx[it.first], it.second); - const Tensor reg = p->values[it.first] * lambda; - v = momentum * v - (eta * scale) * it.second; - p->values[it.first] += v; - p->values[it.first] -= reg; - } - p->clear(); - } - ++updates; -} - -#if 0 -void RMSPropTrainer::update(real scale) { - for (auto p : params) { - Tensor& x = p->values; - Tensor& g = p->g; - Tensor& v = vp[p]; - v *= decay; - v += g.cwiseProduct(g) * (1.0 - decay); - const 
Tensor reg = x * lambda; - x -= eta * g.cwiseQuotient((v + Tensor::Constant(v.rows(),v.cols(),eps)).cwiseSqrt()); - x -= reg; - p->clear(); - } - for (auto p : lookup_params) { - unordered_map& vt = vl[p]; - for (auto it : p->g) { - Tensor& x = p->values[it.first]; - Tensor& g = it.second; - Tensor& v = vt[it.first]; - if (v.rows() == 0) v = g * 0; - v *= decay; - v += g.cwiseProduct(g) * (1.0 - decay); - const Tensor reg = x * lambda; - x -= eta * g.cwiseQuotient((v + Tensor::Constant(v.rows(),v.cols(),eps)).cwiseSqrt()); - x -= reg; - } - p->clear(); - } -} -#endif - -} // namespace cnn diff --git a/cnn/training.h b/cnn/training.h deleted file mode 100644 index f8242ebbf..000000000 --- a/cnn/training.h +++ /dev/null @@ -1,65 +0,0 @@ -#ifndef CNN_TRAINING_H_ -#define CNN_TRAINING_H_ - -#include -#include -#include -#include "cnn/model.h" - -namespace cnn { - -struct Trainer { - explicit Trainer(Model* m, real e0, real lam) : - eta0(e0), eta(e0), eta_decay(), epoch(), lambda(lam), clipping_enabled(true), clip_threshold(5), clips(), updates(), model(m) {} - virtual ~Trainer(); - - virtual void update(real scale = 1.0) = 0; - void update_epoch(real r = 1) { - epoch += r; - eta = eta0 / (1 + epoch * eta_decay); - } - - // if clipping is enabled and the gradient is too big, clip - void clip_gradients(); - - // learning rates - real eta0; - real eta; - real eta_decay; - real epoch; - - real lambda; // weight regularization (l2) - - // clipping - real clipping_enabled; - real clip_threshold; - real clips; - real updates; - - void status() { - std::cerr << "[epoch=" << epoch << " eta=" << eta << " clips=" << clips << " updates=" << updates << "] "; - updates = clips = 0; - } - - Model* model; // parameters and gradients live here -}; - -struct SimpleSGDTrainer : public Trainer { - explicit SimpleSGDTrainer(Model* m, real lam = 1e-6, real e0 = 0.1) : Trainer(m, e0, lam) {} - void update(real scale) override; -}; - -struct MomentumSGDTrainer : public Trainer { - explicit 
MomentumSGDTrainer(Model* m, real lam = 1e-6, real e0 = 0.01, real mom = 0.9) : - Trainer(m, e0, lam), momentum(mom) {} - void update(real scale) override; - - real momentum; - - std::unordered_map vp; - std::unordered_map> vl; -}; - -} // namespace cnn - -#endif diff --git a/config.h.cmake b/config.h.cmake index c319372b1..3b73020b2 100644 --- a/config.h.cmake +++ b/config.h.cmake @@ -1,6 +1,8 @@ -#ifndef CNN_CONFIG_H_ -#define CNN_CONFIG_H_ +#ifndef DYNET_CONFIG_H_ +#define DYNET_CONFIG_H_ -#cmakedefine HAVE_MINERVA_H @HAVE_MINERVA_H@ +#cmakedefine WITH_MINERVA_BACKEND @WITH_MINERVA_BACKEND@ +#cmakedefine WITH_THPP_BACKEND @WITH_THPP_BACKEND@ +#cmakedefine WITH_EIGEN_BACKEND @WITH_EIGEN_BACKEND@ #endif diff --git a/doc/Makefile b/doc/Makefile new file mode 100644 index 000000000..e0d42bc90 --- /dev/null +++ b/doc/Makefile @@ -0,0 +1,177 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = build + +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + +# Internal variables. 
+PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source + +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 
+ +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Dynet.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Dynet.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/Dynet" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Dynet" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." 
+ +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." 
+ +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/doc/build_doc.sh b/doc/build_doc.sh new file mode 100644 index 000000000..8db4067b1 --- /dev/null +++ b/doc/build_doc.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +# Run Doxygen +cd doxygen +doxygen +cd .. + +# Run sphinx to generate text and html doc +make html diff --git a/doc/doxygen/Doxyfile b/doc/doxygen/Doxyfile new file mode 100644 index 000000000..e9ba5512d --- /dev/null +++ b/doc/doxygen/Doxyfile @@ -0,0 +1,2384 @@ +# Doxyfile 1.8.9.1 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. 
+ +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "Dynet" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = "A dynamic neural network library written in C++" + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. 
+ +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. 
Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. 
+ +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. 
+ +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = NO + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. 
For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: +# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: +# Fortran. In the later case the parser tries to guess whether the code is fixed +# or free formatted code, this is the default for Fortran type files), VHDL. For +# instance to make doxygen treat .inc files as Fortran files (default is PHP), +# and .f files as C (default is Fortran), use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. 
+# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. 
If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = YES + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. 
So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. 
+ +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. 
+ +HIDE_UNDOC_MEMBERS = YES + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = YES + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO, these declarations will be +# included in the documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = YES + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = YES + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES, upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. +# The default value is: system dependent. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. 
+ +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = NO + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. 
+ +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. 
+# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). 
+# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. 
See also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. 
The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. +# Note: If this tag is empty the current directory is searched. + +INPUT =../../dynet/expr.h ../../dynet/training.h ../../dynet/rnn.h ../../dynet/dim.h ../../examples/cpp/encdec/encdec.h ../../examples/cpp/rnnlm-batch/rnnlm-batch.h ../../examples/cpp/mnist/mlp.h + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. 
If left blank the +# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, +# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, +# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, +# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, +# *.qsf, *.as and *.js. + +FILE_PATTERNS = + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = NO + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. 
Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +# +# +# where is the value of the INPUT_FILTER tag, and is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. 
Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. 
+ +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# function all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. 
The htags tool is part of GNU's global source tagging system +# (see http://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. + +VERBATIM_HEADERS = YES + +# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the +# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the +# cost of reduced performance. This can be particularly helpful with template +# rich C++ code for which doxygen's built-in parser lacks the necessary type +# information. +# Note: The availability of this option depends on whether or not doxygen was +# compiled with the --with-libclang option. +# The default value is: NO. + +CLANG_ASSISTED_PARSING = NO + +# If clang assisted parsing is enabled you can provide the compiler with command +# line options that you would normally use when invoking the compiler. Note that +# the include paths will already be set by doxygen for the files and directories +# specified with INPUT and INCLUDE_PATH. +# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. 
+ +CLANG_OPTIONS = + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in +# which the alphabetical index list will be split. +# Minimum value: 1, maximum value: 20, default value: 5. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = NO + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. 
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. 
+# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = NO + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. 
Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: http://developer.apple.com/tools/xcode/), introduced with +# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. 
+# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
+ +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the master .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. 
The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. 
If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. 
If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. 
When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# http://www.mathjax.org) which uses client side Javascript for the rendering +# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX +# installed or if you want the formulas to look prettier in the HTML output. When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. See the MathJax site (see: +# http://docs.mathjax.org/en/latest/output.html) for more details. +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility), NativeMML (i.e. MathML) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. 
The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. However, it is strongly recommended to install a local copy of +# MathJax from http://www.mathjax.org before deployment. +# The default value is: http://cdn.mathjax.org/mathjax/latest. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. For example +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. See the MathJax site +# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use + S +# (what the is depends on the OS and browser, but it is typically +# , /