diff --git a/Makefile b/Makefile
index f60600f..1949783 100644
--- a/Makefile
+++ b/Makefile
@@ -36,6 +36,7 @@ endif
 all: $(EXAMPLE) $(TEST)
 
 $(EXAMPLE): $(BINDIR)/%: $(BUILDDIR)/example/%.o $(OBJ)
+	@mkdir -p $(@D)
 	$(CXX) $(CFLAGS) $(LIB) $< $(OBJ) -o $@
 
 $(EXAMPLEOBJ): $(BUILDDIR)/example/%.o: $(EXAMPLEDIR)/%.cc
@@ -43,6 +44,7 @@ $(EXAMPLEOBJ): $(BUILDDIR)/example/%.o: $(EXAMPLEDIR)/%.cc
 	$(CXX) $(CFLAGS) $(INC) -c $< -o $@
 
 $(TEST): $(BINDIR)/%: $(BUILDDIR)/test/%.o $(OBJ)
+	@mkdir -p $(@D)
 	$(CXX) $(CFLAGS) $(LIB) $< $(OBJ) -o $@
 
 $(TESTOBJ): $(BUILDDIR)/test/%.o: $(TESTDIR)/%.cc
diff --git a/README.git b/README.git
deleted file mode 100644
index 619356e..0000000
--- a/README.git
+++ /dev/null
@@ -1,10 +0,0 @@
-Galois is a general deep learning framework in C++ using dataflow graphs. The distinguish features of Galois is that it aims for dynamic neural network. The guiding principles of design includes both efficiency both convenience. For user guide, please have a look at the example fold.
-
-Galois is a work in progress. It only supports CPU computing for the moment. It uses extensively new features of C++11, especially lambda function, to reduce lines of code. Using Galois is just as simple as drawing dataflow graphs. In particular, it's easy to implement recurrent neural network with Galois. Galois is also efficient. For the same mnist_mlp model (from torch demos) on Mac Pro, the consumed time of each epoch is: Torch ~ 40s; Keras ~ 60s; Galois ~ 30s.
-
-Todo list (not in priority order):
-- RMSprop, Adagrad
-- dynamic model
-- filters: lstm, dropout, cnn, etc
-- narray: better api, cuda version
-- more I/O utils
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..516f371
--- /dev/null
+++ b/README.md
@@ -0,0 +1,12 @@
+# Neural Network Library
+
+This library is a research project on implementing an ultimately generic neural network without performance overhead.
+
+## Interesting Features
+ * Network inside network: every network is considered a filter and can be used to construct more complicated networks;
+ * Network sharing and cloning: sub-networks can share parameters and are clonable;
+ * In-place memory optimization by default: one neuron can accept signals from several other neurons with just one copy of the n-dim array memory;
+ * Dynamic training: it is possible to train only part of the network (e.g. an RNN with variable input length) and to fix (freeze) part of the network;
+ * Dynamic network [WIP]: fast dynamic network construction, optimization with caching.
+
+The guiding principles of the design are efficiency and convenience; for a user guide, please have a look at the examples folder. The library makes extensive use of C++11 features to keep the code simple and clear: using Galois is as simple as drawing dataflow graphs. Galois is also efficient: for the same mnist_mlp model (from the torch demos) on a 2013 Mac Pro, the time per epoch is roughly Torch ~ 40s, Keras ~ 60s, Galois ~ 30s. Only a CPU implementation exists for the moment.
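
Note on the Makefile change above: $(@D) is GNU Make's automatic variable for the directory part of the target path, so "@mkdir -p $(@D)" creates the output directory on demand before the link step instead of failing when it does not yet exist. A minimal standalone sketch of the pattern (the BINDIR/BUILDDIR values here are illustrative placeholders, not the project's actual variable definitions):

    # Sketch: create the target's directory before linking into it.
    BINDIR   := bin
    BUILDDIR := build

    $(BINDIR)/%: $(BUILDDIR)/%.o
    	@mkdir -p $(@D)           # $(@D) = directory part of $@
    	$(CXX) $(CFLAGS) $< -o $@  # recipe lines must start with a tab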
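To make the "network inside network" feature described in the new README concrete: the diff does not show Galois's actual classes, so the Filter/Net names and the apply() method below are a hypothetical C++11 sketch of the composite idea, not the library's API. A network is itself a filter, so networks nest, and holding sub-networks by shared_ptr is one way two nets could share the same sub-network:

    #include <memory>
    #include <vector>

    // Hypothetical sketch: every network is a Filter, and a Net is itself a
    // Filter composed of other Filters, so networks nest inside networks.
    struct Filter {
        virtual std::vector<double> apply(const std::vector<double>& x) = 0;
        virtual ~Filter() {}
    };

    struct Net : Filter {
        std::vector<std::shared_ptr<Filter>> stages;  // sub-networks/filters
        std::vector<double> apply(const std::vector<double>& x) override {
            std::vector<double> y = x;
            for (auto& s : stages) y = s->apply(y);   // run stages in order
            return y;
        }
    };

A Net built this way can be placed inside another Net's stages, which is the sense in which every network is a filter; reusing the same shared_ptr<Filter> in two nets gives the parameter sharing the README describes.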