Autobatch device (#846)
* Set pseudo_node device in autobatching.

* Ensure the device is set based on input arguments for any function addition.


Former-commit-id: 37f61e6
neubig authored Sep 1, 2017
1 parent 46c3512 commit 86c05a4
Showing 2 changed files with 23 additions and 39 deletions.
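Before the diffs, a minimal, self-contained sketch of the device-resolution rule the second commit bullet describes and that the new add_function_node centralizes. The Device and Node types and the resolve_device helper below are simplified stand-ins for illustration, not the real DyNet classes: an explicitly set device is kept; otherwise the node inherits the device of its first argument, falling back to the default device when it has no arguments.

    #include <cassert>
    #include <vector>

    struct Device {};
    Device default_device;  // stand-in for dynet::default_device

    struct Node {
      std::vector<unsigned> args;   // indices of argument nodes in the graph
      Device* device = nullptr;     // may be unset when the node is constructed
      unsigned arity() const { return (unsigned)args.size(); }
    };

    // Mirrors the device logic of ComputationGraph::add_function_node.
    void resolve_device(Node* node, const std::vector<Node*>& nodes) {
      if (node->device == nullptr) {
        if (node->arity() > 0)
          node->device = nodes[node->args[0]]->device;  // inherit from first arg
        else
          node->device = &default_device;               // source node: default
      }
    }

    int main() {
      Node input;                   // a source node placed explicitly
      input.device = &default_device;
      std::vector<Node*> nodes = {&input};

      Node op;                      // an operation taking node 0 as input
      op.args = {0};
      resolve_device(&op, nodes);
      assert(op.device == input.device);  // device propagated from its argument
      return 0;
    }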
dynet/dynet.h (58 changes: 20 additions & 38 deletions)
@@ -488,6 +488,7 @@ struct ComputationGraph {
   // flag of checking Inf/NaN of each layer. Only performing checking when
   // immediate_compute is also set to true.
   bool check_validity;
+  VariableIndex add_function_node(Node *node);
   void set_dim_for_new_node(const VariableIndex& i);
 
   std::vector<CGCheckpoint> checkpoints;
@@ -753,70 +754,51 @@ struct Node {
   bool has_cuda_implemented = true;
 };
 
-template <class Function>
-inline VariableIndex ComputationGraph::add_function(
-    const std::initializer_list<VariableIndex>& arguments) {
-  VariableIndex new_node_index(nodes.size());
-  nodes.push_back(new Function(arguments));
-  if (nodes.back()->device == nullptr) {
-    if (arguments.size()) {
-      nodes.back()->device = nodes[*arguments.begin()]->device;
+
+inline VariableIndex ComputationGraph::add_function_node(Node *node) {
+  VariableIndex new_node_index((VariableIndex)nodes.size());
+  nodes.push_back(node);
+  if (node->device == nullptr) {
+    if (node->arity() > 0) {
+      node->device = nodes[node->args[0]]->device;
     } else {
-      nodes.back()->device = dynet::default_device;
+      node->device = dynet::default_device;
     }
   }
-  if (nodes.back()->device->type == DeviceType::GPU && !nodes.back()->has_cuda_implemented)
-    DYNET_NO_CUDA_IMPL_ERROR(nodes.back()->as_dummy_string())
+  if (node->device->type == DeviceType::GPU && !node->has_cuda_implemented)
+    DYNET_NO_CUDA_IMPL_ERROR(node->as_dummy_string())
   set_dim_for_new_node(new_node_index);
   return new_node_index;
 }
 
+template <class Function>
+inline VariableIndex ComputationGraph::add_function(
+    const std::initializer_list<VariableIndex>& arguments) {
+  return add_function_node(new Function(arguments));
+}
+
 // pass side information to the function. these are likely to be
 // nondifferentiable arguments
 template <class Function, typename... Args>
 inline VariableIndex ComputationGraph::add_function(
     const std::initializer_list<VariableIndex>& arguments,
     Args&&... side_information) {
-  VariableIndex new_node_index(nodes.size());
-  nodes.push_back(
+  return add_function_node(
       new Function(arguments, std::forward<Args>(side_information)...));
-  if (nodes.back()->device == nullptr) {
-    if (arguments.size()) {
-      nodes.back()->device = nodes[*arguments.begin()]->device;
-    } else {
-      nodes.back()->device = dynet::default_device;
-    }
-  }
-  if (nodes.back()->device->type == DeviceType::GPU && !nodes.back()->has_cuda_implemented)
-    DYNET_NO_CUDA_IMPL_ERROR(nodes.back()->as_dummy_string())
-  set_dim_for_new_node(new_node_index);
-  return new_node_index;
 }
 
 template <class Function, typename T>
 inline VariableIndex ComputationGraph::add_function(const T& arguments) {
-  VariableIndex new_node_index((VariableIndex)nodes.size());
-  nodes.push_back(new Function(arguments));
-  nodes.back()->device = dynet::default_device;
-  if (nodes.back()->device->type == DeviceType::GPU && !nodes.back()->has_cuda_implemented)
-    DYNET_NO_CUDA_IMPL_ERROR(nodes.back()->as_dummy_string())
-  set_dim_for_new_node(new_node_index);
-  return new_node_index;
+  return add_function_node(new Function(arguments));
 }
 
 // pass side information to the function. these are likely to be
 // nondifferentiable arguments
 template <class Function, typename T, typename... Args>
 inline VariableIndex ComputationGraph::add_function(
     const T& arguments, Args&&... side_information) {
-  VariableIndex new_node_index((VariableIndex)nodes.size());
-  nodes.push_back(
+  return add_function_node(
       new Function(arguments, std::forward<Args>(side_information)...));
-  nodes.back()->device = dynet::default_device;
-  if (nodes.back()->device->type == DeviceType::GPU && !nodes.back()->has_cuda_implemented)
-    DYNET_NO_CUDA_IMPL_ERROR(nodes.back()->as_dummy_string())
-  set_dim_for_new_node(new_node_index);
-  return new_node_index;
 }
 
 } // namespace dynet
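The refactor above funnels all four templated add_function overloads through the single non-template add_function_node, so device resolution, the CUDA-availability check, and dimension setup happen in exactly one place. Note the behavioral change for the const T& overloads: they previously assigned dynet::default_device unconditionally, whereas they now inherit the device from the node's first argument when one exists. A hedged sketch of the delegation pattern follows; Graph, add_node, and Tanh are illustrative stand-ins, not the real DyNet API.

    #include <cstdio>
    #include <memory>
    #include <utility>
    #include <vector>

    struct Node { virtual ~Node() {} };

    struct Graph {
      std::vector<std::unique_ptr<Node>> nodes;

      // Single non-template construction path: shared checks happen once here.
      unsigned add_node(Node* node) {
        nodes.emplace_back(node);
        // ... device resolution, CUDA check, dimension setup would go here ...
        return (unsigned)(nodes.size() - 1);
      }

      // Thin templated wrappers only build the node and delegate.
      template <class F, typename... Args>
      unsigned add_function(Args&&... args) {
        return add_node(new F(std::forward<Args>(args)...));
      }
    };

    struct Tanh : Node {};

    int main() {
      Graph g;
      unsigned i = g.add_function<Tanh>();
      std::printf("new node index: %u\n", i);
      return 0;
    }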
dynet/exec.cc (4 changes: 3 additions & 1 deletion)
@@ -662,8 +662,10 @@ const Tensor& BatchedExecutionEngine::incremental_forward_no_update(VariableIndex
       // Get the concatenation and pseudo-node info
       my_batch.concat = node->autobatch_concat(cg);
       my_batch.pseudo_node = node->autobatch_pseudo_node(cg, batch_ids);
-      if(my_batch.pseudo_node != nullptr)
+      if (my_batch.pseudo_node != nullptr) {
         my_batch.pseudo_node->aux_mem = head_aux;
+        my_batch.pseudo_node->device = node->device;
+      }
       else
         cg.nodes[batch_ids[0]]->aux_mem = head_aux;
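This hunk copies the representative node's device onto the autobatching pseudo-node, whose device may otherwise be left unset when autobatch_pseudo_node creates it, per the first commit bullet. A minimal sketch of the fixed control flow, using hypothetical simplified types (attach_pseudo_node is an illustrative helper, not a DyNet function):

    #include <cassert>

    struct Device {};

    struct Node {
      Device* device = nullptr;
      void* aux_mem = nullptr;
    };

    // Mirrors the fixed control flow: when a pseudo-node exists, give it both
    // the auxiliary memory and the device of the node it was derived from.
    void attach_pseudo_node(Node* pseudo_node, Node* node, void* head_aux) {
      if (pseudo_node != nullptr) {
        pseudo_node->aux_mem = head_aux;
        pseudo_node->device = node->device;  // the line this commit adds
      } else {
        node->aux_mem = head_aux;            // no pseudo-node: use the node itself
      }
    }

    int main() {
      Device gpu;
      Node rep;            // representative node of the batch
      rep.device = &gpu;
      Node pseudo;         // pseudo-node created by autobatching, device unset
      attach_pseudo_node(&pseudo, &rep, nullptr);
      assert(pseudo.device == rep.device);
      return 0;
    }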

