Commit 909dd2d

support Windows installation

1 parent 5bf0a50 commit 909dd2d

File tree: 9 files changed, +78 -21 lines

README.md (+22 -2)

@@ -25,7 +25,8 @@ TorchDrug is a [PyTorch]-based machine learning toolbox designed for several pur
 Installation
 ------------
 
-TorchDrug is compatible with Python 3.7/3.8 and PyTorch >= 1.4.0.
+TorchDrug can be installed on Linux, Windows, or macOS. It is compatible with
+Python 3.7/3.8 and PyTorch >= 1.4.0.
 
 ### From Conda ###
 
@@ -46,13 +47,32 @@ instructions in https://github.com/rusty1s/pytorch_scatter
 
 ### From Source ###
 
-```
+```bash
 git clone https://github.com/DeepGraphLearning/torchdrug
 cd torchdrug
 pip install -r requirements.txt
 python setup.py install
 ```
 
+### Windows (PowerShell) ###
+
+First install the build tools for Visual Studio, then install the following
+modules in PowerShell:
+
+```powershell
+Install-Module Pscx -AllowClobber
+Install-Module VSSetup
+```
+
+Initialize Visual Studio in PowerShell with the following commands. To set this
+up for all PowerShell sessions, write them to your PowerShell profile. Change
+the library path to match your own Python installation.
+
+```powershell
+Import-VisualStudioVars -Architecture x64
+$env:LIB += ";C:\Program Files\Python37\libs"
+```
+
 Quick Start
 -----------
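After `Import-VisualStudioVars` runs, the MSVC toolchain should be available in the current session. A quick optional sanity check — our suggestion, not part of the committed instructions:

```powershell
# Optional sanity check, not part of the committed instructions: after
# Import-VisualStudioVars, the MSVC compiler should resolve on PATH and
# $env:LIB should contain the Python libs directory appended above.
Get-Command cl.exe                            # errors if the toolchain is absent
$env:LIB -split ';' | Select-String 'Python'  # shows the appended libs entry
```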

doc/source/installation.rst (+38 -1)

@@ -1,7 +1,10 @@
 Installation
 ============
 
-TorchDrug is compatible with Python 3.7/3.8 and PyTorch >= 1.4.0.
+TorchDrug can be installed on Linux, Windows, or macOS. It is compatible with
+Python 3.7/3.8 and PyTorch >= 1.4.0.
+
+For Windows
 
 From Conda
 ----------
@@ -48,3 +51,37 @@ From Source
 cd torchdrug
 pip install -r requirements.txt
 python setup.py install
+
+Windows (PowerShell)
+--------------------
+
+For Windows, we additionally need to install Visual Studio to enable JIT
+compilation. If you don't have Visual Studio installed, you can get a minimal
+version of the build tools for Visual Studio at
+https://visualstudio.microsoft.com/downloads/.
+
+.. note::
+
+    For non-English systems, you must select English as the language for Visual
+    Studio. Otherwise, the ANSI encoding of Windows will cause errors in Python.
+
+We then set up a command-line environment for JIT compilation. Launch PowerShell
+as administrator and install the following extensions:
+
+.. code:: powershell
+
+    Install-Module Pscx -AllowClobber
+    Install-Module VSSetup
+
+Initialize Visual Studio in PowerShell with the following commands, changing
+the library path to match your own Python installation:
+
+.. code:: powershell
+
+    Import-VisualStudioVars -Architecture x64
+    $env:LIB += ";C:\Program Files\Python37\libs"
+
+These commands need to be executed in every PowerShell session. To set this up
+for all sessions, write them to your PowerShell profile. The profile path is
+stored in the ``$profile`` variable in PowerShell; you may need to create the
+profile file if it does not exist yet.
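The profile step referenced above can be scripted. A minimal sketch, assuming the default ``$profile`` location (this snippet is our illustration, not part of the commit):

```powershell
# Minimal sketch: create the profile if missing, then append the two setup
# commands. Single quotes keep $env:LIB from being expanded at write time.
if (!(Test-Path $profile)) { New-Item -ItemType File -Force $profile }
Add-Content $profile 'Import-VisualStudioVars -Architecture x64'
Add-Content $profile '$env:LIB += ";C:\Program Files\Python37\libs"'
```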

torchdrug/layers/functional/extension/embedding.cpp (+4 -3)

@@ -210,6 +210,7 @@ void rotate_backward_out_cpu(const scalar_t *entity, const scalar_t *relation,
                              const int64_t *h_index, const int64_t *t_index, const int64_t *r_index,
                              const scalar_t *score_grad, scalar_t *entity_grad, scalar_t *relation_grad,
                              int64_t num_entity, int64_t num_relation, int64_t embedding_dim, int64_t num_sample) {
+    const float kEpsilon = 1e-15; // 1e-15 from GraphVite
     // since #CPU thread < embedding_dim / 2
     // we can parallel over embedding_dim to avoid atomic operations
     parallel_for(0, embedding_dim / 2, 0, [&](int64_t start, int64_t end) {
@@ -298,7 +299,7 @@ void simple_backward_out_cpu(const scalar_t *entity, const scalar_t *relation,
 #define DECLARE_FORWARD_IMPL(NAME) \
     Tensor NAME##_forward_cpu(const Tensor &entity_, const Tensor &relation_, \
                               const Tensor &h_index_, const Tensor &t_index_, const Tensor &r_index_) { \
-        constexpr const char *fn_name = #NAME"_forward_cpu"; \
+        constexpr const char *fn_name = #NAME"_forward_cpu"; \
         TensorArg entity_arg(entity_, "entity", 1), relation_arg(relation_, "relation", 2), \
                   h_index_arg(h_index_, "h_index", 3), r_index_arg(r_index_, "r_index", 4), \
                   t_index_arg(t_index_, "t_index", 5); \
@@ -319,7 +320,7 @@ void simple_backward_out_cpu(const scalar_t *entity, const scalar_t *relation,
         \
         Tensor score = at::empty(h_index.sizes(), entity.options()); \
         \
-        AT_DISPATCH_FLOATING_TYPES(entity.scalar_type(), fn_name, [&] { \
+        AT_DISPATCH_FLOATING_TYPES(entity.scalar_type(), #NAME"_forward_cpu", [&] { \
             NAME##_forward_out_cpu<scalar_t>( \
                 entity.data_ptr<scalar_t>(), relation.data_ptr<scalar_t>(), \
                 h_index.data_ptr<int64_t>(), t_index.data_ptr<int64_t>(), r_index.data_ptr<int64_t>(), \
@@ -359,7 +360,7 @@ void simple_backward_out_cpu(const scalar_t *entity, const scalar_t *relation,
         Tensor entity_grad = at::zeros_like(entity); \
         Tensor relation_grad = at::zeros_like(relation); \
         \
-        AT_DISPATCH_FLOATING_TYPES(entity.scalar_type(), fn_name, [&] { \
+        AT_DISPATCH_FLOATING_TYPES(entity.scalar_type(), #NAME"_backward_cpu", [&] { \
             NAME##_backward_out_cpu<scalar_t>( \
                 entity.data_ptr<scalar_t>(), relation.data_ptr<scalar_t>(), \
                 h_index.data_ptr<int64_t>(), t_index.data_ptr<int64_t>(), r_index.data_ptr<int64_t>(), \
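Two patterns recur in this and the following extension files. First, the header-level `kEpsilon` constant (removed from embedding.h below) becomes a function-local constant in each translation unit that needs it. Second, the `AT_DISPATCH_FLOATING_TYPES` calls stop referencing the `fn_name` local from inside the dispatch lambda and spell out the string literal instead. The commit does not state a motivation; a plausible reading, offered only as an assumption, is that some MSVC versions mishandle a `constexpr` local referenced inside a lambda generated by the same macro expansion. A hypothetical, standalone reduction of that pattern (not TorchDrug code):

```cpp
// Hypothetical reduction of the pattern; DECLARE_IMPL and its body are our
// illustration, not TorchDrug code. The assumption: referencing the constexpr
// local fn_name inside the macro-generated lambda trips some MSVC builds, so
// the string literal is repeated at the call site instead.
#include <cstdio>

#define DECLARE_IMPL(NAME)                                               \
    void NAME##_forward_cpu() {                                          \
        constexpr const char *fn_name = #NAME "_forward_cpu";            \
        std::printf("checking arguments for %s\n", fn_name);             \
        auto body = [&] {                                                \
            /* was: fn_name; now the literal is spelled out directly */  \
            std::printf("dispatching %s\n", #NAME "_forward_cpu");       \
        };                                                               \
        body();                                                          \
    }

DECLARE_IMPL(rotate)

int main() {
    rotate_forward_cpu();  // prints the checking and dispatching messages
    return 0;
}
```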

torchdrug/layers/functional/extension/embedding.cu (+4 -3)

@@ -218,6 +218,7 @@ void rotate_backward_out_cuda(const scalar_t *entity, const scalar_t *relation,
                               const int64_t *h_index, const int64_t *t_index, const int64_t *r_index,
                               const scalar_t *score_grad, scalar_t *entity_grad, scalar_t *relation_grad,
                               int64_t num_entity, int64_t num_relation, int64_t embedding_dim, int64_t num_sample) {
+    const float kEpsilon = 1e-15; // 1e-15 from GraphVite
     const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
     const int lane_id = thread_id % warpSize;
     const int num_thread = gridDim.x * blockDim.x;
@@ -313,7 +314,7 @@ void simple_backward_out_cuda(const scalar_t *entity, const scalar_t *relation,
 #define DECLARE_FORWARD_IMPL(NAME) \
     Tensor NAME##_forward_cuda(const Tensor &entity_, const Tensor &relation_, const Tensor &h_index_, \
                                const Tensor &t_index_, const Tensor &r_index_) { \
-        constexpr const char *fn_name = #NAME"_forward_cuda"; \
+        constexpr const char *fn_name = #NAME"_forward_cuda"; \
         TensorArg entity_arg(entity_, "entity", 1), relation_arg(relation_, "relation", 2), \
                   h_index_arg(h_index_, "h_index", 3), r_index_arg(r_index_, "r_index", 4), \
                   t_index_arg(t_index_, "t_index", 5); \
@@ -353,7 +354,7 @@ void simple_backward_out_cuda(const scalar_t *entity, const scalar_t *relation,
     std::tuple<Tensor, Tensor> NAME##_backward_cuda( \
         const Tensor &entity_, const Tensor &relation_, const Tensor &h_index_, \
         const Tensor &t_index_, const Tensor &r_index_, const Tensor &score_grad_) { \
-        constexpr const char *fn_name = #NAME"_backward_cuda"; \
+        constexpr const char *fn_name = #NAME"_backward_cuda"; \
         TensorArg entity_arg(entity_, "entity", 1), relation_arg(relation_, "relation", 2), \
                   h_index_arg(h_index_, "h_index", 3), r_index_arg(r_index_, "r_index", 4), \
                   t_index_arg(t_index_, "t_index", 5), score_grad_arg(score_grad_, "score_grad", 6); \
@@ -384,7 +385,7 @@ void simple_backward_out_cuda(const scalar_t *entity, const scalar_t *relation,
         NAME##_backward_out_cuda<scalar_t><<<4096, 512, 0, stream>>>( \
             entity.data_ptr<scalar_t>(), relation.data_ptr<scalar_t>(), \
             h_index.data_ptr<int64_t>(), t_index.data_ptr<int64_t>(), r_index.data_ptr<int64_t>(), \
-            score_grad.data_ptr<scalar_t>(), \
+            score_grad.data_ptr<scalar_t>(), \
             entity_grad.data_ptr<scalar_t>(), relation_grad.data_ptr<scalar_t>(), \
             num_entity, num_relation, embedding_dim, num_sample \
         ); \

torchdrug/layers/functional/extension/embedding.h (-2)

@@ -6,8 +6,6 @@
 
 namespace at {
 
-const float kEpsilon = 1e-15; // 1e-15 from graphvite
-
 void embedding_forward_check(CheckedFrom c, const TensorArg &entity_arg, const TensorArg &relation_arg,
                              const TensorArg &h_index_arg, const TensorArg &t_index_arg, const TensorArg &r_index_arg);
 

torchdrug/layers/functional/extension/rspmm.cpp (+2 -2)

@@ -154,7 +154,7 @@ Tensor rspmm_forward_cpu(const SparseTensor &sparse, const Tensor &relation_, co
     Tensor layer_ind = std::get<2>(csr);
     Tensor value = std::get<3>(csr);
 
-    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
+    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "rspmm_forward_cpu", [&] {
         rspmm_forward_out_cpu<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>(
             row_ptr.data_ptr<int64_t>(),
             col_ind.data_ptr<int64_t>(),
@@ -202,7 +202,7 @@ std::tuple<SparseTensor, Tensor, Tensor> rspmm_backward_cpu(
     std::vector<std::mutex> relation_mutex(relation.numel());
     std::vector<std::mutex> input_mutex(input.numel());
 
-    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
+    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "rspmm_backward_cpu", [&] {
         rspmm_backward_out_cpu<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>(
             row_ptr.data_ptr<int64_t>(),
             col_ind.data_ptr<int64_t>(),

torchdrug/layers/functional/extension/rspmm.cu (+3 -3)

@@ -243,7 +243,7 @@ Tensor rspmm_forward_cuda(const SparseTensor &sparse, const Tensor &relation_, c
     const int row_per_block = kThreadPerBlock / dim_per_block;
     const int num_row_block = (num_row + row_per_block - 1) / row_per_block;
 
-    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
+    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "rspmm_forward_cuda", [&] {
         const int memory_size = kThreadPerBlock * (sizeof(int64_t) * 2 + sizeof(scalar_t));
         rspmm_forward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>
             <<<dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream>>>(
@@ -300,7 +300,7 @@ std::tuple<SparseTensor, Tensor, Tensor> rspmm_backward_cuda(
     const int num_row_block = (num_row + row_per_block - 1) / row_per_block;
 
     if (sparse.requires_grad())
-        AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
+        AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "rspmm_backward_cuda", [&] {
            const int memory_size = kThreadPerBlock * (sizeof(int64_t) * 2 + sizeof(scalar_t));
            rspmm_backward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>
                <<<dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream>>>(
@@ -319,7 +319,7 @@ std::tuple<SparseTensor, Tensor, Tensor> rspmm_backward_cuda(
            );
         });
     else
-        AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
+        AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "rspmm_backward_cuda", [&] {
            const int memory_size = kThreadPerBlock * (sizeof(int64_t) * 2 + sizeof(scalar_t));
            rspmm_backward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>
                <<<dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream>>>(

torchdrug/layers/functional/extension/spmm.cpp (+2 -2)

@@ -133,7 +133,7 @@ Tensor spmm_forward_cpu(const SparseTensor &sparse, const Tensor &input_) {
     Tensor col_ind = std::get<1>(csr);
     Tensor value = std::get<2>(csr);
 
-    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
+    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "spmm_forward_cpu", [&] {
         spmm_forward_out_cpu<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>(
             row_ptr.data_ptr<int64_t>(),
             col_ind.data_ptr<int64_t>(),
@@ -174,7 +174,7 @@ std::tuple<SparseTensor, Tensor> spmm_backward_cpu(
     Tensor value = std::get<2>(csr).contiguous();
     std::vector<std::mutex> mutex(input.numel());
 
-    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
+    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "spmm_backward_cpu", [&] {
         spmm_backward_out_cpu<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>(
             row_ptr.data_ptr<int64_t>(),
             col_ind.data_ptr<int64_t>(),

torchdrug/layers/functional/extension/spmm.cu (+3 -3)

@@ -216,7 +216,7 @@ Tensor spmm_forward_cuda(const SparseTensor &sparse, const Tensor &input_) {
     const int row_per_block = kThreadPerBlock / dim_per_block;
     const int num_row_block = (num_row + row_per_block - 1) / row_per_block;
 
-    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
+    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "spmm_forward_cuda", [&] {
         const int memory_size = kThreadPerBlock * (sizeof(int64_t) + sizeof(scalar_t));
         spmm_forward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>
             <<<dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream>>>(
@@ -267,7 +267,7 @@ std::tuple<SparseTensor, Tensor> spmm_backward_cuda(
     const int num_row_block = (num_row + row_per_block - 1) / row_per_block;
 
     if (sparse.requires_grad())
-        AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
+        AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "spmm_backward_cuda", [&] {
            const int memory_size = kThreadPerBlock * (sizeof(int64_t) + sizeof(scalar_t));
            spmm_backward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>
                <<<dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream>>>(
@@ -283,7 +283,7 @@ std::tuple<SparseTensor, Tensor> spmm_backward_cuda(
            );
         });
     else
-        AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
+        AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "spmm_backward_cuda", [&] {
            const int memory_size = kThreadPerBlock * (sizeof(int64_t) + sizeof(scalar_t));
            spmm_backward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>
                <<<dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream>>>(
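The rspmm and spmm changes are all the same substitution: the name argument of `AT_DISPATCH_FLOATING_TYPES` becomes a string literal rather than the `fn_name` local captured by the lambda. This argument is only used to label dispatch error messages, so behavior is unchanged. For orientation, a self-contained sketch of the call shape; `double_values` is a hypothetical example op, not from this repository:

```cpp
// Self-contained sketch of the AT_DISPATCH_FLOATING_TYPES call shape used
// throughout these files; double_values is a hypothetical example op. The
// macro defines scalar_t inside the lambda for the tensor's concrete dtype,
// and its string argument only appears in dispatch error messages.
#include <torch/torch.h>

torch::Tensor double_values(const torch::Tensor &input_) {
    torch::Tensor input = input_.contiguous();  // ensure flat iteration is valid
    torch::Tensor output = torch::empty_like(input);
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "double_values", [&] {
        const scalar_t *src = input.data_ptr<scalar_t>();
        scalar_t *dst = output.data_ptr<scalar_t>();
        for (int64_t i = 0; i < input.numel(); i++)
            dst[i] = src[i] * static_cast<scalar_t>(2);  // double each element
    });
    return output;
}
```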
