
Commit 0f6efd8

path
1 parent c92a7c2 commit 0f6efd8

File tree

17 files changed (+279, -339 lines)


.gitignore

Lines changed: 3 additions & 0 deletions
@@ -6,5 +6,8 @@ build/
 data/
 docs/src/
 docs/html/
+encoding/lib/
 encoding/_ext/
 encoding.egg-info/
+experiments/recognition/
+experiments/segmentation/

LICENSE

Lines changed: 2 additions & 2 deletions
@@ -9,8 +9,8 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:
 
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
 
 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

build.py

Lines changed: 6 additions & 3 deletions
@@ -16,6 +16,7 @@
 
 lib_path = os.path.join(os.path.dirname(torch.__file__), 'lib')
 cwd = os.path.dirname(os.path.realpath(__file__))
+encoding_lib_path = os.path.join(cwd, "encoding", "lib")
 
 # clean the build files
 clean_cmd = ['bash', 'clean.sh']
@@ -26,11 +27,12 @@
 if platform.system() == 'Darwin':
     os.environ['TH_LIBRARIES'] = os.path.join(lib_path,'libTH.1.dylib')
     os.environ['THC_LIBRARIES'] = os.path.join(lib_path,'libTHC.1.dylib')
-    ENCODING_LIB = os.path.join(lib_path, 'libENCODING.dylib')
+    ENCODING_LIB = os.path.join(cwd, 'encoding/lib/libENCODING.dylib')
+
 else:
     os.environ['TH_LIBRARIES'] = os.path.join(lib_path,'libTH.so.1')
     os.environ['THC_LIBRARIES'] = os.path.join(lib_path,'libTHC.so.1')
-    ENCODING_LIB = os.path.join(lib_path, 'libENCODING.so')
+    ENCODING_LIB = os.path.join(cwd, 'encoding/lib/libENCODING.so')
 
 build_all_cmd = ['bash', 'encoding/make.sh']
 subprocess.check_call(build_all_cmd, env=dict(os.environ))
@@ -44,7 +46,7 @@
 with_cuda = True
 
 include_path = [os.path.join(lib_path, 'include'),
-                os.path.join(lib_path,'include/ENCODING'),
+                os.path.join(cwd,'encoding/kernel'),
                 os.path.join(cwd,'encoding/kernel/include'),
                 os.path.join(cwd,'encoding/src/')]
 
@@ -65,6 +67,7 @@ def make_relative_rpath(path):
    include_dirs = include_path,
    extra_link_args = [
        make_relative_rpath(lib_path),
+       make_relative_rpath(encoding_lib_path),
        ENCODING_LIB,
    ],
)
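
Note on the change above: ENCODING_LIB now points at the freshly built library under encoding/lib, and an extra make_relative_rpath(encoding_lib_path) link argument tells the extension where to find it at import time. As a rough sketch of what such an rpath helper returns (an illustration of the linker flag involved, not necessarily this repository's exact implementation):

    def make_relative_rpath(path):
        # Illustrative only: embed `path` in the extension's runtime library
        # search path so libENCODING.{so,dylib} is found when the module loads.
        return '-Wl,-rpath,' + path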

clean.sh

Lines changed: 1 addition & 1 deletion
@@ -1,3 +1,3 @@
 #!/usr/bin/env bash
 
-rm -rf build/ dist/ encoding.egg-info/ encoding/build/ encoding/_ext/ __pycache__ encoding/__pycache__
+rm -rf build/ dist/ encoding.egg-info/ encoding/lib/ encoding/_ext/ __pycache__ encoding/__pycache__

docs/source/dilated.rst

Lines changed: 5 additions & 3 deletions
@@ -4,11 +4,13 @@
 Dilated Networks
 ================
 
-We provide correct dilated pre-trained ResNet and DenseNet for semantic segmentation.
-For dilation of ResNet, we replace the stride of 2 Conv3x3 at begining of certain stage and update the dilation of the conv layers afterwards.
-For dilation of DenseNet, we provide :class:`encoding.nn.DilatedAvgPool2d` that handles the dilation of the transition layers, then update the dilation of the conv layers afterwards.
+We provide correct dilated pre-trained ResNet and DenseNet (stride of 8) for semantic segmentation.
+For dilation of DenseNet, we provide :class:`encoding.nn.DilatedAvgPool2d`.
 All provided models have been verified.
 
+.. note::
+
+    This code is provided together with the paper (coming soon), please cite our work.
 
 .. automodule:: encoding.dilated
 .. currentmodule:: encoding.dilated
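
For readers of the updated docs: the dilated backbones keep an output stride of 8, which is what dense prediction heads expect. A minimal usage sketch, assuming encoding.dilated exposes torchvision-style constructors such as resnet50 with a pretrained flag (check the generated API listing for the exact names):

    import torch
    import encoding

    # Hypothetical constructor name; see the encoding.dilated API listing.
    model = encoding.dilated.resnet50(pretrained=True).eval()
    x = torch.randn(1, 3, 224, 224)   # dummy image batch
    out = model(x)                    # dilation keeps intermediate feature maps at 1/8 resolution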

docs/source/notes/compile.rst

Lines changed: 1 addition & 3 deletions
@@ -5,9 +5,7 @@ Installing PyTorch-Encoding
 Install from Source
 -------------------
 
-* Please follow the `PyTorch instructions <https://github.com/pytorch/pytorch#from-source>`_ to install PyTorch from Source to the ``$HOME`` directory (recommended). Or you can simply clone a copy to ``$HOME`` directory::
-
-    git clone https://github.com/pytorch/pytorch $HOME/pytorch
+* Install PyTorch from Source (recommended). Please follow the `PyTorch instructions <https://github.com/pytorch/pytorch#from-source>`_.
 
 * Install this package
 

docs/source/utils.rst

Lines changed: 2 additions & 2 deletions
@@ -9,10 +9,10 @@ Useful util functions.
 .. automodule:: encoding.utils
 .. currentmodule:: encoding.utils
 
-:hidden:`CosLR_Scheduler`
+:hidden:`LR_Scheduler`
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. autoclass:: CosLR_Scheduler
+.. autoclass:: LR_Scheduler
    :members:
 
 :hidden:`get_optimizer`
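
The rename from CosLR_Scheduler to LR_Scheduler suggests the scheduler is no longer cosine-only; the cosine policy the old name referred to is easy to state. A self-contained illustration of cosine annealing (not the class's actual interface, which the autoclass entry above documents):

    import math

    def cosine_lr(base_lr, epoch, num_epochs):
        # Decay the learning rate from base_lr to 0 along a half cosine.
        return 0.5 * base_lr * (1.0 + math.cos(math.pi * epoch / num_epochs))

For example, with base_lr = 0.1 and 120 epochs, the midpoint (epoch 60) gives 0.05.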

encoding/CMakeLists.txt

Lines changed: 1 addition & 6 deletions
@@ -75,9 +75,4 @@ IF(ENCODING_SO_VERSION)
     SOVERSION ${ENCODING_SO_VERSION})
 ENDIF(ENCODING_SO_VERSION)
 
-FILE(GLOB kernel-header kernel/generic/*.h)
-FILE(GLOB src-header src/generic/*.h)
-
-INSTALL(TARGETS ENCODING LIBRARY DESTINATION ${ENCODING_INSTALL_LIB_SUBDIR})
-INSTALL(FILES kernel/thc_encoding.h DESTINATION "${ENCODING_INSTALL_INCLUDE_SUBDIR}/ENCODING")
-INSTALL(FILES ${src-header} ${kernel-header} DESTINATION "${ENCODING_INSTALL_INCLUDE_SUBDIR}/ENCODING/generic")
+#INSTALL(TARGETS ENCODING LIBRARY DESTINATION ${ENCODING_INSTALL_LIB_SUBDIR})

encoding/functions/syncbn.py

Lines changed: 9 additions & 50 deletions
@@ -61,7 +61,11 @@ def sum_square(input):
     return _sum_square()(input)
 
 
-class _batchnormtrain(Function):
+class _batchnorm(Function):
+    def __init__(self, training=False):
+        super(_batchnorm, self).__init__()
+        self.training = training
+
     def forward(ctx, input, gamma, beta, mean, std):
         ctx.save_for_backward(input, gamma, beta, mean, std)
         assert(input.dim()==3)
@@ -95,13 +99,13 @@ def backward(ctx, gradOutput):
                 encoding_lib.Encoding_Float_batchnorm_Backward(
                     gradOutput, input, gradInput, gradGamma, gradBeta,
                     mean, invstd, gamma, beta, gradMean, gradStd,
-                    True)
+                    self.training)
         elif isinstance(input, torch.cuda.DoubleTensor):
             with torch.cuda.device_of(input):
                 encoding_lib.Encoding_Double_batchnorm_Backward(
                     gradOutput, input, gradInput, gradGamma, gradBeta,
                     mean, invstd, gamma, beta, gradMean, gradStd,
-                    True)
+                    self.training)
         else:
             raise RuntimeError('Unimplemented data type!')
         return gradInput, gradGamma, gradBeta, gradMean, gradStd
@@ -122,52 +126,7 @@ def batchnormtrain(input, gamma, beta, mean, std):
     - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
 
     """
-    return _batchnormtrain()(input, gamma, beta, mean, std)
-
-
-class _batchnormeval(Function):
-    def forward(ctx, input, gamma, beta, mean, std):
-        ctx.save_for_backward(input, gamma, beta, mean, std)
-        assert(input.dim()==3)
-        with torch.cuda.device_of(input):
-            invstd = 1.0 / std
-            output = input.new().resize_as_(input)
-        if isinstance(input, torch.cuda.FloatTensor):
-            with torch.cuda.device_of(input):
-                encoding_lib.Encoding_Float_batchnorm_Forward(output,
-                    input, mean, invstd, gamma, beta)
-        elif isinstance(input, torch.cuda.DoubleTensor):
-            with torch.cuda.device_of(input):
-                encoding_lib.Encoding_Double_batchnorm_Forward(output,
-                    input, mean, invstd, gamma, beta)
-        else:
-            raise RuntimeError('Unimplemented data type!')
-        return output
-
-    def backward(ctx, gradOutput):
-        input, gamma, beta, mean, std = ctx.saved_tensors
-        invstd = 1.0 / std
-        with torch.cuda.device_of(input):
-            gradInput = gradOutput.new().resize_as_(input).zero_()
-            gradGamma = gradOutput.new().resize_as_(gamma).zero_()
-            gradBeta = gradOutput.new().resize_as_(beta).zero_()
-            gradMean = gradOutput.new().resize_as_(mean).zero_()
-            gradStd = gradOutput.new().resize_as_(std).zero_()
-        if isinstance(input, torch.cuda.FloatTensor):
-            with torch.cuda.device_of(input):
-                encoding_lib.Encoding_Float_batchnorm_Backward(
-                    gradOutput, input, gradInput, gradGamma, gradBeta,
-                    mean, invstd, gamma, beta, gradMean, gradStd,
-                    False)
-        elif isinstance(input, torch.cuda.DoubleTensor):
-            with torch.cuda.device_of(input):
-                encoding_lib.Encoding_Double_batchnorm_Backward(
-                    gradOutput, input, gradInput, gradGamma, gradBeta,
-                    mean, invstd, gamma, beta, gradMean, gradStd,
-                    False)
-        else:
-            raise RuntimeError('Unimplemented data type!')
-        return gradInput, gradGamma, gradBeta, gradMean, gradStd
+    return _batchnorm(True)(input, gamma, beta, mean, std)
 
 
 def batchnormeval(input, gamma, beta, mean, std):
@@ -176,4 +135,4 @@ def batchnormeval(input, gamma, beta, mean, std):
 
     Please see encoding.batchnormtrain_
     """
-    return _batchnormeval()(input, gamma, beta, mean, std)
+    return _batchnorm(False)(input, gamma, beta, mean, std)
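
The refactor above merges _batchnormtrain and _batchnormeval into a single _batchnorm Function parameterized by a training flag, which is simply forwarded to the backward kernel; batchnormtrain and batchnormeval now differ only in that flag. As a rough pure-PyTorch reference for what the Forward kernel is asked to compute, inferred from its argument list rather than from the CUDA source:

    import torch

    def batchnorm_forward_reference(x, gamma, beta, mean, std):
        # Assumed semantics of Encoding_*_batchnorm_Forward: normalize the
        # (N, C, L) input with the given per-channel mean/std, then apply the
        # per-channel affine transform gamma * x_hat + beta.
        invstd = 1.0 / std
        x_hat = (x - mean.view(1, -1, 1)) * invstd.view(1, -1, 1)
        return x_hat * gamma.view(1, -1, 1) + beta.view(1, -1, 1)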

encoding/make.sh

Lines changed: 2 additions & 3 deletions
@@ -1,7 +1,6 @@
 #!/usr/bin/env bash
 
-mkdir -p encoding/build && cd encoding/build
+mkdir -p encoding/lib && cd encoding/lib
 # compile and install
 cmake ..
-make install
-cd ..
+make
