Commit 837ff50

Merge pull request #177 from aykuznetsova/add-python2.6-compatibility
Add python2.6 compatibility
2 parents 7e0e7b1 + 16bfa99 commit 837ff50

18 files changed: +68 -57 lines
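
The bulk of the diff replaces syntax that Python 2.6 cannot parse. As a rough sketch of the recurring rewrites (the variable names below are illustrative, not taken from the diff):

    # set literals and set/dict comprehensions require Python 2.7+
    dims = set(['x', 'y'])                    # instead of {'x', 'y'}

    pairs = [('a', 1), ('b', 2)]
    lookup = dict((k, v) for k, v in pairs)   # instead of {k: v for k, v in pairs}
    keys = set(k for k, v in pairs if v > 1)  # instead of {k for k, v in pairs if v > 1}

    # collections.OrderedDict only exists from 2.7 on, so imports of it are
    # routed through xray.pycompat (see xray/pycompat.py below).

All of these evaluate to the same objects on 2.7+; the rewritten forms simply also parse on 2.6.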

.travis.yml (+5)

@@ -1,6 +1,7 @@
 # Based on https://github.com/Jorge-C/ordination/blob/master/.travis.yml
 language: python
 python:
+  - "2.6"
   - "2.7"
   - "3.3"
 notifications:
@@ -28,11 +29,15 @@ matrix:
       env: UPDATE_PYENV='pip install pydap'
     - python: "2.7"
      env: UPDATE_PYENV=''
+    - python: "2.6"
+      env: UPDATE_PYENV=''

 # Install packages
 install:
   - conda create --yes -n test_env python=$TRAVIS_PYTHON_VERSION pip nose mock numpy pandas scipy netCDF4
   - source activate test_env
+  # install unittest2 ONLY if running 2.6
+  - if [ ${TRAVIS_PYTHON_VERSION:0:3} == "2.6" ]; then pip install unittest2; fi
   - echo $UPDATE_PYENV; $UPDATE_PYENV
   - python setup.py install
 # Run test

setup.py (+3)

@@ -28,6 +28,7 @@
     'Intended Audience :: Science/Research',
     'Programming Language :: Python',
     'Programming Language :: Python :: 2',
+    'Programming Language :: Python :: 2.6',
     'Programming Language :: Python :: 2.7',
     'Programming Language :: Python :: 3',
     'Programming Language :: Python :: 3.3',
@@ -37,6 +38,8 @@
 INSTALL_REQUIRES = ['numpy >= 1.7', 'pandas >= 0.13.1']
 TESTS_REQUIRE = ['nose >= 1.0']

+if sys.version_info[:2] < (2, 7):
+    TESTS_REQUIRE += ["unittest2 == 0.5.1"]

 DESCRIPTION = "Extended arrays for working with scientific datasets in Python"
 LONG_DESCRIPTION = """
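
The version gate keeps the unittest2 backport out of 2.7+/3.x environments; presumably TESTS_REQUIRE is handed to the setup() call further down in setup.py. A generic sketch of that pattern (the package metadata below is hypothetical, not xray's):

    import sys
    from setuptools import setup

    TESTS_REQUIRE = ['nose >= 1.0']
    if sys.version_info[:2] < (2, 7):
        # unittest2 backports the 2.7 unittest API to 2.6
        TESTS_REQUIRE += ["unittest2 == 0.5.1"]

    setup(name='example-package',   # hypothetical metadata
          version='0.0.1',
          tests_require=TESTS_REQUIRE)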

test/__init__.py (+5, -2)

@@ -1,12 +1,15 @@
-import unittest
-
 import numpy as np
 from numpy.testing import assert_array_equal

 from xray import utils, DataArray
 from xray.variable import as_variable
 from xray.pycompat import PY3

+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
 try:
     import scipy
     has_scipy = True
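
With this alias in place, test classes written against unittest.TestCase pick up the 2.7-era assertion methods on Python 2.6 via unittest2. A minimal sketch of the pattern (the test case below is hypothetical, not part of xray's suite):

    try:
        import unittest2 as unittest  # Python 2.6: backport of the 2.7 unittest API
    except ImportError:
        import unittest               # Python 2.7+ and 3.x

    class ExampleTest(unittest.TestCase):  # hypothetical test case
        def test_membership(self):
            # assertIn comes from unittest2 on 2.6 and from stdlib unittest on 2.7+
            self.assertIn('2.6', ['2.6', '2.7', '3.3'])

    if __name__ == '__main__':
        unittest.main()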

test/test_backends.py (+2, -2)

@@ -239,8 +239,8 @@ def test_open_encodings(self):
         actual = open_dataset(tmp_file)

         self.assertVariableEqual(actual['time'], expected['time'])
-        actual_encoding = {k: v for k, v in iteritems(actual['time'].encoding)
-                           if k in expected['time'].encoding}
+        actual_encoding = dict((k, v) for k, v in iteritems(actual['time'].encoding)
+                               if k in expected['time'].encoding)
         self.assertDictEqual(actual_encoding, expected['time'].encoding)

     def test_open_group(self):

test/test_data_array.py (+1, -2)

@@ -2,10 +2,9 @@
 import pandas as pd
 from copy import deepcopy
 from textwrap import dedent
-from collections import OrderedDict

 from xray import Dataset, DataArray, Index, Variable, align
-from xray.pycompat import iteritems
+from xray.pycompat import iteritems, OrderedDict
 from . import TestCase, ReturnItem, source_ndarray

test/test_dataset.py (+11, -12)

@@ -1,4 +1,3 @@
-from collections import OrderedDict
 from copy import copy, deepcopy
 from textwrap import dedent
 try:
@@ -11,7 +10,7 @@

 from xray import (Dataset, DataArray, Index, Variable,
                   backends, utils, align, indexing)
-from xray.pycompat import iteritems
+from xray.pycompat import iteritems, OrderedDict

 from . import TestCase

@@ -164,7 +163,7 @@ def test_indexes_properties(self):

         self.assertEquals(2, len(data.indexes))

-        self.assertEquals({'x', 'y'}, set(data.indexes))
+        self.assertEquals(set(['x', 'y']), set(data.indexes))

         self.assertVariableIdentical(data.indexes['x'], data['x'].variable)
         self.assertVariableIdentical(data.indexes['y'], data['y'].variable)
@@ -367,11 +366,11 @@ def test_drop_vars(self):

         self.assertEqual(data, data.drop_vars())

-        expected = Dataset({k: data[k] for k in data if k != 'time'})
+        expected = Dataset(dict((k, data[k]) for k in data if k != 'time'))
         actual = data.drop_vars('time')
         self.assertEqual(expected, actual)

-        expected = Dataset({k: data[k] for k in ['dim2', 'dim3', 'time']})
+        expected = Dataset(dict((k, data[k]) for k in ['dim2', 'dim3', 'time']))
         actual = data.drop_vars('dim1')
         self.assertEqual(expected, actual)

@@ -510,20 +509,20 @@ def test_setitem(self):

     def test_delitem(self):
         data = create_test_data()
-        all_items = {'time', 'dim1', 'dim2', 'dim3', 'var1', 'var2', 'var3'}
+        all_items = set(['time', 'dim1', 'dim2', 'dim3', 'var1', 'var2', 'var3'])
         self.assertItemsEqual(data, all_items)
         del data['var1']
-        self.assertItemsEqual(data, all_items - {'var1'})
+        self.assertItemsEqual(data, all_items - set(['var1']))
         del data['dim1']
-        self.assertItemsEqual(data, {'time', 'dim2', 'dim3'})
+        self.assertItemsEqual(data, set(['time', 'dim2', 'dim3']))

     def test_squeeze(self):
         data = Dataset({'foo': (['x', 'y', 'z'], [[[1], [2]]])})
         for args in [[], [['x']], [['x', 'z']]]:
             def get_args(v):
                 return [set(args[0]) & set(v.dimensions)] if args else []
-            expected = Dataset({k: v.squeeze(*get_args(v))
-                                for k, v in iteritems(data.variables)})
+            expected = Dataset(dict((k, v.squeeze(*get_args(v)))
+                               for k, v in iteritems(data.variables)))
             self.assertDatasetIdentical(expected, data.squeeze(*args))
         # invalid squeeze
         with self.assertRaisesRegexp(ValueError, 'cannot select a dimension'):
@@ -595,8 +594,8 @@ def test_concat(self):
         def rectify_dim_order(dataset):
             # return a new dataset with all variable dimensions tranposed into
             # the order in which they are found in `data`
-            return Dataset({k: v.transpose(*data[k].dimensions)
-                            for k, v in iteritems(dataset.variables)},
+            return Dataset(dict((k, v.transpose(*data[k].dimensions))
+                           for k, v in iteritems(dataset.variables)),
                            dataset.attrs)

         for dim in ['dim1', 'dim2', 'dim3']:

test/test_utils.py (+1, -1)

@@ -1,8 +1,8 @@
-from collections import OrderedDict
 import numpy as np
 import pandas as pd

 from xray import utils
+from xray.pycompat import OrderedDict
 from . import TestCase

test/test_variable.py (+2, -2)

@@ -1,4 +1,4 @@
-from collections import namedtuple, OrderedDict
+from collections import namedtuple
 from copy import copy, deepcopy
 from datetime import datetime
 from textwrap import dedent
@@ -9,7 +9,7 @@
 from xray import Variable, Dataset, DataArray, indexing
 from xray.variable import (Index, as_variable, NumpyArrayAdapter,
                            PandasIndexAdapter, _as_compatible_data)
-from xray.pycompat import PY3
+from xray.pycompat import PY3, OrderedDict

 from . import TestCase, source_ndarray

xray/backends/memory.py (+1, -1)

@@ -1,4 +1,4 @@
-from collections import OrderedDict
+from xray.pycompat import OrderedDict

 from .common import AbstractWritableDataStore

xray/backends/netCDF4_.py (+1, -2)

@@ -1,4 +1,3 @@
-from collections import OrderedDict
 import warnings

 import numpy as np
@@ -9,7 +8,7 @@
 from xray.conventions import encode_cf_variable
 from xray.utils import FrozenOrderedDict, NDArrayMixin
 from xray import indexing
-from xray.pycompat import iteritems, basestring, bytes_type
+from xray.pycompat import iteritems, basestring, bytes_type, OrderedDict


 class NetCDF4ArrayWrapper(NDArrayMixin):

xray/backends/scipy_.py (+1, -2)

@@ -1,4 +1,3 @@
-from collections import OrderedDict
 try: # Python 2
     from cStringIO import StringIO as BytesIO
 except ImportError: # Python 3
@@ -9,7 +8,7 @@
 import xray
 from xray.backends.common import AbstractWritableDataStore
 from xray.utils import Frozen
-from xray.pycompat import iteritems, basestring, unicode_type
+from xray.pycompat import iteritems, basestring, unicode_type, OrderedDict

 from .. import conventions
 from .netcdf3 import is_valid_nc3_name, coerce_nc3_dtype, encode_nc3_variable

xray/conventions.py (+3, -3)

@@ -1,16 +1,16 @@
 import numpy as np
 import pandas as pd
 import warnings
-from collections import defaultdict, OrderedDict
+from collections import defaultdict
 from datetime import datetime

 from . import indexing
 from . import utils
-from .pycompat import iteritems, bytes_type, unicode_type
+from .pycompat import iteritems, bytes_type, unicode_type, OrderedDict
 import xray

 # standard calendars recognized by netcdftime
-_STANDARD_CALENDARS = {'standard', 'gregorian', 'proleptic_gregorian'}
+_STANDARD_CALENDARS = set(['standard', 'gregorian', 'proleptic_gregorian'])


 def mask_and_scale(array, fill_value=None, scale_factor=None, add_offset=None,

xray/data_array.py (+9, -7)

@@ -1,7 +1,7 @@
 import functools
 import operator
 import warnings
-from collections import defaultdict, OrderedDict
+from collections import defaultdict

 import numpy as np
 import pandas as pd
@@ -14,7 +14,7 @@
 from . import variable
 from .common import AbstractArray, AbstractIndexes
 from .utils import multi_index_from_product
-from .pycompat import iteritems, basestring
+from .pycompat import iteritems, basestring, OrderedDict


 def _is_dict_like(value):
@@ -639,8 +639,8 @@ def reduce(self, func, dimension=None, axis=None, keep_attrs=False,
         # For now, take an aggressive strategy of removing all variables
         # associated with any dropped dimensions
         # TODO: save some summary (mean? bounds?) of dropped variables
-        drop |= {k for k, v in iteritems(self.dataset.variables)
-                 if any(dim in drop for dim in v.dimensions)}
+        drop |= set(k for k, v in iteritems(self.dataset.variables)
+                    if any(dim in drop for dim in v.dimensions))
         ds = self.dataset.drop_vars(*drop)
         ds[self.name] = var

@@ -702,7 +702,9 @@ def concat(cls, arrays, dimension='concat_dimension', indexers=None,
             datasets.append(arr.dataset)
         if concat_over is None:
             concat_over = set()
-        concat_over = set(concat_over) | {name}
+        elif isinstance(concat_over, basestring):
+            concat_over = set([concat_over])
+        concat_over = set(concat_over) | set([name])
         ds = xray.Dataset.concat(datasets, dimension, indexers,
                                  concat_over=concat_over)
         return ds[name]
@@ -897,7 +899,7 @@ def align(*objects, **kwargs):

     # Exclude dimensions with all equal indices to avoid unnecessary reindexing
     # work.
-    joined_indexes = {k: join_indices(v) for k, v in iteritems(all_indexes)
-                      if any(not v[0].equals(idx) for idx in v[1:])}
+    joined_indexes = dict((k, join_indices(v)) for k, v in iteritems(all_indexes)
+                          if any(not v[0].equals(idx) for idx in v[1:]))

     return tuple(obj.reindex(copy=copy, **joined_indexes) for obj in objects)
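
The new isinstance(concat_over, basestring) branch in DataArray.concat mirrors the existing handling in Dataset.concat below: without it, a single variable name passed as a string would be split into its characters by set(). A quick illustration of why the wrapping matters:

    # why a bare string must be wrapped before calling set() (illustrative values)
    name = 'time'
    print(set(name))    # -> the characters 't', 'i', 'm', 'e'
    print(set([name]))  # -> the intended one-element set containing 'time'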

xray/dataset.py (+12, -13)

@@ -5,7 +5,7 @@
     from cStringIO import StringIO as BytesIO
 except ImportError: # Python 3
     from io import BytesIO
-from collections import OrderedDict, Mapping
+from collections import Mapping

 from . import backends
 from . import conventions
@@ -18,7 +18,7 @@
 from . import ops
 from .utils import (FrozenOrderedDict, Frozen, SortedKeysDict, ChainMap,
                     multi_index_from_product)
-from .pycompat import iteritems, itervalues, basestring
+from .pycompat import iteritems, itervalues, basestring, OrderedDict


 def open_dataset(nc, decode_cf=True, mask_and_scale=True, decode_times=True,
@@ -623,13 +623,12 @@ def isel(self, **indexers):
             raise ValueError("dimensions %r do not exist" % invalid)

         # all indexers should be int, slice or np.ndarrays
-        indexers = {k: np.asarray(v) if not isinstance(v, (int, np.integer, slice)) else v
-                    for k, v in iteritems(indexers)}
+        indexers = dict((k, np.asarray(v) if not isinstance(v, (int, np.integer, slice)) else v)
+                        for k, v in iteritems(indexers))

         variables = OrderedDict()
         for name, var in iteritems(self.variables):
-            var_indexers = {k: v for k, v in iteritems(indexers)
-                            if k in var.dimensions}
+            var_indexers = dict((k, v) for k, v in iteritems(indexers) if k in var.dimensions)
             variables[name] = var.isel(**var_indexers)
         return type(self)(variables, self.attrs)

@@ -923,11 +922,11 @@ def merge(self, other, inplace=False, overwrite_vars=set(),
             potential_conflicts = self.variables
         else:
             if isinstance(overwrite_vars, basestring):
-                overwrite_vars = {overwrite_vars}
+                overwrite_vars = set([overwrite_vars])
             else:
                 overwrite_vars = set(overwrite_vars)
-            potential_conflicts = {k: v for k, v in iteritems(self.variables)
-                                   if k not in overwrite_vars}
+            potential_conflicts = dict((k, v) for k, v in iteritems(self.variables)
+                                       if k not in overwrite_vars)

         # update variables
         new_variables = _expand_variables(other_variables, potential_conflicts,
@@ -975,8 +974,8 @@ def drop_vars(self, *names):
             raise ValueError('One or more of the specified variable '
                              'names does not exist in this dataset')
         drop = set(names)
-        drop |= {k for k, v in iteritems(self.variables)
-                 if any(name in v.dimensions for name in names)}
+        drop |= set(k for k, v in iteritems(self.variables)
+                    if any(name in v.dimensions for name in names))
         variables = OrderedDict((k, v) for k, v in iteritems(self.variables)
                                 if k not in drop)
         return type(self)(variables, self.attrs)
@@ -1150,7 +1149,7 @@ def concat(cls, datasets, dimension='concat_dimension', indexers=None,
         if concat_over is None:
             concat_over = set()
         elif isinstance(concat_over, basestring):
-            concat_over = {concat_over}
+            concat_over = set([concat_over])
         else:
             concat_over = set(concat_over)

@@ -1180,7 +1179,7 @@ def differs(vname, v):
                              % (concat_over, datasets[0]))

         # automatically concatenate over variables along the dimension
-        auto_concat_dims = {dim_name}
+        auto_concat_dims = set([dim_name])
         if hasattr(dimension, 'dimensions'):
             auto_concat_dims |= set(dimension.dimensions)
         for k, v in iteritems(datasets[0]):

xray/indexing.py (+2, -3)

@@ -129,9 +129,8 @@ def remap_label_indexers(data_obj, indexers):
     """Given an xray data object and label based indexers, return a mapping
     of equivalent location based indexers.
     """
-    return {dim: convert_label_indexer(data_obj.indexes[dim], label, dim)
-            for dim, label in iteritems(indexers)}
-
+    return dict((dim, convert_label_indexer(data_obj.indexes[dim], label, dim))
+                for dim, label in iteritems(indexers))

 def _expand_slice(slice_, size):
     return np.arange(*slice_.indices(size))

xray/pycompat.py (+5)

@@ -11,6 +11,7 @@ def iteritems(d):
     def itervalues(d):
         return iter(d.values())
     xrange = range
+    from collections import OrderedDict
 else:
     # Python 2
     basestring = basestring
@@ -21,3 +22,7 @@ def iteritems(d):
     def itervalues(d):
         return d.itervalues()
     xrange = xrange
+    try:
+        from collections import OrderedDict
+    except ImportError:
+        from ordereddict import OrderedDict
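
With these fallbacks, the rest of the code base imports OrderedDict from a single place; on 2.6 this relies on the third-party ordereddict package being installed. A minimal usage sketch (assuming xray is importable):

    # the same import works on 2.6 (via the ordereddict backport), 2.7, and 3.x
    from xray.pycompat import OrderedDict

    d = OrderedDict([('time', 0), ('dim1', 1)])
    print(list(d))  # insertion order preserved: ['time', 'dim1']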
