pylearn2/datasets/hdf5.py: 19 changes (9 additions, 10 deletions)
@@ -15,6 +15,7 @@
     import tables
 except ImportError:
     tables = None
+import numpy as np
 import warnings
 from os.path import isfile
 from pylearn2.compat import OrderedDict
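Note: `numpy` is imported here because the reworked `get` in the last hunk of this diff now returns `np.array` values instead of raw lists.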
@@ -86,9 +87,7 @@ def __new__(cls, filename, X=None, topo_view=None, y=None, load_all=False,
             return HDF5DatasetDeprecated(filename, X, topo_view, y, load_all,
                                          cache_size, **kwargs)
         else:
-            return super(HDF5Dataset, cls).__new__(
-                cls, filename, sources, spaces, aliases, load_all, cache_size,
-                use_h5py, **kwargs)
+            return super(HDF5Dataset, cls).__new__(cls)

     def __init__(self, filename, sources, spaces, aliases=None, load_all=False,
                  cache_size=None, use_h5py='auto', **kwargs):
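Note on the fix: `object.__new__` raises a TypeError when it receives extra positional arguments and `__new__` has been overridden (Python 2.6+ warned about this; Python 3 makes it an error), so passing `cls` alone is the correct call. A minimal standalone sketch, using a hypothetical class that is not part of pylearn2:

class Child(object):
    def __new__(cls, name, value):
        # The old pattern forwarded the constructor args and fails with
        # "TypeError: object.__new__() takes exactly one argument":
        #     return super(Child, cls).__new__(cls, name, value)
        return super(Child, cls).__new__(cls)  # pass cls only

    def __init__(self, name, value):
        # __init__ still receives every argument as usual.
        self.name = name
        self.value = value

c = Child('features', 1)  # constructs fine with the fixed __new__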
@@ -204,7 +203,7 @@ def iterator(self, mode=None, data_specs=None, batch_size=None,
             provided when the dataset object has been created will be used.
         """
         if data_specs is None:
-            data_specs = (self._get_sources, self._get_spaces)
+            data_specs = (self._get_spaces(), self._get_sources())

         [mode, batch_size, num_batches, rng, data_specs] = self._init_iterator(
             mode, batch_size, num_batches, rng, data_specs)
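This line had two bugs: the methods were referenced but never called (the tuple held two bound-method objects), and the order was reversed relative to pylearn2's data_specs convention of (space, source). A hedged illustration with a hypothetical stand-in class:

class Demo(object):
    def _get_sources(self):
        return ('features', 'targets')

    def _get_spaces(self):
        return ('space_f', 'space_t')

d = Demo()
old = (d._get_sources, d._get_spaces)       # bound methods, wrong order
new = (d._get_spaces(), d._get_sources())   # values, (spaces, sources)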
@@ -240,7 +239,7 @@ def _get_spaces(self):
         -------
         A Space or a list of Spaces.
         """
-        space = [self.spaces[s] for s in self._get_sources]
+        space = [self.spaces[s] for s in self._get_sources()]
         return space[0] if len(space) == 1 else tuple(space)

     def get_data_specs(self, source_or_alias=None):
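Same bug family as in `iterator` above: `_get_sources` is a method, so iterating over the bare attribute raises a TypeError instead of yielding source names; the added call makes the `self.spaces` lookup work.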
@@ -310,16 +309,16 @@ def get(self, sources, indexes):
                     sources[s], *e.args))
             if (isinstance(indexes, (slice, py_integer_types)) or
                     len(indexes) == 1):
-                rval.append(sdata[indexes])
+                val = sdata[indexes]
             else:
                 warnings.warn('Accessing non sequential elements of an '
                               'HDF5 file will be at best VERY slow. Avoid '
                               'using iteration schemes that access '
                               'random/shuffled data with hdf5 datasets!!')
-                val = []
-                [val.append(sdata[idx]) for idx in indexes]
-                rval.append(val)
-        return tuple(rval)
+                val = [sdata[idx] for idx in indexes]
+                val = tuple(tuple(row) for row in val)
+            rval.append(val)
+        return tuple(np.array(v) for v in rval)

     @wraps(Dataset.get_num_examples, assigned=(), updated=())
     def get_num_examples(self, source_or_alias=None):
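The restructured branches now append to `rval` exactly once per source and convert every collected value to an ndarray on return, which is what the new numpy import supports. A self-contained sketch of the fast and slow paths (the file name and shapes are made up for illustration; actual timings depend on chunking and the OS cache):

import h5py
import numpy as np

# Build a toy dataset: 50 examples, 2 features.
with h5py.File('toy.h5', 'w') as f:
    f.create_dataset('X', data=np.arange(100).reshape(50, 2))

with h5py.File('toy.h5', 'r') as f:
    sdata = f['X']
    # Fast path: a slice maps to one contiguous HDF5 read.
    fast = np.array(sdata[10:20])
    # Slow path: each index is a separate low-level selection, which is
    # why the warning discourages shuffled iteration over HDF5 datasets.
    rows = [sdata[idx] for idx in (3, 41, 7)]
    slow = np.array([tuple(row) for row in rows])  # mirrors the re-packing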