Skip to content

hdf_io

Reads neurons from HDF5 files.

Source code in navis/io/hdf_io.py
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
class BaseH5Reader(ABC):
    """Abstract base class for reading neurons from HDF5 files.

    Subclasses must define a ``version`` class attribute and implement
    `list_neurons` and `read_neurons`.
    """

    def __init__(self, filepath: str):
        """Initialize.

        Parameters
        ----------
        filepath :  str
                    Path to HDF5 file.
        """
        self.filepath = filepath

    def __enter__(self):
        """Open file on enter."""
        self.f = h5py.File(self.filepath, 'r')
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Close file on exit."""
        self.f.close()

    def check_compatible(self):
        """Check if this reader is compatible with existing data.

        Raises
        ------
        ValueError
            If the file has no string format specifier or if its version
            does not match this reader's ``version``.
        """
        fmt = self.f.attrs.get('format_spec')
        if not isinstance(fmt, str):
            raise ValueError('No format specifier found for file '
                             f'{self.filepath}')
        elif fmt != f'hnf_v{self.version}':
            raise ValueError('Unexpected format specifier for file '
                             f'{self.filepath}: "{fmt}"')

    @abstractmethod
    def list_neurons(self):
        """List neurons in file."""
        pass

    @abstractmethod
    def read_neurons(self):
        """Read neurons from file."""
        pass

    def read_dataframe(self, group, subset=None, exclude=None,
                       include_attrs=False, skip_hidden=True):
        """Read datasets within a group into a single pandas DataFrame.

        Parameters
        ----------
        group :         h5py.Group
                        Group whose datasets become DataFrame columns.
        subset :        iterable, optional
                        If given, read only these datasets.
        exclude :       iterable, optional
                        Datasets to skip.
        include_attrs : bool
                        If True, copy the group's attributes into
                        ``DataFrame.attrs``.
        skip_hidden :   bool
                        If True, skip datasets whose name starts with ".".

        Returns
        -------
        pandas.DataFrame

        Raises
        ------
        ValueError
            If a dataset has more than two dimensions.
        """
        # List datasets contained in this group
        datasets = [k for k, v in group.items() if isinstance(v, h5py.Dataset)]

        # `x is None` instead of `isinstance(x, type(None))` (idiomatic)
        if subset is None:
            subset = datasets
        else:
            subset = utils.make_iterable(subset)

        if exclude is None:
            exclude = []
        else:
            exclude = utils.make_iterable(exclude)

        # Read the datasets
        df = pd.DataFrame()
        for col in datasets:
            if col not in subset or col in exclude:
                continue
            # Skip hidden datasets (e.g. serialized payloads)
            if col.startswith('.') and skip_hidden:
                continue

            # This is the dataset
            ds = group[col]

            # Easy-peasy if dataset has 1 dimension
            if ds.ndim == 1:
                df[col] = ds[:]
            # 2-dimensions should not happen but we will simply enumerate cols
            elif ds.ndim == 2:
                for i in range(ds.shape[1]):
                    df[f'{col}_{i}'] = ds[:, i]
            else:
                raise ValueError(f'Dataset {col} has more than two dimensions.')

        if include_attrs:
            for k in group.attrs:
                df.attrs[k] = group.attrs[k]

        return df

Initialize.

Source code in navis/io/hdf_io.py
33
34
35
def __init__(self, filepath: str):
    """Remember the path of the HDF5 file to be read."""
    self.filepath = filepath

Check if this reader is compatible with existing data.

Source code in navis/io/hdf_io.py
46
47
48
49
50
51
52
53
54
def check_compatible(self):
    """Verify the file's format spec matches this reader's version."""
    fmt = self.f.attrs.get('format_spec')
    expected = f'hnf_v{self.version}'
    # A missing or non-string spec means we cannot even tell the version
    if not isinstance(fmt, str):
        raise ValueError(f'No format specifier found for file {self.filepath}')
    if fmt != expected:
        raise ValueError(f'Unexpected format specifier for file {self.filepath}: "{fmt}"')

List neurons in file.

Source code in navis/io/hdf_io.py
56
57
58
59
@abstractmethod
def list_neurons(self):
    """Return the IDs of neurons contained in the file (subclass hook)."""
    pass

Read dataset within a group into a single pandas DataFrame.

Source code in navis/io/hdf_io.py
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
def read_dataframe(self, group, subset=None, exclude=None,
                   include_attrs=False, skip_hidden=True):
    """Read datasets within a group into a single pandas DataFrame.

    Parameters
    ----------
    group :         h5py.Group
                    Group whose datasets become DataFrame columns.
    subset :        iterable, optional
                    If given, read only these datasets.
    exclude :       iterable, optional
                    Datasets to skip.
    include_attrs : bool
                    If True, copy the group's attributes into
                    ``DataFrame.attrs``.
    skip_hidden :   bool
                    If True, skip datasets whose name starts with ".".

    Returns
    -------
    pandas.DataFrame

    Raises
    ------
    ValueError
        If a dataset has more than two dimensions.
    """
    # List datasets contained in this group
    datasets = [k for k, v in group.items() if isinstance(v, h5py.Dataset)]

    # `x is None` instead of `isinstance(x, type(None))` (idiomatic)
    if subset is None:
        subset = datasets
    else:
        subset = utils.make_iterable(subset)

    if exclude is None:
        exclude = []
    else:
        exclude = utils.make_iterable(exclude)

    # Read the datasets
    df = pd.DataFrame()
    for col in datasets:
        if col not in subset or col in exclude:
            continue
        # Skip hidden datasets (e.g. serialized payloads)
        if col.startswith('.') and skip_hidden:
            continue

        # This is the dataset
        ds = group[col]

        # Easy-peasy if dataset has 1 dimension
        if ds.ndim == 1:
            df[col] = ds[:]
        # 2-dimensions should not happen but we will simply enumerate cols
        elif ds.ndim == 2:
            for i in range(ds.shape[1]):
                df[f'{col}_{i}'] = ds[:, i]
        else:
            raise ValueError(f'Dataset {col} has more than two dimensions.')

    if include_attrs:
        for k in group.attrs:
            df.attrs[k] = group.attrs[k]

    return df

Read neurons from file.

Source code in navis/io/hdf_io.py
61
62
63
64
@abstractmethod
def read_neurons(self):
    """Parse and return the neurons stored in the file (subclass hook)."""
    pass

Writes neurons to HDF5 files.

Source code in navis/io/hdf_io.py
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
class BaseH5Writer(ABC):
    """Abstract base class for writing neurons to HDF5 files.

    Subclasses must define a ``version`` class attribute and implement
    `write_neurons`.
    """

    # Format version stamped into the file; set by subclasses.
    version = None

    def __init__(self, filepath: str, mode='a', **kwargs):
        """Initialize.

        Parameters
        ----------
        filepath :      str
                        Path to HDF5 file.
        mode :          "a" | "w"
                        Mode in which to open H5 file::

                            w	Create file, truncate if exists
                            a	Read/write if exists, create otherwise

        **kwargs
                        Passed to `h5py.File`.
        """
        self.filepath = filepath
        self.mode = mode
        self.kwargs = kwargs

    def __enter__(self):
        """Open file on enter."""
        self.f = h5py.File(self.filepath, self.mode, **self.kwargs)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Close file on exit."""
        self.f.close()

    def check_compatible(self):
        """Check if this writer is compatible with existing data.

        Raises
        ------
        ValueError
            If the file carries a non-hnf format spec or an hnf version
            other than "v1".
        """
        fmt = self.f.attrs.get('format_spec')
        if fmt:
            if not fmt.startswith('hnf_'):
                raise ValueError('Unexpected format specifier for file '
                                 f'{self.filepath}: "{fmt}"')

            ver = fmt.split('_')[-1]
            # NOTE(review): "v1" is hardcoded instead of using
            # f"v{self.version}" - confirm this is intentional before
            # adding writer versions beyond 1.
            if ver != 'v1':
                raise ValueError(f'File {self.filepath} appears to contain '
                                 f'data from an incompatible version: "{ver}"')

    def write_dataframe(self, df, group, subset=None, exclude=None, overwrite=False):
        """Write dataframe to group (one dataset per column).

        Parameters
        ----------
        df :        pandas.DataFrame
        group :     h5py.Group
                    Group to write the columns into.
        subset :    iterable, optional
                    If given, write only these columns.
        exclude :   iterable, optional
                    Columns to skip.
        overwrite : bool
                    If False, raise when a dataset already exists.

        Raises
        ------
        ValueError
            If a dataset already exists and `overwrite` is False.
        """
        assert isinstance(df, pd.DataFrame)

        # `x is None` instead of `isinstance(x, type(None))` (idiomatic)
        if subset is None:
            subset = df.columns
        else:
            subset = utils.make_iterable(subset)

        if exclude is None:
            exclude = []
        else:
            exclude = utils.make_iterable(exclude)

        for c in df.columns:
            if c not in subset or c in exclude:
                continue

            # Remove dataset if it already exists (and overwriting is allowed)
            if c in group:
                if not overwrite:
                    raise ValueError(f'Dataset {c} already exists in group "{group}"')
                del group[c]

            # Convert categoricals to their underlying values
            if isinstance(df[c].dtype, pd.CategoricalDtype):
                data = np.asarray(df[c])
            else:
                data = df[c].values

            # HDF5 does not like numpy strings ("<U4") or object
            if data.dtype.type in (np.str_, np.object_):
                data = data.astype("S")

            group.create_dataset(c, data=data, compression='gzip')

    def write_base_info(self):
        """Add version and url to root path."""
        self.f.attrs['format_spec'] = f'hnf_v{self.version}'
        self.f.attrs['format_url'] = 'https://github.com/navis-org/navis'

    @abstractmethod
    def write_neurons(self, neuron: 'core.NeuronObject'):
        """Write Neuron to file."""
        pass

Initialize.

PARAMETER DESCRIPTION
filepath
        Path to HDF5 file.

TYPE: str

mode
        Mode in which to open H5 file::

            w   Create file, truncate if exists
            a   Read/write if exists, create otherwise

TYPE: "a" | "w" DEFAULT: 'a'

**kwargs
        Passed to `h5py.File`.

DEFAULT: {}

Source code in navis/io/hdf_io.py
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
def __init__(self, filepath: str, mode='a', **kwargs):
    """Initialize.

    Parameters
    ----------
    filepath :      str
                    Path to HDF5 file.
    mode :          "a" | "w"
                    Mode in which to open H5 file::

                        w	Create file, truncate if exists
                        a	Read/write if exists, create otherwise

    **kwargs
                    Passed to `h5py.File`.
    """
    # File is not opened here - that happens in __enter__
    self.filepath = filepath
    self.mode = mode
    self.kwargs = kwargs

Check if this writer is compatible with existing data.

Source code in navis/io/hdf_io.py
447
448
449
450
451
452
453
454
455
456
457
458
def check_compatible(self):
    """Check if this writer is compatible with existing data."""
    fmt = self.f.attrs.get('format_spec')
    if not fmt:
        # No/empty spec: nothing to clash with (e.g. a fresh file)
        return

    if not fmt.startswith('hnf_'):
        raise ValueError(f'Unexpected format specifier for file {self.filepath}: "{fmt}"')

    ver = fmt.split('_')[-1]
    if ver != 'v1':
        raise ValueError(f'File {self.filepath} appears to contain data from an incompatible version: "{ver}"')

Add version and url to root path.

Source code in navis/io/hdf_io.py
496
497
498
499
def write_base_info(self):
    """Stamp the file root with the format version and reference URL."""
    info = {'format_spec': f'hnf_v{self.version}',
            'format_url': 'https://github.com/navis-org/navis'}
    for key, value in info.items():
        self.f.attrs[key] = value

Write dataframe to group.

Source code in navis/io/hdf_io.py
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
def write_dataframe(self, df, group, subset=None, exclude=None, overwrite=False):
    """Write dataframe to group (one dataset per column).

    Parameters
    ----------
    df :        pandas.DataFrame
    group :     h5py.Group
                Group to write the columns into.
    subset :    iterable, optional
                If given, write only these columns.
    exclude :   iterable, optional
                Columns to skip.
    overwrite : bool
                If False, raise when a dataset already exists.

    Raises
    ------
    ValueError
        If a dataset already exists and `overwrite` is False.
    """
    assert isinstance(df, pd.DataFrame)

    # `x is None` instead of `isinstance(x, type(None))` (idiomatic)
    if subset is None:
        subset = df.columns
    else:
        subset = utils.make_iterable(subset)

    if exclude is None:
        exclude = []
    else:
        exclude = utils.make_iterable(exclude)

    for c in df.columns:
        if c not in subset or c in exclude:
            continue

        # Remove dataset if it already exists (and overwriting is allowed)
        if c in group:
            if not overwrite:
                raise ValueError(f'Dataset {c} already exists in group "{group}"')
            del group[c]

        # Convert categoricals to their underlying values
        if isinstance(df[c].dtype, pd.CategoricalDtype):
            data = np.asarray(df[c])
        else:
            data = df[c].values

        # HDF5 does not like numpy strings ("<U4") or object
        if data.dtype.type in (np.str_, np.object_):
            data = data.astype("S")

        group.create_dataset(c, data=data, compression='gzip')

Write Neuron to file.

Source code in navis/io/hdf_io.py
501
502
503
504
@abstractmethod
def write_neurons(self, neuron: 'core.NeuronObject'):
    """Serialize the given neuron(s) into the file (subclass hook)."""
    pass

Reads neurons from HDF5 files.

Source code in navis/io/hdf_io.py
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
class H5ReaderV1(BaseH5Reader):
    """Reads neurons from HDF5 files (hnf format version 1)."""

    version = 1

    def list_neurons(self, from_cache=True):
        """List all neurons in file.

        Parameters
        ----------
        from_cache : bool
                     If True and neurons were listed before, return the
                     cached list of IDs.
        """
        if from_cache and hasattr(self, 'neurons'):
            return self.neurons

        # Every top-level group represents one neuron; its name is the ID
        self.neurons = []
        for id, grp in self.f.items():
            # Skip if not a group (e.g. root-level datasets)
            if not isinstance(grp, h5py.Group):
                continue

            self.neurons.append(id)

        return self.neurons

    def read_neurons(self,
                     subset=None,
                     read='mesh->skeleton->dotprops',
                     strict=False,
                     prefer_raw=False,
                     on_error='stop',
                     progress=True,
                     annotations=False,
                     **kwargs):
        """Read neurons from file.

        Parameters
        ----------
        subset :      iterable, optional
                      Neuron IDs to read; defaults to all neurons in file.
        read :        str
                      Comma-separated representations to read; "->" gives
                      fallback priority (e.g. mesh, else skeleton, ...).
        strict :      bool
                      If True, only parse a core subset of attributes.
        prefer_raw :  bool
                      If True, ignore pickled payloads and parse raw data.
        on_error :    "stop" | "raise" | "warn" | other
                      What to do when parsing a neuron fails.
        progress :    bool
                      Whether to show a progress bar.
        annotations : bool | str | iterable
                      Annotations to attach to each neuron.

        Returns
        -------
        (list, dict)
            Parsed neurons and a {id: error message} dict.
        """
        assert isinstance(read, str)

        readers = {'mesh': self.read_meshneuron,
                   'skeleton': self.read_treeneuron,
                   'dotprops': self.read_dotprops}

        # If no subset specified, load all neurons
        if subset is None:
            subset = self.list_neurons()
        else:
            subset = [id for id in subset if id in self.f]

        neurons = []
        errors = {}

        # We can not simply use disable=True here because of some odd
        # interactions when this is run in multiple processes
        if progress:
            pbar = config.tqdm(desc='Reading',
                               leave=False,
                               disable=config.pbar_hide,
                               total=len(subset))

        try:
            for id in subset:
                # Go over the requested neuron representations
                for rep in read.split(','):
                    # Strip potential whitespaces
                    rep = rep.strip()

                    # Go over priorities
                    for prio in rep.split('->'):
                        # Strip potential whitespaces
                        prio = prio.strip()
                        # If that neuron type is present
                        if prio in self.f[id]:
                            try:
                                # Read neuron
                                read_func = readers[prio]
                                n = read_func(id,
                                              strict=strict,
                                              prefer_raw=prefer_raw,
                                              **kwargs)

                                # Read annotations
                                if annotations is not None:
                                    an = self.read_annotations(id, annotations, **kwargs)
                                    for k, v in an.items():
                                        setattr(n, k, v)

                                # Append neuron and break prio loop
                                neurons.append(n)
                            except BaseException as e:
                                errors[id] = str(e)
                                if on_error in ('stop', 'raise'):
                                    raise e
                                elif on_error == 'warn':
                                    warnings.warn(f'Error parsing {prio} for '
                                                  f'neuron {id}: {e}')

                            break

                if progress:
                    pbar.update()
        except BaseException:
            raise
        finally:
            if progress:
                pbar.close()

        return neurons, errors

    def parse_add_attributes(self, grp, base_grp, neuron,
                             subset=None, exclude=('units_nm',)):
        """Parse attributes and associate with neuron.

        Attributes on the representation group (`grp`) override those on
        the neuron-level group (`base_grp`). Default for `exclude` is a
        tuple to avoid the shared-mutable-default pitfall.
        """
        if subset is not None:
            subset = utils.make_iterable(subset)

        # First parse base (neuron-level) attributes
        attrs = {}
        for k, v in base_grp.attrs.items():
            if subset is not None and k not in subset:
                continue
            elif k in exclude:
                continue
            attrs[k] = v

        # Now parse attributes specific for this representation
        for k, v in grp.attrs.items():
            if subset is not None and k not in subset:
                continue
            elif k in exclude:
                continue
            attrs[k] = v

        # Now add attributes
        for k, v in attrs.items():
            setattr(neuron, k, v)

    def parse_add_datasets(self, grp, neuron, subset=None, exclude=None,
                           skip_hidden=True):
        """Parse datasets and associate with neuron."""
        if subset is not None:
            subset = utils.make_iterable(subset)
        # BUGFIX: with exclude=None the original `k in exclude` check below
        # raised TypeError; normalize None to an empty tuple instead.
        exclude = () if exclude is None else utils.make_iterable(exclude)

        # Now parse datasets specific for this representation
        for k in grp.keys():
            if subset is not None and k not in subset:
                continue
            elif k.startswith('.') and skip_hidden:
                continue
            elif k in exclude:
                continue
            # Do not remove the [:] as it ensures that we get a numpy array
            setattr(neuron, k, grp[k][:])

    def parse_add_units(self, grp, base_grp, neuron):
        """Parse units and associate with neuron."""
        # Representation-level units take precedence over neuron-level ones
        units = grp.attrs.get('units_nm',
                              base_grp.attrs.get('units_nm',
                                                 None))

        if isinstance(units, str):
            # If string, see if pint can parse it
            neuron.units = units
        elif isinstance(units, np.ndarray):
            # Currently navis .units does not support per-axis (x/y/z) units
            # We will use only the first entry for now
            neuron.units = f'{units[0]} nm'
        elif units is not None:
            neuron.units = f'{units} nm'

    def read_annotations(self, id, annotations, **kwargs):
        """Read annotations for given neuron from file.

        Returns
        -------
        dict
            Maps annotation name to a pandas DataFrame.
        """
        # Get the group for this neuron
        neuron_grp = self.f[id]

        an_grp = neuron_grp.get('annotations')
        if not an_grp:
            return {}

        if isinstance(annotations, bool):
            # BUGFIX: the original `isinstance(grp, type(h5py.Group))` tested
            # against the metaclass and therefore matched no groups at all.
            annotations = [k for k, grp in an_grp.items()
                           if isinstance(grp, h5py.Group)]
        else:
            annotations = utils.make_iterable(annotations)

        parsed_an = {}
        for an in annotations:
            if an in an_grp:
                parsed_an[an] = self.read_dataframe(an_grp[an],
                                                    include_attrs=True)

        return parsed_an

    def read_treeneuron(self, id, strict=False, prefer_raw=False, **kwargs):
        """Read given TreeNeuron from file."""
        # Get the group for this neuron
        neuron_grp = self.f[id]

        # See if this neuron has a skeleton
        if 'skeleton' not in neuron_grp:
            raise ValueError(f'Neuron {id} has no skeleton')
        sk_grp = neuron_grp['skeleton']

        # Prefer a pickled navis object unless raw data was requested
        if '.serialized_navis' in sk_grp and not prefer_raw:
            return pickle.loads(sk_grp['.serialized_navis'][()])

        # Parse node table
        nodes = self.read_dataframe(sk_grp,
                                    subset=['node_id', 'parent_id',
                                            'x', 'y', 'z',
                                            'radius'] if strict else None)

        n = core.TreeNeuron(nodes, id=id)

        # Check if we have units
        self.parse_add_units(sk_grp, neuron_grp, n)

        # Parse attributes
        self.parse_add_attributes(sk_grp, neuron_grp, n,
                                  subset=['soma'] if strict else None)

        return n

    def read_dotprops(self, id, strict=False, prefer_raw=False, **kwargs):
        """Read given Dotprops from file."""
        # Get the group for this neuron
        neuron_grp = self.f[id]

        # See if this neuron has dotprops
        if 'dotprops' not in neuron_grp:
            raise ValueError(f'Neuron {id} has no dotprops')
        dp_grp = neuron_grp['dotprops']

        # Prefer a pickled navis object unless raw data was requested
        if '.serialized_navis' in dp_grp and not prefer_raw:
            return pickle.loads(dp_grp['.serialized_navis'][()])

        # Parse dotprop arrays
        points = dp_grp['points']
        vect = dp_grp['vect']
        alpha = dp_grp['alpha']
        k = dp_grp.attrs['k']

        n = core.Dotprops(points=points,
                          k=k,
                          vect=vect,
                          alpha=alpha,
                          id=id,
                          name=neuron_grp.attrs.get('neuron_name'))

        # Check if we have units
        self.parse_add_units(dp_grp, neuron_grp, n)

        # Parse additional attributes
        self.parse_add_attributes(dp_grp, neuron_grp, n,
                                  subset=['soma'] if strict else None,
                                  exclude=['units_nm', 'k'])

        # Parse additional datasets
        if not strict:
            self.parse_add_datasets(dp_grp, n,
                                    exclude=['points', 'vect', 'alpha'])

        return n

    def read_meshneuron(self, id, strict=False, prefer_raw=False, **kwargs):
        """Read given MeshNeuron from file."""
        # Get the group for this neuron
        neuron_grp = self.f[id]

        # See if this neuron has a mesh
        if 'mesh' not in neuron_grp:
            raise ValueError(f'Neuron {id} has no mesh')
        me_grp = neuron_grp['mesh']

        # Prefer a pickled navis object unless raw data was requested
        if '.serialized_navis' in me_grp and not prefer_raw:
            return pickle.loads(me_grp['.serialized_navis'][()])

        # Parse node table -> do not remove the [:] as it ensures that we get
        # a numpy array
        verts = me_grp['vertices'][:]
        faces = me_grp['faces'][:]

        n = core.MeshNeuron({'vertices': verts, 'faces': faces},
                            id=id,
                            name=neuron_grp.attrs.get('neuron_name'))

        if 'skeleton_map' in me_grp:
            # NOTE(review): this assigns the h5py dataset itself, not a copy
            # ([:]); it will be unusable once the file is closed - confirm.
            n.skeleton_map = me_grp['skeleton_map']

        # Check if we have units
        self.parse_add_units(me_grp, neuron_grp, n)

        # Parse additional attributes
        self.parse_add_attributes(me_grp, neuron_grp, n,
                                  subset=['soma'] if strict else None,
                                  exclude=['units_nm'])

        # Parse additional datasets
        if not strict:
            self.parse_add_datasets(me_grp, n,
                                    exclude=['vertices',
                                             'faces',
                                             'skeleton_map'])

        return n

List all neurons in file.

Source code in navis/io/hdf_io.py
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
def list_neurons(self, from_cache=True):
    """List all neurons in file."""
    # Return the cached list of IDs if we already scanned the file
    if from_cache and hasattr(self, 'neurons'):
        return self.neurons

    # Each top-level group is one neuron; root-level datasets are skipped
    self.neurons = [name for name, grp in self.f.items()
                    if isinstance(grp, h5py.Group)]
    return self.neurons

Parse attributes and associate with neuron.

Source code in navis/io/hdf_io.py
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
def parse_add_attributes(self, grp, base_grp, neuron,
                         subset=None, exclude=('units_nm',)):
    """Parse attributes and associate with neuron.

    Attributes on the representation group (`grp`) override those on the
    neuron-level group (`base_grp`).

    Parameters
    ----------
    grp :       h5py.Group
                Representation-level group (e.g. "skeleton").
    base_grp :  h5py.Group
                Neuron-level group.
    neuron :    navis neuron
                Object to attach the attributes to.
    subset :    iterable, optional
                If given, only these attributes are parsed.
    exclude :   iterable
                Attributes to skip. Default changed from a list to a
                tuple to avoid the mutable-default-argument pitfall.
    """
    if subset is not None:
        subset = utils.make_iterable(subset)

    # First parse base (neuron-level) attributes
    attrs = {}
    for k, v in base_grp.attrs.items():
        if subset is not None and k not in subset:
            continue
        elif k in exclude:
            continue
        attrs[k] = v

    # Now parse attributes specific for this representation
    for k, v in grp.attrs.items():
        if subset is not None and k not in subset:
            continue
        elif k in exclude:
            continue
        attrs[k] = v

    # Now add attributes
    for k, v in attrs.items():
        setattr(neuron, k, v)

Parse datasets and associate with neuron.

Source code in navis/io/hdf_io.py
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
def parse_add_datasets(self, grp, neuron, subset=None, exclude=None,
                       skip_hidden=True):
    """Parse datasets and associate with neuron.

    Parameters
    ----------
    grp :         h5py.Group
                  Group whose datasets are attached to the neuron.
    neuron :      navis neuron
                  Object to attach the datasets to.
    subset :      iterable, optional
                  If given, only these datasets are parsed.
    exclude :     iterable, optional
                  Datasets to skip.
    skip_hidden : bool
                  If True, skip datasets whose name starts with ".".
    """
    if subset is not None:
        subset = utils.make_iterable(subset)
    # BUGFIX: with exclude=None the original `k in exclude` check below
    # raised TypeError; normalize None to an empty tuple instead.
    exclude = () if exclude is None else utils.make_iterable(exclude)

    # Now parse datasets specific for this representation
    for k in grp.keys():
        if subset is not None and k not in subset:
            continue
        elif k.startswith('.') and skip_hidden:
            continue
        elif k in exclude:
            continue
        # Do not remove the [:] as it ensures that we get a numpy array
        setattr(neuron, k, grp[k][:])

Parse units and associate with neuron.

Source code in navis/io/hdf_io.py
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
def parse_add_units(self, grp, base_grp, neuron):
    """Parse units and associate with neuron."""
    # Representation-level units take precedence over neuron-level ones
    units = grp.attrs.get('units_nm', base_grp.attrs.get('units_nm', None))

    if units is None:
        # No units recorded anywhere - leave neuron untouched
        return

    if isinstance(units, str):
        # Strings are handed over as-is (pint will try to parse them)
        neuron.units = units
    elif isinstance(units, np.ndarray):
        # navis .units does not support separate x/y/z units, so only the
        # first entry is used for now
        neuron.units = f'{units[0]} nm'
    else:
        neuron.units = f'{units} nm'

Read annotations for given neuron from file.

Source code in navis/io/hdf_io.py
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
def read_annotations(self, id, annotations, **kwargs):
    """Read annotations for given neuron from file.

    Parameters
    ----------
    id :            str
                    ID (= top-level group name) of the neuron.
    annotations :   bool | str | iterable
                    If a bool, read all annotation groups; otherwise read
                    only the given annotation(s).

    Returns
    -------
    dict
        Maps annotation name to a pandas DataFrame.
    """
    # Get the group for this neuron
    neuron_grp = self.f[id]

    an_grp = neuron_grp.get('annotations')
    if not an_grp:
        return {}

    if isinstance(annotations, bool):
        # BUGFIX: the original `isinstance(grp, type(h5py.Group))` tested
        # against the metaclass (`type`) and therefore never matched any
        # group, so no annotations were ever read on the bool path.
        annotations = [k for k, grp in an_grp.items()
                       if isinstance(grp, h5py.Group)]
    else:
        annotations = utils.make_iterable(annotations)

    parsed_an = {}
    for an in annotations:
        if an in an_grp:
            parsed_an[an] = self.read_dataframe(an_grp[an],
                                                include_attrs=True)

    return parsed_an

Read given Dotprops from file.

Source code in navis/io/hdf_io.py
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
def read_dotprops(self, id, strict=False, prefer_raw=False, **kwargs):
    """Read given Dotprops from file."""
    grp = self.f[id]

    # Bail early if there is no dotprops representation for this neuron
    if 'dotprops' not in grp:
        raise ValueError(f'Neuron {id} has no dotprops')
    dp = grp['dotprops']

    # A pickled navis object takes precedence unless raw data is requested
    if not prefer_raw and '.serialized_navis' in dp:
        return pickle.loads(dp['.serialized_navis'][()])

    # Build the neuron straight from the raw arrays + the `k` attribute
    n = core.Dotprops(points=dp['points'],
                      k=dp.attrs['k'],
                      vect=dp['vect'],
                      alpha=dp['alpha'],
                      id=id,
                      name=grp.attrs.get('neuron_name'))

    # Attach units if recorded
    self.parse_add_units(dp, grp, n)

    # Attach remaining attributes (minus those already consumed above)
    self.parse_add_attributes(dp, grp, n,
                              subset=['soma'] if strict else None,
                              exclude=['units_nm', 'k'])

    # Any extra datasets become neuron attributes
    if not strict:
        self.parse_add_datasets(dp, n,
                                exclude=['points', 'vect', 'alpha'])

    return n

Read given MeshNeuron from file.

Source code in navis/io/hdf_io.py
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
def read_meshneuron(self, id, strict=False, prefer_raw=False, **kwargs):
    """Read the MeshNeuron representation of a given neuron from file.

    Raises ValueError if the neuron has no mesh group.
    """
    # Locate this neuron's group and make sure it has a mesh
    neuron_grp = self.f[id]
    if 'mesh' not in neuron_grp:
        raise ValueError(f'Neuron {id} has no mesh')
    me_grp = neuron_grp['mesh']

    # A pickled navis object takes precedence unless raw data is requested
    if not prefer_raw and '.serialized_navis' in me_grp:
        return pickle.loads(me_grp['.serialized_navis'][()])

    # The [:] slice is required: it materializes the datasets as numpy arrays
    n = core.MeshNeuron({'vertices': me_grp['vertices'][:],
                         'faces': me_grp['faces'][:]},
                        id=id,
                        name=neuron_grp.attrs.get('neuron_name'))

    # Carry over the mesh-to-skeleton mapping if one was stored
    if 'skeleton_map' in me_grp:
        n.skeleton_map = me_grp['skeleton_map']

    # Attach units if the file provides them
    self.parse_add_units(me_grp, neuron_grp, n)

    # Attach extra attributes; in strict mode only `soma` is considered
    self.parse_add_attributes(me_grp, neuron_grp, n,
                              subset=['soma'] if strict else None,
                              exclude=['units_nm'])

    # Attach any additional datasets unless we are in strict mode
    if not strict:
        self.parse_add_datasets(me_grp, n,
                                exclude=['vertices', 'faces', 'skeleton_map'])

    return n

Read neurons from file.

Source code in navis/io/hdf_io.py
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
def read_neurons(self,
                 subset=None,
                 read='mesh->skeleton->dotprops',
                 strict=False,
                 prefer_raw=False,
                 on_error='stop',
                 progress=True,
                 annotations=False,
                 **kwargs):
    """Read neurons from file.

    Parameters
    ----------
    subset :      iterable | None
                  IDs of neurons to read. `None` reads every neuron in the
                  file; IDs not present in the file are silently dropped.
    read :        str
                  Comma-separated list of representations to read. Within
                  each item, "->" separates fallback priorities: the default
                  tries mesh first, then skeleton, then dotprops, and reads
                  only the first representation that is present.
    strict :      bool
                  Passed through to the per-representation readers.
    prefer_raw :  bool
                  Passed through; prefer raw data over pickled objects.
    on_error :    "stop" | "raise" | "warn" | other
                  "stop"/"raise" re-raise the exception; "warn" emits a
                  warning; any other value records the error silently.
    progress :    bool
                  Whether to show a progress bar.
    annotations : iterable | bool
                  Annotation names to attach to each neuron.

    Returns
    -------
    (neurons, errors) : (list, dict)
                  Parsed neurons and a mapping of neuron ID -> error message
                  for neurons that failed to parse.

    """
    assert isinstance(read, str)

    # Dispatch table: representation name -> reader method
    readers = {'mesh': self.read_meshneuron,
               'skeleton': self.read_treeneuron,
               'dotprops': self.read_dotprops}

    # If no subset specified, load all neurons
    if isinstance(subset, type(None)):
        subset = self.list_neurons()
    else:
        # Silently drop requested IDs that are not in the file
        subset = [id for id in subset if id in self.f]

    neurons = []
    errors = {}

    # We can not simply use disable=True here because of some odd
    # interactions when this is run in multiple processes
    if progress:
        pbar = config.tqdm(desc='Reading',
                           leave=False,
                           disable=config.pbar_hide,
                           total=len(subset))

    try:
        for id in subset:
            # Go over the requested neuron representations
            for rep in read.split(','):
                # Strip potential whitespaces
                rep = rep.strip()

                # Go over priorities
                for prio in rep.split('->'):
                    # Strip potential whitespaces
                    prio = prio.strip()
                    # If that neuron type is present
                    if prio in self.f[id]:
                        try:
                            # Read neuron
                            read_func = readers[prio]
                            n = read_func(id,
                                          strict=strict,
                                          prefer_raw=prefer_raw,
                                          **kwargs)

                            # Read annotations
                            # NOTE(review): with the default annotations=False
                            # this condition is still True (False is not None),
                            # so read_annotations(id, False) is called; it
                            # appears to be a harmless no-op downstream, but
                            # a truthiness check may have been intended —
                            # TODO confirm.
                            if not isinstance(annotations, type(None)):
                                an = self.read_annotations(id, annotations, **kwargs)
                                for k, v in an.items():
                                    setattr(n, k, v)

                            # Append neuron and break prio loop
                            neurons.append(n)
                        except BaseException as e:
                            errors[id] = str(e)
                            if on_error in ('stop', 'raise'):
                                raise e
                            elif on_error == 'warn':
                                warnings.warn(f'Error parsing {prio} for '
                                              f'neuron {id}: {e}')

                        # NOTE(review): this break sits outside the try/except,
                        # so it also fires after a caught error — lower-priority
                        # representations are then NOT attempted for this
                        # neuron. Presumably intentional; verify.
                        break

            if progress:
                pbar.update()
    except BaseException:
        raise
    finally:
        # Always close the progress bar, even on error
        if progress:
            pbar.close()

    return neurons, errors

Read given TreeNeuron from file.

Source code in navis/io/hdf_io.py
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
def read_treeneuron(self, id, strict=False, prefer_raw=False, **kwargs):
    """Read the TreeNeuron (skeleton) representation of a neuron from file.

    Raises ValueError if the neuron has no skeleton group.
    """
    # Locate this neuron's group and make sure it has a skeleton
    neuron_grp = self.f[id]
    if 'skeleton' not in neuron_grp:
        raise ValueError(f'Neuron {id} has no skeleton')
    sk_grp = neuron_grp['skeleton']

    # A pickled navis object takes precedence unless raw data is requested
    if not prefer_raw and '.serialized_navis' in sk_grp:
        return pickle.loads(sk_grp['.serialized_navis'][()])

    # In strict mode only the canonical node-table columns are read
    columns = None
    if strict:
        columns = ['node_id', 'parent_id', 'x', 'y', 'z', 'radius']
    nodes = self.read_dataframe(sk_grp, subset=columns)

    n = core.TreeNeuron(nodes, id=id)

    # Attach units if the file provides them
    self.parse_add_units(sk_grp, neuron_grp, n)

    # Attach extra attributes; in strict mode only `soma` is considered
    self.parse_add_attributes(sk_grp, neuron_grp, n,
                              subset=['soma'] if strict else None)

    return n

Implements v1 of the HDF schema.

See the HDF5 format specification (linked in the project documentation) for format specs.

Source code in navis/io/hdf_io.py
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
class H5WriterV1(BaseH5Writer):
    """Implements v1 of the HDF schema.

    See the format specification for details.
    """

    version = 1

    def write_neurons(self, neuron, serialized=True, raw=False,
                      overwrite=True, annotations=None, **kwargs):
        """Write neuron(s) to file.

        Parameters
        ----------
        neuron :      TreeNeuron | MeshNeuron | Dotprops | NeuronList
                      Neuron(s) to write.
        serialized :  bool
                      Whether to store a pickled copy of the navis object.
        raw :         bool
                      Whether to store the raw data (tables/arrays/attrs).
        overwrite :   bool
                      Whether existing data for a neuron may be replaced.
        annotations : iterable | None
                      Names of DataFrame attributes to write as annotations.

        Raises
        ------
        TypeError
            If `neuron` is not a supported neuron type.
        """
        if isinstance(neuron, core.NeuronList):
            for n in config.tqdm(neuron, desc='Writing',
                                 leave=False,
                                 disable=config.pbar_hide):
                # BUGFIX: pass `serialized` and `raw` through to the
                # recursive call — previously they were dropped and
                # silently reset to their defaults for every list member.
                self.write_neurons(n, serialized=serialized, raw=raw,
                                   overwrite=overwrite,
                                   annotations=annotations, **kwargs)
            return

        if isinstance(neuron, core.TreeNeuron):
            self.write_treeneuron(neuron, serialized=serialized, raw=raw,
                                  overwrite=overwrite, **kwargs)
        elif isinstance(neuron, core.MeshNeuron):
            self.write_meshneuron(neuron, serialized=serialized, raw=raw,
                                  overwrite=overwrite, **kwargs)
        elif isinstance(neuron, core.Dotprops):
            self.write_dotprops(neuron, serialized=serialized, raw=raw,
                                overwrite=overwrite, **kwargs)
        else:
            # BUGFIX: the original message was missing the space before
            # "to HDF5 file."
            raise TypeError(f'Unable to write object of type "{type(neuron)}" '
                            'to HDF5 file.')

        # Write annotations (if any were requested)
        if not isinstance(annotations, type(None)):
            _ = self.write_annotations(neuron, annotations,
                                       overwrite=overwrite,
                                       **kwargs)

    def write_annotations(self, neuron, annotations, overwrite=True, **kwargs):
        """Write annotations (DataFrame attributes) for given neuron to file.

        Raises ValueError for any annotation that is not a DataFrame.
        """
        # Get this neuron's group
        neuron_grp = self.get_neuron_group(neuron)
        # Create annotation group if it does not exist
        an_grp = neuron_grp.require_group('annotations')

        annotations = utils.make_iterable(annotations)

        for an in annotations:
            data = getattr(neuron, an)
            # Only DataFrames are supported as annotations
            if isinstance(data, pd.DataFrame):
                data_grp = an_grp.require_group(an)
                self.write_dataframe(data, data_grp, overwrite=overwrite)
            else:
                raise ValueError(f'Unable to write "{an}" of type '
                                 f'"({type(data)})" to HDF5 file.')

    def get_neuron_group(self, neuron):
        """Get group for neuron or create it if it does not exist."""
        # HDF5 group names must be strings
        id = str(neuron.id)

        grp = self.f.require_group(id)

        # Write some basic info. Skip a `None` name: h5py cannot store
        # None as an attribute value and would raise.
        if getattr(neuron, 'name', None) is not None:
            grp.attrs['neuron_name'] = neuron.name

        return grp

    def write_treeneuron(self, neuron,
                         serialized=True, raw=False,
                         overwrite=True, **kwargs):
        """Write TreeNeuron (skeleton) to file.

        Raises ValueError if a skeleton exists and `overwrite` is False.
        """
        assert isinstance(neuron, core.TreeNeuron)

        # Get the group for this neuron -> this also writes basic info
        neuron_grp = self.get_neuron_group(neuron)

        # Refuse to clobber an existing skeleton unless allowed
        if 'skeleton' in neuron_grp:
            if not overwrite:
                raise ValueError('File already contains a skeleton for neuron '
                                 f'{neuron.id}')
            del neuron_grp['skeleton']

        sk_grp = neuron_grp.require_group('skeleton')

        # Store a pickled copy of the full navis object
        if serialized:
            sk_grp.create_dataset('.serialized_navis',
                                  data=np.void(pickle.dumps(neuron)))

        # Store the raw data: units, soma and node table
        if raw:
            units = neuron_nm_units(neuron)
            if units:
                sk_grp.attrs['units_nm'] = units

            if neuron.has_soma:
                sk_grp.attrs['soma'] = neuron.soma

            # Write node table; `type` is derived and not stored
            self.write_dataframe(neuron.nodes,
                                 group=sk_grp,
                                 exclude=['type'],
                                 overwrite=overwrite)

    def write_dotprops(self, neuron,
                       serialized=True, raw=False,
                       overwrite=True, **kwargs):
        """Write Dotprops to file.

        Raises ValueError if dotprops exist and `overwrite` is False.
        """
        assert isinstance(neuron, core.Dotprops)

        # Get the group for this neuron -> this also writes basic info
        neuron_grp = self.get_neuron_group(neuron)

        # Refuse to clobber existing dotprops unless allowed
        if 'dotprops' in neuron_grp:
            if not overwrite:
                raise ValueError('File already contains dotprops for neuron '
                                 f'{neuron.id}')
            del neuron_grp['dotprops']

        dp_grp = neuron_grp.require_group('dotprops')

        # Store a pickled copy of the full navis object
        if serialized:
            dp_grp.create_dataset('.serialized_navis',
                                  data=np.void(pickle.dumps(neuron)))

        # Store the raw data: k, units, soma and the point/vector arrays
        if raw:
            dp_grp.attrs['k'] = neuron.k
            units = neuron_nm_units(neuron)
            if units:
                dp_grp.attrs['units_nm'] = units
            if neuron.has_soma:
                dp_grp.attrs['soma'] = neuron.soma

            # Write data; the neuron really ought to have these but just
            # in case we skip missing attributes
            for d in ['points', 'vect', 'alpha']:
                if not hasattr(neuron, d):
                    continue

                data = getattr(neuron, d)
                dp_grp.create_dataset(d, data=data, compression='gzip')

    def write_meshneuron(self, neuron,
                         serialized=True, raw=False,
                         overwrite=True, **kwargs):
        """Write MeshNeuron to file.

        Raises ValueError if a mesh exists and `overwrite` is False.
        """
        assert isinstance(neuron, core.MeshNeuron)

        # Get the group for this neuron -> this also writes basic info
        neuron_grp = self.get_neuron_group(neuron)

        # Refuse to clobber an existing mesh unless allowed
        if 'mesh' in neuron_grp:
            if not overwrite:
                raise ValueError('File already contains a mesh for neuron '
                                 f'{neuron.id}')
            del neuron_grp['mesh']

        me_grp = neuron_grp.require_group('mesh')

        # Store a pickled copy of the full navis object
        if serialized:
            me_grp.create_dataset('.serialized_navis',
                                  data=np.void(pickle.dumps(neuron)))

        # Store the raw data: units, soma, vertices/faces and skeleton map
        if raw:
            units = neuron_nm_units(neuron)
            if units:
                me_grp.attrs['units_nm'] = units
            if neuron.has_soma:
                me_grp.attrs['soma'] = neuron.soma

            # Write data; skip attributes this neuron does not carry
            for d in ['vertices', 'faces', 'skeleton_map']:
                if not hasattr(neuron, d):
                    continue

                data = getattr(neuron, d)
                me_grp.create_dataset(d, data=data, compression='gzip')

Get group for neuron or create it if it does not exist.

Source code in navis/io/hdf_io.py
563
564
565
566
567
568
569
570
571
572
573
574
def get_neuron_group(self, neuron):
    """Return the HDF5 group for this neuron, creating it if necessary."""
    # HDF5 group names must be strings
    grp = self.f.require_group(str(neuron.id))

    # Record basic info on the group
    if hasattr(neuron, 'name'):
        grp.attrs['neuron_name'] = neuron.name

    return grp

Write annotations for given neuron to file.

Source code in navis/io/hdf_io.py
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
def write_annotations(self, neuron, annotations, overwrite=True, **kwargs):
    """Write annotations (DataFrame attributes) for a given neuron to file.

    Raises ValueError for any annotation that is not a DataFrame.
    """
    # Fetch (or create) this neuron's group and its annotations subgroup
    neuron_grp = self.get_neuron_group(neuron)
    an_grp = neuron_grp.require_group('annotations')

    for an in utils.make_iterable(annotations):
        data = getattr(neuron, an)
        # Only DataFrames can be stored as annotations
        if not isinstance(data, pd.DataFrame):
            raise ValueError(f'Unable to write "{an}" of type '
                             f'"({type(data)})" to HDF5 file.')
        self.write_dataframe(data,
                             an_grp.require_group(an),
                             overwrite=overwrite)

Write Dotprops to file.

Source code in navis/io/hdf_io.py
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
def write_dotprops(self, neuron,
                   serialized=True, raw=False,
                   overwrite=True, **kwargs):
    """Write Dotprops to file.

    Raises ValueError if dotprops already exist and `overwrite` is False.
    """
    assert isinstance(neuron, core.Dotprops)

    # Fetch (or create) the group for this neuron; this also records
    # basic info such as the neuron's name
    neuron_grp = self.get_neuron_group(neuron)

    # Refuse to clobber existing dotprops unless overwriting is allowed
    if 'dotprops' in neuron_grp:
        if not overwrite:
            raise ValueError('File already contains dotprops for neuron '
                             f'{neuron.id}')
        del neuron_grp['dotprops']

    dp_grp = neuron_grp.require_group('dotprops')

    # Store a pickled copy of the full navis object
    if serialized:
        dp_grp.create_dataset('.serialized_navis',
                              data=np.void(pickle.dumps(neuron)))

    # Store the raw data: k, units, soma and the point/vector/alpha arrays
    if raw:
        dp_grp.attrs['k'] = neuron.k
        units = neuron_nm_units(neuron)
        if units:
            dp_grp.attrs['units_nm'] = units
        if neuron.has_soma:
            dp_grp.attrs['soma'] = neuron.soma

        # The neuron really ought to have these arrays; skip any missing
        for name in ('points', 'vect', 'alpha'):
            if hasattr(neuron, name):
                dp_grp.create_dataset(name,
                                      data=getattr(neuron, name),
                                      compression='gzip')

Write MeshNeuron to file.

Source code in navis/io/hdf_io.py
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
def write_meshneuron(self, neuron,
                     serialized=True, raw=False,
                     overwrite=True, **kwargs):
    """Write MeshNeuron to file.

    Raises ValueError if a mesh already exists and `overwrite` is False.
    """
    assert isinstance(neuron, core.MeshNeuron)

    # Fetch (or create) the group for this neuron; this also records
    # basic info such as the neuron's name
    neuron_grp = self.get_neuron_group(neuron)

    # Refuse to clobber an existing mesh unless overwriting is allowed
    if 'mesh' in neuron_grp:
        if not overwrite:
            raise ValueError('File already contains a mesh for neuron '
                             f'{neuron.id}')
        del neuron_grp['mesh']

    me_grp = neuron_grp.require_group('mesh')

    # Store a pickled copy of the full navis object
    if serialized:
        me_grp.create_dataset('.serialized_navis',
                              data=np.void(pickle.dumps(neuron)))

    # Store the raw data: units, soma, vertices/faces and skeleton map
    if raw:
        units = neuron_nm_units(neuron)
        if units:
            me_grp.attrs['units_nm'] = units
        if neuron.has_soma:
            me_grp.attrs['soma'] = neuron.soma

        # Skip attributes this neuron does not carry
        for name in ('vertices', 'faces', 'skeleton_map'):
            if hasattr(neuron, name):
                me_grp.create_dataset(name,
                                      data=getattr(neuron, name),
                                      compression='gzip')

Write neuron to file.

Source code in navis/io/hdf_io.py
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
def write_neurons(self, neuron, serialized=True, raw=False,
                  overwrite=True, annotations=None, **kwargs):
    """Write neuron(s) to file.

    Parameters
    ----------
    neuron :      TreeNeuron | MeshNeuron | Dotprops | NeuronList
                  Neuron(s) to write.
    serialized :  bool
                  Whether to store a pickled copy of the navis object.
    raw :         bool
                  Whether to store the raw data (tables/arrays/attrs).
    overwrite :   bool
                  Whether existing data for a neuron may be replaced.
    annotations : iterable | None
                  Names of DataFrame attributes to write as annotations.

    Raises
    ------
    TypeError
        If `neuron` is not a supported neuron type.
    """
    if isinstance(neuron, core.NeuronList):
        for n in config.tqdm(neuron, desc='Writing',
                             leave=False,
                             disable=config.pbar_hide):
            # BUGFIX: pass `serialized` and `raw` through to the recursive
            # call — previously they were dropped and silently reset to
            # their defaults for every neuron in the list.
            self.write_neurons(n, serialized=serialized, raw=raw,
                               overwrite=overwrite,
                               annotations=annotations, **kwargs)
        return

    if isinstance(neuron, core.TreeNeuron):
        self.write_treeneuron(neuron, serialized=serialized, raw=raw,
                              overwrite=overwrite, **kwargs)
    elif isinstance(neuron, core.MeshNeuron):
        self.write_meshneuron(neuron, serialized=serialized, raw=raw,
                              overwrite=overwrite, **kwargs)
    elif isinstance(neuron, core.Dotprops):
        self.write_dotprops(neuron, serialized=serialized, raw=raw,
                            overwrite=overwrite, **kwargs)
    else:
        # BUGFIX: the original message was missing the space before
        # "to HDF5 file."
        raise TypeError(f'Unable to write object of type "{type(neuron)}" '
                        'to HDF5 file.')

    # Write annotations (if any were requested)
    if not isinstance(annotations, type(None)):
        _ = self.write_annotations(neuron, annotations,
                                   overwrite=overwrite,
                                   **kwargs)

Write TreeNeuron to file.

Source code in navis/io/hdf_io.py
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
def write_treeneuron(self, neuron,
                     serialized=True, raw=False,
                     overwrite=True, **kwargs):
    """Write TreeNeuron (skeleton) to file.

    Raises ValueError if a skeleton already exists and `overwrite` is False.
    """
    assert isinstance(neuron, core.TreeNeuron)

    # Fetch (or create) the group for this neuron; this also records
    # basic info such as the neuron's name
    neuron_grp = self.get_neuron_group(neuron)

    # Refuse to clobber an existing skeleton unless overwriting is allowed
    if 'skeleton' in neuron_grp:
        if not overwrite:
            raise ValueError('File already contains a skeleton for neuron '
                             f'{neuron.id}')
        del neuron_grp['skeleton']

    sk_grp = neuron_grp.require_group('skeleton')

    # Store a pickled copy of the full navis object
    if serialized:
        sk_grp.create_dataset('.serialized_navis',
                              data=np.void(pickle.dumps(neuron)))

    # Store the raw data: units, soma and the node table
    if raw:
        units = neuron_nm_units(neuron)
        if units:
            sk_grp.attrs['units_nm'] = units

        if neuron.has_soma:
            sk_grp.attrs['soma'] = neuron.soma

        # The `type` column is derived and therefore not stored
        self.write_dataframe(neuron.nodes,
                             group=sk_grp,
                             exclude=['type'],
                             overwrite=overwrite)

Return neuron's units in nanometers.

Returns None if units are dimensionless.

Source code in navis/io/hdf_io.py
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
def neuron_nm_units(neuron):
    """Return neuron's units in nanometers.

    Parameters
    ----------
    neuron :    object
                Anything with (or without) a `.units` attribute.

    Returns
    -------
    magnitude | None
        The units converted to nanometers, or `None` if the neuron has no
        units, the units are not a pint Quantity/Unit, or the units are
        dimensionless.

    """
    # BUGFIX: `getattr(neuron, 'units')` without a default was equivalent
    # to `neuron.units` and raised AttributeError for objects lacking the
    # attribute; supply a default so such neurons simply yield None.
    units = getattr(neuron, 'units', None)

    if units is None:
        return None

    # Only pint quantities/units can be converted
    if not isinstance(units, (pint.Quantity, pint.Unit)):
        return None

    # Casting to nm throws an error if dimensionless
    try:
        return units.to('nm').magnitude
    except pint.DimensionalityError:
        return None