Skip to content

navis

Base class for all neurons.

Source code in navis/core/base.py
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
class BaseNeuron(UnitObject):
    """Base class for all neurons."""

    name: Optional[str]
    id: Union[int, str, uuid.UUID]

    #: Unit space for this neuron. Some functions, like soma detection are
    #: sensitive to units (if provided)
    #: Default = micrometers
    units: Union[pint.Unit, pint.Quantity]

    volume: Union[int, float]

    connectors: Optional[pd.DataFrame]

    #: Attributes used for neuron summary
    SUMMARY_PROPS = ["type", "name", "units"]

    #: Attributes to be used when comparing two neurons.
    EQ_ATTRIBUTES = ["name"]

    #: Temporary attributes that need clearing when neuron data changes
    TEMP_ATTR = ["_memory_usage"]

    #: Core data table(s) used to calculate hash
    CORE_DATA = []

    def __init__(self, **kwargs):
        # Set a random ID -> may be replaced later
        self.id = uuid.uuid4()

        # Make a copy of summary and temp props so that if we register
        # additional properties we don't change this for every single neuron
        self.SUMMARY_PROPS = self.SUMMARY_PROPS.copy()
        self.TEMP_ATTR = self.TEMP_ATTR.copy()

        self._lock = 0
        for k, v in kwargs.items():
            self._register_attr(name=k, value=v)

        # Base neurons have no data
        self._current_md5 = None

    def __getattr__(self, key):
        """Get attribute.

        Implements dynamic `has_<attr>` (bool) and `n_<attr>` (count)
        accessors for any existing attribute (e.g. `has_connectors`,
        `n_connectors`).
        """
        if key.startswith("has_"):
            key = key[len("has_"):]
            if hasattr(self, key):
                data = getattr(self, key)
                if isinstance(data, pd.DataFrame):
                    return not data.empty
                # This is necessary because np.any does not like strings
                elif isinstance(data, str):
                    return data != "NA" and bool(data)
                elif utils.is_iterable(data) and len(data) > 0:
                    return True
                elif data:
                    return True
            return False
        elif key.startswith("n_"):
            key = key[len("n_"):]
            if hasattr(self, key):
                data = getattr(self, key, None)
                if isinstance(data, pd.DataFrame):
                    return data.shape[0]
                elif utils.is_iterable(data):
                    return len(data)
                elif isinstance(data, str) and data == "NA":
                    return "NA"
            return None

        raise AttributeError(f'Attribute "{key}" not found')

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return str(self.summary())

    def __copy__(self):
        return self.copy(deepcopy=False)

    def __deepcopy__(self, memo):
        result = self.copy(deepcopy=True)
        memo[id(self)] = result
        return result

    def __eq__(self, other):
        """Implement neuron comparison."""
        if isinstance(other, BaseNeuron):
            # We will do this sequentially and stop as soon as we find a
            # discrepancy -> this saves tons of time!
            for at in self.EQ_ATTRIBUTES:
                comp = getattr(self, at, None) == getattr(other, at, None)
                if isinstance(comp, np.ndarray) and not all(comp):
                    return False
                elif comp is False:
                    return False
            # If all comparisons have passed, return True
            return True
        else:
            return NotImplemented

    def __hash__(self):
        """Generate a hashable value."""
        # We will simply use the neuron's memory address
        return id(self)

    def __add__(self, other):
        """Implement addition."""
        if isinstance(other, BaseNeuron):
            return core.NeuronList([self, other])
        return NotImplemented

    def __imul__(self, other):
        """Multiplication with assignment (*=)."""
        # `__mul__` (implemented by subclasses) accepts a `copy` switch
        return self.__mul__(other, copy=False)

    def __itruediv__(self, other):
        """Division with assignment (/=)."""
        return self.__truediv__(other, copy=False)

    def __iadd__(self, other):
        """Addition with assignment (+=)."""
        return self.__add__(other, copy=False)

    def __isub__(self, other):
        """Subtraction with assignment (-=)."""
        return self.__sub__(other, copy=False)

    def _repr_html_(self):
        frame = self.summary().to_frame()
        frame.columns = [""]
        # return self._gen_svg_thumbnail() + frame._repr_html_()
        return frame._repr_html_()

    def _gen_svg_thumbnail(self):
        """Generate 2D plot for thumbnail."""
        import matplotlib.pyplot as plt

        # Store some previous states so we can restore them afterwards
        prev_level = logger.getEffectiveLevel()
        prev_pbar = config.pbar_hide
        prev_int = plt.isinteractive()

        plt.ioff()  # turn off interactive mode
        logger.setLevel("WARNING")
        config.pbar_hide = True
        fig = plt.figure(figsize=(2, 2))
        ax = fig.add_subplot(111)
        fig, ax = self.plot2d(connectors=False, ax=ax)
        output = StringIO()
        fig.savefig(output, format="svg")

        if prev_int:
            plt.ion()  # turn on interactive mode
        logger.setLevel(prev_level)
        config.pbar_hide = prev_pbar
        _ = plt.clf()
        return output.getvalue()

    def _clear_temp_attr(self, exclude=()) -> None:
        """Clear temporary attributes.

        Parameters
        ----------
        exclude :   iterable of str
                    Names of temporary attributes to spare.

        """
        if self.is_locked:
            logger.debug(f"Neuron {self.id} at {hex(id(self))} locked.")
            return

        # Must set checksum before recalculating e.g. node types
        # -> otherwise we run into a recursive loop
        self._current_md5 = self.core_md5
        self._stale = False

        for a in (at for at in self.TEMP_ATTR if at not in exclude):
            try:
                delattr(self, a)
                logger.debug(f"Neuron {self.id} {hex(id(self))}: attribute {a} cleared")
            except AttributeError:
                # Attribute may simply not have been computed yet
                logger.debug(
                    f'Neuron {self.id} at {hex(id(self))}: Unable to clear temporary attribute "{a}"'
                )

    def _register_attr(self, name, value, summary=True, temporary=False):
        """Set and register attribute.

        Use this if you want an attribute to be used for the summary or cleared
        when temporary attributes are cleared.
        """
        setattr(self, name, value)

        # If this is an easy to summarize attribute, add to summary
        if summary and name not in self.SUMMARY_PROPS:
            if isinstance(value, (numbers.Number, str, bool, np.bool_, type(None))):
                self.SUMMARY_PROPS.append(name)
            else:
                logger.error(
                    f'Attribute "{name}" of type "{type(value)}" '
                    "can not be added to summary"
                )

        if temporary:
            self.TEMP_ATTR.append(name)

    def _unregister_attr(self, name):
        """Remove and unregister attribute."""
        if name in self.SUMMARY_PROPS:
            self.SUMMARY_PROPS.remove(name)

        if name in self.TEMP_ATTR:
            self.TEMP_ATTR.remove(name)

        delattr(self, name)

    @property
    def core_md5(self) -> str:
        """MD5 checksum of core data.

        Generated from `.CORE_DATA` properties.

        Returns
        -------
        md5 :   string
                MD5 checksum of core data. `None` if no core data.

        """
        # Avoid shadowing the builtin `hash`
        digest = ""
        for prop in self.CORE_DATA:
            cols = None
            # See if we need to parse props into property and columns
            # e.g. "nodes:node_id,parent_id,x,y,z"
            if ":" in prop:
                prop, cols = prop.split(":")
                cols = cols.split(",")

            if hasattr(self, prop):
                data = getattr(self, prop)
                if isinstance(data, pd.DataFrame):
                    if cols:
                        data = data[cols]
                    data = data.values

                # Hashing requires C-contiguous buffers
                data = np.ascontiguousarray(data)

                # xxhash is optional but much faster than md5
                if xxhash:
                    digest += xxhash.xxh128(data).hexdigest()
                else:
                    digest += hashlib.md5(data).hexdigest()

        return digest if digest else None

    @property
    def datatables(self) -> List[str]:
        """Names of all DataFrames attached to this neuron."""
        return [k for k, v in self.__dict__.items() if isinstance(v, pd.DataFrame)]

    @property
    def extents(self) -> np.ndarray:
        """Extents of neuron in x/y/z direction (includes connectors)."""
        if not hasattr(self, "bbox"):
            raise ValueError(
                "Neuron must implement `.bbox` (bounding box) "
                "property to calculate extents."
            )
        bbox = self.bbox
        return bbox[:, 1] - bbox[:, 0]

    @property
    def id(self) -> Any:
        """ID of the neuron.

        Must be hashable. If not set, will assign a random unique identifier.
        Can be indexed by using the `NeuronList.idx[]` locator.
        """
        return getattr(self, "_id", None)

    @id.setter
    def id(self, value):
        try:
            hash(value)
        except Exception:
            raise ValueError("id must be hashable")
        self._id = value

    @property
    def label(self) -> str:
        """Label (e.g. for legends)."""
        # If explicitly set return that label
        if getattr(self, "_label", None):
            return self._label

        # If no label set, produce one from name + id (optional)
        name = getattr(self, "name", None)
        id_ = getattr(self, "id", None)

        # If no name, use type
        if not name:
            name = self.type

        label = name

        # Use ID only if not a (random) UUID
        if not isinstance(id_, uuid.UUID):
            # And if it can be turned into a string
            try:
                id_ = str(id_)
            except Exception:
                id_ = ""

            # Only use ID if it is not the same as name
            if id_ and name != id_:
                label += f" ({id_})"

        return label

    @label.setter
    def label(self, value: str):
        if not isinstance(value, str):
            raise TypeError(f'label must be string, got "{type(value)}"')
        self._label = value

    @property
    def name(self) -> str:
        """Neuron name."""
        return getattr(self, "_name", None)

    @name.setter
    def name(self, value: str):
        self._name = value

    @property
    def connectors(self) -> pd.DataFrame:
        """Connector table. If none, will return `None`."""
        return getattr(self, "_connectors", None)

    @connectors.setter
    def connectors(self, v):
        if v is None:
            self._connectors = None
        else:
            # Validation enforces x/y/z columns and normalizes their names
            self._connectors = utils.validate_table(
                v, required=["x", "y", "z"], rename=True, restrict=False
            )

    @property
    def presynapses(self):
        """Table with presynapses (filtered from connectors table).

        Requires a "type" column in connector table. Will look for type labels
        that include "pre" or that equal 0 or "0".
        """
        if not isinstance(getattr(self, "connectors", None), pd.DataFrame):
            raise ValueError("No connector table found.")
        # Make an educated guess what presynapses are
        types = self.connectors["type"].unique()
        pre = [t for t in types if "pre" in str(t).lower() or t in [0, "0"]]

        if len(pre) == 0:
            logger.debug(f"Unable to find presynapses in types: {types}")
            return self.connectors.iloc[0:0]  # return empty DataFrame
        elif len(pre) > 1:
            raise ValueError(f"Found ambiguous presynapse labels: {pre}")

        return self.connectors[self.connectors["type"] == pre[0]]

    @property
    def postsynapses(self):
        """Table with postsynapses (filtered from connectors table).

        Requires a "type" column in connector table. Will look for type labels
        that include "post" or that equal 1 or "1".
        """
        if not isinstance(getattr(self, "connectors", None), pd.DataFrame):
            raise ValueError("No connector table found.")
        # Make an educated guess what postsynapses are
        types = self.connectors["type"].unique()
        post = [t for t in types if "post" in str(t).lower() or t in [1, "1"]]

        if len(post) == 0:
            logger.debug(f"Unable to find postsynapses in types: {types}")
            return self.connectors.iloc[0:0]  # return empty DataFrame
        elif len(post) > 1:
            raise ValueError(f"Found ambiguous postsynapse labels: {post}")

        return self.connectors[self.connectors["type"] == post[0]]

    @property
    def is_stale(self) -> bool:
        """Test if temporary attributes might be outdated."""
        # If we know we are stale, just return True
        if getattr(self, "_stale", False):
            return True
        else:
            # Only check if we believe we are not stale
            self._stale = self._current_md5 != self.core_md5
        return self._stale

    @property
    def is_locked(self):
        """Test if neuron is locked."""
        return getattr(self, "_lock", 0) > 0

    @property
    def type(self) -> str:
        """Neuron type."""
        return "navis.BaseNeuron"

    @property
    def soma(self):
        """The soma of the neuron (if any)."""
        raise NotImplementedError(f"`soma` property not implemented for {type(self)}.")

    @property
    def bbox(self) -> np.ndarray:
        """Bounding box of neuron."""
        raise NotImplementedError(f"Bounding box not implemented for {type(self)}.")

    def convert_units(
        self, to: Union[pint.Unit, str], inplace: bool = False
    ) -> Optional["BaseNeuron"]:
        """Convert coordinates to different unit.

        Only works if neuron's `.units` is not dimensionless.

        Parameters
        ----------
        to :        pint.Unit | str
                    Units to convert to. If string, must be parsable by pint.
                    See examples.
        inplace :   bool, optional
                    If True will convert in place. If not will return a
                    copy.

        Examples
        --------
        >>> import navis
        >>> n = navis.example_neurons(1)
        >>> n.units
        <Quantity(8, 'nanometer')>
        >>> n.cable_length
        266476.8
        >>> n2 = n.convert_units('um')
        >>> n2.units
        <Quantity(1.0, 'micrometer')>
        >>> n2.cable_length
        2131.8

        """
        if not isinstance(self.units, (pint.Unit, pint.Quantity)):
            raise ValueError("Unable to convert: neuron has no units set.")

        n = self.copy() if not inplace else self

        # Catch pint's UnitStrippedWarning
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Get factor by which we have to multiply to get to target units
            conv = n.units.to(to).magnitude
            # Multiply by conversion factor
            n *= conv

        n._clear_temp_attr(exclude=["classify_nodes"])

        return n

    def copy(self, deepcopy=False) -> "BaseNeuron":
        """Return a copy of the neuron."""
        copy_fn = copy.deepcopy if deepcopy else copy.copy
        # Attributes not to copy (the copy starts out unlocked)
        no_copy = ["_lock"]
        # Generate new empty neuron
        x = self.__class__()
        # Override with this neuron's data
        x.__dict__.update(
            {k: copy_fn(v) for k, v in self.__dict__.items() if k not in no_copy}
        )

        return x

    def summary(self, add_props=None) -> pd.Series:
        """Get a summary of this neuron."""
        # Do not remove the list -> otherwise we might change the original!
        props = list(self.SUMMARY_PROPS)

        # Make sure ID is always at index 2 (i.e. after type and name)
        if "id" in props and props.index("id") != 2:
            props.remove("id")
            props.insert(2, "id")
        # Add .id to summary if not a generic UUID
        elif not isinstance(self.id, uuid.UUID) and "id" not in props:
            props.insert(2, "id")

        if add_props:
            # De-duplicate while keeping the original order
            props, ix = np.unique(np.append(props, add_props), return_inverse=True)
            props = props[ix]

        # This is to catch an annoying "UnitStrippedWarning" with pint
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            s = pd.Series([getattr(self, at, "NA") for at in props], index=props)

        return s

    def plot2d(self, **kwargs):
        """Plot neuron using [`navis.plot2d`][].

        Parameters
        ----------
        **kwargs
                Will be passed to [`navis.plot2d`][].
                See `help(navis.plot2d)` for a list of keywords.

        See Also
        --------
        [`navis.plot2d`][]
                    Function called to generate 2d plot.

        """
        from ..plotting import plot2d

        return plot2d(self, **kwargs)

    def plot3d(self, **kwargs):
        """Plot neuron using [`navis.plot3d`][].

        Parameters
        ----------
        **kwargs
                Keyword arguments. Will be passed to [`navis.plot3d`][].
                See `help(navis.plot3d)` for a list of keywords.

        See Also
        --------
        [`navis.plot3d`][]
                    Function called to generate 3d plot.

        Examples
        --------
        >>> import navis
        >>> nl = navis.example_neurons()
        >>> #Plot with connectors
        >>> viewer = nl.plot3d(connectors=True)

        """
        from ..plotting import plot3d

        return plot3d(core.NeuronList(self, make_copy=False), **kwargs)

    def map_units(
        self,
        units: Union[pint.Unit, str],
        on_error: Union[Literal["raise"], Literal["ignore"]] = "raise",
    ) -> Union[int, float]:
        """Convert units to match neuron space.

        Only works if neuron's `.units` is isometric and not dimensionless.

        Parameters
        ----------
        units :     number | str | pint.Quantity | pint.Units
                    The units to convert to neuron units. Simple numbers are just
                    passed through.
        on_error :  "raise" | "ignore"
                    What to do if an error occurs (e.g. because `neuron` does not
                    have units specified). If "ignore" will simply return `units`
                    unchanged.

        See Also
        --------
        [`navis.core.to_neuron_space`][]
                    The base function for this method.

        Examples
        --------
        >>> import navis
        >>> # Example neurons are in 8x8x8nm voxel space
        >>> n = navis.example_neurons(1)
        >>> n.map_units('1 nanometer')
        0.125
        >>> # Numbers are passed-through
        >>> n.map_units(1)
        1
        >>> # For neuronlists
        >>> nl = navis.example_neurons(3)
        >>> nl.map_units('1 nanometer')
        [0.125, 0.125, 0.125]

        """
        return core.core_utils.to_neuron_space(units, neuron=self, on_error=on_error)

    def memory_usage(self, deep=False, estimate=False):
        """Return estimated memory usage of this neuron.

        Works by going over attached data (numpy arrays and pandas DataFrames
        such as vertices, nodes, etc) and summing up their size in memory.

        Parameters
        ----------
        deep :      bool
                    Passed to pandas DataFrames. If True will also inspect
                    memory footprint of `object` dtypes.
        estimate :  bool
                    If True, we will only estimate the size. This is
                    considerably faster but will slightly underestimate the
                    memory usage.

        Returns
        -------
        int
                    Memory usage in bytes.

        """
        # We will use a very simple caching here
        # We don't check whether neuron is stale because that causes
        # additional overhead and we want this function to be as fast
        # as possible
        if hasattr(self, "_memory_usage"):
            mu = self._memory_usage
            if mu["deep"] == deep and mu["estimate"] == estimate:
                return mu["size"]

        size = 0
        if not estimate:
            for k, v in self.__dict__.items():
                if isinstance(v, np.ndarray):
                    size += v.nbytes
                elif isinstance(v, pd.DataFrame):
                    size += v.memory_usage(deep=deep).sum()
                elif isinstance(v, pd.Series):
                    size += v.memory_usage(deep=deep)
        else:
            for k, v in self.__dict__.items():
                if isinstance(v, np.ndarray):
                    size += v.dtype.itemsize * v.size
                elif isinstance(v, pd.DataFrame):
                    for dt in v.dtypes.values:
                        if isinstance(dt, pd.CategoricalDtype):
                            # NOTE(review): assumes the dtype exposes
                            # `.itemsize` — confirm for CategoricalDtype
                            size += len(dt.categories) * dt.itemsize
                        else:
                            size += dt.itemsize * v.shape[0]
                elif isinstance(v, pd.Series):
                    if isinstance(v.dtype, pd.CategoricalDtype):
                        # BUGFIX: previously referenced the stale `dt` loop
                        # variable from the DataFrame branch (NameError or
                        # wrong value); use this Series' own dtype instead
                        size += len(v.dtype.categories) * v.dtype.itemsize
                    else:
                        size += v.dtype.itemsize * v.shape[0]

        # Cache result together with the parameters it was computed for
        self._memory_usage = {"deep": deep, "estimate": estimate, "size": size}

        return size

Bounding box of neuron.

Connector table. If none, will return None.

MD5 checksum of core data.

Generated from .CORE_DATA properties.

RETURNS DESCRIPTION
md5

MD5 checksum of core data. None if no core data.

TYPE: string

Names of all DataFrames attached to this neuron.

Extents of neuron in x/y/z direction (includes connectors).

ID of the neuron.

Must be hashable. If not set, will assign a random unique identifier. Can be indexed by using the NeuronList.idx[] locator.

Test if neuron is locked.

Test if temporary attributes might be outdated.

Label (e.g. for legends).

Neuron name.

Table with postsynapses (filtered from connectors table).

Requires a "type" column in connector table. Will look for type labels that include "post" or that equal 1 or "1".

Table with presynapses (filtered from connectors table).

Requires a "type" column in connector table. Will look for type labels that include "pre" or that equal 0 or "0".

The soma of the neuron (if any).

Neuron type.

Convert coordinates to different unit.

Only works if neuron's .units is not dimensionless.

PARAMETER DESCRIPTION
to
    Units to convert to. If string, must be parsable by pint.
    See examples.

TYPE: pint.Unit | str

inplace
    If True will convert in place. If not will return a
    copy.

TYPE: bool DEFAULT: False

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> n.units
<Quantity(8, 'nanometer')>
>>> n.cable_length
266476.8
>>> n2 = n.convert_units('um')
>>> n2.units
<Quantity(1.0, 'micrometer')>
>>> n2.cable_length
2131.8
Source code in navis/core/base.py
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
# NOTE(review): this is the doc-site's rendered copy of
# `BaseNeuron.convert_units` shown earlier on this page — kept verbatim.
def convert_units(
    self, to: Union[pint.Unit, str], inplace: bool = False
) -> Optional["BaseNeuron"]:
    """Convert coordinates to different unit.

    Only works if neuron's `.units` is not dimensionless.

    Parameters
    ----------
    to :        pint.Unit | str
                Units to convert to. If string, must be parsable by pint.
                See examples.
    inplace :   bool, optional
                If True will convert in place. If not will return a
                copy.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> n.units
    <Quantity(8, 'nanometer')>
    >>> n.cable_length
    266476.8
    >>> n2 = n.convert_units('um')
    >>> n2.units
    <Quantity(1.0, 'micrometer')>
    >>> n2.cable_length
    2131.8

    """
    if not isinstance(self.units, (pint.Unit, pint.Quantity)):
        raise ValueError("Unable to convert: neuron has no units set.")

    n = self.copy() if not inplace else self

    # Catch pint's UnitStrippedWarning
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Get factor by which we have to multiply to get to target units
        conv = n.units.to(to).magnitude
        # Multiply by conversion factor
        n *= conv

    n._clear_temp_attr(exclude=["classify_nodes"])

    return n

Return a copy of the neuron.

Source code in navis/core/base.py
641
642
643
644
645
646
647
648
649
650
651
652
653
# NOTE(review): this is the doc-site's rendered copy of `BaseNeuron.copy`
# shown earlier on this page — kept verbatim.
def copy(self, deepcopy=False) -> "BaseNeuron":
    """Return a copy of the neuron."""
    copy_fn = copy.deepcopy if deepcopy else copy.copy
    # Attributes not to copy
    no_copy = ["_lock"]
    # Generate new empty neuron
    x = self.__class__()
    # Override with this neuron's data
    x.__dict__.update(
        {k: copy_fn(v) for k, v in self.__dict__.items() if k not in no_copy}
    )

    return x

Convert units to match neuron space.

Only works if neuron's .units is isometric and not dimensionless.

PARAMETER DESCRIPTION
units
    The units to convert to neuron units. Simple numbers are just
    passed through.

TYPE: number | str | pint.Quantity | pint.Units

on_error
    What to do if an error occurs (e.g. because `neuron` does not
    have units specified). If "ignore" will simply return `units`
    unchanged.

TYPE: "raise" | "ignore" DEFAULT: 'raise'

See Also

`navis.core.to_neuron_space` — the base function for this method.

Examples:

>>> import navis
>>> # Example neurons are in 8x8x8nm voxel space
>>> n = navis.example_neurons(1)
>>> n.map_units('1 nanometer')
0.125
>>> # Numbers are passed-through
>>> n.map_units(1)
1
>>> # For neuronlists
>>> nl = navis.example_neurons(3)
>>> nl.map_units('1 nanometer')
[0.125, 0.125, 0.125]
Source code in navis/core/base.py
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
# NOTE(review): this is the doc-site's rendered copy of
# `BaseNeuron.map_units` shown earlier on this page — kept verbatim.
def map_units(
    self,
    units: Union[pint.Unit, str],
    on_error: Union[Literal["raise"], Literal["ignore"]] = "raise",
) -> Union[int, float]:
    """Convert units to match neuron space.

    Only works if neuron's `.units` is isometric and not dimensionless.

    Parameters
    ----------
    units :     number | str | pint.Quantity | pint.Units
                The units to convert to neuron units. Simple numbers are just
                passed through.
    on_error :  "raise" | "ignore"
                What to do if an error occurs (e.g. because `neuron` does not
                have units specified). If "ignore" will simply return `units`
                unchanged.

    See Also
    --------
    [`navis.core.to_neuron_space`][]
                The base function for this method.

    Examples
    --------
    >>> import navis
    >>> # Example neurons are in 8x8x8nm voxel space
    >>> n = navis.example_neurons(1)
    >>> n.map_units('1 nanometer')
    0.125
    >>> # Numbers are passed-through
    >>> n.map_units(1)
    1
    >>> # For neuronlists
    >>> nl = navis.example_neurons(3)
    >>> nl.map_units('1 nanometer')
    [0.125, 0.125, 0.125]

    """
    return core.core_utils.to_neuron_space(units, neuron=self, on_error=on_error)

Return estimated memory usage of this neuron.

Works by going over attached data (numpy arrays and pandas DataFrames such as vertices, nodes, etc) and summing up their size in memory.

PARAMETER DESCRIPTION
deep
    Passed to pandas DataFrames. If True will also inspect
    memory footprint of `object` dtypes.

TYPE: bool DEFAULT: False

estimate
    If True, we will only estimate the size. This is
    considerably faster but will slightly underestimate the
    memory usage.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
int

Memory usage in bytes.

Source code in navis/core/base.py
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
def memory_usage(self, deep=False, estimate=False):
    """Return estimated memory usage of this neuron.

    Works by going over attached data (numpy arrays and pandas DataFrames
    such as vertices, nodes, etc) and summing up their size in memory.

    Parameters
    ----------
    deep :      bool
                Passed to pandas DataFrames. If True will also inspect
                memory footprint of `object` dtypes.
    estimate :  bool
                If True, we will only estimate the size. This is
                considerably faster but will slightly underestimate the
                memory usage.

    Returns
    -------
    int
                Memory usage in bytes.

    """
    # Simple caching: reuse the last result if it was computed with the
    # same parameters. We deliberately don't check whether the neuron is
    # stale because that causes additional overhead and we want this
    # function to be as fast as possible.
    if hasattr(self, "_memory_usage"):
        mu = self._memory_usage
        if mu["deep"] == deep and mu["estimate"] == estimate:
            return mu["size"]

    size = 0
    if not estimate:
        # Exact sizes as reported by numpy/pandas
        for v in self.__dict__.values():
            if isinstance(v, np.ndarray):
                size += v.nbytes
            elif isinstance(v, pd.DataFrame):
                size += v.memory_usage(deep=deep).sum()
            elif isinstance(v, pd.Series):
                size += v.memory_usage(deep=deep)
    else:
        # Cheap estimate via dtype itemsize * length; slightly
        # underestimates (ignores index, object payloads, etc.)
        for v in self.__dict__.values():
            if isinstance(v, np.ndarray):
                size += v.dtype.itemsize * v.size
            elif isinstance(v, pd.DataFrame):
                for dt in v.dtypes.values:
                    if isinstance(dt, pd.CategoricalDtype):
                        size += len(dt.categories) * dt.itemsize
                    else:
                        size += dt.itemsize * v.shape[0]
            elif isinstance(v, pd.Series):
                if isinstance(v.dtype, pd.CategoricalDtype):
                    # BUGFIX: previously referenced `dt` (loop variable of
                    # the DataFrame branch) which is undefined here unless a
                    # DataFrame happened to be processed first - and stale
                    # even then. Use this Series' own dtype instead.
                    size += len(v.dtype.categories) * v.dtype.itemsize
                else:
                    size += v.dtype.itemsize * v.shape[0]

    self._memory_usage = {"deep": deep, "estimate": estimate, "size": size}

    return size

Plot neuron using navis.plot2d.

PARAMETER DESCRIPTION
**kwargs
Will be passed to [`navis.plot2d`][].
See `help(navis.plot2d)` for a list of keywords.

DEFAULT: {}

See Also

navis.plot2d Function called to generate 2d plot.

Source code in navis/core/base.py
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
def plot2d(self, **kwargs):
    """Generate a 2d plot of this neuron via [`navis.plot2d`][].

    Parameters
    ----------
    **kwargs
            Passed through to [`navis.plot2d`][]; see
            `help(navis.plot2d)` for the available keywords.

    See Also
    --------
    [`navis.plot2d`][]
                Function called to generate 2d plot.

    """
    # Local import - the plotting module is only needed when actually plotting
    from ..plotting import plot2d as _plot2d

    return _plot2d(self, **kwargs)

Plot neuron using navis.plot3d.

PARAMETER DESCRIPTION
**kwargs
Keyword arguments. Will be passed to [`navis.plot3d`][].
See `help(navis.plot3d)` for a list of keywords.

DEFAULT: {}

See Also

navis.plot3d Function called to generate 3d plot.

Examples:

>>> import navis
>>> nl = navis.example_neurons()
>>> #Plot with connectors
>>> viewer = nl.plot3d(connectors=True)
Source code in navis/core/base.py
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
def plot3d(self, **kwargs):
    """Generate a 3d plot of this neuron via [`navis.plot3d`][].

    Parameters
    ----------
    **kwargs
            Keyword arguments passed through to [`navis.plot3d`][];
            see `help(navis.plot3d)` for the available keywords.

    See Also
    --------
    [`navis.plot3d`][]
                Function called to generate 3d plot.

    Examples
    --------
    >>> import navis
    >>> nl = navis.example_neurons()
    >>> #Plot with connectors
    >>> viewer = nl.plot3d(connectors=True)

    """
    # Local import - the plotting module is only needed when actually plotting
    from ..plotting import plot3d as _plot3d

    # Wrap in a single-neuron NeuronList without copying the underlying data
    wrapped = core.NeuronList(self, make_copy=False)
    return _plot3d(wrapped, **kwargs)

Get a summary of this neuron.

Source code in navis/core/base.py
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
def summary(self, add_props=None) -> pd.Series:
    """Get a summary of this neuron.

    Parameters
    ----------
    add_props : iterable of str, optional
                Extra attribute names to include in the summary.

    Returns
    -------
    pandas.Series
                Attribute name -> value; attributes that don't exist on
                this neuron show up as "NA".

    """
    # Do not remove the list -> otherwise we might change the original!
    props = list(self.SUMMARY_PROPS)

    # Make sure ID is always in second place
    # (i.e. index 2, after `type` and `name` in SUMMARY_PROPS)
    if "id" in props and props.index("id") != 2:
        props.remove("id")
        props.insert(2, "id")
    # Add .id to summary if not a generic UUID
    elif not isinstance(self.id, uuid.UUID) and "id" not in props:
        props.insert(2, "id")

    if add_props:
        # np.unique sorts, but `return_inverse` lets us map back to the
        # original (concatenated) order.
        # NOTE(review): because the inverse reconstructs the full input,
        # entries of `add_props` that duplicate existing props are kept,
        # not deduplicated - confirm this is intended.
        props, ix = np.unique(np.append(props, add_props), return_inverse=True)
        props = props[ix]

    # This is to catch an annoying "UnitStrippedWarning" with pint
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        s = pd.Series([getattr(self, at, "NA") for at in props], index=props)

    return s

Neuron represented as points + local vectors.

Dotprops consist of points with x/y/z coordinates, a tangent vector and an alpha value describing the immediate neighbourhood (see also references).

Typically constructed using navis.make_dotprops.

References

Masse N.Y., Cachero S., Ostrovsky A., and Jefferis G.S.X.E. (2012). A mutual information approach to automate identification of neuronal clusters in Drosophila brain images. Frontiers in Neuroinformatics 6 (00021). doi: 10.3389/fninf.2012.00021

PARAMETER DESCRIPTION
points
        (N, 3) array of x/y/z coordinates.

TYPE: numpy array

k
        Number of nearest neighbors for tangent vector calculation.
        This can be `None` or `0` but then vectors must be
        provided on initialization and can subsequently not be
        re-calculated. Typical values here are `k=20` for dense
        (e.g. from light level data) and `k=5` for sparse
        (e.g. from skeletons) point clouds.

TYPE: int

vect
        (N, 3) array of vectors. If not provided will
        recalculate both `vect` and `alpha` using `k`.

TYPE: numpy array DEFAULT: None

alpha
        (N, ) array of alpha values. If not provided will
        recalculate both `alpha` and `vect` using `k`.

TYPE: numpy array DEFAULT: None

units
        Units for coordinates. Defaults to `None` (dimensionless).
        Strings must be parsable by pint: e.g. "nm", "um",
        "micrometer" or "8 nanometers".

TYPE: str | pint.Units | pint.Quantity DEFAULT: None

**metadata
        Any additional data to attach to neuron.

DEFAULT: {}

Source code in navis/core/dotprop.py
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
class Dotprops(BaseNeuron):
    """Neuron represented as points + local vectors.

    Dotprops consist of points with x/y/z coordinates, a tangent vector and an
    alpha value describing the immediate neighbourhood (see also references).

    Typically constructed using [`navis.make_dotprops`][].

    References
    ----------
    Masse N.Y., Cachero S., Ostrovsky A., and Jefferis G.S.X.E. (2012). A mutual
    information approach to automate identification of neuronal clusters in
    Drosophila brain images. Frontiers in Neuroinformatics 6 (00021).
    doi: 10.3389/fninf.2012.00021

    Parameters
    ----------
    points :        numpy array
                    (N, 3) array of x/y/z coordinates.
    k :             int, optional
                    Number of nearest neighbors for tangent vector calculation.
                    This can be `None` or `0` but then vectors must be
                    provided on initialization and can subsequently not be
                    re-calculated. Typical values here are `k=20` for dense
                    (e.g. from light level data) and `k=5` for sparse
                    (e.g. from skeletons) point clouds.
    vect :          numpy array, optional
                    (N, 3) array of vectors. If not provided will
                    recalculate both `vect` and `alpha` using `k`.
    alpha :         numpy array, optional
                    (N, ) array of alpha values. If not provided will
                    recalculate both `alpha` and `vect` using `k`.
    units :         str | pint.Units | pint.Quantity
                    Units for coordinates. Defaults to `None` (dimensionless).
                    Strings must be parsable by pint: e.g. "nm", "um",
                    "micrometer" or "8 nanometers".
    **metadata
                    Any additional data to attach to neuron.

    """

    connectors: Optional[pd.DataFrame]

    points: np.ndarray
    alpha: np.ndarray
    vect:  np.ndarray
    k: Optional[int]

    soma: Optional[Union[list, np.ndarray]]

    #: Attributes used for neuron summary
    SUMMARY_PROPS = ['type', 'name', 'k', 'units', 'n_points']

    #: Attributes to be used when comparing two neurons.
    EQ_ATTRIBUTES = ['name', 'n_points', 'k']

    #: Temporary attributes that need clearing when neuron data changes
    TEMP_ATTR = ['_memory_usage']

    #: Core data table(s) used to calculate hash
    _CORE_DATA = ['points', 'vect']

    def __init__(self,
                 points: np.ndarray,
                 k: int,
                 vect: Optional[np.ndarray] = None,
                 alpha: Optional[np.ndarray] = None,
                 units: Union[pint.Unit, str] = None,
                 **metadata
                 ):
        """Initialize Dotprops Neuron."""
        super().__init__()

        self.k = k
        self.points = points
        self.alpha = alpha
        self.vect = vect

        self.soma = None

        # Use loop names that do not shadow the `k` parameter
        for key, value in metadata.items():
            try:
                setattr(self, key, value)
            except AttributeError:
                raise AttributeError(f"Unable to set neuron's `{key}` attribute.")

        self.units = units

    def __truediv__(self, other, copy=True):
        """Implement division for coordinates."""
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            # If a number, consider this an offset for coordinates
            n = self.copy() if copy else self
            _ = np.divide(n.points, other, out=n.points, casting='unsafe')
            if n.has_connectors:
                n.connectors.loc[:, ['x', 'y', 'z']] /= other

            # Force recomputing of KDTree
            if hasattr(n, '_tree'):
                delattr(n, '_tree')

            # Convert units
            # Note: .to_compact() throws a RuntimeWarning and returns unchanged
            # values  when `units` is a iterable
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                n.units = (n.units * other).to_compact()

            return n
        return NotImplemented

    def __mul__(self, other, copy=True):
        """Implement multiplication for coordinates."""
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            # If a number, consider this an offset for coordinates
            n = self.copy() if copy else self
            _ = np.multiply(n.points, other, out=n.points, casting='unsafe')
            if n.has_connectors:
                n.connectors.loc[:, ['x', 'y', 'z']] *= other

            # Force recomputing of KDTree
            if hasattr(n, '_tree'):
                delattr(n, '_tree')

            # Convert units
            # Note: .to_compact() throws a RuntimeWarning and returns unchanged
            # values  when `units` is a iterable
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                n.units = (n.units / other).to_compact()

            return n
        return NotImplemented

    def __add__(self, other, copy=True):
        """Implement addition for coordinates."""
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            # If a number, consider this an offset for coordinates
            n = self.copy() if copy else self
            _ = np.add(n.points, other, out=n.points, casting='unsafe')
            if n.has_connectors:
                n.connectors.loc[:, ['x', 'y', 'z']] += other

            # Force recomputing of KDTree
            if hasattr(n, '_tree'):
                delattr(n, '_tree')

            return n
        # If another neuron, return a list of neurons
        elif isinstance(other, BaseNeuron):
            return core.NeuronList([self, other])
        return NotImplemented

    def __sub__(self, other, copy=True):
        """Implement subtraction for coordinates."""
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            # If a number, consider this an offset for coordinates
            n = self.copy() if copy else self
            _ = np.subtract(n.points, other, out=n.points, casting='unsafe')
            if n.has_connectors:
                n.connectors.loc[:, ['x', 'y', 'z']] -= other

            # Force recomputing of KDTree
            if hasattr(n, '_tree'):
                delattr(n, '_tree')

            return n
        return NotImplemented

    def __getstate__(self):
        """Get state (used e.g. for pickling)."""
        state = {k: v for k, v in self.__dict__.items() if not callable(v)}

        # The KDTree from pykdtree does not like being pickled
        # We will have to remove it which will force it to be regenerated
        # after unpickling
        if '_tree' in state:
            if 'pykdtree' in str(type(state['_tree'])):
                _ = state.pop('_tree')

        return state

    def __len__(self):
        return len(self.points)

    @property
    def alpha(self):
        """Alpha value for tangent vectors (optional)."""
        if isinstance(self._alpha, type(None)):
            if isinstance(self.k, type(None)) or (self.k <= 0):
                raise ValueError('Unable to calculate `alpha` for Dotprops not '
                                 'generated using k-nearest-neighbors.')

            self.recalculate_tangents(self.k, inplace=True)
        return self._alpha

    @alpha.setter
    def alpha(self, value):
        if not isinstance(value, type(None)):
            value = np.asarray(value)
            if value.ndim != 1:
                raise ValueError(f'alpha must be (N, ) array, got {value.shape}')
        self._alpha = value

    @property
    def bbox(self) -> np.ndarray:
        """Bounding box (includes connectors)."""
        mn = np.min(self.points, axis=0)
        mx = np.max(self.points, axis=0)

        if self.has_connectors:
            cn_mn = np.min(self.connectors[['x', 'y', 'z']].values, axis=0)
            cn_mx = np.max(self.connectors[['x', 'y', 'z']].values, axis=0)

            mn = np.min(np.vstack((mn, cn_mn)), axis=0)
            mx = np.max(np.vstack((mx, cn_mx)), axis=0)

        return np.vstack((mn, mx)).T

    @property
    def datatables(self) -> List[str]:
        """Names of all DataFrames (and arrays) attached to this neuron."""
        # BUGFIX: the classes to check must be passed to isinstance as a
        # tuple - the previous 3-argument call raised a TypeError
        return [k for k, v in self.__dict__.items() if isinstance(v, (pd.DataFrame, np.ndarray))]

    @property
    def kdtree(self):
        """KDTree for points."""
        if not getattr(self, '_tree', None):
            self._tree = KDTree(self.points)
        return self._tree

    @property
    def points(self):
        """Center of tangent vectors."""
        return self._points

    @points.setter
    def points(self, value):
        if isinstance(value, type(None)):
            value = np.zeros((0, 3))
        value = np.asarray(value)
        if value.ndim != 2 or value.shape[1] != 3:
            raise ValueError(f'points must be (N, 3) array, got {value.shape}')
        self._points = value
        # Also reset KDtree
        self._tree = None

    @property
    def vect(self):
        """Tangent vectors."""
        if isinstance(self._vect, type(None)):
            self.recalculate_tangents(self.k, inplace=True)
        return self._vect

    @vect.setter
    def vect(self, value):
        if not isinstance(value, type(None)):
            value = np.asarray(value)
            if value.ndim != 2 or value.shape[1] != 3:
                raise ValueError(f'vectors must be (N, 3) array, got {value.shape}')
        self._vect = value

    @property
    def sampling_resolution(self):
        """Mean distance between points."""
        # k=2 because the closest hit is always the point itself
        dist, _ = self.kdtree.query(self.points, k=2)
        return np.mean(dist[:, 1])

    @property
    def soma(self) -> Optional[int]:
        """Index of soma point.

        `None` if no soma. You can assign either a function that accepts a
        Dotprops as input or a fixed value. Default is None.
        """
        if callable(self._soma):
            soma = self._soma.__call__()  # type: ignore  # say int not callable
        else:
            soma = self._soma

        # Sanity check to make sure that the soma node actually exists
        if isinstance(soma, type(None)):
            # Return immediately without expensive checks
            return soma
        elif utils.is_iterable(soma):
            # NOTE(review): `any(soma)` treats e.g. [0] as "no soma" even
            # though 0 is a valid point index - confirm intended
            if not any(soma):
                soma = None
            # BUGFIX: valid indices run 0 .. n_points - 1, so >= (not >)
            # marks the out-of-bounds upper end
            elif any(np.array(soma) < 0) or any(np.array(soma) >= self.points.shape[0]):
                logger.warning(f'Soma(s) {soma} not found in points.')
                soma = None
        else:
            # BUGFIX: the condition was inverted - it used to warn and
            # discard the soma precisely when the index WAS in bounds
            if soma < 0 or soma >= self.points.shape[0]:
                logger.warning(f'Soma {soma} not found in node table.')
                soma = None

        return soma

    @soma.setter
    def soma(self, value: Union[Callable, int, None]) -> None:
        """Set soma."""
        if hasattr(value, '__call__'):
            # Bind the function to this neuron so it is called with `self`
            self._soma = types.MethodType(value, self)
        elif isinstance(value, type(None)):
            self._soma = None
        elif isinstance(value, bool) and not value:
            self._soma = None
        else:
            # BUGFIX: index 0 is a valid point index and must be accepted
            if 0 <= value < self.points.shape[0]:
                self._soma = value
            else:
                raise ValueError('Soma must be function, None or a valid node index.')

    @property
    def type(self) -> str:
        """Neuron type."""
        return 'navis.Dotprops'

    def dist_dots(self,
                  other: 'Dotprops',
                  alpha: bool = False,
                  distance_upper_bound: Optional[float] = None,
                  **kwargs) -> Union[
                      Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, np.ndarray]
                    ]:
        """Query this Dotprops against another.

        This function is mainly for `navis.nblast`.

        Parameters
        ----------
        other :                 Dotprops
        alpha :                 bool
                                If True, will also return the product of the
                                alpha values of matched points.
        distance_upper_bound :  non-negative float, optional
                                If provided, we will stop the nearest neighbor
                                search at this distance which can vastly speed
                                up the query. For points with no hit within this
                                distance, `dist` will be set to
                                `distance_upper_bound`, and `dotprods` and
                                `alpha_prod` will be set to 0.
        kwargs
                                Keyword arguments are passed to the KDTree's
                                `query()` method. Note that we are using
                                `pykdtree.kdtree.KDTree` if available and fall
                                back to `scipy.spatial.cKDTree` if pykdtree is
                                not installed.

        Returns
        -------
        dist :          np.ndarray
                        For each point in `self`, the distance to the closest
                        point in `other`.
        dotprods :      np.ndarray
                        Dotproduct of each pair of closest points between
                        `self` and `other`.
        alpha_prod :    np.ndarray
                        Product of the alpha values of each pair of closest
                        points between `self` and `other`. Only returned if
                        `alpha=True`.

        """
        if not isinstance(other, Dotprops):
            raise TypeError(f'Expected Dotprops, got "{type(other)}"')

        # If we are using pykdtree we need to make sure that self.points is
        # of the same dtype as other.points - not a problem with scipy but
        # the overhead is typically only a few micro seconds anyway
        points = self.points.astype(other.points.dtype, copy=False)

        # Scipy's KDTree does not like the distance to be None
        diub = distance_upper_bound if distance_upper_bound else np.inf
        fast_dists, fast_idxs = other.kdtree.query(points,
                                                   distance_upper_bound=diub,
                                                   **kwargs)

        # If upper distance we have to worry about infinite distances
        if distance_upper_bound:
            no_nn = fast_dists == np.inf
            fast_dists[no_nn] = distance_upper_bound

            # Temporarily give those nodes a match
            fast_idxs[no_nn] = 0

        fast_dotprods = np.abs((self.vect * other.vect[fast_idxs]).sum(axis=1))

        if distance_upper_bound:
            fast_dotprods[no_nn] = 0

        if not alpha:
            return fast_dists, fast_dotprods

        fast_alpha = self.alpha * other.alpha[fast_idxs]

        if distance_upper_bound:
            fast_alpha[no_nn] = 0

        return fast_dists, fast_dotprods, fast_alpha

    def downsample(self, factor=5, inplace=False, **kwargs):
        """Downsample the neuron by given factor.

        Parameters
        ----------
        factor :                int, optional
                                Factor by which to downsample the neurons.
                                Default = 5.
        inplace :               bool, optional
                                If True, operation will be performed on
                                itself. If False, operation is performed on
                                copy which is then returned.
        **kwargs
                                Additional arguments passed to
                                [`navis.downsample_neuron`][].

        See Also
        --------
        [`navis.downsample_neuron`][]
            Base function. See for details and examples.

        """
        if inplace:
            x = self
        else:
            x = self.copy()

        sampling.downsample_neuron(x, factor, inplace=True, **kwargs)

        if not inplace:
            return x
        return None

    def copy(self) -> 'Dotprops':
        """Return a copy of the dotprops.

        Returns
        -------
        Dotprops

        """
        # Don't copy the KDtree - when using pykdtree, copy.copy throws an
        # error and the construction is super fast anyway
        no_copy = ['_lock', '_tree']
        # Generate new empty neuron - note we pass vect and alpha to
        # prevent calculation on initialization
        x = self.__class__(points=np.zeros((0, 3)), k=1,
                           vect=np.zeros((0, 3)), alpha=np.zeros(0))
        # Populate with this neuron's data
        x.__dict__.update({k: copy.copy(v) for k, v in self.__dict__.items() if k not in no_copy})

        return x

    def drop_fluff(self, epsilon, keep_size: int = None, n_largest: int = None, inplace=False):
        """Remove fluff from neuron.

        By default, this function will remove all but the largest connected
        component from the neuron. You can change that behavior using the
        `keep_size` and `n_largest` parameters.

        Parameters
        ----------
        epsilon :   float
                    Distance at which to consider two points to be connected.
                    If `None`, will use the default value of 5 times the average
                    node distance (`self.sampling_resolution`).
        keep_size : float, optional
                    Use this to set a size (in number of points) for small
                    bits to keep. If `keep_size` < 1 it will be intepreted as
                    fraction of total nodes/vertices/points.
        n_largest : int, optional
                    If set, will keep the `n_largest` connected components. Note:
                    if provided, `keep_size` will be applied first!
        inplace :   bool, optional
                    If False, will return a copy and leave the original data
                    unmodified.

        Returns
        -------
        Dotprops
                    Only if `inplace=False`.

        See Also
        --------
        [`navis.drop_fluff`][]
            Base function. See for details and examples.

        """
        x = morpho.drop_fluff(self, epsilon=epsilon, keep_size=keep_size, n_largest=n_largest, inplace=inplace)

        if not inplace:
            return x

    def recalculate_tangents(self, k: int, inplace=False):
        """Recalculate tangent vectors and alpha with a new `k`.

        Parameters
        ----------
        k :         int
                    Number of nearest neighbours to use for tangent vector
                    calculation.
        inplace :   bool
                    If False, will return a copy and leave the original data
                    unmodified.

        Returns
        -------
        Dotprops
                    Only if `inplace=False`.

        """
        if not inplace:
            x = self.copy()
        else:
            x = self

        if isinstance(k, type(None)) or k < 1:
            raise ValueError(f'`k` must be integer >= 1, got "{k}"')

        # Checks and balances
        n_points = x.points.shape[0]
        if n_points < k:
            raise ValueError(f"Too few points ({n_points}) to calculate {k} "
                             "nearest-neighbors")

        # Create the KDTree and get the k-nearest neighbors for each point
        # (when not inplace, `x` is a copy with identical points, so
        # querying self's tree is equivalent)
        dist, ix = self.kdtree.query(x.points, k=k)

        # Get points: array of (N, k, 3)
        pt = x.points[ix]

        # Generate centers for each cloud of k nearest neighbors
        centers = np.mean(pt, axis=1)

        # Generate vector from center
        cpt = pt - centers.reshape((pt.shape[0], 1, 3))

        # Get inertia (N, 3, 3)
        inertia = cpt.transpose((0, 2, 1)) @ cpt

        # Extract vector and alpha: the first right singular vector is the
        # direction of largest spread; alpha measures how dominant it is
        u, s, vh = np.linalg.svd(inertia)
        x.vect = vh[:, 0, :]
        x.alpha = (s[:, 0] - s[:, 1]) / np.sum(s, axis=1)

        # Keep track of k
        x.k = k

        if not inplace:
            return x

    def snap(self, locs, to='points'):
        """Snap xyz location(s) to closest point or synapse.

        Parameters
        ----------
        locs :      (N, 3) array | (3, ) array
                    Either single or multiple XYZ locations.
        to :        "points" | "connectors"
                    Whether to snap to points or connectors.

        Returns
        -------
        ix :        int | list of int
                    Index/Indices of the closest point/connector.
        dist :      float | list of float
                    Distance(s) to the closest point/connector.

        Examples
        --------
        >>> import navis
        >>> n = navis.example_neurons(1)
        >>> dp = navis.make_dotprops(n, k=5)
        >>> ix, dist = dp.snap([0, 0, 0])
        >>> ix
        1123

        """
        locs = np.asarray(locs).astype(np.float64)

        is_single = (locs.ndim == 1 and len(locs) == 3)
        is_multi = (locs.ndim == 2 and locs.shape[1] == 3)
        if not is_single and not is_multi:
            raise ValueError('Expected a single (x, y, z) location or a '
                             '(N, 3) array of multiple locations')

        if to not in ['points', 'connectors']:
            raise ValueError('`to` must be "points" or "connectors", '
                             f'got {to}')

        # Generate tree
        tree = graph.neuron2KDTree(self, data=to)

        # Find the closest node
        dist, ix = tree.query(locs)

        return ix, dist

    def to_skeleton(self,
                    scale_vec: Union[float, Literal['auto']] = 'auto'
                    ) -> core.TreeNeuron:
        """Turn Dotprop into a TreeNeuron.

        This does *not* skeletonize the neuron but rather generates a line
        segment for each point based on the tangent vector. This is mainly
        used under the hood for plotting. Also note that only minimal meta
        data is carried over.

        For proper skeletonization see [`navis.skeletonize`][].

        Parameters
        ----------
        scale_vec :     "auto" | float
                        Factor by which to scale each tangent vector when
                        generating the line segments. If "auto" (default for
                        plotting) will use the sampling resolution (mean
                        distance between points) to determine a suitable
                        value.

        Returns
        -------
        TreeNeuron

        """
        if not isinstance(scale_vec, numbers.Number) and scale_vec != 'auto':
            raise ValueError('`scale_vect` must be "auto" or a number, '
                             f'got {scale_vec}')

        if scale_vec == 'auto':
            scale_vec = self.sampling_resolution * .8

        # Prepare segments - this is based on nat:::plot3d.dotprops
        halfvect = self.vect / 2 * scale_vec
        starts = self.points - halfvect
        ends = self.points + halfvect

        # Interweave starts and ends
        segs = np.zeros((starts.shape[0] * 2, 3))
        segs[::2] = starts
        segs[1::2] = ends

        # Generate node table: each segment is a 2-node tree (start is the
        # root, end points back at the start)
        nodes = pd.DataFrame(segs, columns=['x', 'y', 'z'])
        nodes['node_id'] = nodes.index
        nodes['parent_id'] = -1
        nodes.loc[1::2, 'parent_id'] = nodes.index.values[::2]

        # Produce a minimal TreeNeuron
        tn = core.TreeNeuron(nodes, units=self.units, id=self.id)

        # Carry over the label
        if getattr(self, '_label', None):
            tn._label = self._label

        # Add some other relevant attributes directly
        if self.has_connectors:
            tn._connectors = self._connectors
        tn._soma = self._soma

        return tn

Alpha value for tangent vectors (optional).

Bounding box (includes connectors).

Names of all DataFrames attached to this neuron.

KDTree for points.

Center of tangent vectors.

Mean distance between points.

Index of soma point.

None if no soma. You can assign either a function that accepts a Dotprops as input or a fixed value. Default is None.

Neuron type.

Tangent vectors.

Initialize Dotprops Neuron.

Source code in navis/core/dotprop.py
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
def __init__(self,
             points: np.ndarray,
             k: int,
             vect: Optional[np.ndarray] = None,
             alpha: Optional[np.ndarray] = None,
             units: Union[pint.Unit, str] = None,
             **metadata
             ):
    """Initialize Dotprops Neuron.

    Parameters
    ----------
    points :    np.ndarray
                Point cloud.
    k :         int
                Number of nearest neighbours used to generate the
                tangent vectors.
    vect :      np.ndarray, optional
                Tangent vectors. If not provided they are (re)calculated
                on-demand (see e.g. `recalculate_tangents`).
    alpha :     np.ndarray, optional
                Alpha values. If not provided they are (re)calculated
                on-demand.
    units :     pint.Unit | str, optional
                Units for coordinates.
    **metadata
                Any additional attributes to attach to the neuron.

    """
    super().__init__()

    self.k = k
    self.points = points
    self.alpha = alpha
    self.vect = vect

    self.soma = None

    # NB: do not name the loop variable `k` - that would shadow the
    # `k` parameter assigned to `self.k` above
    for key, value in metadata.items():
        try:
            setattr(self, key, value)
        except AttributeError as err:
            # Chain the original error so the root cause isn't lost
            raise AttributeError(
                f"Unable to set neuron's `{key}` attribute.") from err

    self.units = units

Return a copy of the dotprops.

RETURNS DESCRIPTION
Dotprops
Source code in navis/core/dotprop.py
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
def copy(self) -> 'Dotprops':
    """Return a copy of the dotprops.

    Returns
    -------
    Dotprops

    """
    # The KDTree is deliberately excluded: pykdtree's KDTree raises on
    # copy.copy and rebuilding it is cheap anyway. The lock must not be
    # shared between instances either.
    skip = ('_lock', '_tree')

    # Create an empty shell neuron; passing `vect` and `alpha` prevents
    # their (re)calculation during initialization.
    clone = self.__class__(points=np.zeros((0, 3)), k=1,
                           vect=np.zeros((0, 3)), alpha=np.zeros(0))

    # Shallow-copy this neuron's remaining data onto the shell
    for key, value in self.__dict__.items():
        if key not in skip:
            clone.__dict__[key] = copy.copy(value)

    return clone

Query this Dotprops against another.

This function is mainly for navis.nblast.

PARAMETER DESCRIPTION
other

TYPE: Dotprops

alpha
                If True, will also return the product of the
                alpha values of matched points.

TYPE: bool DEFAULT: False

distance_upper_bound
                If provided, we will stop the nearest neighbor
                search at this distance which can vastly speed
                up the query. For points with no hit within this
                distance, `dist` will be set to
                `distance_upper_bound`, and `dotprods` and
                `alpha_prod` will be set to 0.

TYPE: non-negative float DEFAULT: None

kwargs
                Keyword arguments are passed to the KDTree's
                `query()` method. Note that we are using
                `pykdtree.kdtree.KDTree` if available and fall
                back to `scipy.spatial.cKDTree` if pykdtree is
                not installed.

DEFAULT: {}

RETURNS DESCRIPTION
dist

For each point in self, the distance to the closest point in other.

TYPE: np.ndarray

dotprods

Dotproduct of each pair of closest points between self and other.

TYPE: np.ndarray

alpha_prod

Product of the alpha values of each pair of closest points between self and other. Only returned if alpha=True.

TYPE: np.ndarray

Source code in navis/core/dotprop.py
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
def dist_dots(self,
              other: 'Dotprops',
              alpha: bool = False,
              distance_upper_bound: Optional[float] = None,
              **kwargs) -> Union[
                  Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, np.ndarray]
                ]:
    """Query this Dotprops against another.

    This function is mainly for `navis.nblast`.

    Parameters
    ----------
    other :                 Dotprops
    alpha :                 bool
                            If True, will also return the product of the
                            alpha values of matched points.
    distance_upper_bound :  non-negative float, optional
                            If provided, we will stop the nearest neighbor
                            search at this distance which can vastly speed
                            up the query. For points with no hit within this
                            distance, `dist` will be set to
                            `distance_upper_bound`, and `dotprods` and
                            `alpha_prod` will be set to 0.
    kwargs
                            Keyword arguments are passed to the KDTree's
                            `query()` method. Note that we are using
                            `pykdtree.kdtree.KDTree` if available and fall
                            back to `scipy.spatial.cKDTree` if pykdtree is
                            not installed.

    Returns
    -------
    dist :          np.ndarray
                    For each point in `self`, the distance to the closest
                    point in `other`.
    dotprods :      np.ndarray
                    Dotproduct of each pair of closest points between
                    `self` and `other`.
    alpha_prod :    np.ndarray
                    Product of the alpha values of each pair of closest
                    points between `self` and `other`. Only returned if
                    `alpha=True`.

    """
    if not isinstance(other, Dotprops):
        raise TypeError(f'Expected Dotprops, got "{type(other)}"')

    # Check against None explicitly (not truthiness) so that an explicit
    # upper bound of 0 is honored instead of silently ignored
    has_bound = distance_upper_bound is not None

    # If we are using pykdtree we need to make sure that self.points is
    # of the same dtype as other.points - not a problem with scipy but
    # the overhead is typically only a few micro seconds anyway
    points = self.points.astype(other.points.dtype, copy=False)

    # Scipy's KDTree does not like the distance to be None
    diub = distance_upper_bound if has_bound else np.inf
    fast_dists, fast_idxs = other.kdtree.query(points,
                                               distance_upper_bound=diub,
                                               **kwargs)

    # With an upper bound we have to worry about infinite distances
    # (= points without a neighbor inside the bound)
    if has_bound:
        no_nn = fast_dists == np.inf
        fast_dists[no_nn] = distance_upper_bound

        # Temporarily give those nodes a match
        fast_idxs[no_nn] = 0

    fast_dotprods = np.abs((self.vect * other.vect[fast_idxs]).sum(axis=1))

    if has_bound:
        fast_dotprods[no_nn] = 0

    if not alpha:
        return fast_dists, fast_dotprods

    fast_alpha = self.alpha * other.alpha[fast_idxs]

    if has_bound:
        fast_alpha[no_nn] = 0

    return fast_dists, fast_dotprods, fast_alpha

Downsample the neuron by given factor.

PARAMETER DESCRIPTION
factor
                Factor by which to downsample the neurons.
                Default = 5.

TYPE: int DEFAULT: 5

inplace
                If True, operation will be performed on
                itself. If False, operation is performed on
                copy which is then returned.

TYPE: bool DEFAULT: False

**kwargs
                Additional arguments passed to
                [`navis.downsample_neuron`][].

DEFAULT: {}

See Also

navis.downsample_neuron Base function. See for details and examples.

Source code in navis/core/dotprop.py
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
def downsample(self, factor=5, inplace=False, **kwargs):
    """Downsample the neuron by given factor.

    Parameters
    ----------
    factor :                int, optional
                            Factor by which to downsample the neurons.
                            Default = 5.
    inplace :               bool, optional
                            If True, operation will be performed on
                            itself. If False, operation is performed on
                            copy which is then returned.
    **kwargs
                            Additional arguments passed to
                            [`navis.downsample_neuron`][].

    See Also
    --------
    [`navis.downsample_neuron`][]
        Base function. See for details and examples.

    """
    # Operate on the neuron itself or on a copy, depending on `inplace`
    target = self if inplace else self.copy()

    # The actual downsampling is delegated to the base function
    sampling.downsample_neuron(target, factor, inplace=True, **kwargs)

    # Only hand back the new neuron if we worked on a copy
    return None if inplace else target

Remove fluff from neuron.

By default, this function will remove all but the largest connected component from the neuron. You can change that behavior using the keep_size and n_largest parameters.

PARAMETER DESCRIPTION
epsilon
    Distance at which to consider two points to be connected.
    If `None`, will use the default value of 5 times the average
    node distance (`self.sampling_resolution`).

TYPE: float

keep_size
    Use this to set a size (in number of points) for small
    bits to keep. If `keep_size` < 1 it will be interpreted as
    fraction of total nodes/vertices/points.

TYPE: float DEFAULT: None

n_largest
    If set, will keep the `n_largest` connected components. Note:
    if provided, `keep_size` will be applied first!

TYPE: int DEFAULT: None

inplace
    If False, will return a copy and leave the original data
    unmodified.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
Dotprops

Only if inplace=False.

See Also

navis.drop_fluff Base function. See for details and examples.

Source code in navis/core/dotprop.py
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
def drop_fluff(self, epsilon, keep_size: int = None, n_largest: int = None, inplace=False):
    """Remove fluff from neuron.

    By default, this function will remove all but the largest connected
    component from the neuron. You can change that behavior using the
    `keep_size` and `n_largest` parameters.

    Parameters
    ----------
    epsilon :   float
                Distance at which to consider two points to be connected.
                If `None`, will use the default value of 5 times the average
                node distance (`self.sampling_resolution`).
    keep_size : float, optional
                Use this to set a size (in number of points) for small
                bits to keep. If `keep_size` < 1 it will be interpreted as
                fraction of total nodes/vertices/points.
    n_largest : int, optional
                If set, will keep the `n_largest` connected components. Note:
                if provided, `keep_size` will be applied first!
    inplace :   bool, optional
                If False, will return a copy and leave the original data
                unmodified.

    Returns
    -------
    Dotprops
                Only if `inplace=False`.

    See Also
    --------
    [`navis.drop_fluff`][]
        Base function. See for details and examples.

    """
    # Delegate entirely to the base function which handles both the
    # in-place and the on-copy case
    pruned = morpho.drop_fluff(self, epsilon=epsilon, keep_size=keep_size,
                               n_largest=n_largest, inplace=inplace)

    if inplace:
        return None
    return pruned

Recalculate tangent vectors and alpha with a new k.

PARAMETER DESCRIPTION
k
    Number of nearest neighbours to use for tangent vector
    calculation.

TYPE: int

inplace
    If False, will return a copy and leave the original data
    unmodified.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
Dotprops

Only if inplace=False.

Source code in navis/core/dotprop.py
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
def recalculate_tangents(self, k: int, inplace=False):
    """Recalculate tangent vectors and alpha with a new `k`.

    Parameters
    ----------
    k :         int
                Number of nearest neighbours to use for tangent vector
                calculation. Must be >= 1 and no larger than the number
                of points.
    inplace :   bool
                If False, will return a copy and leave the original data
                unmodified.

    Returns
    -------
    Dotprops
                Only if `inplace=False`.

    Raises
    ------
    ValueError
                If `k` is None, < 1 or larger than the number of points.

    """
    # Validate inputs *before* (potentially) copying the entire neuron
    # so that bad input fails fast and cheap
    if k is None or k < 1:
        raise ValueError(f'`k` must be integer >= 1, got "{k}"')

    n_points = self.points.shape[0]
    if n_points < k:
        raise ValueError(f"Too few points ({n_points}) to calculate {k} "
                         "nearest-neighbors")

    x = self if inplace else self.copy()

    # Create the KDTree and get the k-nearest neighbors for each point
    dist, ix = self.kdtree.query(x.points, k=k)

    # Get points: array of (N, k, 3)
    pt = x.points[ix]

    # Generate centers for each cloud of k nearest neighbors
    centers = np.mean(pt, axis=1)

    # Generate vector from center
    cpt = pt - centers.reshape((pt.shape[0], 1, 3))

    # Get inertia (N, 3, 3)
    inertia = cpt.transpose((0, 2, 1)) @ cpt

    # Extract tangent vector (first right-singular vector) and alpha
    # (how dominant the first singular value is)
    u, s, vh = np.linalg.svd(inertia)
    x.vect = vh[:, 0, :]
    x.alpha = (s[:, 0] - s[:, 1]) / np.sum(s, axis=1)

    # Keep track of k
    x.k = k

    if not inplace:
        return x

Snap xyz location(s) to closest point or synapse.

PARAMETER DESCRIPTION
locs
    Either single or multiple XYZ locations.

TYPE: (N, 3) array | (3, ) array

to
    Whether to snap to points or connectors.

TYPE: "points" | "connectors" DEFAULT: 'points'

RETURNS DESCRIPTION
ix

Index/Indices of the closest point/connector.

TYPE: int | list of int

dist

Distance(s) to the closest point/connector.

TYPE: float | list of float

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> dp = navis.make_dotprops(n, k=5)
>>> ix, dist = dp.snap([0, 0, 0])
>>> ix
1123
Source code in navis/core/dotprop.py
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
def snap(self, locs, to='points'):
    """Snap xyz location(s) to closest point or synapse.

    Parameters
    ----------
    locs :      (N, 3) array | (3, ) array
                Either single or multiple XYZ locations.
    to :        "points" | "connectors"
                Whether to snap to points or connectors.

    Returns
    -------
    ix :        int | list of int
                Index/Indices of the closest point/connector.
    dist :      float | list of float
                Distance(s) to the closest point/connector.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> dp = navis.make_dotprops(n, k=5)
    >>> ix, dist = dp.snap([0, 0, 0])
    >>> ix
    1123

    """
    locs = np.asarray(locs).astype(np.float64)

    # Accept either a single (3, ) coordinate or an (N, 3) array
    single = locs.ndim == 1 and len(locs) == 3
    multi = locs.ndim == 2 and locs.shape[1] == 3
    if not (single or multi):
        raise ValueError('Expected a single (x, y, z) location or a '
                         '(N, 3) array of multiple locations')

    if to not in ('points', 'connectors'):
        raise ValueError('`to` must be "points" or "connectors", '
                         f'got {to}')

    # Build a KDTree over the requested data and query it
    tree = graph.neuron2KDTree(self, data=to)
    dist, ix = tree.query(locs)

    return ix, dist

Turn Dotprop into a TreeNeuron.

This does not skeletonize the neuron but rather generates a line segment for each point based on the tangent vector. This is mainly used under the hood for plotting. Also note that only minimal meta data is carried over.

For proper skeletonization see navis.skeletonize.

PARAMETER DESCRIPTION
scale_vec
        Factor by which to scale each tangent vector when
        generating the line segments. If "auto" (default for
        plotting) will use the sampling resolution (median
        distance between points) to determine a suitable
        value.

TYPE: "auto" | float DEFAULT: 'auto'

RETURNS DESCRIPTION
TreeNeuron
Source code in navis/core/dotprop.py
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
def to_skeleton(self,
                scale_vec: Union[float, Literal['auto']] = 'auto'
                ) -> 'core.TreeNeuron':
    """Turn Dotprop into a TreeNeuron.

    This does *not* skeletonize the neuron but rather generates a line
    segment for each point based on the tangent vector. This is mainly
    used under the hood for plotting. Also note that only minimal meta
    data is carried over.

    For proper skeletonization see [`navis.skeletonize`][].

    Parameters
    ----------
    scale_vec :     "auto" | float
                    Factor by which to scale each tangent vector when
                    generating the line segments. If "auto" (default for
                    plotting) will use the sampling resolution (median
                    distance between points) to determine a suitable
                    value.

    Returns
    -------
    TreeNeuron

    """
    if not isinstance(scale_vec, numbers.Number) and scale_vec != 'auto':
        # Note: the error message used to misspell the parameter as
        # `scale_vect` - it is `scale_vec`
        raise ValueError('`scale_vec` must be "auto" or a number, '
                         f'got {scale_vec}')

    if scale_vec == 'auto':
        scale_vec = self.sampling_resolution * .8

    # Prepare segments - this is based on nat:::plot3d.dotprops
    halfvect = self.vect / 2 * scale_vec
    starts = self.points - halfvect
    ends = self.points + halfvect

    # Interweave starts and ends
    segs = np.zeros((starts.shape[0] * 2, 3))
    segs[::2] = starts
    segs[1::2] = ends

    # Generate node table: each even row (start) is the parent of the
    # following odd row (end)
    nodes = pd.DataFrame(segs, columns=['x', 'y', 'z'])
    nodes['node_id'] = nodes.index
    nodes['parent_id'] = -1
    nodes.loc[1::2, 'parent_id'] = nodes.index.values[::2]

    # Produce a minimal TreeNeuron
    tn = core.TreeNeuron(nodes, units=self.units, id=self.id)

    # Carry over the label
    if getattr(self, '_label', None):
        tn._label = self._label

    # Add some other relevant attributes directly
    if self.has_connectors:
        tn._connectors = self._connectors
    tn._soma = self._soma

    return tn

Neuron represented as mesh with vertices and faces.

PARAMETER DESCRIPTION
x
        Data to construct neuron from:
         - any object that has `.vertices` and `.faces`
           properties (e.g. a trimesh.Trimesh)
         - a tuple `(vertices, faces)`
         - a dictionary `{"vertices": (N, 3), "faces": (M, 3)}`
         - filepath to a file that can be read by `trimesh.load`
         - `None` will initialize an empty MeshNeuron
         - `skeletor.Skeleton` will use the mesh and the skeleton
           (including the vertex to node map)

TYPE: mesh-like | tuple | dictionary | filepath | None

units
        Units for coordinates. Defaults to `None` (dimensionless).
        Strings must be parsable by pint: e.g. "nm", "um",
        "micrometer" or "8 nanometers".

TYPE: str | pint.Units | pint.Quantity DEFAULT: None

process
        If True (default and highly recommended), will remove NaN
        and infinite values, and merge duplicate vertices.

TYPE: bool DEFAULT: True

validate
        If True, will try to fix some common problems with
        meshes. See `navis.fix_mesh` for details.

TYPE: bool DEFAULT: False

**metadata
        Any additional data to attach to neuron.

DEFAULT: {}

Source code in navis/core/mesh.py
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
class MeshNeuron(BaseNeuron):
    """Neuron represented as mesh with vertices and faces.

    Parameters
    ----------
    x :             mesh-like | tuple | dictionary | filepath | None
                    Data to construct neuron from:
                     - any object that has `.vertices` and `.faces`
                       properties (e.g. a trimesh.Trimesh)
                     - a tuple `(vertices, faces)`
                     - a dictionary `{"vertices": (N, 3), "faces": (M, 3)}`
                     - filepath to a file that can be read by `trimesh.load`
                     - `None` will initialize an empty MeshNeuron
                     - `skeletor.Skeleton` will use the mesh and the skeleton
                       (including the vertex to node map)

    units :         str | pint.Units | pint.Quantity
                    Units for coordinates. Defaults to `None` (dimensionless).
                    Strings must be parsable by pint: e.g. "nm", "um",
                    "micrometer" or "8 nanometers".
    process :       bool
                    If True (default and highly recommended), will remove NaN
                    and infinite values, and merge duplicate vertices.
    validate :      bool
                    If True, will try to fix some common problems with
                    meshes. See `navis.fix_mesh` for details.
    **metadata
                    Any additional data to attach to neuron.

    """

    connectors: Optional[pd.DataFrame]

    vertices: np.ndarray
    faces: np.ndarray

    soma: Optional[Union[list, np.ndarray]]

    #: Attributes used for neuron summary
    SUMMARY_PROPS = ['type', 'name', 'units', 'n_vertices', 'n_faces']

    #: Attributes to be used when comparing two neurons.
    EQ_ATTRIBUTES = ['name', 'n_vertices', 'n_faces']

    #: Temporary attributes that need clearing when neuron data changes
    TEMP_ATTR = ['_memory_usage', '_trimesh', '_skeleton', '_igraph', '_graph_nx']

    #: Core data table(s) used to calculate hash
    CORE_DATA = ['vertices', 'faces']

    def __init__(self,
                 x,
                 units: Union[pint.Unit, str] = None,
                 process: bool = True,
                 validate: bool = False,
                 **metadata
                 ):
        """Initialize Mesh Neuron.

        See the class-level docstring for the accepted input types.
        """
        super().__init__()

        # Lock neuron during initialization
        self._lock = 1
        self._trimesh = None  # this is required to avoid recursion during init

        if isinstance(x, MeshNeuron):
            # Copy the other MeshNeuron's data wholesale
            self.__dict__.update(x.copy().__dict__)
            self.vertices, self.faces = x.vertices, x.faces
        elif hasattr(x, 'faces') and hasattr(x, 'vertices'):
            # Anything mesh-like (e.g. a trimesh.Trimesh)
            self.vertices, self.faces = x.vertices, x.faces
        elif isinstance(x, dict):
            if 'faces' not in x or 'vertices' not in x:
                raise ValueError('Dictionary must contain "vertices" and "faces"')
            self.vertices, self.faces = x['vertices'], x['faces']
        elif isinstance(x, str) and os.path.isfile(x):
            # Load from file via trimesh
            m = tm.load(x)
            self.vertices, self.faces = m.vertices, m.faces
        elif isinstance(x, type(None)):
            # Empty neuron
            self.vertices, self.faces = np.zeros((0, 3)), np.zeros((0, 3))
        elif isinstance(x, sk.Skeleton):
            # skeletor.Skeleton: use its mesh and keep the skeleton too
            self.vertices, self.faces = x.mesh.vertices, x.mesh.faces
            self._skeleton = TreeNeuron(x)
        elif isinstance(x, tuple):
            if len(x) != 2 or any([not isinstance(v, np.ndarray) for v in x]):
                raise TypeError('Expect tuple to be two arrays: (vertices, faces)')
            self.vertices, self.faces = x[0], x[1]
        else:
            raise utils.ConstructionError(f'Unable to construct MeshNeuron from "{type(x)}"')

        # Attach any additional metadata as attributes
        for k, v in metadata.items():
            try:
                setattr(self, k, v)
            except AttributeError:
                raise AttributeError(f"Unable to set neuron's `{k}` attribute.")

        if process and self.vertices.shape[0]:
            # Let trimesh clean up the mesh (remove NaN/inf, merge
            # duplicate vertices)
            # For some reason we can't do self._trimesh at this stage
            _trimesh = tm.Trimesh(self.vertices, self.faces,
                                  process=process,
                                  validate=validate)
            self.vertices = _trimesh.vertices
            self.faces = _trimesh.faces

        # Done initializing - release the lock
        self._lock = 0

        if validate:
            self.validate()

        self.units = units

    def __getattr__(self, key):
        """We will use this magic method to calculate some attributes on-demand."""
        # Only invoked when normal attribute lookup has already failed.
        # Note that we're mixing @property and __getattr__ which causes problems:
        # if a @property raises an Exception, Python falls back to __getattr__
        # and traceback is lost!

        # Last ditch effort - maybe the base class knows the key?
        return super().__getattr__(key)

    def __getstate__(self):
        """Get state (used e.g. for pickling)."""
        # Keep every attribute that isn't callable
        state = {}
        for key, value in self.__dict__.items():
            if not callable(value):
                state[key] = value

        # Drop the trimesh representation - it can be regenerated
        # on-demand via the `trimesh` property
        state.pop('_trimesh', None)

        return state

    def __setstate__(self, d):
        """Update state (used e.g. for pickling)."""
        # Restore attributes wholesale; `_trimesh` was dropped by
        # `__getstate__` and is regenerated lazily by the `trimesh` property
        self.__dict__.update(d)

    def __truediv__(self, other, copy=True):
        """Implement division for coordinates (vertices, connectors)."""
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            # If a number, consider this an offset for coordinates
            n = self.copy() if copy else self
            # In-place division via `out=` bypasses the `vertices` setter
            # (and hence its cache invalidation) - see explicit clear below
            _ = np.divide(n.vertices, other, out=n.vertices, casting='unsafe')
            if n.has_connectors:
                n.connectors.loc[:, ['x', 'y', 'z']] /= other

            # Convert units
            # Note: .to_compact() throws a RuntimeWarning and returns unchanged
            # values  when `units` is a iterable
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                n.units = (n.units * other).to_compact()

            # Clear cached representations on the neuron that was actually
            # modified (`n`) - with `copy=True` that is NOT `self`
            n._clear_temp_attr()

            return n
        return NotImplemented

    def __mul__(self, other, copy=True):
        """Implement multiplication for coordinates (vertices, connectors)."""
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            # If a number, consider this an offset for coordinates
            n = self.copy() if copy else self
            # In-place multiplication via `out=` bypasses the `vertices`
            # setter (and hence its cache invalidation) - see clear below
            _ = np.multiply(n.vertices, other, out=n.vertices, casting='unsafe')
            if n.has_connectors:
                n.connectors.loc[:, ['x', 'y', 'z']] *= other

            # Convert units
            # Note: .to_compact() throws a RuntimeWarning and returns unchanged
            # values  when `units` is a iterable
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                n.units = (n.units / other).to_compact()

            # Clear cached representations on the neuron that was actually
            # modified (`n`) - with `copy=True` that is NOT `self`
            n._clear_temp_attr()

            return n
        return NotImplemented

    def __add__(self, other, copy=True):
        """Implement addition for coordinates (vertices, connectors)."""
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            n = self.copy() if copy else self
            # In-place addition via `out=` bypasses the `vertices` setter
            # (and hence its cache invalidation) - see explicit clear below
            _ = np.add(n.vertices, other, out=n.vertices, casting='unsafe')
            if n.has_connectors:
                n.connectors.loc[:, ['x', 'y', 'z']] += other

            # Clear cached representations on the neuron that was actually
            # modified (`n`) - with `copy=True` that is NOT `self`
            n._clear_temp_attr()

            return n
        # If another neuron, return a list of neurons
        elif isinstance(other, BaseNeuron):
            return NeuronList([self, other])
        return NotImplemented

    def __sub__(self, other, copy=True):
        """Implement subtraction for coordinates (vertices, connectors)."""
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            n = self.copy() if copy else self
            # In-place subtraction via `out=` bypasses the `vertices` setter
            # (and hence its cache invalidation) - see explicit clear below
            _ = np.subtract(n.vertices, other, out=n.vertices, casting='unsafe')
            if n.has_connectors:
                n.connectors.loc[:, ['x', 'y', 'z']] -= other

            # Clear cached representations on the neuron that was actually
            # modified (`n`) - with `copy=True` that is NOT `self`
            n._clear_temp_attr()

            return n
        return NotImplemented

    @property
    def bbox(self) -> np.ndarray:
        """Bounding box (includes connectors)."""
        # Collect all coordinate arrays that contribute to the bounds
        arrays = [self.vertices]
        if self.has_connectors:
            arrays.append(self.connectors[['x', 'y', 'z']].values)

        # Per-array min/max, then combine across arrays
        mins = np.min(np.vstack([a.min(axis=0) for a in arrays]), axis=0)
        maxs = np.max(np.vstack([a.max(axis=0) for a in arrays]), axis=0)

        # Shape (3, 2): one row per axis, columns are (min, max)
        return np.vstack((mins, maxs)).T

    @property
    def vertices(self):
        """Vertices making up the neuron."""
        return self._vertices

    @vertices.setter
    def vertices(self, verts):
        # Validate type and dimensionality before accepting new vertices
        if not isinstance(verts, np.ndarray):
            raise TypeError(f'Vertices must be numpy array, got "{type(verts)}"')
        if verts.ndim != 2:
            raise ValueError('Vertices must be 2-dimensional array')
        self._vertices = verts
        # New vertices invalidate cached representations (trimesh,
        # graphs, skeleton, ...) - see TEMP_ATTR
        self._clear_temp_attr()

    @property
    def faces(self):
        """Faces making up the neuron."""
        return self._faces

    @faces.setter
    def faces(self, faces):
        # Validate type and dimensionality before accepting new faces
        if not isinstance(faces, np.ndarray):
            raise TypeError(f'Faces must be numpy array, got "{type(faces)}"')
        if faces.ndim != 2:
            raise ValueError('Faces must be 2-dimensional array')
        self._faces = faces
        # New faces invalidate cached representations (trimesh,
        # graphs, skeleton, ...) - see TEMP_ATTR
        self._clear_temp_attr()

    @property
    @temp_property
    def igraph(self) -> 'igraph.Graph':
        """iGraph representation of the vertex connectivity."""
        # Generated lazily on first access; the cache is cleared whenever
        # vertices/faces change (see TEMP_ATTR)
        # If igraph does not exist, create and return
        if not hasattr(self, '_igraph'):
            # This also sets the attribute
            # NOTE(review): with `raise_not_installed=False` this presumably
            # returns None if igraph isn't installed - confirm
            self._igraph = graph.neuron2igraph(self, raise_not_installed=False)
        return self._igraph

    @property
    @temp_property
    def graph(self) -> nx.DiGraph:
        """Networkx Graph representation of the vertex connectivity."""
        # Generated lazily on first access; the cache is cleared whenever
        # vertices/faces change (see TEMP_ATTR)
        # If graph does not exist, create and return
        if not hasattr(self, '_graph_nx'):
            # This also sets the attribute
            self._graph_nx = graph.neuron2nx(self)
        return self._graph_nx

    @property
    def sampling_resolution(self) -> float:
        """Average distance between vertices."""
        # Mean length of the mesh's unique edges (via trimesh)
        return float(self.trimesh.edges_unique_length.mean())

    @property
    @add_units(compact=True, power=3)
    def volume(self) -> float:
        """Volume of the neuron.

        Calculated from the surface integral. Garbage if neuron is not
        watertight.

        """
        # Delegates to trimesh; `add_units` attaches cubic units
        return float(self.trimesh.volume)

    @property
    @temp_property
    def skeleton(self) -> 'TreeNeuron':
        """Skeleton representation of this neuron.

        Uses [`navis.conversion.mesh2skeleton`][].

        """
        # Generated lazily (via `self.skeletonize()`) on first access and
        # cached until temporary attributes are cleared.
        if not hasattr(self, '_skeleton'):
            self._skeleton = self.skeletonize()
        return self._skeleton

    @skeleton.setter
    def skeleton(self, s):
        """Attach skeleton representation for this neuron."""
        # An `sk.Skeleton` is wrapped into a TreeNeuron carrying over this
        # neuron's id/name; anything else must already be a TreeNeuron.
        if isinstance(s, sk.Skeleton):
            s = TreeNeuron(s, id=self.id, name=self.name)
        elif not isinstance(s, TreeNeuron):
            raise TypeError(f'`.skeleton` must be a TreeNeuron, got "{type(s)}"')
        self._skeleton = s

    @property
    def soma(self):
        """Not implemented for MeshNeurons - use `.soma_pos`."""
        # Deliberately raises: meshes have no soma *node*, only a position.
        raise AttributeError("MeshNeurons have a soma position (`.soma_pos`), not a soma.")

    @property
    def soma_pos(self):
        """X/Y/Z position of the soma.

        Returns `None` if no soma.
        """
        # Default to None when the setter has never been called.
        return getattr(self, '_soma_pos', None)

    @soma_pos.setter
    def soma_pos(self, value):
        """Set soma by position.

        Accepts anything that converts to a (3, ) float64 array; pass `None`
        to remove the soma position.
        """
        if value is None:
            self._soma_pos = None
            return

        try:
            value = np.asarray(value).astype(np.float64).reshape(3)
        # Narrowed from BaseException: we must not swallow KeyboardInterrupt/
        # SystemExit, and chaining keeps the root cause visible.
        except Exception as e:
            raise ValueError(f'Unable to convert soma position "{value}" '
                             f'to numeric (3, ) numpy array.') from e

        self._soma_pos = value

    @property
    def type(self) -> str:
        """Neuron type."""
        # Fixed string identifying this class for e.g. summaries/repr.
        return 'navis.MeshNeuron'

    @property
    @temp_property
    def trimesh(self):
        """Trimesh representation of the neuron."""
        # Built lazily; `process=False` keeps vertices/faces exactly as stored.
        # NOTE(review): this is a truthiness (not `is None`) check, so an
        # *empty* Trimesh would be rebuilt on every access — confirm intended.
        if not getattr(self, '_trimesh', None):
            self._trimesh = tm.Trimesh(vertices=self._vertices,
                                       faces=self._faces,
                                       process=False)
        return self._trimesh

    def copy(self) -> 'MeshNeuron':
        """Return a copy of the neuron."""
        # Attributes that must not carry over (the init lock must stay per-instance).
        no_copy = ['_lock']

        # Generate new neuron
        x = self.__class__(None)
        # Override with this neuron's data; each attribute is copied with
        # copy.copy(), i.e. a per-attribute shallow copy.
        x.__dict__.update({k: copy.copy(v) for k, v in self.__dict__.items() if k not in no_copy})

        return x

    def snap(self, locs, to='vertices'):
        """Snap xyz location(s) to closest vertex or synapse.

        Parameters
        ----------
        locs :      (N, 3) array | (3, ) array
                    Either single or multiple XYZ locations.
        to :        "vertices" | "connectors"
                    Whether to snap to vertex or connector.

        Returns
        -------
        ix :        int | list of int
                    Index/indices of the closest vertex/connector.
        dist :      float | list of float
                    Distance(s) to the closest vertex/connector.

        Examples
        --------
        >>> import navis
        >>> n = navis.example_neurons(1, kind='mesh')
        >>> ix, dist = n.snap([0, 0, 0])
        >>> ix
        4134

        """
        locs = np.asarray(locs).astype(self.vertices.dtype)

        is_single = (locs.ndim == 1 and len(locs) == 3)
        is_multi = (locs.ndim == 2 and locs.shape[1] == 3)
        if not is_single and not is_multi:
            raise ValueError('Expected a single (x, y, z) location or a '
                             '(N, 3) array of multiple locations')

        # Fixed: the original option tuple listed "connectors" twice and
        # never accepted the singular "connector".
        if to not in ('vertices', 'vertex', 'connectors', 'connector'):
            raise ValueError('`to` must be "vertices" or "connectors", '
                             f'got {to}')

        # NOTE(review): `to` is validated but the query below always runs
        # against the vertices — connector snapping is not implemented here.
        # Generate tree
        tree = scipy.spatial.cKDTree(data=self.vertices)

        # Find the closest node
        dist, ix = tree.query(locs)

        return ix, dist

    def skeletonize(self, method='wavefront', heal=True, inv_dist=None, **kwargs) -> 'TreeNeuron':
        """Skeletonize mesh.

        See [`navis.conversion.mesh2skeleton`][] for details.

        Parameters
        ----------
        method :    "wavefront" | "teasar"
                    Method to use for skeletonization.
        heal :      bool
                    Whether to heal a fragmented skeleton after skeletonization.
        inv_dist :  int | float
                    Only required for method "teasar": invalidation distance for
                    the traversal. Smaller `inv_dist` captures smaller features
                    but is slower and vice versa. A good starting value is around
                    2-5 microns.
        **kwargs
                    Additional keyword arguments are passed through to
                    [`navis.conversion.mesh2skeleton`][].

        Returns
        -------
        skeleton :  navis.TreeNeuron

        """
        # Thin wrapper: all the actual work happens in the conversion module.
        return conversion.mesh2skeleton(self, method=method, heal=heal,
                                        inv_dist=inv_dist, **kwargs)

    def validate(self, inplace=False):
        """Use trimesh to try and fix some common mesh issues.

        See [`navis.fix_mesh`][] for details.

        Parameters
        ----------
        inplace :   bool
                    Passed through to `navis.fix_mesh`.
        """
        return meshes.fix_mesh(self, inplace=inplace)

Bounding box (includes connectors).

Faces making up the neuron.

Networkx Graph representation of the vertex connectivity.

iGraph representation of the vertex connectivity.

Average distance between vertices.

Skeleton representation of this neuron.

Uses navis.conversion.mesh2skeleton.

Not implemented for MeshNeurons - use .soma_pos.

X/Y/Z position of the soma.

Returns None if no soma.

Trimesh representation of the neuron.

Neuron type.

Vertices making up the neuron.

Volume of the neuron.

Calculated from the surface integral. Garbage if neuron is not watertight.

Initialize Mesh Neuron.

Source code in navis/core/mesh.py
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
def __init__(self,
             x,
             units: Union[pint.Unit, str] = None,
             process: bool = True,
             validate: bool = False,
             **metadata
             ):
    """Initialize Mesh Neuron.

    Parameters
    ----------
    x :         MeshNeuron | mesh-like | dict | filepath | None | sk.Skeleton | (vertices, faces)
                Data to construct the neuron from.
    units :     pint.Unit | str, optional
                Spatial units of the coordinates.
    process :   bool
                If True, let trimesh process the mesh after construction.
    validate :  bool
                If True, run `self.validate()` after construction.
    **metadata
                Additional attributes to set on the neuron.

    """
    super().__init__()

    # Lock neuron during initialization
    self._lock = 1
    self._trimesh = None  # this is required to avoid recursion during init

    if isinstance(x, MeshNeuron):
        self.__dict__.update(x.copy().__dict__)
        self.vertices, self.faces = x.vertices, x.faces
    elif hasattr(x, 'faces') and hasattr(x, 'vertices'):
        # Anything mesh-like (e.g. a trimesh.Trimesh) works here.
        self.vertices, self.faces = x.vertices, x.faces
    elif isinstance(x, dict):
        if 'faces' not in x or 'vertices' not in x:
            raise ValueError('Dictionary must contain "vertices" and "faces"')
        self.vertices, self.faces = x['vertices'], x['faces']
    elif isinstance(x, str) and os.path.isfile(x):
        m = tm.load(x)
        self.vertices, self.faces = m.vertices, m.faces
    elif x is None:  # idiomatic None check (was `isinstance(x, type(None))`)
        # Empty neuron
        self.vertices, self.faces = np.zeros((0, 3)), np.zeros((0, 3))
    elif isinstance(x, sk.Skeleton):
        self.vertices, self.faces = x.mesh.vertices, x.mesh.faces
        self._skeleton = TreeNeuron(x)
    elif isinstance(x, tuple):
        # Generator instead of a throwaway list inside any().
        if len(x) != 2 or any(not isinstance(v, np.ndarray) for v in x):
            raise TypeError('Expect tuple to be two arrays: (vertices, faces)')
        self.vertices, self.faces = x[0], x[1]
    else:
        raise utils.ConstructionError(f'Unable to construct MeshNeuron from "{type(x)}"')

    for k, v in metadata.items():
        try:
            setattr(self, k, v)
        except AttributeError:
            raise AttributeError(f"Unable to set neuron's `{k}` attribute.")

    if process and self.vertices.shape[0]:
        # For some reason we can't do self._trimesh at this stage
        _trimesh = tm.Trimesh(self.vertices, self.faces,
                              process=process,
                              validate=validate)
        self.vertices = _trimesh.vertices
        self.faces = _trimesh.faces

    self._lock = 0

    if validate:
        self.validate()

    self.units = units

Return a copy of the neuron.

Source code in navis/core/mesh.py
400
401
402
403
404
405
406
407
408
409
def copy(self) -> 'MeshNeuron':
    """Make a copy of this neuron."""
    # Attributes that must not be carried over to the copy.
    skip = ('_lock',)

    # Start from an empty neuron of the same class...
    clone = self.__class__(None)
    # ...then shallow-copy everything else over, one attribute at a time.
    for key, value in self.__dict__.items():
        if key not in skip:
            clone.__dict__[key] = copy.copy(value)

    return clone

Skeletonize mesh.

See navis.conversion.mesh2skeleton for details.

PARAMETER DESCRIPTION
method
    Method to use for skeletonization.

TYPE: "wavefront" | "teasar" DEFAULT: 'wavefront'

heal
    Whether to heal a fragmented skeleton after skeletonization.

TYPE: bool DEFAULT: True

inv_dist
    Only required for method "teasar": invalidation distance for
    the traversal. Smaller `inv_dist` captures smaller features
    but is slower and vice versa. A good starting value is around
    2-5 microns.

TYPE: int | float DEFAULT: None

**kwargs
    Additional keyword arguments are passed through to
    [`navis.conversion.mesh2skeleton`][].

DEFAULT: {}

RETURNS DESCRIPTION
skeleton

TYPE: navis.TreeNeuron

Source code in navis/core/mesh.py
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
def skeletonize(self, method='wavefront', heal=True, inv_dist=None, **kwargs) -> 'TreeNeuron':
    """Turn this mesh into a skeleton.

    This simply delegates to [`navis.conversion.mesh2skeleton`][] — see
    there for the full details.

    Parameters
    ----------
    method :    "wavefront" | "teasar"
                Method to use for skeletonization.
    heal :      bool
                Whether to heal a fragmented skeleton after skeletonization.
    inv_dist :  int | float
                Only required for method "teasar": invalidation distance for
                the traversal. Smaller `inv_dist` captures smaller features
                but is slower and vice versa. A good starting value is around
                2-5 microns.
    **kwargs
                Additional keyword arguments are passed through to
                [`navis.conversion.mesh2skeleton`][].

    Returns
    -------
    skeleton :  navis.TreeNeuron

    """
    return conversion.mesh2skeleton(
        self, method=method, heal=heal, inv_dist=inv_dist, **kwargs
    )

Snap xyz location(s) to closest vertex or synapse.

PARAMETER DESCRIPTION
locs
    Either single or multiple XYZ locations.

TYPE: (N, 3) array | (3, ) array

to
    Whether to snap to vertex or connector.

TYPE: "vertices" | "connectors" DEFAULT: 'vertices'

RETURNS DESCRIPTION
ix

Index/indices of the closest vertex/connector.

TYPE: int | list of int

dist

Distance(s) to the closest vertex/connector.

TYPE: float | list of float

Examples:

>>> import navis
>>> n = navis.example_neurons(1, kind='mesh')
>>> ix, dist = n.snap([0, 0, 0])
>>> ix
4134
Source code in navis/core/mesh.py
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
def snap(self, locs, to='vertices'):
    """Snap xyz location(s) to closest vertex or synapse.

    Parameters
    ----------
    locs :      (N, 3) array | (3, ) array
                Either single or multiple XYZ locations.
    to :        "vertices" | "connectors"
                Whether to snap to vertex or connector.

    Returns
    -------
    ix :        int | list of int
                Index/indices of the closest vertex/connector.
    dist :      float | list of float
                Distance(s) to the closest vertex/connector.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1, kind='mesh')
    >>> ix, dist = n.snap([0, 0, 0])
    >>> ix
    4134

    """
    locs = np.asarray(locs).astype(self.vertices.dtype)

    is_single = (locs.ndim == 1 and len(locs) == 3)
    is_multi = (locs.ndim == 2 and locs.shape[1] == 3)
    if not is_single and not is_multi:
        raise ValueError('Expected a single (x, y, z) location or a '
                         '(N, 3) array of multiple locations')

    # Fixed: the original option tuple listed "connectors" twice and never
    # accepted the singular "connector".
    if to not in ('vertices', 'vertex', 'connectors', 'connector'):
        raise ValueError('`to` must be "vertices" or "connectors", '
                         f'got {to}')

    # NOTE(review): `to` is validated but the query below always runs
    # against the vertices — connector snapping is not implemented here.
    # Generate tree
    tree = scipy.spatial.cKDTree(data=self.vertices)

    # Find the closest node
    dist, ix = tree.query(locs)

    return ix, dist

Use trimesh to try and fix some common mesh issues.

See navis.fix_mesh for details.

Source code in navis/core/mesh.py
485
486
487
488
489
490
491
def validate(self, inplace=False):
    """Attempt to repair common mesh defects via trimesh.

    Delegates to [`navis.fix_mesh`][] — see there for details.
    """
    # `inplace` is forwarded unchanged to fix_mesh.
    return meshes.fix_mesh(self, inplace=inplace)

Constructor for Neuron objects. Depending on the input, either a TreeNeuron or a MeshNeuron is returned.

PARAMETER DESCRIPTION
x
            Anything that can construct a [`navis.TreeNeuron`][]
            or [`navis.MeshNeuron`][].

TYPE: Union[nx.DiGraph, str, pd.DataFrame, TreeNeuron, MeshNeuron]

**metadata
            Any additional data to attach to neuron.

DEFAULT: {}

See Also

navis.read_swc — gives you more control over how data is extracted from an SWC file. navis.example_neurons — loads some provided example neurons.

Source code in navis/core/base.py
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
def Neuron(
    x: Union[nx.DiGraph, str, pd.DataFrame, "TreeNeuron", "MeshNeuron"], **metadata
):
    """Constructor for Neuron objects. Depending on the input, either a
    `TreeNeuron` or a `MeshNeuron` is returned.

    Parameters
    ----------
    x
                        Anything that can construct a [`navis.TreeNeuron`][]
                        or [`navis.MeshNeuron`][].
    **metadata
                        Any additional data to attach to neuron.

    See Also
    --------
    [`navis.read_swc`][]
                        Gives you more control over how data is extracted from
                        SWC file.
    [`navis.example_neurons`][]
                        Loads some example neurons provided.

    """
    # Try skeleton first, then fall back to mesh. Only ConstructionError
    # means "wrong input type, try the next constructor"; every other
    # exception propagates unchanged. (The original's `except BaseException:
    # raise` clauses were no-op re-raises and have been removed.)
    try:
        return core.TreeNeuron(x, **metadata)
    except utils.ConstructionError:
        try:
            return core.MeshNeuron(x, **metadata)
        except utils.ConstructionError:
            pass

    raise utils.ConstructionError(f'Unable to construct neuron from "{type(x)}"')

Class which creates a connectivity graph from a set of neurons.

Connectivity is determined by shared IDs in the connectors table.

Add neurons with the add_neuron and add_neurons methods. Alternatively, supply an iterable of neurons in the constructor. Neurons must have unique names.

See the to_(multi)digraph method for output.

Source code in navis/connectivity/adjacency.py
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
class NeuronConnector:
    """Class which creates a connectivity graph from a set of neurons.

    Connectivity is determined by shared IDs in the `connectors` table.

    Add neurons with the `add_neuron` and `add_neurons` methods.
    Alternatively, supply an iterable of neurons in the constructor.
    Neurons must have unique names.

    See the `to_(multi)digraph` method for output.
    """

    def __init__(self, nrns: Optional[Iterable[TreeNeuron]] = None) -> None:
        # neuron name -> neuron object
        self.neurons = dict()
        # connector ID -> (x, y, z) location
        self.connector_xyz = dict()
        # connectors and the treenodes presynaptic to them
        self.conn_inputs = dict()
        # connectors and the treenodes postsynaptic to them
        self.conn_outputs = dict()

        if nrns is not None:
            self.add_neurons(nrns)

    def __len__(self) -> int:
        # Number of neurons registered with this connector.
        return len(self.neurons)

    def add_neurons(self, nrns: Iterable[TreeNeuron]):
        """Add several neurons to the connector.

        All neurons must have unique names.

        Parameters
        ----------
        nrns : Iterable[TreeNeuron]

        Returns
        -------
        Modified connector.
        """
        for nrn in nrns:
            self.add_neuron(nrn)
        return self

    def add_neuron(self, nrn: TreeNeuron):
        """Add a single neuron to the connector.

        All neurons must have unique names.

        Parameters
        ----------
        nrn : TreeNeuron

        Returns
        -------
        Modified connector.
        """
        # Duplicate names are merged into the same graph node (warn, not fatal).
        if nrn.name in self.neurons:
            logger.warning(
                "Neuron with name %s has already been added to NeuronConnector. "
                "These will occupy the same node in the graph, "
                "but have connectors from both.",
                nrn.name
            )

        self.neurons[nrn.name] = nrn
        if nrn.connectors is None:
            logger.warning("Neuron with name %s has no connector information", nrn.name)
            return self

        for row in nrn.connectors.itertuples():
            # connector_id, node_id, x, y, z, is_input
            self.connector_xyz[row.connector_id] = (row.x, row.y, row.z)
            # type == 1 rows feed conn_outputs, type == 0 rows feed conn_inputs
            # (matching the presynaptic/postsynaptic naming in __init__).
            if row.type == 1:
                self.conn_outputs.setdefault(row.connector_id, []).append((nrn.name, row.node_id))
            elif row.type == 0:
                # A connector is expected to have at most one input node.
                if row.connector_id in self.conn_inputs:
                    logger.warning(
                        "Connector with ID %s has multiple inputs: "
                        "connector tables are probably inconsistent",
                        row.connector_id
                    )
                self.conn_inputs[row.connector_id] = (nrn.name, row.node_id)

        return self

    def edges(self, include_other=True) -> Iterable[Edge]:
        """Iterate through all synapse edges.

        Parameters
        ----------
        include_other : bool, optional
            Include edges for which only one partner is known, by default True.
            If included, the name of the unknown partner will be `"__OTHER__"`,
            and the treenode ID will be None.

        Yields
        ------
        tuple[int, str, str, int, int]
            Connector ID, source name, target name, source treenode, target treenode.
        """
        # Union of all connector IDs seen on either side.
        for conn_id in set(self.conn_inputs).union(self.conn_outputs):
            src, src_node = self.conn_inputs.get(conn_id, (OTHER, None))
            if src_node is None and not include_other:
                continue
            for tgt, tgt_node in self.conn_outputs.get(conn_id, [(OTHER, None)]):
                if tgt_node is None and not include_other:
                    continue
                yield Edge(conn_id, src, tgt, src_node, tgt_node)

    def to_adjacency(self, include_other=True) -> pd.DataFrame:
        """Create an adjacency matrix of neuron connectivity.

        Parameters
        ----------
        include_other : bool, optional
            Whether to include a node called `"__OTHER__"`,
            which represents all unknown partners.
            By default True.
            This can be helpful when calculating a neuron's input fraction,
            but cannot be used for output fractions if synapses are polyadic.

        Returns
        -------
        pandas.DataFrame
            Row index is source neuron name,
            column index is target neuron name,
            cells are the number of synapses from source to target.
        """
        index = list(self.neurons)
        if include_other:
            index.append(OTHER)
        # Square matrix of synapse counts, initialised to zero.
        data = np.zeros((len(index), len(index)), np.uint64)
        df = pd.DataFrame(data, index, index)
        for _, src, tgt, _, _ in self.edges(include_other):
            df.loc[src, tgt] += 1

        return df

    def to_digraph(self, include_other=True) -> nx.DiGraph:
        """Create a graph of neuron connectivity.

        Parameters
        ----------
        include_other : bool, optional
            Whether to include a node called `"__OTHER__"`,
            which represents all unknown partners.
            By default True.
            This can be helpful when calculating a neuron's input fraction,
            but cannot be used for output fractions if synapses are polyadic.

        Returns
        -------
        nx.DiGraph
            The graph has data `{"connector_xyz": {connector_id: (x, y, z), ...}}`.
            The nodes have data `{"neuron": tree_neuron}`.
            The edges have data `{"connectors": data_frame, "weight": n_connectors}`,
            where the connectors data frame has columns
            "connector_id", "pre_node", "post_node".
        """
        g = nx.DiGraph()
        g.add_nodes_from((k, {"neuron": v}) for k, v in self.neurons.items())
        if include_other:
            g.add_node(OTHER, neuron=None)

        g.graph["connector_xyz"] = self.connector_xyz
        # Nullable unsigned dtypes so missing node IDs stay NA (no float cast).
        headers = {
            "connector_id": pd.UInt64Dtype(),
            "pre_node": pd.UInt64Dtype(),
            "post_node": pd.UInt64Dtype(),
        }
        # Group individual synapses by (source, target) pair.
        edges = dict()
        for conn_id, src, tgt, src_node, tgt_node in self.edges(include_other):
            edges.setdefault((src, tgt), []).append([conn_id, src_node, tgt_node])

        for (src, tgt), rows in edges.items():
            # Build as object first, then cast to the nullable dtypes above.
            df_tmp = pd.DataFrame(rows, columns=list(headers), dtype=object)
            df = df_tmp.astype(headers, copy=False)
            g.add_edge(src, tgt, connectors=df, weight=len(df))

        return g

    def to_multidigraph(self, include_other=True) -> nx.MultiDiGraph:
        """Create a graph of neuron connectivity where each synapse is an edge.

        Parameters
        ----------
        include_other : bool, optional
            Whether to include a node called `"__OTHER__"`,
            which represents all unknown partners.
            By default True.
            This can be helpful when calculating a neuron's input fraction,
            but cannot be used for output fractions if synapses are polyadic.

        Returns
        -------
        nx.MultiDiGraph
            The nodes have data `{"neuron": tree_neuron}`.
            The edges have data
            `{"pre_node": presyn_treenode_id, "post_node": postsyn_treenode_id, "xyz": connector_location, "connector_id": conn_id}`.
        """
        g = nx.MultiDiGraph()
        g.add_nodes_from((k, {"neuron": v}) for k, v in self.neurons.items())
        if include_other:
            g.add_node(OTHER, neuron=None)

        # One edge per synapse, annotated with its treenodes and location.
        for conn_id, src, tgt, src_node, tgt_node in self.edges(include_other):
            g.add_edge(
                src,
                tgt,
                pre_node=src_node,
                post_node=tgt_node,
                xyz=self.connector_xyz[conn_id],
                connector_id=conn_id,
            )

        return g

Add a single neuron to the connector.

All neurons must have unique names.

PARAMETER DESCRIPTION
nrn

TYPE: TreeNeuron

RETURNS DESCRIPTION
Modified connector.
Source code in navis/connectivity/adjacency.py
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
def add_neuron(self, nrn: TreeNeuron):
    """Add a single neuron to the connector.

    All neurons must have unique names.

    Parameters
    ----------
    nrn : TreeNeuron

    Returns
    -------
    Modified connector.
    """
    # Duplicate names are merged into the same graph node (warn, not fatal).
    if nrn.name in self.neurons:
        logger.warning(
            "Neuron with name %s has already been added to NeuronConnector. "
            "These will occupy the same node in the graph, "
            "but have connectors from both.",
            nrn.name
        )

    self.neurons[nrn.name] = nrn
    if nrn.connectors is None:
        logger.warning("Neuron with name %s has no connector information", nrn.name)
        return self

    for row in nrn.connectors.itertuples():
        # connector_id, node_id, x, y, z, is_input
        self.connector_xyz[row.connector_id] = (row.x, row.y, row.z)
        # type == 1 rows feed conn_outputs, type == 0 rows feed conn_inputs.
        if row.type == 1:
            self.conn_outputs.setdefault(row.connector_id, []).append((nrn.name, row.node_id))
        elif row.type == 0:
            # A connector is expected to have at most one input node.
            if row.connector_id in self.conn_inputs:
                logger.warning(
                    "Connector with ID %s has multiple inputs: "
                    "connector tables are probably inconsistent",
                    row.connector_id
                )
            self.conn_inputs[row.connector_id] = (nrn.name, row.node_id)

    return self

Add several neurons to the connector.

All neurons must have unique names.

PARAMETER DESCRIPTION
nrns

TYPE: Iterable[TreeNeuron]

RETURNS DESCRIPTION
Modified connector.
Source code in navis/connectivity/adjacency.py
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
def add_neurons(self, nrns: Iterable[TreeNeuron]):
    """Register several neurons with this connector.

    Names must be unique across all added neurons.

    Parameters
    ----------
    nrns : Iterable[TreeNeuron]

    Returns
    -------
    Modified connector.
    """
    # Delegate each neuron to add_neuron, then return self for chaining.
    for neuron in nrns:
        self.add_neuron(neuron)
    return self

Iterate through all synapse edges.

PARAMETER DESCRIPTION
include_other

Include edges for which only one partner is known, by default True. If included, the name of the unknown partner will be "__OTHER__", and the treenode ID will be None.

TYPE: bool DEFAULT: True

YIELDS DESCRIPTION
tuple[int, str, str, int, int]

Connector ID, source name, target name, source treenode, target treenode.

Source code in navis/connectivity/adjacency.py
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
def edges(self, include_other=True) -> Iterable[Edge]:
    """Yield one edge per synaptic connection.

    Parameters
    ----------
    include_other : bool, optional
        Include edges for which only one partner is known, by default True.
        If included, the name of the unknown partner will be `"__OTHER__"`,
        and the treenode ID will be None.

    Yields
    ------
    tuple[int, str, str, int, int]
        Connector ID, source name, target name, source treenode, target treenode.
    """
    # Every connector ID seen on either the input or the output side.
    all_ids = set(self.conn_inputs) | set(self.conn_outputs)
    for conn_id in all_ids:
        src, src_node = self.conn_inputs.get(conn_id, (OTHER, None))
        if not include_other and src_node is None:
            continue
        for tgt, tgt_node in self.conn_outputs.get(conn_id, [(OTHER, None)]):
            if not include_other and tgt_node is None:
                continue
            yield Edge(conn_id, src, tgt, src_node, tgt_node)

Create an adjacency matrix of neuron connectivity.

PARAMETER DESCRIPTION
include_other

Whether to include a node called "__OTHER__", which represents all unknown partners. By default True. This can be helpful when calculating a neuron's input fraction, but cannot be used for output fractions if synapses are polyadic.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
pandas.DataFrame

Row index is source neuron name, column index is target neuron name, cells are the number of synapses from source to target.

Source code in navis/connectivity/adjacency.py
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
def to_adjacency(self, include_other=True) -> pd.DataFrame:
    """Build the synapse-count adjacency matrix for the added neurons.

    Parameters
    ----------
    include_other : bool, optional
        Whether to include a node called `"__OTHER__"`,
        which represents all unknown partners.
        By default True.
        This can be helpful when calculating a neuron's input fraction,
        but cannot be used for output fractions if synapses are polyadic.

    Returns
    -------
    pandas.DataFrame
        Rows are source neuron names, columns are target neuron names and
        each cell counts the synapses from source to target.
    """
    names = list(self.neurons)
    if include_other:
        names.append(OTHER)

    # Square, zero-initialised count matrix (unsigned int).
    counts = np.zeros((len(names), len(names)), np.uint64)
    adj = pd.DataFrame(counts, index=names, columns=names)

    # Tally one synapse per edge.
    for _, src, tgt, _, _ in self.edges(include_other):
        adj.loc[src, tgt] += 1

    return adj

Create a graph of neuron connectivity.

PARAMETER DESCRIPTION
include_other

Whether to include a node called "__OTHER__", which represents all unknown partners. By default True. This can be helpful when calculating a neuron's input fraction, but cannot be used for output fractions if synapses are polyadic.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
nx.DiGraph

The graph has data {"connector_xyz": {connector_id: (x, y, z), ...}}. The nodes have data {"neuron": tree_neuron}. The edges have data {"connectors": data_frame, "weight": n_connectors}, where the connectors data frame has columns "connector_id", "pre_node", "post_node".

Source code in navis/connectivity/adjacency.py
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
def to_digraph(self, include_other=True) -> nx.DiGraph:
    """Create a graph of neuron connectivity.

    Parameters
    ----------
    include_other : bool, optional
        Whether to include a node called `"__OTHER__"`,
        which represents all unknown partners.
        By default True.
        This can be helpful when calculating a neuron's input fraction,
        but cannot be used for output fractions if synapses are polyadic.

    Returns
    -------
    nx.DiGraph
        The graph has data `{"connector_xyz": {connector_id: (x, y, z), ...}}`.
        The nodes have data `{"neuron": tree_neuron}`.
        The edges have data `{"connectors": data_frame, "weight": n_connectors}`,
        where the connectors data frame has columns
        "connector_id", "pre_node", "post_node".
    """
    g = nx.DiGraph()
    g.add_nodes_from((k, {"neuron": v}) for k, v in self.neurons.items())
    if include_other:
        g.add_node(OTHER, neuron=None)

    g.graph["connector_xyz"] = self.connector_xyz
    # Nullable unsigned dtypes so missing node IDs stay NA (no float cast).
    headers = {
        "connector_id": pd.UInt64Dtype(),
        "pre_node": pd.UInt64Dtype(),
        "post_node": pd.UInt64Dtype(),
    }
    # Group the individual synapses by (source, target) pair.
    edges = dict()
    for conn_id, src, tgt, src_node, tgt_node in self.edges(include_other):
        edges.setdefault((src, tgt), []).append([conn_id, src_node, tgt_node])

    for (src, tgt), rows in edges.items():
        # Build with object dtype first, then cast to the nullable dtypes.
        df_tmp = pd.DataFrame(rows, columns=list(headers), dtype=object)
        df = df_tmp.astype(headers, copy=False)
        g.add_edge(src, tgt, connectors=df, weight=len(df))

    return g

Create a graph of neuron connectivity where each synapse is an edge.

PARAMETER DESCRIPTION
include_other

Whether to include a node called "__OTHER__", which represents all unknown partners. By default True. This can be helpful when calculating a neuron's input fraction, but cannot be used for output fractions if synapses are polyadic.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
nx.MultiDiGraph

The nodes have data {"neuron": tree_neuron}. The edges have data {"pre_node": presyn_treenode_id, "post_node": postsyn_treenode_id, "xyz": connector_location, "connector_id": conn_id}.

Source code in navis/connectivity/adjacency.py
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
def to_multidigraph(self, include_other=True) -> nx.MultiDiGraph:
    """Build a multigraph of neuron connectivity with one edge per synapse.

    Parameters
    ----------
    include_other : bool, optional
        If True (default), add a node called `"__OTHER__"` standing in for
        all unknown partners.
        This can be helpful when calculating a neuron's input fraction,
        but cannot be used for output fractions if synapses are polyadic.

    Returns
    -------
    nx.MultiDiGraph
        The nodes have data `{"neuron": tree_neuron}`.
        The edges have data
        `{"pre_node": presyn_treenode_id, "post_node": postsyn_treenode_id, "xyz": connector_location, "connector_id": conn_id}`.
    """
    graph = nx.MultiDiGraph()
    for neuron_id, neuron in self.neurons.items():
        graph.add_node(neuron_id, neuron=neuron)
    if include_other:
        graph.add_node(OTHER, neuron=None)

    # One parallel edge per connector/synapse between the same pair.
    xyz = self.connector_xyz
    for conn_id, src, tgt, src_node, tgt_node in self.edges(include_other):
        graph.add_edge(
            src,
            tgt,
            pre_node=src_node,
            post_node=tgt_node,
            xyz=xyz[conn_id],
            connector_id=conn_id,
        )

    return graph

Collection of neurons.

Gives quick access to neurons' attributes and functions.

PARAMETER DESCRIPTION
x
            Data to construct neuronlist from. Can be either:

            1. Tree/MeshNeuron(s) or Dotprops
            2. NeuronList(s)
            3. Anything that constructs a Tree/MeshNeuron
            4. List of the above

TYPE: list | array | TreeNeuron | MeshNeuron | Dotprops | NeuronList

make_copy
            If True, Neurons are deepcopied before being
            assigned to the NeuronList.

TYPE: bool DEFAULT: False

make_using
            Function or class used to construct neurons from
            elements in `x` if they aren't already neurons.
            By default, will use `navis.Neuron` to try to infer
            what kind of neuron can be constructed.

TYPE: function | class DEFAULT: None

parallel
            If True, will use parallel threads when initialising the
            NeuronList. Can be slightly to substantially faster
            depending on the number of cores and the input data.

TYPE: bool DEFAULT: False

n_cores
            Number of cores to use when `parallel=True`.
            Defaults to half the available cores.

TYPE: int DEFAULT: os.cpu_count() // 2

**kwargs
            Will be passed to constructor of Tree/MeshNeuron (see
            `make_using`).

DEFAULT: {}

Source code in navis/core/neuronlist.py
  43
  44
  45
  46
  47
  48
  49
  50
  51
  52
  53
  54
  55
  56
  57
  58
  59
  60
  61
  62
  63
  64
  65
  66
  67
  68
  69
  70
  71
  72
  73
  74
  75
  76
  77
  78
  79
  80
  81
  82
  83
  84
  85
  86
  87
  88
  89
  90
  91
  92
  93
  94
  95
  96
  97
  98
  99
 100
 101
 102
 103
 104
 105
 106
 107
 108
 109
 110
 111
 112
 113
 114
 115
 116
 117
 118
 119
 120
 121
 122
 123
 124
 125
 126
 127
 128
 129
 130
 131
 132
 133
 134
 135
 136
 137
 138
 139
 140
 141
 142
 143
 144
 145
 146
 147
 148
 149
 150
 151
 152
 153
 154
 155
 156
 157
 158
 159
 160
 161
 162
 163
 164
 165
 166
 167
 168
 169
 170
 171
 172
 173
 174
 175
 176
 177
 178
 179
 180
 181
 182
 183
 184
 185
 186
 187
 188
 189
 190
 191
 192
 193
 194
 195
 196
 197
 198
 199
 200
 201
 202
 203
 204
 205
 206
 207
 208
 209
 210
 211
 212
 213
 214
 215
 216
 217
 218
 219
 220
 221
 222
 223
 224
 225
 226
 227
 228
 229
 230
 231
 232
 233
 234
 235
 236
 237
 238
 239
 240
 241
 242
 243
 244
 245
 246
 247
 248
 249
 250
 251
 252
 253
 254
 255
 256
 257
 258
 259
 260
 261
 262
 263
 264
 265
 266
 267
 268
 269
 270
 271
 272
 273
 274
 275
 276
 277
 278
 279
 280
 281
 282
 283
 284
 285
 286
 287
 288
 289
 290
 291
 292
 293
 294
 295
 296
 297
 298
 299
 300
 301
 302
 303
 304
 305
 306
 307
 308
 309
 310
 311
 312
 313
 314
 315
 316
 317
 318
 319
 320
 321
 322
 323
 324
 325
 326
 327
 328
 329
 330
 331
 332
 333
 334
 335
 336
 337
 338
 339
 340
 341
 342
 343
 344
 345
 346
 347
 348
 349
 350
 351
 352
 353
 354
 355
 356
 357
 358
 359
 360
 361
 362
 363
 364
 365
 366
 367
 368
 369
 370
 371
 372
 373
 374
 375
 376
 377
 378
 379
 380
 381
 382
 383
 384
 385
 386
 387
 388
 389
 390
 391
 392
 393
 394
 395
 396
 397
 398
 399
 400
 401
 402
 403
 404
 405
 406
 407
 408
 409
 410
 411
 412
 413
 414
 415
 416
 417
 418
 419
 420
 421
 422
 423
 424
 425
 426
 427
 428
 429
 430
 431
 432
 433
 434
 435
 436
 437
 438
 439
 440
 441
 442
 443
 444
 445
 446
 447
 448
 449
 450
 451
 452
 453
 454
 455
 456
 457
 458
 459
 460
 461
 462
 463
 464
 465
 466
 467
 468
 469
 470
 471
 472
 473
 474
 475
 476
 477
 478
 479
 480
 481
 482
 483
 484
 485
 486
 487
 488
 489
 490
 491
 492
 493
 494
 495
 496
 497
 498
 499
 500
 501
 502
 503
 504
 505
 506
 507
 508
 509
 510
 511
 512
 513
 514
 515
 516
 517
 518
 519
 520
 521
 522
 523
 524
 525
 526
 527
 528
 529
 530
 531
 532
 533
 534
 535
 536
 537
 538
 539
 540
 541
 542
 543
 544
 545
 546
 547
 548
 549
 550
 551
 552
 553
 554
 555
 556
 557
 558
 559
 560
 561
 562
 563
 564
 565
 566
 567
 568
 569
 570
 571
 572
 573
 574
 575
 576
 577
 578
 579
 580
 581
 582
 583
 584
 585
 586
 587
 588
 589
 590
 591
 592
 593
 594
 595
 596
 597
 598
 599
 600
 601
 602
 603
 604
 605
 606
 607
 608
 609
 610
 611
 612
 613
 614
 615
 616
 617
 618
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
class NeuronList:
    """Collection of neurons.

    Gives quick access to neurons' attributes and functions.

    Parameters
    ----------
    x :                 list | array | TreeNeuron | MeshNeuron | Dotprops | NeuronList
                        Data to construct neuronlist from. Can be either:

                        1. Tree/MeshNeuron(s) or Dotprops
                        2. NeuronList(s)
                        3. Anything that constructs a Tree/MeshNeuron
                        4. List of the above

    make_copy :         bool, optional
                        If True, Neurons are deepcopied before being
                        assigned to the NeuronList.
    make_using :        function | class, optional
                        Function or class used to construct neurons from
                        elements in `x` if they aren't already neurons.
                        By default, will use `navis.Neuron` to try to infer
                        what kind of neuron can be constructed.
    parallel :          bool
                        If True, will use parallel threads when initialising the
                        NeuronList. Should be slightly up to a lot faster
                        depending on the numbers of cores and the input data.
    n_cores :           int
                        Number of cores to use for when `parallel=True`.
                        Defaults to half the available cores.
    **kwargs
                        Will be passed to constructor of Tree/MeshNeuron (see
                        `make_using`).

    """

    neurons: List['core.NeuronObject']

    cable_length: Sequence[float]

    soma: Sequence[int]
    root: Sequence[int]

    graph: 'nx.DiGraph'
    igraph: 'igraph.Graph'  # type: ignore  # doesn't know iGraph

    def __init__(self,
                 x: Union[Iterable[Union[core.BaseNeuron,
                                         'NeuronList',
                                         pd.DataFrame]],
                          'NeuronList',
                          core.BaseNeuron,
                          pd.DataFrame],
                 make_copy: bool = False,
                 make_using: Optional[type] = None,
                 parallel: bool = False,
                 n_cores: int = os.cpu_count() // 2,
                 **kwargs):
        """Initialize NeuronList (see class docstring for parameter details)."""
        # If below parameter is True, most calculations will be parallelized
        # which speeds them up quite a bit. Unfortunately, this uses A TON of
        # memory - for large lists this might make your system run out of
        # memory. In these cases, leave this property at False
        self.parallel = parallel
        self.n_cores = n_cores

        # Determines if subsetting this NeuronList will copy the neurons
        self.copy_on_subset: bool = False

        if isinstance(x, NeuronList):
            # We can't simply say self.neurons = x.neurons b/c that way
            # changes in the list would backpropagate
            self.neurons = [n for n in x.neurons]
        elif utils.is_iterable(x):
            # If x is a list of mixed objects we need to unpack/flatten that
            # E.g. x = [NeuronList, NeuronList, core.TreeNeuron]
            # We need to make sure the order is retained though (important for
            # e.g. plotting)
            self.neurons = []
            for n in x:
                # Unpack neuronlists
                if isinstance(n, NeuronList):
                    self.neurons += n.neurons
                # Everything else is just appended - will throw error later
                else:
                    self.neurons.append(n)
        elif isinstance(x, type(None)):
            # Empty Neuronlist
            self.neurons = []
        else:
            # Any other datatype will simply be assumed to be accepted by
            # core.Neuron() - if not this will throw an error
            self.neurons = [x]  # type: ignore

        # Now convert and/or make copies if necessary
        to_convert = []
        for i, n in enumerate(self.neurons):
            if not isinstance(n, core.BaseNeuron) or make_copy is True:
                # The `i` keeps track of the original index so that after
                # conversion to Neurons, the objects will occupy the same
                # position
                to_convert.append((n, i))

        if to_convert:
            if not make_using:
                make_using = core.Neuron
            elif not isinstance(make_using, type) and not callable(make_using):
                # Given an object instance (e.g. a neuron): use its class as factory
                make_using = make_using.__class__

            if self.parallel:
                # Threads (not processes): conversion is presumably I/O- or
                # C-extension-bound - TODO confirm
                with ThreadPoolExecutor(max_workers=self.n_cores) as e:
                    futures = e.map(lambda x: make_using(x, **kwargs),
                                    [n[0] for n in to_convert])

                    converted = [n for n in config.tqdm(futures,
                                                        total=len(to_convert),
                                                        desc='Make nrn',
                                                        disable=config.pbar_hide,
                                                        leave=config.pbar_leave)]

                    for i, c in enumerate(to_convert):
                        self.neurons[c[1]] = converted[i]

            else:
                for n in config.tqdm(to_convert, desc='Make nrn',
                                     disable=config.pbar_hide or len(to_convert) == 1,
                                     leave=config.pbar_leave):
                    self.neurons[n[1]] = make_using(n[0], **kwargs)

        # Add ID-based indexer
        self.idx = _IdIndexer(self)

    @property
    def neurons(self):
        """Neurons contained in this NeuronList."""
        return self.__dict__.get('neurons', [])

    @property
    def is_mixed(self):
        """Return True if contains more than one type of neuron."""
        return len(self.types) > 1

    @property
    def is_degenerated(self):
        """Return True if contains neurons with non-unique IDs."""
        unique_ids = set(self.id)
        return len(unique_ids) != len(self.neurons)

    @property
    def types(self):
        """Return neuron types present in this list."""
        return tuple(set([type(n) for n in self.neurons]))

    @property
    def shape(self):
        """Shape of NeuronList (N, )."""
        return (self.__len__(),)

    @property
    def bbox(self):
        """Bounding box across all neurons in the list."""
        if self.empty:
            raise ValueError('No bounding box - NeuronList is empty.')

        bboxes = np.hstack([n.bbox for n in self.neurons])
        mn = np.min(bboxes, axis=1)
        mx = np.max(bboxes, axis=1)
        return np.vstack((mn, mx)).T

    @property
    def empty(self):
        """Return True if NeuronList is empty."""
        return len(self.neurons) == 0

    def __reprframe__(self):
        """Return truncated DataFrame for self representation."""
        if self.empty:
            return pd.DataFrame([])
        if len(self) < 5:
            return self.summary()
        # Show the first and last three neurons; re-index so the tail rows
        # display their true positions in the full list.
        truncated = (self[:3] + self[-3:]).summary()
        truncated.index = np.append(truncated.index[:3],
                                    np.arange(len(self) - 3, len(self)))
        return truncated

    def __reprheader__(self, html=False):
        """Generate header for representation."""
        if len(self) > 2000:
            # For larger lists, extrapolate from sampling 10% of the list
            size = utils.sizeof_fmt(self.memory_usage(deep=False,
                                                      sample=True,
                                                      estimate=True))
            head = f'{type(self)} containing {len(self)} neurons (est. {size})'
        else:
            size = utils.sizeof_fmt(self.memory_usage(deep=False, estimate=True))
            head = f'{type(self)} containing {len(self)} neurons ({size})'

        if html:
            # Escape angle brackets of the type repr for HTML output.
            head = head.replace('<', '&lt;').replace('>', '&gt;')

        return head

    def __str__(self):
        """Informal representation - identical to repr."""
        return repr(self)

    def __repr__(self):
        """Header plus a truncated summary table (when non-empty)."""
        header = self.__reprheader__(html=False)
        if self.empty:
            return header
        with pd.option_context("display.max_rows", 4,
                               "display.show_dimensions", False):
            return f'{header}\n{str(self.__reprframe__())}'

    def _repr_html_(self):
        """HTML representation for Jupyter notebooks."""
        header = self.__reprheader__(html=True)
        if self.empty:
            return header
        with pd.option_context("display.max_rows", 4,
                               "display.show_dimensions", False):
            return header + self.__reprframe__()._repr_html_()

    def __iter__(self) -> Iterator['core.NeuronObject']:
        """Iterator instanciates a new class every time it is called.
        This allows the use of nested loops on the same NeuronList object.
        """
        class prange_iter(Iterator['core.NeuronObject']):
            def __init__(self, neurons, start):
                self.iter = start
                self.neurons = neurons

            def __next__(self) -> 'core.NeuronObject':
                if self.iter >= len(self.neurons):
                    raise StopIteration
                to_return = self.neurons[self.iter]
                self.iter += 1
                return to_return

        return prange_iter(self.neurons, 0)

    def __len__(self):
        """Number of neurons in this list."""
        return len(self.neurons)

    def __dir__(self):
        """Custom __dir__ to add some parameters that we want to make searchable."""
        # Merge attribute names from a capped number of contained neurons
        # so tab-completion also offers per-neuron attributes.
        neuron_attrs = set.union(*[set(dir(n)) for n in self.neurons[:MAX_SEARCH]])
        return list(neuron_attrs | set(super().__dir__()))

    def __getattr__(self, key):
        """Collect attribute `key` across all contained neurons.

        Only called when normal attribute lookup fails. If `key` is a data
        attribute on every neuron, returns the collected values (DataFrame,
        pint Quantity or numpy array); if it is a method on every neuron,
        returns a NeuronProcessor that maps calls across all neurons.
        Raises AttributeError/TypeError when collection is impossible.
        """
        if self.empty:
            raise AttributeError(f'Neuronlist is empty - "{key}" not found')
        # Dynamically check if the requested attribute/function exists in
        # all neurons
        values = [getattr(n, key, NotImplemented) for n in self.neurons]
        is_method = [isinstance(v, types.MethodType) for v in values]
        # is_none = [isinstance(v, type(None)) for v in values]
        is_frame = [isinstance(v, pd.DataFrame) for v in values]
        is_quantity = [isinstance(v, config.ureg.Quantity) for v in values]

        # First check if there is any reason why we can't collect this
        # attribute across all neurons
        if all([isinstance(v, type(NotImplemented)) for v in values]):
            raise AttributeError(f'Attribute "{key}" not found in '
                                 'NeuronList or its neurons')
        elif any([isinstance(v, type(NotImplemented)) for v in values]):
            raise AttributeError(f'Attribute or function "{key}" missing '
                                 'for some neurons')
        elif len(set(is_method)) > 1:
            # Mixed methods and plain attributes - no sensible collection.
            raise TypeError('Found both methods and attributes with name '
                            f'"{key}" among neurons.')
        # Concatenate if dealing with DataFrame
        elif not all(is_method):
            if any(is_frame):
                df = pd.concat([v for v in values if isinstance(v, pd.DataFrame)],
                               axis=0,
                               ignore_index=True,
                               join='outer',
                               sort=True)

                # For each row label which neuron (id) it belongs to
                # (rows were concatenated in neuron order, so assign each
                # contiguous row range its neuron's id)
                df['neuron'] = None
                ix = 0
                for k, v in enumerate(values):
                    if isinstance(v, pd.DataFrame):
                        df.iloc[ix:ix + v.shape[0],
                                df.columns.get_loc('neuron')] = self.neurons[k].id
                        ix += v.shape[0]
                return df
            elif all(is_quantity):
                # See if units are all compatible
                is_compatible = [values[0].is_compatible_with(v) for v in values]
                if all(is_compatible):
                    # Convert all to the same units
                    conv = [v.to(values[0]).magnitude for v in values]
                    # Return pint array
                    return config.ureg.Quantity(np.array(conv), values[0].units)
                else:
                    logger.warning(f'"{key}" contains incompatible units. '
                                   'Returning unitless values.')
                    return np.array([v.magnitude for v in values])
            elif any(is_quantity):
                logger.warning(f'"{key}" contains data with and without '
                               'units. Removing units.')
                return np.array([getattr(v, 'magnitude', v) for v in values])
            else:
                # If the result would be a ragged array specify dtype as object
                # This avoids a depcrecation warning and future issues
                dtype = None
                if any([utils.is_iterable(v) for v in values]):
                    if not all([utils.is_iterable(v) for v in values]):
                        dtype = object
                    elif len(set([len(v) for v in values])) > 1:
                        dtype = object
                return np.array(values, dtype=dtype)
        # If everything is a method
        else:
            # To avoid confusion we will not allow calling of magic methods
            # via the NeuronProcessor as those are generally expected to
            # be methods of the NeuronList itself
            if key.startswith('__') and key.endswith('__'):
                raise AttributeError(f"'NeuronList' object has no attribute '{key}'")

            # Delayed import to avoid circular import
            from .core_utils import NeuronProcessor

            # Return function but wrap it in a function that will show
            # a progress bar. Note that we do not use parallel processing by
            # default to avoid errors with `inplace=True`
            return NeuronProcessor(self,
                                   values,
                                   parallel=False,
                                   desc=key)

    def __setattr__(self, key, value):
        """Set an attribute on the NeuronList itself (not on its neurons)."""
        # We have cater for the situation when we want to replace the whole
        # dictionary - e.g. when unpickling (see __setstate__)
        # Below code for setting the dictionary looks complicated and
        # unnecessary but is really complicated and VERY necessary
        if key == '__dict__':
            if not isinstance(value, dict):
                raise TypeError(f'__dict__ must be dict, got {type(value)}')
            self.__dict__.clear()
            for k, v in value.items():
                self.__dict__[k] = v
            return

        # Check if this attribute exists in the neurons
        # (warn because users likely meant to set it on each neuron instead)
        if any([hasattr(n, key) for n in self.neurons]):
            logger.warning('It looks like you are trying to add a neuron '
                           'attribute to a NeuronList. Setting the attribute '
                           f'"{key}" on the NeuronList will not propagated to '
                           'the neurons it contains! To set neuron attributes '
                           'use the `NeuronList.set_neuron_attributes()` method.')

        self.__dict__[key] = value

    def __getstate__(self):
        """Get state (used e.g. for pickling)."""
        # We have to implement this to make sure that we don't accidentally
        # call __getstate__ of each neuron via the NeuronProcessor
        state = {k: v for k, v in self.__dict__.items() if not callable(v)}
        return state

    def __setstate__(self, d):
        """Set state (used e.g. for unpickling)."""
        # We have to implement this to make sure that we don't accidentally
        # call __setstate__ of each neuron via the NeuronProcessor.
        # Assignment routes through __setattr__'s special-cased '__dict__'
        # branch, which validates and replaces the dict wholesale.
        self.__dict__ = d

    def __contains__(self, x):
        return x in self.neurons

    def __copy__(self):
        """Shallow copy via the public copy() method."""
        return self.copy(deepcopy=False)

    def __deepcopy__(self, memo=None):
        """Deep copy via the public copy() method.

        Parameters
        ----------
        memo : dict, optional
            Memo dictionary supplied by `copy.deepcopy`. Accepted for
            protocol compatibility; the deep copy itself is delegated to
            `self.copy()`.
        """
        # `copy.deepcopy` always calls `__deepcopy__(memo)` - without this
        # parameter the original implementation raised a TypeError.
        return self.copy(deepcopy=True)

    def __getitem__(self, key):
        """Index the NeuronList.

        Supports boolean masks, iterables of indices, strings (treated as
        regex fully matched against each neuron's `name`), integers and
        slices. Scalar indices return a single neuron; everything else
        returns a new NeuronList (deep-copied if `copy_on_subset` is True).
        """
        if utils.is_iterable(key):
            if all([isinstance(k, (bool, np.bool_)) for k in key]):
                # Boolean mask: length must match the list exactly.
                if len(key) != len(self.neurons):
                    raise IndexError('boolean index did not match indexed '
                                     f'NeuronList; dimension is {len(self.neurons)} '
                                     'but corresponding boolean dimension is '
                                     f'{len(key)}')
                subset = [n for i, n in enumerate(self.neurons) if key[i]]
            else:
                # Mixed/integer iterable: resolve each element recursively.
                subset = [self[i] for i in key]
        elif isinstance(key, str):
            subset = [n for n in self.neurons if re.fullmatch(key, getattr(n, 'name', ''))]

            # For indexing by name, we expect a match
            if not subset:
                raise AttributeError('NeuronList does not contain neuron(s) '
                                     f'with name: "{key}"')

        elif isinstance(key, (int, np.integer, slice)):
            subset = self.neurons[key]
        else:
            raise NotImplementedError(f'Indexing NeuronList by {type(key)} not implemented')

        if isinstance(subset, core.BaseNeuron):
            return subset

        # Make sure we unpack neurons
        subset = utils.unpack_neurons(subset)

        return self.__class__(subset, make_copy=self.copy_on_subset)

    def __setitem__(self, key, value):
        """Set an attribute on every contained neuron via `nl[key] = value`."""
        if not isinstance(key, str):
            msg = ('Itemsetter can only be used to set attributes of the '
                   'neurons contained in the NeuronList. For example:\n'
                   '  >>> nl = navis.example_neurons(3)\n'
                   '  >>> nl["propertyA"] = 1\n'
                   '  >>> nl[0].propertyA\n'
                   '  1\n'
                   '  >>> nl["propertyB"] = ["a", "b", "c"]\n'
                   '  >>> nl[2].propertyB\n'
                   '  "c"')
            raise NotImplementedError(msg)

        if not utils.is_iterable(value):
            # Broadcast a single value to every neuron.
            for neuron in self.neurons:
                setattr(neuron, key, value)
        elif len(value) == len(self.neurons):
            # One value per neuron, in order.
            for neuron, val in zip(self.neurons, value):
                setattr(neuron, key, val)
        else:
            raise ValueError('Length of values does not match number of '
                             'neurons in NeuronList.')

    def __missing__(self, key):
        raise AttributeError('No neuron matching the search criteria.')

    def __add__(self, to_add):
        """Implement addition.

        Adding a neuron, a NeuronList or an iterable thereof returns a new
        NeuronList; any other type returns NotImplemented.
        """
        if isinstance(to_add, core.BaseNeuron):
            return self.__class__(self.neurons + [to_add],
                                  make_copy=self.copy_on_subset)
        elif isinstance(to_add, NeuronList):
            return self.__class__(self.neurons + to_add.neurons,
                                  make_copy=self.copy_on_subset)
        elif utils.is_iterable(to_add):
            if all(isinstance(n, core.BaseNeuron) for n in to_add):
                return self.__class__(self.neurons + list(to_add),
                                      make_copy=self.copy_on_subset)
            else:
                # Coerce non-neuron elements into neurons. The original code
                # subscripted the class (`core.BaseNeuron[n]`) which raised a
                # TypeError at runtime; construct via `core.Neuron` instead
                # (the same default factory used by `__init__`).
                return self.__class__(self.neurons + [core.Neuron(n) for n in to_add],
                                      make_copy=self.copy_on_subset)
        else:
            return NotImplemented

    def __eq__(self, other):
        """Implement equality."""
        if isinstance(other, NeuronList):
            if len(self) != len(other):
                return False
            else:
                return all([n1 == n2 for n1, n2 in zip(self, other)])
        else:
            return NotImplemented

    def __sub__(self, to_sub):
        """Implement subtraction: remove neuron(s) from the list."""
        if isinstance(to_sub, core.BaseNeuron):
            keep = [n for n in self.neurons if n != to_sub]
        elif isinstance(to_sub, NeuronList):
            keep = [n for n in self.neurons if n not in to_sub]
        else:
            return NotImplemented
        return self.__class__(keep, make_copy=self.copy_on_subset)

    def __truediv__(self, other):
        """Implements division for coordinates (nodes, connectors)."""
        divided = [n / other for n in config.tqdm(self.neurons,
                                                  desc='Dividing',
                                                  disable=config.pbar_hide,
                                                  leave=False)]
        return self.__class__(divided)


    def __mul__(self, other):
        """Implement multiplication for coordinates (nodes, connectors)."""
        scaled = [n * other for n in config.tqdm(self.neurons,
                                                 desc='Multiplying',
                                                 disable=config.pbar_hide,
                                                 leave=False)]
        return self.__class__(scaled)

    def __and__(self, other):
        """Implement bitwise AND using the & operator (intersection)."""
        if isinstance(other, core.BaseNeuron):
            shared = [n for n in self.neurons if n == other]
        elif isinstance(other, NeuronList):
            shared = [n for n in self.neurons if n in other]
        else:
            return NotImplemented
        return self.__class__(shared, make_copy=self.copy_on_subset)

    def __or__(self, other):
        """Implement bitwise OR using the | operator (union).

        Returns a new NeuronList with `other` added only if not already
        present. Does not modify this NeuronList.
        """
        if isinstance(other, core.BaseNeuron):
            # Copy the list first: the original implementation appended to
            # `self.neurons` directly, mutating this NeuronList in place as
            # a side effect of the operator.
            neurons = list(self.neurons)
            if not any(n == other for n in neurons):
                neurons.append(other)
            return self.__class__(neurons, make_copy=self.copy_on_subset)
        elif isinstance(other, NeuronList):
            neurons = self.neurons + [n for n in other.neurons if n not in self]
            return self.__class__(neurons, make_copy=self.copy_on_subset)
        else:
            return NotImplemented

    def append(self, v):
        """Add neuron(s) to this list in place.

        Parameters
        ----------
        v : BaseNeuron | NeuronList
            Neuron(s) to append.

        Raises
        ------
        NotImplementedError
            If `v` is neither a neuron nor a NeuronList.

        Examples
        --------
        >>> import navis
        >>> # This is mostly for doctests
        >>> nl = navis.example_neurons()
        >>> len(nl)
        5
        >>> # Add a single neuron to the list
        >>> nl.append(nl[0])
        >>> len(nl)
        6
        >>> # Add a list of neurons to the list
        >>> nl.append(nl)
        >>> len(nl)
        12

        """
        if isinstance(v, core.BaseNeuron):
            self.neurons.append(v)
        elif isinstance(v, NeuronList):
            self.neurons += v.neurons
        else:
            # The original message was missing a space after "type",
            # producing e.g. "data of type<class 'int'>".
            raise NotImplementedError('Unable to append data of type '
                                      f'{type(v)} to NeuronList')

    def apply(self,
              func: Callable,
              *,
              parallel: bool = False,
              n_cores: int = os.cpu_count() // 2,
              omit_failures: bool = False,
              **kwargs):
        """Apply function across all neurons in this NeuronList.

        Parameters
        ----------
        func :          callable
                        Function to be applied. Must accept
                        [`navis.BaseNeuron`][] as first argument.
        parallel :      bool
                        If True will use multiprocessing. Spawning the
                        processes takes time (and memory). Using `parallel=True`
                        makes only sense if the NeuronList is large or the
                        function takes a long time to run.
        n_cores :       int
                        Number of CPUs to use for multiprocessing. Defaults to
                        half the available cores.
        omit_failures : bool
                        If True, will ignore failures.

        **kwargs
                    Will be passed to function.

        Returns
        -------
        Results

        Examples
        --------
        >>> import navis
        >>> nl = navis.example_neurons()
        >>> # Apply resampling function
        >>> nl_rs = nl.apply(navis.resample_skeleton, resample_to=1000, inplace=False)

        """
        if not callable(func):
            raise TypeError('"func" must be callable')

        # Delayed import to avoid circular import
        from .core_utils import NeuronProcessor

        processor = NeuronProcessor(self,
                                    func,
                                    parallel=parallel,
                                    n_cores=n_cores,
                                    omit_failures=omit_failures,
                                    desc=f'Apply {func.__name__}')
        return processor(self.neurons, **kwargs)

    def sum(self) -> pd.Series:
        """Return sum of numeric and boolean values over all neurons.

        Returns
        -------
        pandas.Series
                    Per-column sums of the numeric/boolean summary properties.

        """
        # DataFrame.sum(numeric_only=True) returns a Series, not a DataFrame
        return self.summary().sum(numeric_only=True)

    def mean(self) -> pd.Series:
        """Return mean of numeric and boolean values over all neurons.

        Returns
        -------
        pandas.Series
                    Per-column means of the numeric/boolean summary properties.

        """
        # DataFrame.mean(numeric_only=True) returns a Series, not a DataFrame
        return self.summary().mean(numeric_only=True)

    def memory_usage(self, deep=False, estimate=False, sample=False):
        """Return estimated size in memory of this NeuronList.

        Works by going over each neuron and summing up their size in memory.

        Parameters
        ----------
        deep :          bool
                        Pass to pandas DataFrames. If True will inspect data of
                        object type too.
        estimate :      bool
                        If True, we will only estimate the size. This is
                        considerably faster but will slightly underestimate the
                        memory usage.
        sample :        bool
                        If True, we will only sample 10% of the neurons
                        contained in the list and extrapolate an estimate from
                        there.

        Returns
        -------
        int
                    Memory usage in bytes.

        """
        if self.empty:
            return 0

        if not sample:
            # Best effort: any failure yields 0 instead of propagating.
            # Catch `Exception` (not `BaseException`) so KeyboardInterrupt
            # and SystemExit still propagate to the caller.
            try:
                return sum(n.memory_usage(deep=deep, estimate=estimate)
                           for n in self.neurons)
            except Exception:
                return 0
        else:
            try:
                # Sample every 10th neuron and extrapolate to the full list
                subset = self.neurons[::10]
                s = sum(n.memory_usage(deep=deep, estimate=estimate)
                        for n in subset)
                return s * (len(self.neurons) / len(subset))
            except Exception:
                return 0

    def sample(self, N: Union[int, float] = 1) -> 'NeuronList':
        """Return random subset of neurons.

        Parameters
        ----------
        N :     int | float
                Number of neurons to return. Fractions between 0 and 1 are
                interpreted as a fraction of the list's length.

        Returns
        -------
        NeuronList
                Random subset of this list's neurons (in original order).

        """
        if 0 < N < 1:
            # Interpret fractions as a percentage of the list
            N = int(len(self.neurons) * N)

        indices = list(range(len(self.neurons)))
        random.shuffle(indices)
        # Use a set for O(1) membership tests instead of scanning the
        # `indices[:N]` slice once per neuron (was O(n * N))
        keep = set(indices[:N])
        return self.__class__([n for i, n in enumerate(self.neurons) if i in keep],
                              make_copy=self.copy_on_subset)

    def plot3d(self, **kwargs):
        """Plot neurons in 3D using [`navis.plot3d`][].

        Parameters
        ----------
        **kwargs
                Keyword arguments will be passed to [`navis.plot3d`][].
                See `help(navis.plot3d)` for a list of keywords.

        See Also
        --------
        [`navis.plot3d`][]
                Base function called to generate 3d plot.

        """
        # Deferred import; aliased so it does not shadow this method's name
        from ..plotting import plot3d as _plot3d

        return _plot3d(self, **kwargs)

    def plot2d(self, **kwargs):
        """Plot neurons in 2D using [`navis.plot2d`][].

        Parameters
        ----------
        **kwargs
                Keyword arguments will be passed to [`navis.plot2d`][].
                See `help(navis.plot2d)` for a list of accepted keywords.

        See Also
        --------
        [`navis.plot2d`][]
                Base function called to generate 2d plot.

        """
        # Deferred import; aliased so it does not shadow this method's name
        from ..plotting import plot2d as _plot2d

        return _plot2d(self, **kwargs)

    def summary(self,
                N: Optional[Union[int, slice]] = None,
                add_props: Optional[list] = None,
                progress=False
                ) -> pd.DataFrame:
        """Get summary over all neurons in this NeuronList.

        Parameters
        ----------
        N :         int | slice, optional
                    If int, get only first N entries.
        add_props : list, optional
                    Additional properties to add to summary. If attribute not
                    available will return 'NA'.
        progress :  bool
                    Whether to show a progress bar. Can be useful for very
                    large list.

        Returns
        -------
        pandas DataFrame

        """
        if not self.empty:
            # Fetch a union of all summary props (keep order of first occurrence)
            all_props = [p for prop_list in self.SUMMARY_PROPS for p in prop_list]
            props = np.unique(all_props)
            props = sorted(props, key=lambda x: all_props.index(x))
        else:
            props = []

        # Add ID to properties - unless all are generic UUIDs
        if any([not isinstance(n.id, uuid.UUID) for n in self.neurons]):
            # Make sure we don't have two IDs
            if 'id' in props:
                props.remove('id')
            props = np.insert(props, 2, 'id')

        # Default is None (not a mutable `[]`) - falsy values behave the same
        if add_props:
            props = np.append(props, add_props)

        # `slice(None)` covers the "all entries" case when N is None
        if not isinstance(N, slice):
            N = slice(N)

        return pd.DataFrame(data=[[getattr(n, a, 'NA') for a in props]
                                  for n in config.tqdm(self.neurons[N],
                                                       desc='Summarizing',
                                                       leave=False,
                                                       disable=not progress)],
                            columns=props)

    def itertuples(self):
        """Helper to mimic `pandas.DataFrame.itertuples()`.

        Returns
        -------
        list
                The neurons contained in this list; iterating over them
                stands in for iterating over DataFrame rows.

        """
        return self.neurons

    def add_metadata(self, meta, id_col='id', neuron_id='id', columns=None, register=False, missing='raise'):
        """Add neuron metadata from a DataFrame.

        Parameters
        ----------
        meta :      pd.DataFrame | str | Path
                    DataFrame or filepath to a CSV file containing metadata.
                    Must contain a column with neuron IDs.
        id_col :    str
                    Name of the column containing neuron IDs.
        neuron_id : str
                    Name of the attribute in the neuron that corresponds to
                    the `id_col`.
        columns :   list, optional
                    List of columns to add. If None, will add all columns except
                    for `id_col`.
        register :  bool
                    If True, will also register the attribute(s) as properties
                    that should show up in the summary.
        missing :   'raise' | 'warn' | 'ignore'
                    What to do if `meta` is missing a value for a neuron.

        See Also
        --------
        navis.NeuronList.set_neuron_attributes
                    Set individual attributes of neurons contained in the NeuronList.

        """
        # Validate explicitly instead of `assert` (stripped under `python -O`);
        # consistent with `set_neuron_attributes`
        utils.eval_param(missing, name='missing',
                         allowed_values=('raise', 'warn', 'ignore'))

        if isinstance(meta, (str, Path)):
            meta = pd.read_csv(meta)

        if not isinstance(meta, pd.DataFrame):
            raise TypeError('`meta` must be a DataFrame or a path to a CSV file, '
                            f'got {type(meta)}')

        if id_col not in meta.columns:
            raise KeyError(f'Column "{id_col}" not found in metadata.')

        # Index meta data by the neuron_id
        neuron_id = getattr(self, neuron_id)
        miss = ~np.isin(neuron_id, meta[id_col].values)
        if any(miss):
            msg = f'Metadata is missing entries for IDs: {neuron_id[miss]}'
            if missing == 'raise':
                raise KeyError(msg)
            elif missing == 'warn':
                logger.warning(msg)

        # Reindex so metadata rows line up with this list's neurons;
        # IDs absent from `meta` become NaN rows (handled above)
        meta = meta.set_index(id_col).reindex(neuron_id)

        if columns is None:
            columns = meta.columns

        for c in columns:
            # Never set the ID column itself as an attribute
            if c == id_col:
                continue
            self.set_neuron_attributes(
                meta[c].values.tolist(),
                name=c,
                register=register
                )

    def get_neuron_attributes(self, *args, **kwargs):
        """Get attributes of neurons contained in the NeuronList.

        Parameters
        ----------
        name :      str
                    Name of the property to get.
        default :   any, optional
                    Default value to return if attribute is not found.

        Returns
        -------
        np.ndarray
                    Array of values for the requested attribute.

        """
        # Arguments are forwarded verbatim to `getattr(neuron, ...)`
        values = [getattr(neuron, *args, **kwargs) for neuron in self.neurons]
        return np.array(values)

    def set_neuron_attributes(self, x, name, register=False, na='raise'):
        """Set attributes of neurons contained in the NeuronList.

        Parameters
        ----------
        x :         any | list | np.ndarray | dict | function
                    Value of the property:
                      - lists and arrays are expected to contain a value for
                        each neuron and hence have to match the length of the
                        NeuronList
                      - dict is expected to map `{neuron.id: value}`
                      - a function is expected to take `neuron.id` as input
                        and return a value
        name :      str
                    Name of the property to set.
        register :  bool
                    If True, will also register the attribute(s) as properties
                    that should show up in the summary.
        na :        'raise' | 'propagate' | 'skip'
                    What to do if `x` is a dictionary and does not contain a
                    value for a neuron:
                     - 'raise' will raise a KeyError
                     - 'propagate' will set the attribute to `None`
                     - 'skip' will not set the attribute

        See Also
        --------
        navis.NeuronList.add_metadata
                    Set metadata from a dataframe.

        Examples
        --------
        >>> import navis
        >>> nl = navis.example_neurons(5)
        >>> # Set a single value
        >>> nl.set_neuron_attributes('some_value', name='my_attr')
        >>> nl[0].my_attr
        'some_value'
        >>> # Set individual values as iterable
        >>> nl.set_neuron_attributes([1, 2, 3, 4, 5], name='my_attr')
        >>> nl[0].my_attr
        1
        >>> nl.my_attr
        array([1, 2, 3, 4, 5])
        >>> # Set individual values using a dictionary
        >>> val_dict = dict(zip(nl.id, ['test', 2, 2.2, 4, 'test2']))
        >>> nl.set_neuron_attributes(val_dict, name='my_attr')
        >>> nl[0].my_attr
        'test'

        """
        utils.eval_param(na, name='na',
                         allowed_values=('raise', 'propagate', 'skip'))
        utils.eval_param(name, name='name', allowed_types=(str, ))

        if isinstance(x, dict):
            if na == 'raise':
                # Fail early if any neuron has no entry in the dictionary
                miss = ~np.isin(self.id, list(x))
                if any(miss):
                    raise KeyError('Dictionary `x` is missing entries for IDs: '
                                   f'{self.id[miss]}')
            for n in self.neurons:
                v = x.get(n.id, None)
                # NOTE: with na='skip' an explicit `None` value is also skipped
                if (v is None) and (na == 'skip'):
                    continue
                n._register_attr(name, v, summary=register)
        elif isinstance(x, (list, np.ndarray)):
            if len(x) != len(self):
                # Fixed missing space in error message ('the{len(self)}')
                raise ValueError(f'Got {len(x)} values for the {len(self)} '
                                 'neurons in the NeuronList.')
            for n, v in zip(self.neurons, x):
                n._register_attr(name, v, summary=register)
        elif callable(x):
            # Functions are mapped over neuron IDs
            for n in self.neurons:
                n._register_attr(name, x(n.id), summary=register)
        else:
            # Scalar: set the same value on every neuron
            for n in self.neurons:
                n._register_attr(name, x, summary=register)

    def sort_values(self, key: str, ascending: bool = False):
        """Sort neurons by given key.

        Needs to be an attribute of all neurons: for example `name`.
        Also works with custom attributes. Sorts in place and returns None.

        Parameters
        ----------
        key :       str
                    Name of the attribute to sort by.
        ascending : bool
                    If False (default), sort in descending order.

        """
        # `not ascending` instead of `ascending is False`: identity
        # comparison misbehaves for falsy non-bool values such as 0
        self.neurons = sorted(self.neurons,
                              key=lambda n: getattr(n, key),
                              reverse=not ascending)

    def copy(self, **kwargs) -> 'NeuronList':
        """Return copy of this NeuronList.

        Parameters
        ----------
        **kwargs
                    Keyword arguments passed to neuron's `.copy()` method::

                    deepcopy :  bool, for TreeNeurons only
                                If False, `.graph` (NetworkX DiGraphs) will be
                                returned as views - changes to nodes/edges can
                                propagate back! `.igraph` (iGraph) - if
                                available - will always be deepcopied.

        """
        # Only show a progress bar for larger lists (>= 20 neurons)
        hide_pbar = config.pbar_hide or len(self) < 20
        duplicates = [n.copy(**kwargs)
                      for n in config.tqdm(self.neurons,
                                           desc='Copy',
                                           leave=False,
                                           disable=hide_pbar)]
        # Neurons were copied above already, so skip copying on construction
        return self.__class__(duplicates, make_copy=False)

    def head(self, N: int = 5) -> pd.DataFrame:
        """Return summary for the first `N` neurons."""
        # `summary` interprets an integer N as "first N entries"
        return self.summary(N=N)

    def tail(self, N: int = 5) -> pd.DataFrame:
        """Return summary for the last `N` neurons."""
        # Slice selecting the last N entries of the list
        last_n = slice(-N, len(self))
        return self.summary(N=last_n)

    def remove_duplicates(self,
                          key: str = 'name',
                          keep: str = 'first',
                          inplace: bool = False
                          ) -> Optional['NeuronList']:
        """Remove duplicate neurons from list.

        Parameters
        ----------
        key :       str | list, optional
                    Attribute(s) by which to identify duplicates. In case of
                    multiple, all attributes must match to flag a neuron as
                    duplicate.
        keep :      str
                    Which of the duplicated neurons to keep.
        inplace :   bool, optional
                    If False will return a copy of the original with
                    duplicates removed.

        """
        # Work on a copy unless we are modifying in place
        x = self if inplace else self.copy()

        key = utils.make_iterable(key)

        # Collect the relevant attribute values for every neuron
        rows = [[getattr(n, at) for at in key] for n in x]
        df = pd.DataFrame(rows, columns=key)

        # Boolean mask of neurons that are NOT flagged as duplicates
        to_keep = ~df.duplicated(keep=keep).values

        # Keep only the non-duplicated neurons
        x.neurons = x[to_keep].neurons

        return x if not inplace else None

    def unmix(self):
        """Split into NeuronLists of the same neuron type.

        Returns
        -------
        dict
                Dictionary of `{Neurontype: NeuronList}`

        """
        by_type = {}
        for neuron_type in self.types:
            same_type = [n for n in self.neurons if isinstance(n, neuron_type)]
            by_type[neuron_type] = self.__class__(same_type)
        return by_type

Bounding box across all neurons in the list.

Return True if NeuronList is empty.

Return True if contains neurons with non-unique IDs.

Return True if contains more than one type of neuron.

Neurons contained in this NeuronList.

Shape of NeuronList (N, ).

Return neuron types present in this list.

Add neuron metadata from a DataFrame.

PARAMETER DESCRIPTION
meta
    DataFrame or filepath to a CSV file containing metadata.
    Must contain a column with neuron IDs.

TYPE: pd.DataFrame | str | Path

id_col
    Name of the column containing neuron IDs.

TYPE: str DEFAULT: 'id'

neuron_id
    Name of the attribute in the neuron that corresponds to
    the `id_col`.

TYPE: str DEFAULT: 'id'

columns
    List of columns to add. If None, will add all columns except
    for `id_col`.

TYPE: list DEFAULT: None

register
    If True, will also register the attribute(s) as properties
    that should show up in the summary.

TYPE: bool DEFAULT: False

missing
    What to do if `meta` is missing a value for a neuron.

TYPE: 'raise' | 'warn' | 'ignore' DEFAULT: 'raise'

See Also

navis.NeuronList.set_neuron_attributes Set individual attributes of neurons contained in the NeuronList.

Source code in navis/core/neuronlist.py
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
def add_metadata(self, meta, id_col='id', neuron_id='id', columns=None, register=False, missing='raise'):
    """Add neuron metadata from a DataFrame.

    Parameters
    ----------
    meta :      pd.DataFrame | str | Path
                DataFrame or filepath to a CSV file containing metadata.
                Must contain a column with neuron IDs.
    id_col :    str
                Name of the column containing neuron IDs.
    neuron_id : str
                Name of the attribute in the neuron that corresponds to
                the `id_col`.
    columns :   list, optional
                List of columns to add. If None, will add all columns except
                for `id_col`.
    register :  bool
                If True, will also register the attribute(s) as properties
                that should show up in the summary.
    missing :   'raise' | 'warn' | 'ignore'
                What to do if `meta` is missing a value for a neuron.

    See Also
    --------
    navis.NeuronList.set_neuron_attributes
                Set individual attributes of neurons contained in the NeuronList.

    """
    assert missing in ('raise', 'warn', 'ignore')

    if isinstance(meta, (str, Path)):
        meta = pd.read_csv(meta)

    if not isinstance(meta, pd.DataFrame):
        raise TypeError('`meta` must be a DataFrame or a path to a CSV file, '
                        f'got {type(meta)}')

    if id_col not in meta.columns:
        raise KeyError(f'Column "{id_col}" not found in metadata.')

    # Index meta data by the neuron_id
    neuron_id = getattr(self, neuron_id)
    miss = ~np.isin(neuron_id, meta[id_col].values)
    if any(miss):
        msg = f'Metadata is missing entries for IDs: {neuron_id[miss]}'
        if missing == 'raise':
            raise KeyError(msg)
        elif missing == 'warn':
            logger.warning(msg)

    meta = meta.set_index(id_col).reindex(neuron_id)

    if columns is None:
        columns = meta.columns

    for c in columns:
        if c == id_col:
            continue
        self.set_neuron_attributes(
            meta[c].values.tolist(),
            name=c,
            register=register
            )

Add neuron(s) to this list.

Examples:

>>> import navis
>>> # This is mostly for doctests
>>> nl = navis.example_neurons()
>>> len(nl)
5
>>> # Add a single neuron to the list
>>> nl.append(nl[0])
>>> len(nl)
6
>>> # Add a list of neurons to the list
>>> nl.append(nl)
>>> len(nl)
12
Source code in navis/core/neuronlist.py
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
def append(self, v):
    """Add neuron(s) to this list.

    Examples
    --------
    >>> import navis
    >>> # This is mostly for doctests
    >>> nl = navis.example_neurons()
    >>> len(nl)
    5
    >>> # Add a single neuron to the list
    >>> nl.append(nl[0])
    >>> len(nl)
    6
    >>> # Add a list of neurons to the list
    >>> nl.append(nl)
    >>> len(nl)
    12

    """
    if isinstance(v, core.BaseNeuron):
        self.neurons.append(v)
    elif isinstance(v, NeuronList):
        self.neurons += v.neurons
    else:
        raise NotImplementedError('Unable to append data of type'
                                  f'{type(v)} to NeuronList')

Apply function across all neurons in this NeuronList.

PARAMETER DESCRIPTION
func
        Function to be applied. Must accept
        [`navis.BaseNeuron`][] as first argument.

TYPE: callable

parallel
        If True will use multiprocessing (the default is False). Spawning
        the processes takes time (and memory). Using `parallel=True`
        makes only sense if the NeuronList is large or the
        function takes a long time to run.

TYPE: bool DEFAULT: False

n_cores
        Number of CPUs to use for multiprocessing. Defaults to
        half the available cores.

TYPE: int DEFAULT: os.cpu_count() // 2

omit_failures
        If True, will ignore failures.

TYPE: bool DEFAULT: False

**kwargs
    Will be passed to function.

DEFAULT: {}

RETURNS DESCRIPTION
Results

Examples:

>>> import navis
>>> nl = navis.example_neurons()
>>> # Apply resampling function
>>> nl_rs = nl.apply(navis.resample_skeleton, resample_to=1000, inplace=False)
Source code in navis/core/neuronlist.py
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
def apply(self,
          func: Callable,
          *,
          parallel: bool = False,
          n_cores: int = os.cpu_count() // 2,
          omit_failures: bool = False,
          **kwargs):
    """Apply function across all neurons in this NeuronList.

    Parameters
    ----------
    func :          callable
                    Function to be applied. Must accept
                    [`navis.BaseNeuron`][] as first argument.
    parallel :      bool
                    If True will use multiprocessing (default is False). Spawning the
                    processes takes time (and memory). Using `parallel=True`
                    makes only sense if the NeuronList is large or the
                    function takes a long time to run.
    n_cores :       int
                    Number of CPUs to use for multiprocessing. Defaults to
                    half the available cores.
    omit_failures : bool
                    If True, will ignore failures.

    **kwargs
                Will be passed to function.

    Returns
    -------
    Results

    Examples
    --------
    >>> import navis
    >>> nl = navis.example_neurons()
    >>> # Apply resampling function
    >>> nl_rs = nl.apply(navis.resample_skeleton, resample_to=1000, inplace=False)

    """
    if not callable(func):
        raise TypeError('"func" must be callable')

    # Delayed import to avoid circular import
    from .core_utils import NeuronProcessor
    proc = NeuronProcessor(self,
                           func,
                           parallel=parallel,
                           n_cores=n_cores,
                           omit_failures=omit_failures,
                           desc=f'Apply {func.__name__}')

    return proc(self.neurons, **kwargs)

Return copy of this NeuronList.

PARAMETER DESCRIPTION
**kwargs
    Keyword arguments passed to neuron's `.copy()` method::

    deepcopy :  bool, for TreeNeurons only
                If False, `.graph` (NetworkX DiGraphs) will be
                returned as views - changes to nodes/edges can
    propagate back! `.igraph` (iGraph) - if
                available - will always be deepcopied.

DEFAULT: {}

Source code in navis/core/neuronlist.py
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
def copy(self, **kwargs) -> 'NeuronList':
    """Return copy of this NeuronList.

    Parameters
    ----------
    **kwargs
                Keyword arguments passed to neuron's `.copy()` method::

                deepcopy :  bool, for TreeNeurons only
                            If False, `.graph` (NetworkX DiGraphs) will be
                            returned as views - changes to nodes/edges can
                            propagate back! `.igraph` (iGraph) - if
                            available - will always be deepcopied.

    """
    return self.__class__([n.copy(**kwargs) for n in config.tqdm(self.neurons,
                                                                 desc='Copy',
                                                                 leave=False,
                                                                 disable=config.pbar_hide or len(self) < 20)],
                          make_copy=False)

Get attributes of neurons contained in the NeuronList.

PARAMETER DESCRIPTION
name
    Name of the property to get.

TYPE: str

default
    Default value to return if attribute is not found.

TYPE: any

RETURNS DESCRIPTION
np.ndarray

Array of values for the requested attribute.

Source code in navis/core/neuronlist.py
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
def get_neuron_attributes(self, *args, **kwargs):
    """Get attributes of neurons contained in the NeuronList.

    Parameters
    ----------
    name :      str
                Name of the property to get.
    default :   any, optional
                Default value to return if attribute is not found.

    Returns
    -------
    np.ndarray
                Array of values for the requested attribute.

    """
    return np.array([getattr(n, *args, **kwargs) for n in self.neurons])

Return summary for top N neurons.

Source code in navis/core/neuronlist.py
986
987
988
def head(self, N: int = 5) -> pd.DataFrame:
    """Return summary for top N neurons."""
    return self.summary(N=N)

Helper to mimic pandas.DataFrame.itertuples().

Source code in navis/core/neuronlist.py
790
791
792
def itertuples(self):
    """Helper to mimic `pandas.DataFrame.itertuples()`."""
    return self.neurons

Return mean numeric and boolean values over all neurons.

Source code in navis/core/neuronlist.py
645
646
647
def mean(self) -> pd.DataFrame:
    """Return mean numeric and boolean values over all neurons."""
    return self.summary().mean(numeric_only=True)

Return estimated size in memory of this NeuronList.

Works by going over each neuron and summing up their size in memory.

PARAMETER DESCRIPTION
deep
        Pass to pandas DataFrames. If True will inspect data of
        object type too.

TYPE: bool DEFAULT: False

estimate
        If True, we will only estimate the size. This is
        considerably faster but will slightly underestimate the
        memory usage.

TYPE: bool DEFAULT: False

sample
        If True, we will only sample 10% of the neurons
        contained in the list and extrapolate an estimate from
        there.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
int

Memory usage in bytes.

Source code in navis/core/neuronlist.py
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
def memory_usage(self, deep=False, estimate=False, sample=False):
    """Return estimated size in memory of this NeuronList.

    Works by going over each neuron and summing up their size in memory.

    Parameters
    ----------
    deep :          bool
                    Pass to pandas DataFrames. If True will inspect data of
                    object type too.
    estimate :      bool
                    If True, we will only estimate the size. This is
                    considerably faster but will slightly underestimate the
                    memory usage.
    sample :        bool
                    If True, we will only sample 10% of the neurons
                    contained in the list and extrapolate an estimate from
                    there.

    Returns
    -------
    int
                Memory usage in bytes.

    """
    if self.empty:
        return 0

    if not sample:
        try:
            return sum([n.memory_usage(deep=deep,
                                       estimate=estimate) for n in self.neurons])
        except BaseException:
            return 0
    else:
        try:
            s = sum([n.memory_usage(deep=deep,
                                    estimate=estimate) for n in self.neurons[::10]])
            return s * (len(self.neurons) / len(self.neurons[::10]))
        except BaseException:
            return 0

Plot neuron in 2D using navis.plot2d.

PARAMETER DESCRIPTION
**kwargs
Keyword arguments will be passed to [`navis.plot2d`][].
See `help(navis.plot2d)` for a list of accepted keywords.

DEFAULT: {}

See Also

navis.plot2d Base function called to generate 2d plot.

Source code in navis/core/neuronlist.py
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
def plot2d(self, **kwargs):
    """Plot neuron in 2D using [`navis.plot2d`][].

    Parameters
    ----------
    **kwargs
            Keyword arguments will be passed to [`navis.plot2d`][].
            See `help(navis.plot2d)` for a list of accepted keywords.

    See Also
    --------
    [`navis.plot2d`][]
            Base function called to generate 2d plot.

    """
    from ..plotting import plot2d

    return plot2d(self, **kwargs)

Plot neuron in 3D using navis.plot3d.

PARAMETER DESCRIPTION
**kwargs
Keyword arguments will be passed to [`navis.plot3d`][].
See `help(navis.plot3d)` for a list of keywords.

DEFAULT: {}

See Also

navis.plot3d Base function called to generate 3d plot.

Source code in navis/core/neuronlist.py
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
def plot3d(self, **kwargs):
    """Plot neuron in 3D using [`navis.plot3d`][].

    Parameters
    ----------
    **kwargs
            Keyword arguments will be passed to [`navis.plot3d`][].
            See `help(navis.plot3d)` for a list of keywords.

    See Also
    --------
    [`navis.plot3d`][]
            Base function called to generate 3d plot.

    """
    from ..plotting import plot3d

    return plot3d(self, **kwargs)

Remove duplicate neurons from list.

PARAMETER DESCRIPTION
key
    Attribute(s) by which to identify duplicates. In case of
    multiple, all attributes must match to flag a neuron as
    duplicate.

TYPE: str | list DEFAULT: 'name'

keep
    Which of the duplicated neurons to keep.

TYPE: str DEFAULT: 'first'

inplace
    If False will return a copy of the original with
    duplicates removed.

TYPE: bool DEFAULT: False

Source code in navis/core/neuronlist.py
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
def remove_duplicates(self,
                      key: str = 'name',
                      keep: str = 'first',
                      inplace: bool = False
                      ) -> Optional['NeuronList']:
    """Remove duplicate neurons from list.

    Parameters
    ----------
    key :       str | list, optional
                Attribute(s) by which to identify duplicates. In case of
                multiple, all attributes must match to flag a neuron as
                duplicate.
    keep :      str
                Which of the duplicated neurons to keep.
    inplace :   bool, optional
                If False will return a copy of the original with
                duplicates removed.

    """
    if inplace:
        x = self
    else:
        x = self.copy()

    key = utils.make_iterable(key)

    # Generate pandas DataFrame
    df = pd.DataFrame([[getattr(n, at) for at in key] for n in x],
                      columns=key)

    # Find out which neurons to keep
    to_keep = ~df.duplicated(keep=keep).values

    # Reassign neurons
    x.neurons = x[to_keep].neurons

    if not inplace:
        return x
    return None

Return random subset of neurons.

Source code in navis/core/neuronlist.py
691
692
693
694
695
696
697
698
699
def sample(self, N: Union[int, float] = 1) -> 'NeuronList':
    """Return random subset of neurons.

    Parameters
    ----------
    N :     int | float
            Number of neurons to return. A float 0 < N < 1 is
            interpreted as a fraction of the list's length.

    Returns
    -------
    NeuronList
            New NeuronList with a random subset of this list's neurons
            (original order preserved).

    """
    # Fractional N = fraction of the list
    if 0 < N < 1:
        N = int(len(self.neurons) * N)

    indices = list(range(len(self.neurons)))
    random.shuffle(indices)
    # Use a set for O(1) membership tests - the previous list-slice
    # lookup made this loop accidentally O(n^2)
    chosen = set(indices[:N])
    return self.__class__([n for i, n in enumerate(self.neurons) if i in chosen],
                          make_copy=self.copy_on_subset)

Set attributes of neurons contained in the NeuronList.

PARAMETER DESCRIPTION
x
    Value of the property:
      - lists and arrays are expected to contain a value for
        each neuron and hence have to match the length of the
        NeuronList
      - dict is expected to map `{neuron.id: value}`
      - a function is expected to take `neuron.id` as input
        and return a value

TYPE: any | list | np.ndarray | dict | function

name
    Name of the property to set.

TYPE: str

register
    If True, will also register the attribute(s) as properties
    that should show up in the summary.

TYPE: bool DEFAULT: False

na
    What to do if `x` is a dictionary and does not contain a
    value for a neuron:
     - 'raise' will raise a KeyError
     - 'propagate' will set the attribute to `None`
     - 'skip' will not set the attribute

TYPE: 'raise' | 'propagate' | 'skip' DEFAULT: 'raise'

See Also

navis.NeuronList.add_metadata Set metadata from a dataframe.

Examples:

>>> import navis
>>> nl = navis.example_neurons(5)
>>> # Set a single value
>>> nl.set_neuron_attributes('some_value', name='my_attr')
>>> nl[0].my_attr
'some_value'
>>> # Set individual values as iterable
>>> nl.set_neuron_attributes([1, 2, 3, 4, 5], name='my_attr')
>>> nl[0].my_attr
1
>>> nl.my_attr
array([1, 2, 3, 4, 5])
>>> # Set individual values using a dictionary
>>> val_dict = dict(zip(nl.id, ['test', 2, 2.2, 4, 'test2']))
>>> nl.set_neuron_attributes(val_dict, name='my_attr')
>>> nl[0].my_attr
'test'
Source code in navis/core/neuronlist.py
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
def set_neuron_attributes(self, x, name, register=False, na='raise'):
    """Set attributes of neurons contained in the NeuronList.

    Parameters
    ----------
    x :         any | list | np.ndarray | dict | function
                Value of the property:
                  - lists and arrays are expected to contain a value for
                    each neuron and hence have to match the length of the
                    NeuronList
                  - dict is expected to map `{neuron.id: value}`
                  - a function is expected to take `neuron.id` as input
                    and return a value
    name :      str
                Name of the property to set.
    register :  bool
                If True, will also register the attribute(s) as properties
                that should show up in the summary.
    na :        'raise' | 'propagate' | 'skip'
                What to do if `x` is a dictionary and does not contain a
                value for a neuron:
                 - 'raise' will raise a KeyError
                 - 'propagate' will set the attribute to `None`
                 - 'skip' will not set the attribute

    See Also
    --------
    navis.NeuronList.add_metadata
                Set metadata from a dataframe.

    Examples
    --------
    >>> import navis
    >>> nl = navis.example_neurons(5)
    >>> # Set a single value
    >>> nl.set_neuron_attributes('some_value', name='my_attr')
    >>> nl[0].my_attr
    'some_value'
    >>> # Set individual values as iterable
    >>> nl.set_neuron_attributes([1, 2, 3, 4, 5], name='my_attr')
    >>> nl[0].my_attr
    1
    >>> nl.my_attr
    array([1, 2, 3, 4, 5])
    >>> # Set individual values using a dictionary
    >>> val_dict = dict(zip(nl.id, ['test', 2, 2.2, 4, 'test2']))
    >>> nl.set_neuron_attributes(val_dict, name='my_attr')
    >>> nl[0].my_attr
    'test'

    """
    utils.eval_param(na, name='na',
                     allowed_values=('raise', 'propagate', 'skip'))
    utils.eval_param(name, name='name', allowed_types=(str, ))

    if isinstance(x, dict):
        # Check up-front so we can report *all* missing IDs at once
        if na == 'raise':
            miss = ~np.isin(self.id, list(x))
            if any(miss):
                raise KeyError('Dictionary `x` is missing entries for IDs: '
                               f'{self.id[miss]}')
        for n in self.neurons:
            v = x.get(n.id, None)
            # NOTE: an explicit `None` value in the dict is treated like a
            # missing entry here and will also be skipped
            if (v is None) and (na == 'skip'):
                continue
            n._register_attr(name, v, summary=register)
    elif isinstance(x, (list, np.ndarray)):
        if len(x) != len(self):
            # (fixed a missing space between "the" and the neuron count)
            raise ValueError(f'Got {len(x)} values for the {len(self)} '
                             'neurons in the NeuronList.')
        for n, v in zip(self.neurons, x):
            n._register_attr(name, v, summary=register)
    elif callable(x):
        # Function: evaluate per neuron on its ID
        for n in self.neurons:
            n._register_attr(name, x(n.id), summary=register)
    else:
        # Scalar: assign the same value to every neuron
        for n in self.neurons:
            n._register_attr(name, x, summary=register)

Sort neurons by given key.

Needs to be an attribute of all neurons: for example name. Also works with custom attributes.

Source code in navis/core/neuronlist.py
955
956
957
958
959
960
961
962
963
def sort_values(self, key: str, ascending: bool = False):
    """Sort neurons in place by a given attribute.

    `key` must be an attribute present on every neuron - e.g. `name`.
    Custom attributes work too.
    """
    # sorted() is stable; we sort descending unless `ascending` is True
    descending = ascending is False
    ordered = sorted(self.neurons,
                     key=lambda neuron: getattr(neuron, key),
                     reverse=descending)
    self.neurons = ordered

Return the sum of numeric and boolean values over all neurons.

Source code in navis/core/neuronlist.py
641
642
643
def sum(self) -> pd.DataFrame:
    """Return the sum of numeric and boolean values over all neurons."""
    # Non-numeric summary columns (e.g. names) are dropped by pandas
    stats = self.summary()
    return stats.sum(numeric_only=True)

Get summary over all neurons in this NeuronList.

PARAMETER DESCRIPTION
N
    If int, get only first N entries.

TYPE: int | slice DEFAULT: None

add_props
    Additional properties to add to summary. If attribute not
    available will return 'NA'.

TYPE: list DEFAULT: []

progress
    Whether to show a progress bar. Can be useful for very
    large list.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
pandas DataFrame
Source code in navis/core/neuronlist.py
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
def summary(self,
            N: Optional[Union[int, slice]] = None,
            add_props: Optional[list] = None,
            progress=False
            ) -> pd.DataFrame:
    """Get summary over all neurons in this NeuronList.

    Parameters
    ----------
    N :         int | slice, optional
                If int, get only first N entries.
    add_props : list, optional
                Additional properties to add to summary. If attribute not
                available will return 'NA'.
    progress :  bool
                Whether to show a progress bar. Can be useful for very
                large list.

    Returns
    -------
    pandas DataFrame

    """
    if not self.empty:
        # Fetch a union of all summary props (keep order of first appearance)
        all_props = [p for l in self.SUMMARY_PROPS for p in l]
        props = np.unique(all_props)
        props = sorted(props, key=lambda x: all_props.index(x))
    else:
        props = []

    # Add ID to properties - unless all are generic UUIDs
    if any(not isinstance(n.id, uuid.UUID) for n in self.neurons):
        # Make sure we don't have two IDs
        if 'id' in props:
            props.remove('id')
        props = np.insert(props, 2, 'id')

    # NOTE: `add_props` used to default to a mutable `[]`; the `None`
    # default avoids the shared-mutable-default pitfall and behaves
    # identically for callers.
    if add_props:
        props = np.append(props, add_props)

    # Normalize N to a slice (slice(None) selects all neurons)
    if not isinstance(N, slice):
        N = slice(N)

    # Missing attributes show up as the string 'NA'
    return pd.DataFrame(data=[[getattr(n, a, 'NA') for a in props]
                              for n in config.tqdm(self.neurons[N],
                                                   desc='Summarizing',
                                                   leave=False,
                                                   disable=not progress)],
                        columns=props)

Return summary for bottom N neurons.

Source code in navis/core/neuronlist.py
990
991
992
def tail(self, N: int = 5) -> pd.DataFrame:
    """Return summary for the last N neurons in the list."""
    # Negative start index selects the trailing N entries
    last = slice(-N, len(self))
    return self.summary(N=last)

Split into NeuronLists of the same neuron type.

RETURNS DESCRIPTION
dict

Dictionary of {Neurontype: NeuronList}

Source code in navis/core/neuronlist.py
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
def unmix(self):
    """Split into NeuronLists of the same neuron type.

    Returns
    -------
    dict
            Dictionary of `{Neurontype: NeuronList}`

    """
    split = {}
    for neuron_type in self.types:
        members = [n for n in self.neurons if isinstance(n, neuron_type)]
        split[neuron_type] = self.__class__(members)
    return split

Neuron represented as hierarchical tree (i.e. a skeleton).

PARAMETER DESCRIPTION
x
        Data to construct neuron from:
         - `pandas.DataFrame` is expected to be a SWC table
         - `pandas.Series` is expected to have a DataFrame as
           `.nodes` - additional properties will be attached
           as meta data
         - `tuple` of `(vertices, edges)` arrays is passed to
           [`navis.edges2neuron`][]
         - `str` is passed to [`navis.read_swc`][]
         - `BufferedIOBase` e.g. from `open(filename)`
         - `networkx.DiGraph` parsed by [`navis.nx2neuron`][]
         - `skeletor.Skeleton`
         - `TreeNeuron` - in this case we will try to copy every
           attribute
         - `None` will initialize an empty neuron

TYPE: Union[pd.DataFrame, BufferedIOBase, str, TreeNeuron, nx.DiGraph]

units
        Units for coordinates. Defaults to `None` (dimensionless).
        Strings must be parsable by pint: e.g. "nm", "um",
        "micrometer" or "8 nanometers".

TYPE: str | pint.Units | pint.Quantity DEFAULT: None

**metadata
        Any additional data to attach to neuron.

DEFAULT: {}

Source code in navis/core/skeleton.py
  67
  68
  69
  70
  71
  72
  73
  74
  75
  76
  77
  78
  79
  80
  81
  82
  83
  84
  85
  86
  87
  88
  89
  90
  91
  92
  93
  94
  95
  96
  97
  98
  99
 100
 101
 102
 103
 104
 105
 106
 107
 108
 109
 110
 111
 112
 113
 114
 115
 116
 117
 118
 119
 120
 121
 122
 123
 124
 125
 126
 127
 128
 129
 130
 131
 132
 133
 134
 135
 136
 137
 138
 139
 140
 141
 142
 143
 144
 145
 146
 147
 148
 149
 150
 151
 152
 153
 154
 155
 156
 157
 158
 159
 160
 161
 162
 163
 164
 165
 166
 167
 168
 169
 170
 171
 172
 173
 174
 175
 176
 177
 178
 179
 180
 181
 182
 183
 184
 185
 186
 187
 188
 189
 190
 191
 192
 193
 194
 195
 196
 197
 198
 199
 200
 201
 202
 203
 204
 205
 206
 207
 208
 209
 210
 211
 212
 213
 214
 215
 216
 217
 218
 219
 220
 221
 222
 223
 224
 225
 226
 227
 228
 229
 230
 231
 232
 233
 234
 235
 236
 237
 238
 239
 240
 241
 242
 243
 244
 245
 246
 247
 248
 249
 250
 251
 252
 253
 254
 255
 256
 257
 258
 259
 260
 261
 262
 263
 264
 265
 266
 267
 268
 269
 270
 271
 272
 273
 274
 275
 276
 277
 278
 279
 280
 281
 282
 283
 284
 285
 286
 287
 288
 289
 290
 291
 292
 293
 294
 295
 296
 297
 298
 299
 300
 301
 302
 303
 304
 305
 306
 307
 308
 309
 310
 311
 312
 313
 314
 315
 316
 317
 318
 319
 320
 321
 322
 323
 324
 325
 326
 327
 328
 329
 330
 331
 332
 333
 334
 335
 336
 337
 338
 339
 340
 341
 342
 343
 344
 345
 346
 347
 348
 349
 350
 351
 352
 353
 354
 355
 356
 357
 358
 359
 360
 361
 362
 363
 364
 365
 366
 367
 368
 369
 370
 371
 372
 373
 374
 375
 376
 377
 378
 379
 380
 381
 382
 383
 384
 385
 386
 387
 388
 389
 390
 391
 392
 393
 394
 395
 396
 397
 398
 399
 400
 401
 402
 403
 404
 405
 406
 407
 408
 409
 410
 411
 412
 413
 414
 415
 416
 417
 418
 419
 420
 421
 422
 423
 424
 425
 426
 427
 428
 429
 430
 431
 432
 433
 434
 435
 436
 437
 438
 439
 440
 441
 442
 443
 444
 445
 446
 447
 448
 449
 450
 451
 452
 453
 454
 455
 456
 457
 458
 459
 460
 461
 462
 463
 464
 465
 466
 467
 468
 469
 470
 471
 472
 473
 474
 475
 476
 477
 478
 479
 480
 481
 482
 483
 484
 485
 486
 487
 488
 489
 490
 491
 492
 493
 494
 495
 496
 497
 498
 499
 500
 501
 502
 503
 504
 505
 506
 507
 508
 509
 510
 511
 512
 513
 514
 515
 516
 517
 518
 519
 520
 521
 522
 523
 524
 525
 526
 527
 528
 529
 530
 531
 532
 533
 534
 535
 536
 537
 538
 539
 540
 541
 542
 543
 544
 545
 546
 547
 548
 549
 550
 551
 552
 553
 554
 555
 556
 557
 558
 559
 560
 561
 562
 563
 564
 565
 566
 567
 568
 569
 570
 571
 572
 573
 574
 575
 576
 577
 578
 579
 580
 581
 582
 583
 584
 585
 586
 587
 588
 589
 590
 591
 592
 593
 594
 595
 596
 597
 598
 599
 600
 601
 602
 603
 604
 605
 606
 607
 608
 609
 610
 611
 612
 613
 614
 615
 616
 617
 618
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
class TreeNeuron(BaseNeuron):
    """Neuron represented as hierarchical tree (i.e. a skeleton).

    Parameters
    ----------
    x
                    Data to construct neuron from:
                     - `pandas.DataFrame` is expected to be a SWC table
                     - `pandas.Series` is expected to have a DataFrame as
                       `.nodes` - additional properties will be attached
                       as meta data
                     - `tuple` of `(vertices, edges)` arrays is passed to
                       [`navis.edges2neuron`][]
                     - `str` is passed to [`navis.read_swc`][]
                     - `BufferedIOBase` e.g. from `open(filename)`
                     - `networkx.DiGraph` parsed by [`navis.nx2neuron`][]
                     - `skeletor.Skeleton`
                     - `TreeNeuron` - in this case we will try to copy every
                       attribute
                     - `None` will initialize an empty neuron
    units :         str | pint.Units | pint.Quantity
                    Units for coordinates. Defaults to `None` (dimensionless).
                    Strings must be parsable by pint: e.g. "nm", "um",
                    "micrometer" or "8 nanometers".
    **metadata
                    Any additional data to attach to neuron.

    """

    # SWC-like node table (node_id, parent_id, x, y, z, radius, ...)
    nodes: pd.DataFrame

    # Graph representations (generated on demand, see the respective properties)
    graph: 'nx.DiGraph'
    igraph: 'igraph.Graph'  # type: ignore  # doesn't know iGraph

    # Basic morphometrics (computed on demand)
    n_branches: int
    n_leafs: int
    cable_length: Union[int, float]

    # Linear sections of the skeleton (computed on demand)
    segments: List[list]
    small_segments: List[list]

    # Root node ID(s) - can be more than one if neuron has multiple trees
    root: np.ndarray

    # Soma node (if any) and its position
    soma: Optional[Union[int, str]]
    soma_pos: Optional[Sequence]

    #: Minimum radius for soma detection. Set to `None` if no tag needed.
    #: Default = 1 micron
    soma_detection_radius: Union[float, int, pint.Quantity] = 1 * config.ureg.um
    #: Label for soma detection. Set to `None` if no tag needed. Default = 1.
    soma_detection_label: Union[float, int, str] = 1
    #: Soma radius (e.g. for plotting). If string, must be column in nodes
    #: table. Default = 'radius'.
    soma_radius: Union[float, int, str] = 'radius'
    # Set default function for soma finding. Default = [`navis.morpho.find_soma`][]
    _soma: Union[Callable[['TreeNeuron'], Sequence[int]], int] = morpho.find_soma

    # Optional mapping of tag -> node IDs
    tags: Optional[Dict[str, List[int]]] = None

    #: Attributes to be used when comparing two neurons.
    EQ_ATTRIBUTES = ['n_nodes', 'n_connectors', 'soma', 'root',
                     'n_branches', 'n_leafs', 'cable_length', 'name']

    #: Temporary attributes that need to be regenerated when data changes.
    TEMP_ATTR = ['_igraph', '_graph_nx', '_segments', '_small_segments',
                 '_geodesic_matrix', 'centrality_method', '_simple',
                 '_cable_length', '_memory_usage', '_adjacency_matrix']

    #: Attributes used for neuron summary
    SUMMARY_PROPS = ['type', 'name', 'n_nodes', 'n_connectors', 'n_branches',
                     'n_leafs', 'cable_length', 'soma', 'units']

    #: Core data table(s) used to calculate hash
    CORE_DATA = ['nodes:node_id,parent_id,x,y,z']
    def __init__(self,
                 x: Union[pd.DataFrame,
                          BufferedIOBase,
                          str,
                          'TreeNeuron',
                          nx.DiGraph],
                 units: Union[pint.Unit, str] = None,
                 **metadata
                 ):
        """Initialize Skeleton Neuron.

        Parameters
        ----------
        x :         DataFrame | Series | DiGraph | str | BufferedIOBase | tuple | TreeNeuron | skeletor.Skeleton | None
                    Data to construct the neuron from - see class docstring
                    for the accepted formats.
        units :     str | pint.Unit, optional
                    Spatial units of the coordinates.
        **metadata
                    Additional data; set as attributes on the new neuron.

        Raises
        ------
        ValueError / TypeError / utils.ConstructionError
                    If `x` has an unsupported type or malformed content.

        """
        super().__init__()

        # Lock neuron during construction
        self._lock = 1

        # Dispatch on the type of `x`
        if isinstance(x, pd.DataFrame):
            # SWC-style node table
            self.nodes = x
        elif isinstance(x, pd.Series):
            # Series must carry a node table; everything else becomes metadata
            if not hasattr(x, 'nodes'):
                raise ValueError('pandas.Series must have `nodes` entry.')
            elif not isinstance(x.nodes, pd.DataFrame):
                raise TypeError(f'Nodes must be pandas DataFrame, got "{type(x.nodes)}"')
            self.nodes = x.nodes
            metadata.update(x.to_dict())
        elif isinstance(x, nx.Graph):
            # Convert graph to node table
            self.nodes = graph.nx2neuron(x).nodes
        elif isinstance(x, BufferedIOBase) or isinstance(x, str):
            # File path or file-like object -> read as SWC
            x = io.read_swc(x)  # type: ignore
            self.__dict__.update(x.__dict__)
        elif isinstance(x, sk.Skeleton):
            # skeletor Skeleton: take its SWC table and mesh mapping
            self.nodes = x.swc.copy()
            self.vertex_map = x.mesh_map
        elif isinstance(x, TreeNeuron):
            self.__dict__.update(x.copy().__dict__)
            # Try to copy every attribute
            for at in self.__dict__:
                try:
                    setattr(self, at, copy.copy(getattr(self, at)))
                except BaseException:
                    logger.warning(f'Unable to deep-copy attribute "{at}"')
        elif isinstance(x, tuple):
            # Tuple of vertices and edges
            if len(x) != 2:
                raise ValueError('Tuple must have 2 elements: vertices and edges.')
            self.nodes = graph.edges2neuron(edges=x[1], vertices=x[0]).nodes
        elif isinstance(x, type(None)):
            # This is essentially an empty neuron
            pass
        else:
            raise utils.ConstructionError(f'Unable to construct TreeNeuron from "{type(x)}"')

        # Attach any remaining metadata as attributes
        for k, v in metadata.items():
            try:
                setattr(self, k, v)
            except AttributeError:
                raise AttributeError(f"Unable to set neuron's `{k}` attribute.")

        self.units = units
        # Remember hash of core data so we can detect changes later
        self._current_md5 = self.core_md5

        # Unlock - temp attributes will now be cleared on changes
        self._lock = 0

    def __getattr__(self, key):
        """We will use this magic method to calculate some attributes on-demand."""
        # Note that we're mixing @property and __getattr__ which causes problems:
        # if a @property raises an Exception, Python falls back to __getattr__
        # and traceback is lost!

        # Last ditch effort - maybe the base class knows the key?
        # (only reached for attributes not found through normal lookup)
        return super().__getattr__(key)

    def __truediv__(self, other, copy=True):
        """Implement division for coordinates (nodes, connectors).

        Parameters
        ----------
        other : number | iterable
                Single divisor applied to x/y/z and radius, or an iterable
                of 4 divisors (one each for x, y, z and radius).
        copy :  bool
                If True (default), operate on and return a copy.

        Returns
        -------
        TreeNeuron

        """
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            if utils.is_iterable(other):
                # If divisor is isotropic use only single value
                # (fixed: this was previously a no-op `==` comparison, so
                # isotropic iterables of length != 4 slipped through and
                # broke the column-wise division below)
                if len(set(other)) == 1:
                    other = other[0]
                elif len(other) != 4:
                    raise ValueError('Division by list/array requires 4 '
                                     'divisors for x/y/z and radius - '
                                     f'got {len(other)}')

            # If a number, consider this an offset for coordinates
            n = self.copy() if copy else self
            n.nodes[['x', 'y', 'z', 'radius']] /= other

            # At this point we can ditch any 4th unit
            if utils.is_iterable(other):
                other = other[:3]
            if n.has_connectors:
                n.connectors[['x', 'y', 'z']] /= other

            if hasattr(n, 'soma_radius'):
                if isinstance(n.soma_radius, numbers.Number):
                    n.soma_radius /= other

            # Convert units
            # Note: .to_compact() throws a RuntimeWarning and returns unchanged
            # values  when `units` is a iterable
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                n.units = (n.units * other).to_compact()

            n._clear_temp_attr(exclude=['classify_nodes'])
            return n
        return NotImplemented

    def __mul__(self, other, copy=True):
        """Implement multiplication for coordinates (nodes, connectors).

        Parameters
        ----------
        other : number | iterable
                Single multiplier applied to x/y/z and radius, or an
                iterable of 4 multipliers (one each for x, y, z and radius).
        copy :  bool
                If True (default), operate on and return a copy.

        Returns
        -------
        TreeNeuron

        """
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            if utils.is_iterable(other):
                # If multiplicator is isotropic use only single value
                # (fixed: this was previously a no-op `==` comparison, so
                # isotropic iterables of length != 4 slipped through and
                # broke the column-wise multiplication below)
                if len(set(other)) == 1:
                    other = other[0]
                elif len(other) != 4:
                    raise ValueError('Multiplication by list/array requires 4 '
                                     'multipliers for x/y/z and radius - '
                                     f'got {len(other)}')

            # If a number, consider this an offset for coordinates
            n = self.copy() if copy else self
            n.nodes[['x', 'y', 'z', 'radius']] *= other

            # At this point we can ditch any 4th unit
            if utils.is_iterable(other):
                other = other[:3]
            if n.has_connectors:
                n.connectors[['x', 'y', 'z']] *= other

            if hasattr(n, 'soma_radius'):
                if isinstance(n.soma_radius, numbers.Number):
                    n.soma_radius *= other

            # Convert units
            # Note: .to_compact() throws a RuntimeWarning and returns unchanged
            # values  when `units` is a iterable
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                n.units = (n.units / other).to_compact()

            n._clear_temp_attr(exclude=['classify_nodes'])
            return n
        return NotImplemented

    def __add__(self, other, copy=True):
        """Implement addition for coordinates (nodes, connectors).

        Parameters
        ----------
        other : number | iterable | BaseNeuron
                Offset for x/y/z (single value or iterable of 3), or
                another neuron - in which case a NeuronList of both is
                returned.
        copy :  bool
                If True (default), operate on and return a copy.

        Returns
        -------
        TreeNeuron | NeuronList

        """
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            if utils.is_iterable(other):
                # If offset isotropic use only single value
                # (fixed: this was previously a no-op `==` comparison)
                if len(set(other)) == 1:
                    other = other[0]
                elif len(other) != 3:
                    raise ValueError('Addition by list/array requires 3 '
                                     'offsets for x/y/z coordinates - '
                                     f'got {len(other)}')

            # If a number, consider this an offset for coordinates
            n = self.copy() if copy else self
            n.nodes[['x', 'y', 'z']] += other

            # Do the connectors
            if n.has_connectors:
                n.connectors[['x', 'y', 'z']] += other

            n._clear_temp_attr(exclude=['classify_nodes'])
            return n
        # If another neuron, return a list of neurons
        elif isinstance(other, BaseNeuron):
            return core.NeuronList([self, other])
        return NotImplemented

    def __sub__(self, other, copy=True):
        """Implement subtraction for coordinates (nodes, connectors).

        Parameters
        ----------
        other : number | iterable
                Offset for x/y/z (single value or iterable of 3).
        copy :  bool
                If True (default), operate on and return a copy.

        Returns
        -------
        TreeNeuron

        """
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            if utils.is_iterable(other):
                # If offset is isotropic use only single value
                # (fixed: this was previously a no-op `==` comparison)
                if len(set(other)) == 1:
                    other = other[0]
                elif len(other) != 3:
                    # (fixed: error previously said "Addition")
                    raise ValueError('Subtraction by list/array requires 3 '
                                     'offsets for x/y/z coordinates - '
                                     f'got {len(other)}')

            # If a number, consider this an offset for coordinates
            n = self.copy() if copy else self
            n.nodes[['x', 'y', 'z']] -= other

            # Do the connectors
            if n.has_connectors:
                n.connectors[['x', 'y', 'z']] -= other

            n._clear_temp_attr(exclude=['classify_nodes'])
            return n
        return NotImplemented

    def __getstate__(self):
        """Get state (used e.g. for pickling)."""
        # Callable attributes (e.g. the soma-detection function) are dropped
        state = {}
        for key, value in self.__dict__.items():
            if callable(value):
                continue
            state[key] = value

        # Pickling the graphs actually takes longer than regenerating them
        # from scratch, so drop the cached graph representations if present
        state.pop('_graph_nx', None)
        state.pop('_igraph', None)

        return state

    @property
    @temp_property
    def adjacency_matrix(self):
        """Adjacency matrix of the skeleton."""
        try:
            # Return cached matrix if we have one
            return self._adjacency_matrix
        except AttributeError:
            # Not yet computed (or cleared) -> (re-)generate and cache
            self._adjacency_matrix = graph.skeleton_adjacency_matrix(self)
            return self._adjacency_matrix

    @property
    @requires_nodes
    def vertices(self) -> np.ndarray:
        """Vertices of the skeleton as (N, 3) x/y/z array."""
        coords = self.nodes[['x', 'y', 'z']]
        return coords.values

    @property
    @requires_nodes
    def edges(self) -> np.ndarray:
        """Edges between nodes as (N, 2) array of (node_id, parent_id).

        See Also
        --------
        edge_coords
                Same but with x/y/z coordinates instead of node IDs.

        """
        # Root nodes (parent_id < 0) have no parent edge and are excluded
        has_parent = self.nodes.parent_id >= 0
        return self.nodes.loc[has_parent, ['node_id', 'parent_id']].values

    @property
    @requires_nodes
    def edge_coords(self) -> np.ndarray:
        """Coordinates of edges between nodes as (N, 2, 3) array.

        See Also
        --------
        edges
                Same but with node IDs instead of x/y/z coordinates.

        """
        xyz = self.nodes.set_index('node_id')[['x', 'y', 'z']]
        edges = self.edges
        # One (start, end) coordinate pair per edge
        coords = np.zeros((edges.shape[0], 2, 3))
        coords[:, 0, :] = xyz.loc[edges[:, 0]].values
        coords[:, 1, :] = xyz.loc[edges[:, 1]].values
        return coords

    @property
    @temp_property
    def igraph(self) -> 'igraph.Graph':
        """iGraph representation of this neuron."""
        if hasattr(self, '_igraph'):
            return self._igraph
        # Not cached yet - get_igraph() computes and sets the attribute
        return self.get_igraph()

    @property
    @temp_property
    def graph(self) -> nx.DiGraph:
        """Networkx Graph representation of this neuron."""
        if hasattr(self, '_graph_nx'):
            return self._graph_nx
        # Not cached yet - get_graph_nx() computes and sets the attribute
        return self.get_graph_nx()

    @property
    @temp_property
    def geodesic_matrix(self):
        """Matrix with geodesic (along-the-arbor) distance between nodes."""
        try:
            # Return cached matrix if present
            return self._geodesic_matrix
        except AttributeError:
            # (Re-)generate and cache
            self._geodesic_matrix = graph.geodesic_matrix(self)
            return self._geodesic_matrix

    @property
    @requires_nodes
    def leafs(self) -> pd.DataFrame:
        """Leaf node table (nodes of type 'end')."""
        is_leaf = self.nodes['type'] == 'end'
        return self.nodes.loc[is_leaf]

    @property
    @requires_nodes
    def ends(self):
        """End node table (same as leafs).

        Alias for :attr:`TreeNeuron.leafs`.
        """
        return self.leafs

    @property
    @requires_nodes
    def branch_points(self):
        """Branch node table (nodes of type 'branch')."""
        is_branch = self.nodes['type'] == 'branch'
        return self.nodes.loc[is_branch]

    @property
    def nodes(self) -> pd.DataFrame:
        """Node table."""
        return self._get_nodes()

    def _get_nodes(self) -> pd.DataFrame:
        # Redefine this function in subclass to change how nodes are retrieved
        return self._nodes

    @nodes.setter
    def nodes(self, v):
        """Validate and set node table."""
        # We are refering to an extra function to facilitate subclassing:
        # Redefine _set_nodes() to not break property
        self._set_nodes(v)

    def _set_nodes(self, v):
        """Validate and store the node table.

        Subclasses can override this method to change the validation.
        """
        self._nodes = utils.validate_table(v,
                                           required=[('node_id', 'rowId', 'node', 'treenode_id', 'PointNo'),
                                                     ('parent_id', 'link', 'parent', 'Parent'),
                                                     ('x', 'X'),
                                                     ('y', 'Y'),
                                                     ('z', 'Z')],
                                           rename=True,
                                           optional={('radius', 'W'): 0},
                                           restrict=False)

        # Object dtype in the ID columns can cause problems downstream
        # -> coerce them to integers
        for col in ('node_id', 'parent_id'):
            if self._nodes[col].dtype == 'O':
                self._nodes[col] = self._nodes[col].astype(int)

        graph.classify_nodes(self)

    @property
    def n_trees(self) -> int:
        """Count number of connected trees in this neuron."""
        # Each connected component in `.subtrees` is one tree
        return len(self.subtrees)

    @property
    def is_tree(self) -> bool:
        """Whether neuron is a tree.

        Also returns True if neuron consists of multiple separate trees!

        See also
        --------
        networkx.is_forest()
                    Function used to test whether neuron is a tree.
        :attr:`TreeNeuron.cycles`
                    If your neuron is not a tree, this will help you identify
                    cycles.

        """
        # A forest is a graph without cycles; multiple disconnected
        # trees are therefore still considered a "tree" here
        return nx.is_forest(self.graph)

    @property
    def subtrees(self) -> List[List[int]]:
        """List of subtrees. Sorted by size as sets of node IDs."""
        # Sort largest -> smallest; `sorted` is stable, so components of
        # equal size keep their original relative order
        return sorted(graph._connected_components(self), key=len, reverse=True)

    @property
    def connectors(self) -> pd.DataFrame:
        """Connector table. If none, will return `None`."""
        # Delegates to _get_connectors() so subclasses can change retrieval
        # without redefining the property
        return self._get_connectors()

    def _get_connectors(self) -> pd.DataFrame:
        # Redefine this function in subclass to change how nodes are retrieved
        return getattr(self, '_connectors', None)

    @connectors.setter
    def connectors(self, v):
        """Validate and set connector table."""
        # We are refering to an extra function to facilitate subclassing:
        # Redefine _set_connectors() to not break property
        self._set_connectors(v)

    def _set_connectors(self, v):
        """Validate and store the connector table.

        Subclasses can override this method to change the validation.
        """
        if v is None:
            self._connectors = None
            return

        self._connectors = utils.validate_table(v,
                                                required=[('connector_id', 'id'),
                                                          ('node_id', 'rowId', 'node', 'treenode_id'),
                                                          ('x', 'X'),
                                                          ('y', 'Y'),
                                                          ('z', 'Z'),
                                                          ('type', 'relation', 'label', 'prepost')],
                                                rename=True,
                                                restrict=False)

    @property
    @requires_nodes
    def cycles(self) -> Optional[List[int]]:
        """Cycles in neuron (if any), else `None`.

        See also
        --------
        networkx.find_cycle()
                    Function used to find cycles.

        """
        # Search from the leaf nodes; `NetworkXNoCycle` is the expected
        # (healthy) outcome. Note: the previous `except BaseException: raise`
        # clause was a no-op and has been removed; the docstring referenced
        # a non-existent `networkx.find_cycles()` (it is `find_cycle`).
        try:
            return nx.find_cycle(self.graph,
                                 source=self.nodes[self.nodes.type == 'end'].node_id.values)
        except nx.exception.NetworkXNoCycle:
            return None

    @property
    def simple(self) -> 'TreeNeuron':
        """Simplified representation consisting only of root, branch points and leafs."""
        if not hasattr(self, '_simple'):
            # Work on a copy and cache the result
            self._simple = self.copy()
            # Remove the soma - otherwise its node would survive the
            # downsampling below
            self._simple.soma = None
            # Downsampling by infinity leaves only root, branches and leafs
            self._simple.downsample(float('inf'), inplace=True)
        return self._simple

    @property
    def soma(self) -> Optional[Union[str, int]]:
        """Search for soma and return node ID(s).

        `None` if no soma. You can assign either a function that accepts a
        TreeNeuron as input or a fix value. The default is [`navis.find_soma`][].

        """
        # `._soma` can be a fixed value or a function to run
        soma = self._soma() if callable(self._soma) else self._soma

        # Return early to skip the (potentially expensive) checks below
        if soma is None:
            return None

        # Sanity check to make sure that the soma node actually exists
        if utils.is_iterable(soma):
            if all(pd.isnull(soma)):
                return None
            if not any(self.nodes.node_id.isin(soma)):
                logger.warning(f'Soma(s) {soma} not found in node table.')
                return None
        elif soma not in self.nodes.node_id.values:
            logger.warning(f'Soma {soma} not found in node table.')
            return None

        return soma

    @soma.setter
    def soma(self, value: Union[Callable, int, None]) -> None:
        """Set soma."""
        if callable(value):
            # Bind the function to this neuron
            self._soma = types.MethodType(value, self)
        elif value is None or (isinstance(value, bool) and not value):
            # Both `None` and `False` unset the soma
            self._soma = None
        elif value in self.nodes.node_id.values:
            self._soma = value
        else:
            raise ValueError('Soma must be function, None or a valid node ID.')

    @property
    def soma_pos(self) -> Optional[Sequence]:
        """Search for soma and return its position.

        Returns `None` if no soma. You can also use this to assign a soma by
        position in which case it will snap to the closest node.
        """
        # Sanity check to make sure that the soma node actually exists
        soma = self.soma
        if isinstance(soma, type(None)):
            # No soma -> no position
            return None
        elif utils.is_iterable(soma):
            if all(pd.isnull(soma)):
                # An iterable of only NaNs counts as "no soma"
                return None
            # NOTE: an iterable soma that is not all-NaN falls through to
            # the lookup below unchanged
        else:
            # Single ID -> wrap it so `.isin()` below works
            soma = utils.make_iterable(soma)

        return self.nodes.loc[self.nodes.node_id.isin(soma), ['x', 'y', 'z']].values

    @soma_pos.setter
    def soma_pos(self, value: Sequence) -> None:
        """Set soma by position."""
        # `None` unsets the soma
        if value is None:
            self.soma = None
            return

        try:
            pos = np.asarray(value).astype(np.float64).reshape(3)
        except BaseException:
            raise ValueError(f'Unable to convert soma position "{value}" '
                             f'to numeric (3, ) numpy array.')

        # Snap the position to the closest node
        node_id, dist = self.snap(pos, to='nodes')

        # Warn if the match is suspiciously far away
        if dist > (self.sampling_resolution * 10):
            logger.warning(f'New soma position for {self.id} is suspiciously '
                           f'far away from the closest node: {dist}')

        self.soma = node_id

    @property
    @requires_nodes
    def root(self) -> Sequence:
        """Root node(s)."""
        # Roots are nodes without a parent (parent ID < 0)
        return self.nodes.loc[self.nodes.parent_id < 0, 'node_id'].values

    @root.setter
    def root(self, value: Union[int, List[int]]) -> None:
        """Reroot neuron to given node."""
        # Delegate to reroot() which handles the actual rerooting
        self.reroot(value, inplace=True)

    @property
    def type(self) -> str:
        """Neuron type."""
        # Fixed string identifying this class of neuron
        return 'navis.TreeNeuron'

    @property
    @requires_nodes
    def n_branches(self) -> Optional[int]:
        """Number of branch points."""
        # Count nodes classified as "branch"
        return len(self.nodes[self.nodes.type == 'branch'])

    @property
    @requires_nodes
    def n_leafs(self) -> Optional[int]:
        """Number of leaf nodes."""
        # Count nodes classified as "end"
        return len(self.nodes[self.nodes.type == 'end'])

    @property
    @temp_property
    @add_units(compact=True)
    def cable_length(self) -> Union[int, float]:
        """Cable length."""
        try:
            return self._cable_length
        except AttributeError:
            # Compute and cache on first access
            self._cable_length = morpho.cable_length(self)
        return self._cable_length

    @property
    @add_units(compact=True, power=2)
    def surface_area(self) -> float:
        """Radius-based lateral surface area."""
        if 'radius' not in self.nodes.columns:
            raise ValueError(f'Neuron {self.id} does not have radius information')

        if any(self.nodes.radius < 0):
            logger.warning(f'Neuron {self.id} has negative radii - area will not be correct.')

        if any(self.nodes.radius.isnull()):
            logger.warning(f'Neuron {self.id} has NaN radii - area will not be correct.')

        # Map node ID -> radius
        radii = self.nodes.set_index('node_id').radius.to_dict()

        # Only child->parent edges form cylinders, i.e. drop root node(s)
        not_root = self.nodes.parent_id >= 0
        cyl = self.nodes[not_root]

        # Height of each tapered cylinder
        h = morpho.mmetrics.parent_dist(self, root_dist=0)[not_root]

        # Radii for top and bottom of each tapered cylinder
        r1 = cyl.node_id.map(radii).values
        r2 = cyl.parent_id.map(radii).values

        # Lateral surface of a conical frustum: pi * (r1 + r2) * slant height
        return (np.pi * (r1 + r2) * np.sqrt((r1 - r2)**2 + h**2)).sum()

    @property
    @add_units(compact=True, power=3)
    def volume(self) -> float:
        """Radius-based volume."""
        if 'radius' not in self.nodes.columns:
            raise ValueError(f'Neuron {self.id} does not have radius information')

        if any(self.nodes.radius < 0):
            logger.warning(f'Neuron {self.id} has negative radii - volume will not be correct.')

        if any(self.nodes.radius.isnull()):
            logger.warning(f'Neuron {self.id} has NaN radii - volume will not be correct.')

        # Map node ID -> radius
        radii = self.nodes.set_index('node_id').radius.to_dict()

        # Only child->parent edges form cylinders, i.e. drop root node(s)
        not_root = self.nodes.parent_id >= 0
        cyl = self.nodes[not_root]

        # Height of each tapered cylinder
        h = morpho.mmetrics.parent_dist(self, root_dist=0)[not_root]

        # Radii for top and bottom of each tapered cylinder
        r1 = cyl.node_id.map(radii).values
        r2 = cyl.parent_id.map(radii).values

        # Volume of a conical frustum
        return (1/3 * np.pi * (r1**2 + r1 * r2 + r2**2) * h).sum()

    @property
    def bbox(self) -> np.ndarray:
        """Bounding box (includes connectors)."""
        xyz = self.nodes[['x', 'y', 'z']].values
        mn = xyz.min(axis=0)
        mx = xyz.max(axis=0)

        # Connectors can stick out beyond the skeleton itself
        if self.has_connectors:
            cn_xyz = self.connectors[['x', 'y', 'z']].values
            mn = np.minimum(mn, cn_xyz.min(axis=0))
            mx = np.maximum(mx, cn_xyz.max(axis=0))

        return np.vstack((mn, mx)).T

    @property
    def sampling_resolution(self) -> float:
        """Average cable length between child -> parent nodes."""
        resolution = self.cable_length / self.n_nodes

        # If units are attached, pick a more readable prefix
        if isinstance(resolution, pint.Quantity):
            resolution = resolution.to_compact()

        return resolution

    @property
    @temp_property
    def segments(self) -> List[list]:
        """Neuron broken down into linear segments (see also `.small_segments`)."""
        try:
            return self._segments
        except AttributeError:
            # Compute and cache on first access
            self._segments = self._get_segments(how='length')
        return self._segments

    @property
    @temp_property
    def small_segments(self) -> List[list]:
        """Neuron broken down into small linear segments (see also `.segments`)."""
        try:
            return self._small_segments
        except AttributeError:
            # Compute and cache on first access
            self._small_segments = self._get_segments(how='break')
        return self._small_segments

    def _get_segments(self,
                      how: Union[Literal['length'],
                                 Literal['break']] = 'length'
                      ) -> List[list]:
        """Generate segments for neuron."""
        if how == 'length':
            return graph._generate_segments(self)
        elif how == 'break':
            return graph._break_segments(self)
        else:
            raise ValueError(f'Unknown method: "{how}"')

    @property
    def n_skeletons(self) -> int:
        """Number of separate skeletons in this neuron."""
        # One root node per connected skeleton
        return len(self.root)

    def _clear_temp_attr(self, exclude: Optional[list] = None) -> None:
        """Clear temporary attributes.

        Parameters
        ----------
        exclude :   list, optional
                    Steps to skip - e.g. `['classify_nodes']` to not
                    re-run node classification.

        """
        # Avoid mutable default argument (previously `exclude=[]`)
        exclude = [] if exclude is None else exclude

        super()._clear_temp_attr(exclude=exclude)

        # Remove soma if it was manually assigned and is not present anymore
        if not callable(self._soma) and self._soma is not None:
            if utils.is_iterable(self._soma):
                exists = np.isin(self._soma, self.nodes.node_id.values)
                self._soma = np.asarray(self._soma)[exists]
                # Test emptiness, not truthiness: `np.any` would wrongly
                # drop a soma whose only remaining node ID is 0
                if len(self._soma) == 0:
                    self._soma = None
            elif self._soma not in self.nodes.node_id.values:
                self._soma = None

        if 'classify_nodes' not in exclude:
            # Reclassify nodes
            graph.classify_nodes(self, inplace=True)

    def copy(self, deepcopy: bool = False) -> 'TreeNeuron':
        """Return a copy of the neuron.

        Parameters
        ----------
        deepcopy :  bool, optional
                    If False, `.graph` (NetworkX DiGraph) will be returned
                    as view - changes to nodes/edges can propagate back!
                    `.igraph` (iGraph) - if available - will always be
                    deepcopied.

        Returns
        -------
        TreeNeuron

        """
        # The lock must not be copied over
        no_copy = ['_lock']
        # Generate new empty neuron
        x = self.__class__(None)
        # Populate with (shallow) copies of this neuron's data
        x.__dict__.update({k: copy.copy(v) for k, v in self.__dict__.items() if k not in no_copy})

        # Copy graphs only if neuron is not stale
        if not self.is_stale:
            if '_graph_nx' in self.__dict__:
                # `as_view=True` (i.e. deepcopy=False) shares the underlying
                # graph data with the original
                x._graph_nx = self._graph_nx.copy(as_view=deepcopy is not True)
            if '_igraph' in self.__dict__:
                if self._igraph is not None:
                    # This is pretty cheap, so we will always make a deep copy
                    x._igraph = self._igraph.copy()
        else:
            # Stale caches must not survive the copy
            x._clear_temp_attr()

        return x

    def get_graph_nx(self) -> nx.DiGraph:
        """Calculate and return networkX representation of neuron.

        Once calculated stored as `.graph`. Call function again to update
        graph.

        See Also
        --------
        [`navis.neuron2nx`][]

        """
        # Cache so the `.graph` property can re-use the result
        g = graph.neuron2nx(self)
        self._graph_nx = g
        return g

    def get_igraph(self) -> 'igraph.Graph':  # type: ignore
        """Calculate and return iGraph representation of neuron.

        Once calculated stored as `.igraph`. Call function again to update
        iGraph.

        Important
        ---------
        Returns `None` if igraph is not installed!

        See Also
        --------
        [`navis.neuron2igraph`][]

        """
        # Cache so the `.igraph` property can re-use the result
        g = graph.neuron2igraph(self, raise_not_installed=False)
        self._igraph = g
        return g

    @overload
    def resample(self, resample_to: int, inplace: Literal[False]) -> 'TreeNeuron': ...

    @overload
    def resample(self, resample_to: int, inplace: Literal[True]) -> None: ...

    def resample(self, resample_to, inplace=False):
        """Resample neuron to given resolution.

        Parameters
        ----------
        resample_to :           int
                                Resolution to which to resample the neuron.
        inplace :               bool, optional
                                If True, operation will be performed on
                                itself. If False, operation is performed on
                                copy which is then returned.

        See Also
        --------
        [`navis.resample_skeleton`][]
            Base function. See for details and examples.

        """
        # Either work on ourselves or on a copy
        x = self if inplace else self.copy(deepcopy=False)

        sampling.resample_skeleton(x, resample_to, inplace=True)

        # Temporary attributes are cleared by the base function

        return x if not inplace else None

    @overload
    def downsample(self,
                   factor: float,
                   inplace: Literal[False],
                   **kwargs) -> 'TreeNeuron': ...

    @overload
    def downsample(self,
                   factor: float,
                   inplace: Literal[True],
                   **kwargs) -> None: ...

    def downsample(self, factor=5, inplace=False, **kwargs):
        """Downsample the neuron by given factor.

        Parameters
        ----------
        factor :                int, optional
                                Factor by which to downsample the neurons.
                                Default = 5.
        inplace :               bool, optional
                                If True, operation will be performed on
                                itself. If False, operation is performed on
                                copy which is then returned.
        **kwargs
                                Additional arguments passed to
                                [`navis.downsample_neuron`][].

        See Also
        --------
        [`navis.downsample_neuron`][]
            Base function. See for details and examples.

        """
        # Either work on ourselves or on a copy
        x = self if inplace else self.copy(deepcopy=False)

        sampling.downsample_neuron(x, factor, inplace=True, **kwargs)

        # Downsampling invalidates cached graphs/segments
        x._clear_temp_attr()

        return x if not inplace else None

    def reroot(self,
               new_root: Union[int, str],
               inplace: bool = False) -> Optional['TreeNeuron']:
        """Reroot neuron to given node ID or node tag.

        Parameters
        ----------
        new_root :  int | str
                    Either node ID or node tag.
        inplace :   bool, optional
                    If True, operation will be performed on itself. If False,
                    operation is performed on copy which is then returned.

        See Also
        --------
        [`navis.reroot_skeleton`][]
            Base function. See for details and examples.

        """
        # Either work on ourselves or on a copy
        x = self if inplace else self.copy(deepcopy=False)

        graph.reroot_skeleton(x, new_root, inplace=True)

        # Temporary attributes are cleared by reroot_skeleton()

        return x if not inplace else None

    def prune_distal_to(self,
                        node: Union[str, int],
                        inplace: bool = False) -> Optional['TreeNeuron']:
        """Cut off nodes distal to given nodes.

        Parameters
        ----------
        node :      node ID | node tag
                    Provide either node ID(s) or a unique tag(s)
        inplace :   bool, optional
                    If True, operation will be performed on itself. If False,
                    operation is performed on copy which is then returned.

        See Also
        --------
        [`navis.cut_skeleton`][]
            Base function. See for details and examples.

        """
        # Either work on ourselves or on a copy
        x = self if inplace else self.copy(deepcopy=False)

        for n in utils.make_iterable(node, force_type=None):
            # Keep only the part proximal to the cut
            prox = graph.cut_skeleton(x, n, ret='proximal')[0]
            # Reinitialise with proximal data
            x.__init__(prox)  # type: ignore  # Cannot access "__init__" directly
            # Remove potential "left over" attributes (happens if we use a copy)
            x._clear_temp_attr()

        return x if not inplace else None

    def prune_proximal_to(self,
                          node: Union[str, int],
                          inplace: bool = False) -> Optional['TreeNeuron']:
        """Remove nodes proximal to given node. Reroots neuron to cut node.

        Parameters
        ----------
        node :      node_id | node tag
                    Provide either a node ID or a (unique) tag
        inplace :   bool, optional
                    If True, operation will be performed on itself. If False,
                    operation is performed on copy which is then returned.

        See Also
        --------
        [`navis.cut_skeleton`][]
            Base function. See for details and examples.

        """
        # Either work on ourselves or on a copy
        x = self if inplace else self.copy(deepcopy=False)

        for n in utils.make_iterable(node, force_type=None):
            # Keep only the part distal to the cut
            dist = graph.cut_skeleton(x, n, ret='distal')[0]
            # Reinitialise with distal data
            x.__init__(dist)  # type: ignore  # Cannot access "__init__" directly
            # Remove potential "left over" attributes (happens if we use a copy)
            x._clear_temp_attr()

        return x if not inplace else None

    def prune_by_strahler(self,
                          to_prune: Union[int, List[int], slice],
                          inplace: bool = False) -> Optional['TreeNeuron']:
        """Prune neuron based on [Strahler order](https://en.wikipedia.org/wiki/Strahler_number).

        Will reroot neuron to soma if possible.

        Parameters
        ----------
        to_prune :  int | list | range | slice
                    Strahler indices to prune. For example:

                    1. `to_prune=1` removes all leaf branches
                    2. `to_prune=[1, 2]` removes SI 1 and 2
                    3. `to_prune=range(1, 4)` removes SI 1, 2 and 3
                    4. `to_prune=slice(1, -1)` removes everything but the
                       highest SI
                    5. `to_prune=slice(-1, None)` removes only the highest
                       SI

        inplace :   bool, optional
                    If True, operation will be performed on itself. If False,
                    operation is performed on copy which is then returned.

        See Also
        --------
        [`navis.prune_by_strahler`][]
            This is the base function. See for details and examples.

        """
        # Either work on ourselves or on a copy
        x = self if inplace else self.copy()

        morpho.prune_by_strahler(
            x, to_prune=to_prune, reroot_soma=True, inplace=True)

        # Temporary attributes are cleared by the base function

        return x if not inplace else None

    def prune_twigs(self,
                    size: float,
                    inplace: bool = False,
                    recursive: Union[int, bool, float] = False
                    ) -> Optional['TreeNeuron']:
        """Prune terminal twigs under a given size.

        Parameters
        ----------
        size :          int | float
                        Twigs shorter than this will be pruned.
        inplace :       bool, optional
                        If False, pruning is performed on copy of original neuron
                        which is then returned.
        recursive :     int | bool | "inf", optional
                        If `int` will undergo that many rounds of recursive
                        pruning. Use `float("inf")` to prune until no more
                        twigs under the given size are left.

        See Also
        --------
        [`navis.prune_twigs`][]
            This is the base function. See for details and examples.

        """
        if inplace:
            x = self
        else:
            x = self.copy()

        # Forward `recursive` to the base function - previously it was
        # accepted here but silently ignored
        morpho.prune_twigs(x, size=size, inplace=True, recursive=recursive)

        if not inplace:
            return x
        return None

    def prune_at_depth(self,
                       depth: Union[float, int],
                       source: Optional[int] = None,
                       inplace: bool = False
                       ) -> Optional['TreeNeuron']:
        """Prune all neurites past a given distance from a source.

        Parameters
        ----------
        x :             TreeNeuron | NeuronList
        depth :         int | float
                        Distance from source at which to start pruning.
        source :        int, optional
                        Source node for depth calculation. If `None`, will use
                        root. If `x` is a list of neurons then must provide a
                        source for each neuron.
        inplace :       bool, optional
                        If False, pruning is performed on copy of original neuron
                        which is then returned.

        Returns
        -------
        TreeNeuron/List
                        Pruned neuron(s).

        See Also
        --------
        [`navis.prune_at_depth`][]
            This is the base function. See for details and examples.

        """
        # Either work on ourselves or on a copy
        x = self if inplace else self.copy()

        morpho.prune_at_depth(x, depth=depth, source=source, inplace=True)

        return x if not inplace else None

    def cell_body_fiber(self,
                        reroot_soma: bool = True,
                        inplace: bool = False,
                        ) -> Optional['TreeNeuron']:
        """Prune neuron to its cell body fiber.

        Parameters
        ----------
        reroot_soma :       bool, optional
                            If True, will reroot to soma.
        inplace :           bool, optional
                            If True, operation will be performed on itself.
                            If False, operation is performed on copy which is
                            then returned.

        See Also
        --------
        [`navis.cell_body_fiber`][]
            This is the base function. See for details and examples.

        """
        # Either work on ourselves or on a copy
        x = self if inplace else self.copy()

        morpho.cell_body_fiber(x, inplace=True, reroot_soma=reroot_soma)

        # Clear temporary attributes
        x._clear_temp_attr()

        return x if not inplace else None

    def prune_by_longest_neurite(self,
                                 n: int = 1,
                                 reroot_soma: bool = False,
                                 inplace: bool = False,
                                 ) -> Optional['TreeNeuron']:
        """Prune neuron down to the longest neurite.

        Parameters
        ----------
        n :                 int, optional
                            Number of longest neurites to preserve.
        reroot_soma :       bool, optional
                            If True, will reroot to soma before pruning.
        inplace :           bool, optional
                            If True, operation will be performed on itself.
                            If False, operation is performed on copy which is
                            then returned.

        See Also
        --------
        [`navis.longest_neurite`][]
            This is the base function. See for details and examples.

        """
        # Either work on ourselves or on a copy
        x = self if inplace else self.copy()

        graph.longest_neurite(
            x, n, inplace=True, reroot_soma=reroot_soma)

        # Clear temporary attributes
        x._clear_temp_attr()

        return x if not inplace else None

    def prune_by_volume(self,
                        v: Union[core.Volume,
                                 List[core.Volume],
                                 Dict[str, core.Volume]],
                        mode: Union[Literal['IN'], Literal['OUT']] = 'IN',
                        prevent_fragments: bool = False,
                        inplace: bool = False
                        ) -> Optional['TreeNeuron']:
        """Prune neuron by intersection with given volume(s).

        Parameters
        ----------
        v :                 str | navis.Volume | list of either
                            Volume(s) to check for intersection
        mode :              'IN' | 'OUT', optional
                            If 'IN', parts of the neuron inside the volume are
                            kept.
        prevent_fragments : bool, optional
                            If True, will add nodes to `subset` required to
                            keep neuron from fragmenting.
        inplace :           bool, optional
                            If True, operation will be performed on itself. If
                            False, operation is performed on copy which is then
                            returned.

        See Also
        --------
        [`navis.in_volume`][]
            Base function. See for details and examples.

        """
        # Either work on ourselves or on a copy
        x = self if inplace else self.copy()

        intersection.in_volume(x, v, inplace=True,
                               prevent_fragments=prevent_fragments,
                               mode=mode)

        # Temporary attributes are cleared by the base function

        return x if not inplace else None

    def to_swc(self,
               filename: Optional[str] = None,
               **kwargs) -> None:
        """Generate SWC file from this neuron.

        Parameters
        ----------
        filename :      str | None, optional
                        If `None`, will use "neuron_{id}.swc".
        kwargs
                        Additional arguments passed to [`navis.write_swc`][].

        Returns
        -------
        Nothing

        See Also
        --------
        [`navis.write_swc`][]
                See this function for further details.

        """
        # Simply delegate to the I/O module
        return io.write_swc(self, filename, **kwargs)  # type: ignore  # double import of "io"

    def reload(self,
               inplace: bool = False,
               ) -> Optional['TreeNeuron']:
        """Reload neuron. Must have filepath as `.origin` as attribute.

        Parameters
        ----------
        inplace :   bool, optional
                    If True, this neuron is updated in place. If False, an
                    updated copy is returned.

        Returns
        -------
        TreeNeuron
                If `inplace=False`.

        """
        if not hasattr(self, 'origin'):
            raise AttributeError('To reload TreeNeuron must have `.origin` '
                                 'attribute')

        if self.origin in ('DataFrame', 'string'):
            raise ValueError('Unable to reload TreeNeuron: it appears to have '
                             'been created from string or DataFrame.')

        # Propagate read parameters originally used (if any)
        kwargs = {}
        if hasattr(self, 'soma_label'):
            kwargs['soma_label'] = self.soma_label
        if hasattr(self, 'connector_labels'):
            kwargs['connector_labels'] = self.connector_labels

        x = io.read_swc(self.origin, **kwargs)

        if inplace:
            self.__dict__.update(x.__dict__)
            self._clear_temp_attr()
        else:
            # This makes sure that we keep any additional data stored after
            # this neuron has been loaded
            x2 = self.copy()
            x2.__dict__.update(x.__dict__)
            x2._clear_temp_attr()
            # Bugfix: previously returned `x` here, which discarded exactly
            # the extra data `x2` was built to preserve
            return x2

    def snap(self, locs, to='nodes'):
        """Snap xyz location(s) to closest node or synapse.

        Parameters
        ----------
        locs :      (N, 3) array | (3, ) array
                    One or more XYZ coordinates.
        to :        "nodes" | "connectors"
                    Whether to snap to nodes or connectors.

        Returns
        -------
        id :        int | list of int
                    ID(s) of the nearest node/connector.
        dist :      float | list of float
                    Distance(s) to the nearest node/connector.

        Examples
        --------
        >>> import navis
        >>> n = navis.example_neurons(1)
        >>> id, dist = n.snap([0, 0, 0])
        >>> id
        1124

        """
        locs = np.asarray(locs).astype(np.float64)

        # Accept a single (3,) coordinate or an (N, 3) array of coordinates
        single = locs.ndim == 1 and len(locs) == 3
        multi = locs.ndim == 2 and locs.shape[1] == 3
        if not (single or multi):
            raise ValueError('Expected a single (x, y, z) location or a '
                             '(N, 3) array of multiple locations')

        if to not in ('nodes', 'connectors'):
            raise ValueError('`to` must be "nodes" or "connectors", '
                             f'got {to}')

        # Build a KD-tree over the requested data and query it
        kd = graph.neuron2KDTree(self, data=to)
        dist, ix = kd.query(locs)

        # Translate tree indices into node/connector IDs
        if to == 'nodes':
            ids = self.nodes.node_id.values[ix]
        else:
            ids = self.connectors.connector_id.values[ix]

        return ids, dist

Adjacency matrix of the skeleton.

Bounding box (includes connectors).

Branch node table.

Cable length.

Connector table. If none, will return None.

Cycles in neuron (if any).

See also

networkx.find_cycle() Function used to find cycles.

Coordinates of edges between nodes.

See Also

edges Same but with node IDs instead of x/y/z coordinates.

Edges between nodes.

See Also

edge_coords Same but with x/y/z coordinates instead of node IDs.

End node table (same as leafs).

Matrix with geodesic (along-the-arbor) distance between nodes.

Networkx Graph representation of this neuron.

iGraph representation of this neuron.

Whether neuron is a tree.

Also returns True if neuron consists of multiple separate trees!

See also

networkx.is_forest() Function used to test whether neuron is a tree. :attr:TreeNeuron.cycles If your neuron is not a tree, this will help you identify cycles.

Leaf node table.

Number of branch points.

Number of leaf nodes.

Number of separate skeletons in this neuron.

Count number of connected trees in this neuron.

Node table.

Root node(s).

Average cable length between child -> parent nodes.

Neuron broken down into linear segments (see also .small_segments).

Simplified representation consisting only of root, branch points and leafs.

Neuron broken down into small linear segments (see also .segments).

Search for soma and return node ID(s).

None if no soma. You can assign either a function that accepts a TreeNeuron as input or a fixed value. The default is navis.find_soma.

Search for soma and return its position.

Returns None if no soma. You can also use this to assign a soma by position in which case it will snap to the closest node.

List of subtrees. Sorted by size as sets of node IDs.

Radius-based lateral surface area.

Neuron type.

Vertices of the skeleton.

Radius-based volume.

Initialize Skeleton Neuron.

Source code in navis/core/skeleton.py
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
def __init__(self,
             x: Union[pd.DataFrame,
                      BufferedIOBase,
                      str,
                      'TreeNeuron',
                      nx.DiGraph],
             units: Union[pint.Unit, str] = None,
             **metadata
             ):
    """Initialize Skeleton Neuron.

    Parameters
    ----------
    x :         DataFrame | Series | nx.Graph | file-like | str |
                skeletor.Skeleton | TreeNeuron | (vertices, edges) tuple | None
                Data to construct the skeleton from. `None` produces an
                empty neuron.
    units :     pint.Unit | str, optional
                Spatial units of the neuron's coordinates.
    **metadata
                Additional attributes to set on the neuron.

    """
    super().__init__()

    # Lock neuron during construction
    self._lock = 1

    # Dispatch on the input type. NOTE: the order of these checks matters
    # (e.g. pd.Series must be tested after pd.DataFrame).
    if isinstance(x, pd.DataFrame):
        self.nodes = x
    elif isinstance(x, pd.Series):
        # A Series is expected to wrap a node table plus extra metadata
        if not hasattr(x, 'nodes'):
            raise ValueError('pandas.Series must have `nodes` entry.')
        elif not isinstance(x.nodes, pd.DataFrame):
            raise TypeError(f'Nodes must be pandas DataFrame, got "{type(x.nodes)}"')
        self.nodes = x.nodes
        # Everything else in the Series is treated as metadata
        metadata.update(x.to_dict())
    elif isinstance(x, nx.Graph):
        # Convert graph to a node table
        self.nodes = graph.nx2neuron(x).nodes
    elif isinstance(x, BufferedIOBase) or isinstance(x, str):
        # Strings and buffers are assumed to point at SWC data
        x = io.read_swc(x)  # type: ignore
        self.__dict__.update(x.__dict__)
    elif isinstance(x, sk.Skeleton):
        # skeletor.Skeleton: take its SWC table and skeleton->mesh mapping
        self.nodes = x.swc.copy()
        self.vertex_map = x.mesh_map
    elif isinstance(x, TreeNeuron):
        self.__dict__.update(x.copy().__dict__)
        # Try to copy every attribute
        for at in self.__dict__:
            try:
                setattr(self, at, copy.copy(getattr(self, at)))
            except BaseException:
                logger.warning(f'Unable to deep-copy attribute "{at}"')
    elif isinstance(x, tuple):
        # Tuple of vertices and edges
        if len(x) != 2:
            raise ValueError('Tuple must have 2 elements: vertices and edges.')
        self.nodes = graph.edges2neuron(edges=x[1], vertices=x[0]).nodes
    elif isinstance(x, type(None)):
        # This is a essentially an empty neuron
        pass
    else:
        raise utils.ConstructionError(f'Unable to construct TreeNeuron from "{type(x)}"')

    # Apply any metadata as attributes
    for k, v in metadata.items():
        try:
            setattr(self, k, v)
        except AttributeError:
            raise AttributeError(f"Unable to set neuron's `{k}` attribute.")

    self.units = units
    # Remember the checksum of the core data so we can detect staleness later
    self._current_md5 = self.core_md5

    # Unlock neuron again
    self._lock = 0

Prune neuron to its cell body fiber.

PARAMETER DESCRIPTION
reroot_soma
            If True, will reroot to soma.

TYPE: bool DEFAULT: True

inplace
            If True, operation will be performed on itself.
            If False, operation is performed on copy which is
            then returned.

TYPE: bool DEFAULT: False

See Also

navis.cell_body_fiber This is the base function. See for details and examples.

Source code in navis/core/skeleton.py
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
def cell_body_fiber(self,
                    reroot_soma: bool = True,
                    inplace: bool = False,
                    ) -> Optional['TreeNeuron']:
    """Prune neuron to its cell body fiber.

    Parameters
    ----------
    reroot_soma :       bool, optional
                        If True, reroot to the soma first.
    inplace :           bool, optional
                        If True, prune this neuron directly. If False,
                        prune (and return) a copy instead.

    See Also
    --------
    [`navis.cell_body_fiber`][]
        This is the base function. See for details and examples.

    """
    # Work either on this neuron directly or on a fresh copy
    neuron = self if inplace else self.copy()

    morpho.cell_body_fiber(neuron, inplace=True, reroot_soma=reroot_soma)

    # Pruning invalidates cached graph representations etc.
    neuron._clear_temp_attr()

    return None if inplace else neuron

Return a copy of the neuron.

PARAMETER DESCRIPTION
deepcopy
    If False, `.graph` (NetworkX DiGraph) will be returned
    as view - changes to nodes/edges can propagate back!
    `.igraph` (iGraph) - if available - will always be
    deepcopied.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
TreeNeuron
Source code in navis/core/skeleton.py
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
def copy(self, deepcopy: bool = False) -> 'TreeNeuron':
    """Return a copy of the neuron.

    Parameters
    ----------
    deepcopy :  bool, optional
                If False, `.graph` (NetworkX DiGraph) will be returned
                as view - changes to nodes/edges can propagate back!
                `.igraph` (iGraph) - if available - will always be
                deepcopied.

    Returns
    -------
    TreeNeuron

    """
    # Attributes that must not be carried over to the copy
    skip = ['_lock']
    # Start from an empty neuron of the same class, then shallow-copy
    # all other attributes over
    clone = self.__class__(None)
    clone.__dict__.update({k: copy.copy(v)
                           for k, v in self.__dict__.items()
                           if k not in skip})

    if self.is_stale:
        # Cached representations are outdated -> drop them from the copy
        clone._clear_temp_attr()
    else:
        if '_graph_nx' in self.__dict__:
            # With deepcopy=False this produces a (read-only-ish) view
            clone._graph_nx = self._graph_nx.copy(as_view=deepcopy is not True)
        if '_igraph' in self.__dict__ and self._igraph is not None:
            # This is pretty cheap, so we will always make a deep copy
            clone._igraph = self._igraph.copy()

    return clone
downsample
downsample

Downsample the neuron by given factor.

PARAMETER DESCRIPTION
factor
                Factor by which to downsample the neurons.
                Default = 5.

TYPE: int DEFAULT: 5

inplace
                If True, operation will be performed on
                itself. If False, operation is performed on
                copy which is then returned.

TYPE: bool DEFAULT: False

**kwargs
                Additional arguments passed to
                [`navis.downsample_neuron`][].

DEFAULT: {}

See Also

navis.downsample_neuron Base function. See for details and examples.

Source code in navis/core/skeleton.py
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
def downsample(self, factor=5, inplace=False, **kwargs):
    """Downsample the neuron by given factor.

    Parameters
    ----------
    factor :                int, optional
                            Factor by which to downsample. Defaults to 5.
    inplace :               bool, optional
                            If True, downsample this neuron directly. If
                            False, downsample (and return) a copy instead.
    **kwargs
                            Additional arguments passed to
                            [`navis.downsample_neuron`][].

    See Also
    --------
    [`navis.downsample_neuron`][]
        Base function. See for details and examples.

    """
    # Work either on this neuron directly or on a (shallow) copy
    neuron = self if inplace else self.copy(deepcopy=False)

    sampling.downsample_neuron(neuron, factor, inplace=True, **kwargs)

    # Downsampling changes the node table -> drop cached attributes
    neuron._clear_temp_attr()

    return None if inplace else neuron

Calculate and return networkX representation of neuron.

Once calculated stored as .graph. Call function again to update graph.

See Also

navis.neuron2nx

Source code in navis/core/skeleton.py
872
873
874
875
876
877
878
879
880
881
882
883
884
def get_graph_nx(self) -> nx.DiGraph:
    """(Re)generate the networkX representation of this neuron.

    The result is cached as `.graph`; call this function again to
    refresh the cache.

    See Also
    --------
    [`navis.neuron2nx`][]

    """
    g = graph.neuron2nx(self)
    self._graph_nx = g
    return g

Calculate and return iGraph representation of neuron.

Once calculated stored as .igraph. Call function again to update iGraph.

Important

Returns None if igraph is not installed!

See Also

navis.neuron2igraph

Source code in navis/core/skeleton.py
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
def get_igraph(self) -> 'igraph.Graph':  # type: ignore
    """(Re)generate the iGraph representation of this neuron.

    The result is cached as `.igraph`; call this function again to
    refresh the cache.

    Important
    ---------
    Returns `None` if igraph is not installed!

    See Also
    --------
    [`navis.neuron2igraph`][]

    """
    ig = graph.neuron2igraph(self, raise_not_installed=False)
    self._igraph = ig
    return ig

Prune all neurites past a given distance from a source.

PARAMETER DESCRIPTION
x

TYPE: TreeNeuron | NeuronList

depth
        Distance from source at which to start pruning.

TYPE: int | float

source
        Source node for depth calculation. If `None`, will use
        root. If `x` is a list of neurons then must provide a
        source for each neuron.

TYPE: int DEFAULT: None

inplace
        If False, pruning is performed on copy of original neuron
        which is then returned.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
TreeNeuron / List

Pruned neuron(s).

See Also

navis.prune_at_depth This is the base function. See for details and examples.

Source code in navis/core/skeleton.py
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
def prune_at_depth(self,
                   depth: Union[float, int],
                   source: Optional[int] = None,
                   inplace: bool = False
                   ) -> Optional['TreeNeuron']:
    """Prune all neurites past a given distance from a source.

    Parameters
    ----------
    depth :         int | float
                    Distance from source at which to start pruning.
    source :        int, optional
                    Source node for depth calculation. If `None`, will use
                    the root.
    inplace :       bool, optional
                    If False, pruning is performed on a copy of the
                    original neuron which is then returned.

    Returns
    -------
    TreeNeuron
                    Pruned neuron (only if `inplace=False`).

    See Also
    --------
    [`navis.prune_at_depth`][]
        This is the base function. See for details and examples.

    """
    # Work either on this neuron directly or on a fresh copy
    neuron = self if inplace else self.copy()

    morpho.prune_at_depth(neuron, depth=depth, source=source, inplace=True)

    return None if inplace else neuron

Prune neuron down to the longest neurite.

PARAMETER DESCRIPTION
n
            Number of longest neurites to preserve.

TYPE: int DEFAULT: 1

reroot_soma
            If True, will reroot to soma before pruning.

TYPE: bool DEFAULT: False

inplace
            If True, operation will be performed on itself.
            If False, operation is performed on copy which is
            then returned.

TYPE: bool DEFAULT: False

See Also

navis.longest_neurite This is the base function. See for details and examples.

Source code in navis/core/skeleton.py
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
def prune_by_longest_neurite(self,
                             n: int = 1,
                             reroot_soma: bool = False,
                             inplace: bool = False,
                             ) -> Optional['TreeNeuron']:
    """Prune neuron down to the longest neurite.

    Parameters
    ----------
    n :                 int, optional
                        How many of the longest neurites to keep.
    reroot_soma :       bool, optional
                        If True, reroot to the soma before pruning.
    inplace :           bool, optional
                        If True, prune this neuron directly. If False,
                        prune (and return) a copy instead.

    See Also
    --------
    [`navis.longest_neurite`][]
        This is the base function. See for details and examples.

    """
    # Work either on this neuron directly or on a fresh copy
    neuron = self if inplace else self.copy()

    graph.longest_neurite(
        neuron, n, inplace=True, reroot_soma=reroot_soma)

    # Pruning invalidates cached graph representations etc.
    neuron._clear_temp_attr()

    return None if inplace else neuron

Prune neuron based on Strahler order.

Will reroot neuron to soma if possible.

PARAMETER DESCRIPTION
to_prune
    Strahler indices to prune. For example:

    1. `to_prune=1` removes all leaf branches
    2. `to_prune=[1, 2]` removes SI 1 and 2
    3. `to_prune=range(1, 4)` removes SI 1, 2 and 3
    4. `to_prune=slice(1, -1)` removes everything but the
       highest SI
    5. `to_prune=slice(-1, None)` removes only the highest
       SI

TYPE: int | list | range | slice

inplace
    If True, operation will be performed on itself. If False,
    operation is performed on copy which is then returned.

TYPE: bool DEFAULT: False

See Also

navis.prune_by_strahler This is the base function. See for details and examples.

Source code in navis/core/skeleton.py
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
def prune_by_strahler(self,
                      to_prune: Union[int, List[int], slice],
                      inplace: bool = False) -> Optional['TreeNeuron']:
    """Prune neuron based on [Strahler order](https://en.wikipedia.org/wiki/Strahler_number).

    Will reroot neuron to soma if possible.

    Parameters
    ----------
    to_prune :  int | list | range | slice
                Strahler indices to prune. For example:

                1. `to_prune=1` removes all leaf branches
                2. `to_prune=[1, 2]` removes SI 1 and 2
                3. `to_prune=range(1, 4)` removes SI 1, 2 and 3
                4. `to_prune=slice(1, -1)` removes everything but the
                   highest SI
                5. `to_prune=slice(-1, None)` removes only the highest
                   SI

    inplace :   bool, optional
                If True, prune this neuron directly. If False, prune
                (and return) a copy instead.

    See Also
    --------
    [`navis.prune_by_strahler`][]
        This is the base function. See for details and examples.

    """
    # Work either on this neuron directly or on a fresh copy
    neuron = self if inplace else self.copy()

    morpho.prune_by_strahler(
        neuron, to_prune=to_prune, reroot_soma=True, inplace=True)

    # morpho.prune_by_strahler already clears temporary attributes for us

    return None if inplace else neuron

Prune neuron by intersection with given volume(s).

PARAMETER DESCRIPTION
v
            Volume(s) to check for intersection

TYPE: str | navis.Volume | list of either

mode
            If 'IN', parts of the neuron inside the volume are
            kept.

TYPE: 'IN' | 'OUT' DEFAULT: 'IN'

prevent_fragments
            If True, will add nodes to `subset` required to
            keep neuron from fragmenting.

TYPE: bool DEFAULT: False

inplace
            If True, operation will be performed on itself. If
            False, operation is performed on copy which is then
            returned.

TYPE: bool DEFAULT: False

See Also

navis.in_volume Base function. See for details and examples.

Source code in navis/core/skeleton.py
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
def prune_by_volume(self,
                    v: Union[core.Volume,
                             List[core.Volume],
                             Dict[str, core.Volume]],
                    mode: Union[Literal['IN'], Literal['OUT']] = 'IN',
                    prevent_fragments: bool = False,
                    inplace: bool = False
                    ) -> Optional['TreeNeuron']:
    """Prune neuron by intersection with given volume(s).

    Parameters
    ----------
    v :                 str | navis.Volume | list of either
                        Volume(s) to check for intersection.
    mode :              'IN' | 'OUT', optional
                        If 'IN', parts of the neuron inside the volume are
                        kept.
    prevent_fragments : bool, optional
                        If True, will add nodes to `subset` required to
                        keep the neuron from fragmenting.
    inplace :           bool, optional
                        If True, prune this neuron directly. If False,
                        prune (and return) a copy instead.

    See Also
    --------
    [`navis.in_volume`][]
        Base function. See for details and examples.

    """
    # Work either on this neuron directly or on a fresh copy
    neuron = self if inplace else self.copy()

    intersection.in_volume(neuron, v, inplace=True,
                           prevent_fragments=prevent_fragments,
                           mode=mode)

    # in_volume takes care of clearing temporary attributes

    return None if inplace else neuron

Cut off nodes distal to given nodes.

PARAMETER DESCRIPTION
node
    Provide either node ID(s) or a unique tag(s)

TYPE: node ID | node tag

inplace
    If True, operation will be performed on itself. If False,
    operation is performed on copy which is then returned.

TYPE: bool DEFAULT: False

See Also

navis.cut_skeleton Base function. See for details and examples.

Source code in navis/core/skeleton.py
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
def prune_distal_to(self,
                    node: Union[str, int],
                    inplace: bool = False) -> Optional['TreeNeuron']:
    """Cut off nodes distal to given nodes.

    Parameters
    ----------
    node :      node ID | node tag
                Provide either node ID(s) or a unique tag(s).
    inplace :   bool, optional
                If True, prune this neuron directly. If False, prune
                (and return) a copy instead.

    See Also
    --------
    [`navis.cut_skeleton`][]
        Base function. See for details and examples.

    """
    # Work either on this neuron directly or on a (shallow) copy
    neuron = self if inplace else self.copy(deepcopy=False)

    for cut_point in utils.make_iterable(node, force_type=None):
        # Keep only the part proximal to this cut point
        prox = graph.cut_skeleton(neuron, cut_point, ret='proximal')[0]
        # Reinitialise with the proximal data
        neuron.__init__(prox)  # type: ignore  # Cannot access "__init__" directly
        # Remove potential "left over" attributes (happens if we use a copy)
        neuron._clear_temp_attr()

    return None if inplace else neuron

Remove nodes proximal to given node. Reroots neuron to cut node.

PARAMETER DESCRIPTION
node
    Provide either a node ID or a (unique) tag

TYPE: node_id | node tag

inplace
    If True, operation will be performed on itself. If False,
    operation is performed on copy which is then returned.

TYPE: bool DEFAULT: False

See Also

navis.cut_skeleton Base function. See for details and examples.

Source code in navis/core/skeleton.py
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
def prune_proximal_to(self,
                      node: Union[str, int],
                      inplace: bool = False) -> Optional['TreeNeuron']:
    """Remove nodes proximal to given node. Reroots neuron to cut node.

    Parameters
    ----------
    node :      node_id | node tag
                Provide either a node ID or a (unique) tag.
    inplace :   bool, optional
                If True, prune this neuron directly. If False, prune
                (and return) a copy instead.

    See Also
    --------
    [`navis.cut_skeleton`][]
        Base function. See for details and examples.

    """
    # Work either on this neuron directly or on a (shallow) copy
    neuron = self if inplace else self.copy(deepcopy=False)

    for cut_point in utils.make_iterable(node, force_type=None):
        # Keep only the part distal to this cut point
        dist = graph.cut_skeleton(neuron, cut_point, ret='distal')[0]
        # Reinitialise with the distal data
        neuron.__init__(dist)  # type: ignore  # Cannot access "__init__" directly
        # Remove potential "left over" attributes (happens if we use a copy)
        neuron._clear_temp_attr()

    # Clearing temporary attributes is done by cut_skeleton

    return None if inplace else neuron

Prune terminal twigs under a given size.

PARAMETER DESCRIPTION
size
        Twigs shorter than this will be pruned.

TYPE: int | float

inplace
        If False, pruning is performed on copy of original neuron
        which is then returned.

TYPE: bool DEFAULT: False

recursive
        If `int` will undergo that many rounds of recursive
        pruning. Use `float("inf")` to prune until no more
        twigs under the given size are left.

TYPE: int | bool | "inf" DEFAULT: False

See Also

navis.prune_twigs This is the base function. See for details and examples.

Source code in navis/core/skeleton.py
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
def prune_twigs(self,
                size: float,
                inplace: bool = False,
                recursive: Union[int, bool, float] = False
                ) -> Optional['TreeNeuron']:
    """Prune terminal twigs under a given size.

    Parameters
    ----------
    size :          int | float
                    Twigs shorter than this will be pruned.
    inplace :       bool, optional
                    If False, pruning is performed on copy of original neuron
                    which is then returned.
    recursive :     int | bool | "inf", optional
                    If `int` will undergo that many rounds of recursive
                    pruning. Use `float("inf")` to prune until no more
                    twigs under the given size are left.

    See Also
    --------
    [`navis.prune_twigs`][]
        This is the base function. See for details and examples.

    """
    if inplace:
        x = self
    else:
        x = self.copy()

    # Bug fix: `recursive` was accepted and documented but never forwarded
    # to the base function, so recursive pruning silently did nothing.
    morpho.prune_twigs(x, size=size, inplace=True, recursive=recursive)

    if not inplace:
        return x
    return None

Reload neuron. Must have filepath as .origin as attribute.

RETURNS DESCRIPTION
TreeNeuron

If inplace=False.

Source code in navis/core/skeleton.py
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
def reload(self,
           inplace: bool = False,
           ) -> Optional['TreeNeuron']:
    """Reload neuron. Must have filepath as `.origin` as attribute.

    Parameters
    ----------
    inplace :   bool, optional
                If True, the reloaded data replaces this neuron's data.
                If False, a reloaded copy is returned instead.

    Returns
    -------
    TreeNeuron
            If `inplace=False`.

    Raises
    ------
    AttributeError
            If this neuron has no `.origin` attribute.
    ValueError
            If the neuron was created from a string or DataFrame (i.e.
            there is no file to reload from).

    """
    if not hasattr(self, 'origin'):
        raise AttributeError('To reload TreeNeuron must have `.origin` '
                             'attribute')

    if self.origin in ('DataFrame', 'string'):
        raise ValueError('Unable to reload TreeNeuron: it appears to have '
                         'been created from string or DataFrame.')

    # Carry over SWC-parsing settings (if present) so the reloaded neuron
    # is interpreted the same way as the original
    kwargs = {}
    if hasattr(self, 'soma_label'):
        kwargs['soma_label'] = self.soma_label
    if hasattr(self, 'connector_labels'):
        kwargs['connector_labels'] = self.connector_labels

    x = io.read_swc(self.origin, **kwargs)

    if inplace:
        self.__dict__.update(x.__dict__)
        self._clear_temp_attr()
    else:
        # This makes sure that we keep any additional data stored after
        # this neuron has been loaded
        x2 = self.copy()
        x2.__dict__.update(x.__dict__)
        x2._clear_temp_attr()
        # Bug fix: return the updated copy `x2` (which preserves any
        # extra attributes) instead of the freshly read neuron `x`
        return x2

Reroot neuron to given node ID or node tag.

PARAMETER DESCRIPTION
new_root
    Either node ID or node tag.

TYPE: int | str

inplace
    If True, operation will be performed on itself. If False,
    operation is performed on copy which is then returned.

TYPE: bool DEFAULT: False

See Also

navis.reroot_skeleton Base function. See for details and examples.

Source code in navis/core/skeleton.py
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
def reroot(self,
           new_root: Union[int, str],
           inplace: bool = False) -> Optional['TreeNeuron']:
    """Reroot neuron to given node ID or node tag.

    Parameters
    ----------
    new_root :  int | str
                Either node ID or node tag.
    inplace :   bool, optional
                If True, reroot this neuron directly. If False, reroot
                (and return) a copy instead.

    See Also
    --------
    [`navis.reroot_skeleton`][]
        Base function. See for details and examples.

    """
    # Work either on this neuron directly or on a (shallow) copy
    neuron = self if inplace else self.copy(deepcopy=False)

    graph.reroot_skeleton(neuron, new_root, inplace=True)

    # reroot_skeleton already clears temporary attributes for us

    return None if inplace else neuron
resample
resample

Resample neuron to given resolution.

PARAMETER DESCRIPTION
resample_to
                Resolution to which to resample the neuron.

TYPE: int

inplace
                If True, operation will be performed on
                itself. If False, operation is performed on
                copy which is then returned.

TYPE: bool DEFAULT: False

See Also

navis.resample_skeleton Base function. See for details and examples.

Source code in navis/core/skeleton.py
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
def resample(self, resample_to, inplace=False):
    """Resample neuron to given resolution.

    Parameters
    ----------
    resample_to :           int
                            Resolution to which to resample the neuron.
    inplace :               bool, optional
                            If True, resample this neuron directly. If
                            False, resample (and return) a copy instead.

    See Also
    --------
    [`navis.resample_skeleton`][]
        Base function. See for details and examples.

    """
    # Work either on this neuron directly or on a (shallow) copy
    neuron = self if inplace else self.copy(deepcopy=False)

    sampling.resample_skeleton(neuron, resample_to, inplace=True)

    # resample_skeleton already clears temporary attributes for us

    return None if inplace else neuron

Snap xyz location(s) to closest node or synapse.

PARAMETER DESCRIPTION
locs
    Either single or multiple XYZ locations.

TYPE: (N, 3) array | (3, ) array

to
    Whether to snap to nodes or connectors.

TYPE: "nodes" | "connectors" DEFAULT: 'nodes'

RETURNS DESCRIPTION
id

ID(s) of the closest node/connector.

TYPE: int | list of int

dist

Distance(s) to the closest node/connector.

TYPE: float | list of float

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> id, dist = n.snap([0, 0, 0])
>>> id
1124
Source code in navis/core/skeleton.py
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
def snap(self, locs, to='nodes'):
    """Snap xyz location(s) to closest node or synapse.

    Parameters
    ----------
    locs :      (N, 3) array | (3, ) array
                Either single or multiple XYZ locations.
    to :        "nodes" | "connectors"
                Whether to snap to nodes or connectors.

    Returns
    -------
    id :        int | list of int
                ID(s) of the closest node/connector.
    dist :      float | list of float
                Distance(s) to the closest node/connector.

    Raises
    ------
    ValueError
                If `locs` is not a (3, ) or (N, 3) array, or `to` is not
                one of the accepted values.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> id, dist = n.snap([0, 0, 0])
    >>> id
    1124

    """
    locs = np.asarray(locs).astype(np.float64)

    is_single = (locs.ndim == 1 and len(locs) == 3)
    is_multi = (locs.ndim == 2 and locs.shape[1] == 3)
    if not is_single and not is_multi:
        raise ValueError('Expected a single (x, y, z) location or a '
                         '(N, 3) array of multiple locations')

    if to not in ('nodes', 'connectors'):
        raise ValueError('`to` must be "nodes" or "connectors", '
                         f'got {to}')

    # Generate tree
    tree = graph.neuron2KDTree(self, data=to)

    # Find the closest node
    dist, ix = tree.query(locs)

    # `id_` instead of `id` to avoid shadowing the builtin
    if to == 'nodes':
        id_ = self.nodes.node_id.values[ix]
    else:
        id_ = self.connectors.connector_id.values[ix]

    return id_, dist

Generate SWC file from this neuron.

PARAMETER DESCRIPTION
filename
        If `None`, will use "neuron_{id}.swc".

TYPE: str | None DEFAULT: None

kwargs
        Additional arguments passed to [`navis.write_swc`][].

DEFAULT: {}

RETURNS DESCRIPTION
Nothing
See Also

navis.write_swc See this function for further details.

Source code in navis/core/skeleton.py
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
def to_swc(self,
           filename: Optional[str] = None,
           **kwargs) -> None:
    """Generate SWC file from this neuron.

    Parameters
    ----------
    filename :      str | None, optional
                    If `None`, will use "neuron_{id}.swc".
    kwargs
                    Additional arguments passed to [`navis.write_swc`][].

    Returns
    -------
    Nothing

    See Also
    --------
    [`navis.write_swc`][]
            See this function for further details.

    """
    # Delegate entirely to the IO module
    result = io.write_swc(self, filename, **kwargs)  # type: ignore  # double import of "io"
    return result

Vispy 3D viewer.

PARAMETER DESCRIPTION
picking
    If `True`, allow selecting neurons by shift-clicking on
    neurons and placing a 3D cursor via control-click (for OSX:
    command-click).

TYPE: bool DEFAULT: False

**kwargs
  Keyword arguments passed to `vispy.scene.SceneCanvas`.

DEFAULT: {}

ATTRIBUTE DESCRIPTION
picking

Set to True to allow picking via shift-clicking.

TYPE: bool

selected

List of currently selected neurons. Can also be used to set the selection.

TYPE: np.array

show_legend

Set to True or press L to show legend. This may impact performance.

TYPE: bool

legend_font_size

Font size for legend.

TYPE: int

Examples:

This viewer is what navis.plot3d uses when backend='vispy'. Instead of navis.plot3d we can interact with the viewer directly:

>>> # Open a 3D viewer
>>> import navis
>>> v = navis.Viewer()
>>> # Close the 3D viewer
>>> v.close()

You can change the background color from the start or on-the-go:

>>> # Set background to green
>>> v = navis.Viewer(bgcolor='green')
>>> # Set background back to white
>>> v.set_bgcolor((1, 1, 1))
>>> # Alternative to v.close():
>>> navis.close3d()
Source code in navis/plotting/vispy/viewer.py
  84
  85
  86
  87
  88
  89
  90
  91
  92
  93
  94
  95
  96
  97
  98
  99
 100
 101
 102
 103
 104
 105
 106
 107
 108
 109
 110
 111
 112
 113
 114
 115
 116
 117
 118
 119
 120
 121
 122
 123
 124
 125
 126
 127
 128
 129
 130
 131
 132
 133
 134
 135
 136
 137
 138
 139
 140
 141
 142
 143
 144
 145
 146
 147
 148
 149
 150
 151
 152
 153
 154
 155
 156
 157
 158
 159
 160
 161
 162
 163
 164
 165
 166
 167
 168
 169
 170
 171
 172
 173
 174
 175
 176
 177
 178
 179
 180
 181
 182
 183
 184
 185
 186
 187
 188
 189
 190
 191
 192
 193
 194
 195
 196
 197
 198
 199
 200
 201
 202
 203
 204
 205
 206
 207
 208
 209
 210
 211
 212
 213
 214
 215
 216
 217
 218
 219
 220
 221
 222
 223
 224
 225
 226
 227
 228
 229
 230
 231
 232
 233
 234
 235
 236
 237
 238
 239
 240
 241
 242
 243
 244
 245
 246
 247
 248
 249
 250
 251
 252
 253
 254
 255
 256
 257
 258
 259
 260
 261
 262
 263
 264
 265
 266
 267
 268
 269
 270
 271
 272
 273
 274
 275
 276
 277
 278
 279
 280
 281
 282
 283
 284
 285
 286
 287
 288
 289
 290
 291
 292
 293
 294
 295
 296
 297
 298
 299
 300
 301
 302
 303
 304
 305
 306
 307
 308
 309
 310
 311
 312
 313
 314
 315
 316
 317
 318
 319
 320
 321
 322
 323
 324
 325
 326
 327
 328
 329
 330
 331
 332
 333
 334
 335
 336
 337
 338
 339
 340
 341
 342
 343
 344
 345
 346
 347
 348
 349
 350
 351
 352
 353
 354
 355
 356
 357
 358
 359
 360
 361
 362
 363
 364
 365
 366
 367
 368
 369
 370
 371
 372
 373
 374
 375
 376
 377
 378
 379
 380
 381
 382
 383
 384
 385
 386
 387
 388
 389
 390
 391
 392
 393
 394
 395
 396
 397
 398
 399
 400
 401
 402
 403
 404
 405
 406
 407
 408
 409
 410
 411
 412
 413
 414
 415
 416
 417
 418
 419
 420
 421
 422
 423
 424
 425
 426
 427
 428
 429
 430
 431
 432
 433
 434
 435
 436
 437
 438
 439
 440
 441
 442
 443
 444
 445
 446
 447
 448
 449
 450
 451
 452
 453
 454
 455
 456
 457
 458
 459
 460
 461
 462
 463
 464
 465
 466
 467
 468
 469
 470
 471
 472
 473
 474
 475
 476
 477
 478
 479
 480
 481
 482
 483
 484
 485
 486
 487
 488
 489
 490
 491
 492
 493
 494
 495
 496
 497
 498
 499
 500
 501
 502
 503
 504
 505
 506
 507
 508
 509
 510
 511
 512
 513
 514
 515
 516
 517
 518
 519
 520
 521
 522
 523
 524
 525
 526
 527
 528
 529
 530
 531
 532
 533
 534
 535
 536
 537
 538
 539
 540
 541
 542
 543
 544
 545
 546
 547
 548
 549
 550
 551
 552
 553
 554
 555
 556
 557
 558
 559
 560
 561
 562
 563
 564
 565
 566
 567
 568
 569
 570
 571
 572
 573
 574
 575
 576
 577
 578
 579
 580
 581
 582
 583
 584
 585
 586
 587
 588
 589
 590
 591
 592
 593
 594
 595
 596
 597
 598
 599
 600
 601
 602
 603
 604
 605
 606
 607
 608
 609
 610
 611
 612
 613
 614
 615
 616
 617
 618
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
class Viewer:
    """Vispy 3D viewer.

    Parameters
    ----------
    picking :   bool, default = False
                If `True`, allow selecting neurons by shift-clicking on
                neurons and placing a 3D cursor via control-click (for OSX:
                command-click).
    **kwargs
              Keyword arguments passed to `vispy.scene.SceneCanvas`.

    Attributes
    ----------
    picking :       bool,
                    Set to `True` to allow picking via shift-clicking.
    selected :      np.array
                    List of currently selected neurons. Can also be used to
                    set the selection.
    show_legend :   bool
                    Set to `True` or press `L` to show legend. This may
                    impact performance.
    legend_font_size : int
                    Font size for legend.

    Examples
    --------
    This viewer is what [`navis.plot3d`][] uses when `backend='vispy'`.
    Instead of [`navis.plot3d`][] we can interact with the viewer directly:

    >>> # Open a 3D viewer
    >>> import navis
    >>> v = navis.Viewer()
    >>> # Close the 3D viewer
    >>> v.close()

    You can change the background color from the start or on-the-go:

    >>> # Set background to green
    >>> v = navis.Viewer(bgcolor='green')
    >>> # Set background back to white
    >>> v.set_bgcolor((1, 1, 1))
    >>> # Alternative to v.close():
    >>> navis.close3d()

    """

    def __init__(self, picking=False, **kwargs):
        """Initialize the vispy 3D viewer.

        Sets up the canvas, 3D view, overlay and keyboard/mouse handlers.
        In headless mode (``config.headless``) no vispy objects are created.
        """
        # `scene` is falsy if the vispy import failed upstream
        if not scene:
            raise ModuleNotFoundError(
                '`navis.Viewer` requires the `vispy` package to '
                'be installed:\n  pip3 install vispy'
                )
        # Update some defaults as necessary
        defaults = dict(keys=None,
                        show=True,
                        title='Vispy Viewer',
                        bgcolor='black')
        defaults.update(kwargs)

        # If we're running in headless mode (primarily for tests on CI) we will
        # simply not initialize the vispy objects. Not ideal but it turns
        # out to be very annoying to correctly setup on Github Actions.
        if getattr(config, 'headless', False):
            return

        # Set border rim -> this depends on how the framework (e.g. QT5)
        # renders the window
        self._rim_bot = 15
        self._rim_top = 20
        self._rim_left = 10
        self._rim_right = 10

        # Generate canvas
        self.canvas = scene.SceneCanvas(**defaults)

        """
        from PyQt5.QtWidgets import QPushButton

        # Create canvas
        button = QPushButton('PyQt5 button', self.canvas.native)
        button.move(10, 10)
        self.canvas.show()
        """

        # Add and setup 3d view
        self.view3d = self.canvas.central_widget.add_view()
        self.camera3d = scene.ArcballCamera()
        self.view3d.camera = self.camera3d

        # Add permanent overlays (shortcut help, FPS, data text)
        self.overlay = self._draw_overlay()

        # Stash back-references on the canvas so event handlers can reach us
        self.canvas.unfreeze()
        self.canvas._overlay = self.overlay
        self.canvas._view3d = self.view3d
        self.canvas._wrapper = self
        self.canvas.freeze()

        # Add picking functionality (the setter connects the mouse handler)
        if picking:
            self.picking = True
        else:
            self.picking = False

        # Set cursor_pos to None
        self.cursor_pos = None

        # Add keyboard shortcuts
        self.canvas.connect(on_key_press)

        # Add resize control to keep overlay in position
        self.canvas.connect(on_resize)

        # Legend settings
        self.__show_legend = False
        self.__selected = np.array([], dtype='object')
        self._cycle_index = -1
        self.__legend_font_size = 7

        # Color to use when selecting neurons
        self.highlight_color = (1, .9, .6)

        # Keep track of initial camera position (for reset)
        self._camera_default = self.view3d.camera.get_state()

        # Cycle mode can be 'hide' or 'alpha'
        self._cycle_mode = 'alpha'

        # Cursors
        self._cursor = None
        self._picking_radius = 20

        # Other stuff
        self._show_bounds = False
        self._show_axes = False

    def _draw_overlay(self):
        """Create the 2D overlay with shortcut help, FPS and data text.

        Returns
        -------
        vispy ViewBox
                The overlay widget holding all permanent text visuals.

        """
        overlay = scene.widgets.ViewBox(parent=self.canvas.scene)
        self.view3d.add_widget(overlay)

        # Text color depends on background color: pick black on light
        # backgrounds and white on dark ones
        v = self.canvas.bgcolor.hsv[2]
        text_color = colorsys.hsv_to_rgb(0, 0, 1 - v)

        # Keyboard shortcuts
        self._key_shortcuts = {'O': 'toggle overlay',
                               'L': 'toggle legend',
                               'P': 'toggle picking',
                               'Q/W': 'cycle neurons',
                               'U': 'unhide all',
                               'B': 'bounding box',
                               'F': 'show/hide FPS',
                               '1': 'XY',
                               '2': 'XZ',
                               '3': 'YZ'}

        shorts_text = 'SHORTCUTS: ' + ' | '.join([f"<{k}> {v}" for k, v in self._key_shortcuts.items()])
        self._shortcuts = scene.visuals.Text(shorts_text,
                                             pos=(self._rim_left,
                                                  overlay.size[1] - self._rim_bot),
                                             anchor_x='left',
                                             anchor_y='bottom',
                                             name='permanent',
                                             method='gpu',
                                             parent=overlay,
                                             color=text_color,
                                             font_size=6)

        # FPS (hidden at start)
        self._fps_text = scene.visuals.Text('FPS',
                                            pos=(overlay.size[0] / 2,
                                                 self._rim_top),
                                            anchor_x='center',
                                            anchor_y='top',
                                            name='permanent',
                                            method='gpu',
                                            parent=overlay,
                                            color=(0, 0, 0), font_size=6)
        self._fps_text.visible = False

        # Picking shortcuts (hidden at start)
        self._picking_shortcuts = {'LMB @legend': 'show/hide neuron',
                                   'SHIFT+LMB @neuron': 'select neuron',
                                   'D': 'deselect all',
                                   'H': 'hide selected',
                                   'C': 'url to cursor'}
        # Add platform-specific modifiers
        if platform.system() == 'darwin':
            self._picking_shortcuts['CMD+LMB'] = 'set cursor'
        else:
            self._picking_shortcuts['CTRL+LMB'] = 'set cursor'

        # BUGFIX: this string was missing its `f` prefix and therefore
        # rendered the literal "<{k}> {v}" placeholders instead of the
        # actual shortcuts (compare the SHORTCUTS text above)
        shorts_text = 'PICKING: ' + ' | '.join([f'<{k}> {v}' for k, v in self._picking_shortcuts.items()])
        self._picking_text = scene.visuals.Text(shorts_text,
                                                pos=(self._rim_left,
                                                     overlay.size[1] - self._rim_bot - 10),
                                                anchor_x='left',
                                                anchor_y='bottom',
                                                name='permanent',
                                                method='gpu',
                                                parent=overlay,
                                                color=text_color,
                                                font_size=6)
        self._picking_text.visible = False

        # Text box in top right to display arbitrary data
        self._data_text = scene.visuals.Text('',
                                             pos=(overlay.size[0] - self._rim_right,
                                                  self._rim_top),
                                             anchor_x='right',
                                             anchor_y='top',
                                             name='permanent',
                                             method='gpu',
                                             parent=overlay,
                                             color=text_color,
                                             font_size=6)

        return overlay

    @property
    def size(self):
        """Width and height of the canvas."""
        return self.canvas.size

    @size.setter
    def size(self, new_size):
        # Forward straight to the underlying vispy canvas
        self.canvas.size = new_size

    @property
    def show_legend(self):
        """Set to `True` to show the neuron legend."""
        return self.__show_legend

    @show_legend.setter
    def show_legend(self, v):
        if not isinstance(v, bool):
            raise TypeError(f'Need boolean, got "{type(v)}"')

        # Only redraw when the flag actually flips
        if v == self.show_legend:
            return
        self.__show_legend = v
        self.update_legend()

    @property
    def legend_font_size(self):
        """Font size used for the legend labels."""
        return self.__legend_font_size

    @legend_font_size.setter
    def legend_font_size(self, val):
        self.__legend_font_size = val
        # Redraw only if the legend is actually on screen
        if self.show_legend:
            self.update_legend()

    @property
    def picking(self):
        """Set to `True` to allow picking."""
        return self.__picking

    def toggle_picking(self):
        """Flip picking on/off and show/hide the picking help text."""
        enable = not self.picking
        self.picking = enable
        self._picking_text.visible = enable

    @picking.setter
    def picking(self, v):
        if not isinstance(v, bool):
            raise TypeError(f'Need bool, got {type(v)}')

        self.__picking = v

        # (Dis)connect the mouse handler to match the new state
        if v:
            self.canvas.connect(on_mouse_press)
        else:
            self.canvas.events.mouse_press.disconnect(on_mouse_press)

    def _render_fb(self, crop=None):
        """Render the picking framebuffer.

        Parameters
        ----------
        crop :  tuple, optional
                (x, y, width, height) region to render. Defaults to the
                full canvas scaled by the pixel scale.

        Returns
        -------
        The rendered picking framebuffer.

        """
        if not crop:
            crop = (0, 0,
                    self.canvas.size[0] * self.canvas.pixel_scale,
                    self.canvas.size[1] * self.canvas.pixel_scale)

        # We have to temporarily deactivate the overlay and view3d
        # otherwise we won't be able to see what's on the 3D or might
        # see holes in the framebuffer
        self.view3d.interactive = False
        self.overlay.interactive = False
        try:
            return self.canvas._render_picking(crop=crop)
        finally:
            # Restore interactivity even if rendering raised - previously
            # an exception here would leave both widgets non-interactive
            self.view3d.interactive = True
            self.overlay.interactive = True

    @property
    def visible(self):
        """List IDs of currently visible neurons."""
        # Fetch the neuron mapping once - it is rebuilt on each access
        neurons = self.neurons
        return [uid for uid, vis in neurons.items() if vis[0].visible]

    @property
    def invisible(self):
        """List IDs of currently invisible neurons."""
        neurons = self.neurons  # grab this only once to speed things up
        return [s for s in neurons if not neurons[s][0].visible]

    @property
    def pinned(self):
        """List IDs of currently pinned neurons."""
        # Fetch the neuron mapping once - it is rebuilt on each access
        neurons = self.neurons
        return [uid for uid, vis in neurons.items()
                if getattr(vis[0], 'pinned', False)]

    @property
    def selected(self):
        """Return IDs of or set selected neurons."""
        return self.__selected

    @selected.setter
    def selected(self, val):
        # Normalize to an object array so IDs of mixed types survive
        n = np.asarray(val).astype('object')

        neurons = self.neurons  # grab once to speed things up
        logger.debug(f'{len(n)} neurons selected ({len(self.selected)} previously)')
        # First un-highlight neurons no more selected by restoring the
        # color stashed on the visual when it was highlighted
        for s in [s for s in self.__selected if s not in set(n)]:
            for v in neurons[s]:
                if isinstance(v, scene.visuals.Mesh):
                    v.color = v._stored_color
                else:
                    v.set_data(color=v._stored_color)

        # Highlight new additions
        for s in n:
            if s not in self.__selected:
                for v in neurons[s]:
                    # Keep track of old colour so deselection can restore it
                    v.unfreeze()
                    v._stored_color = v.color
                    v.freeze()
                    if isinstance(v, scene.visuals.Mesh):
                        v.color = self.highlight_color
                    else:
                        v.set_data(color=self.highlight_color)

        self.__selected = n

        # Update legend
        if self.show_legend:
            self.update_legend()

        # Update data text
        # Currently only the development version of vispy supports escape
        # character (e.g. \n)
        t = '| '.join([f'{neurons[s][0]._name} - #{s}' for s in self.__selected])
        self._data_text.text = t

    @property
    def visuals(self):
        """List of all 3D visuals on this canvas."""
        children = self.view3d.children[0].children
        return [c for c in children if isinstance(c, scene.visuals.VisualNode)]

    @property
    def bounds(self):
        """Bounds of all currently visuals (visible and invisible).

        Returns
        -------
        (3, 2) array of per-axis (min, max), or None if no visual on the
        canvas exposes `_bounds`.

        """
        bounds = []
        for vis in self.visuals:
            # Skip the bounding box itself
            if getattr(vis, '_object_type', '') == 'boundingbox':
                continue

            try:
                bounds.append(vis._bounds)
            except Exception:
                # Best effort: not every visual carries `_bounds`.
                # Previously caught BaseException, which would also
                # swallow KeyboardInterrupt/SystemExit.
                pass

        if not bounds:
            return None

        # Stack to (3, 2, N): axis x (min, max) x visual
        bounds = np.dstack(bounds)

        mn = bounds[:, 0, :].min(axis=1)
        mx = bounds[:, 1, :].max(axis=1)

        return np.vstack((mn, mx)).T

    @property
    def _object_ids(self):
        """All object IDs on this canvas in order of addition."""
        obj_ids = [getattr(v, '_object_id') for v in self.visuals]
        # Order-preserving dedupe: O(n) instead of the previous
        # sorted(set(...), key=list.index) which was O(n^2)
        seen = set()
        ordered = []
        for oid in obj_ids:
            if oid not in seen:
                seen.add(oid)
                ordered.append(oid)
        return ordered

    @property
    def objects(self):
        """Ordered dictionary {uuid->[visuals]} of all objects in order of addition."""
        visuals = self.visuals  # fetch once to avoid repeated traversal
        mapping = OrderedDict()
        for obj_id in self._object_ids:
            mapping[obj_id] = [v for v in visuals
                               if getattr(v, '_object_id') == obj_id]
        return mapping

    @property
    def neurons(self):
        """Return visible and invisible neuron visuals currently on the canvas.

        Returns
        -------
        OrderedDict
                    `{id: [neurites, soma]}`

        """
        # Collect neuron objects (neurites + somata)
        visuals = self.visuals  # Get this only once to speed things up
        neuron_vis = [v for v in visuals
                      if 'neuron' in getattr(v, '_object_type', '')]

        # Unique neuron IDs
        ids = set(v._id for v in neuron_vis)

        # Group visuals (somata and neurites) by their neuron ID
        grouped = OrderedDict()
        for nid in ids:
            grouped[nid] = [v for v in visuals if getattr(v, '_id') == nid]
        return grouped

    @property
    def _neuron_obj(self):
        """Return neurons by their object id."""
        # Collect neuron visuals only
        neuron_vis = [v for v in self.visuals
                      if 'neuron' in getattr(v, '_object_type', '')]

        # Unique object IDs
        obj_ids = {v._object_id for v in neuron_vis}

        # Map each object ID to its visuals
        return {oid: [v for v in neuron_vis if v._object_id == oid]
                for oid in obj_ids}

    def clear_legend(self):
        """Clear legend."""
        # Iterate over a snapshot since we detach children as we go;
        # keep anything marked 'permanent' (e.g. the title)
        for child in list(self.overlay.children):
            if isinstance(child, scene.visuals.Text) and child.name != 'permanent':
                child.parent = None

    def clear(self):
        """Clear canvas."""
        # Nothing to do in headless mode
        if getattr(config, 'headless', False):
            return

        # Detach every visual from the scene
        for vis in self.visuals:
            vis.parent = None

        # `remove_bounds` resets the flag to False, but we want the
        # user's current setting to survive the clear
        previous = self.show_bounds

        self.remove_bounds()
        self.clear_legend()

        self.show_bounds = previous

    def remove(self, to_remove):
        """Remove given neurons/visuals from canvas.

        Parameters
        ----------
        to_remove : vispy visual(s) and/or neuron ID(s)
                    Visuals are detached directly; anything else is
                    interpreted as neuron ID(s).

        """
        to_remove = utils.make_iterable(to_remove)

        neurons = self.neurons  # grab this only once to speed things up
        for vis in to_remove:
            if isinstance(vis, scene.visuals.VisualNode):
                vis.parent = None
            else:
                # BUGFIX: previously evaluated the entire `to_remove` list
                # on every iteration (failing for mixed visual/ID input);
                # evaluate only the current entry
                uuids = utils.eval_id(vis)
                for u in uuids:
                    for v in neurons.get(u, []):
                        v.parent = None

        if self.show_bounds:
            self.update_bounds()

    def pop(self, N=1):
        """Remove the most recently added N visuals."""
        newest = list(self.objects.values())[-N:]
        for vis in newest:
            self.remove(vis)

    @property
    def show_bounds(self):
        """Set to `True` to show bounding box."""
        return self._show_bounds

    def toggle_bounds(self):
        """Flip the bounding-box display on/off."""
        self.show_bounds = not self.show_bounds

    @show_bounds.setter
    def show_bounds(self, v):
        if not isinstance(v, bool):
            raise TypeError(f'Need bool, got {type(v)}')

        self._show_bounds = v

        # Create or tear down the box to match the new state
        if v:
            self.update_bounds()
        else:
            self.remove_bounds()

    def remove_bounds(self):
        """Remove bounding box visual."""
        self._show_bounds = False
        # Collect first, then detach, to avoid mutating while iterating
        boxes = [v for v in self.visuals
                 if getattr(v, '_object_type', '') == 'boundingbox']
        for box in boxes:
            self.remove(box)

    @block_canvas
    def update_bounds(self, color='w', width=1):
        """Update bounding box visual.

        Parameters
        ----------
        color :     str | tuple
                    Line color of the bounding box.
        width :     int
                    Line width (only effective with method 'agg').
        """
        # Remove any existing visual
        self.remove_bounds()

        bounds = self.bounds
        self._show_bounds = True

        # Skip if no visual on canvas
        if isinstance(bounds, type(None)):
            return

        # Create box visual; scale up 10% so it doesn't touch the data
        dims = bounds[:, 1] - bounds[:, 0]
        center = bounds.mean(axis=1)
        box = tm.primitives.Box(extents=dims).apply_scale(1.1)

        # Recenter vertices
        vertices = np.array(box.vertices) + center
        # Edge list connecting the 8 corners of the box
        connect = np.array([[0, 1], [0, 2], [0, 4],
                            [1, 3], [1, 5],
                            [2, 3], [2, 6],
                            [3, 7],
                            [4, 5], [4, 6],
                            [5, 7],
                            [6, 7]])

        box = scene.visuals.Line(pos=vertices,
                                 color=mcl.to_rgb(color),
                                 # Can only be used with method 'agg'
                                 width=width,
                                 connect=connect,
                                 antialias=True,
                                 name='BoundingBox',
                                 method='gl')

        # Add custom attributes so we can find/remove the box later
        box.unfreeze()
        box._object_type = 'boundingbox'
        box._object_id = uuid.uuid4()
        box.freeze()

        self.view3d.add(box)

    @block_canvas
    def update_legend(self):
        """Update legend.

        Synchronizes legend text visuals with the neurons currently on the
        canvas: hides/shows them, removes stale labels, creates missing
        ones and finally positions/colors all labels.
        """
        # Get existing labels (keyed by their object ID)
        labels = {l._object_id: l for l in self.overlay.children if getattr(l, '_object_id', None)}

        # If legend is not meant to be shown, make sure everything is hidden and return
        if not self.show_legend:
            for v in labels.values():
                if v.visible:
                    v.visible = False
            return
        else:
            for v in labels.values():
                if not v.visible:
                    v.visible = True

        # Labels to be removed (their neuron is gone from the canvas)
        neuron_obj = self._neuron_obj  # grab only once to speed things up
        to_remove = [s for s in labels if s not in neuron_obj]
        for s in to_remove:
            labels[s].parent = None

        # Generate new labels
        to_add = [s for s in neuron_obj if s not in labels]
        for s in to_add:
            # Fallback is name or in lieu of that the object's type
            lbl = getattr(neuron_obj[s][0], '_name',
                          str(type(neuron_obj[s][0])))
            # See if we find a "label" property
            if hasattr(neuron_obj[s][0], '_object'):
                if hasattr(neuron_obj[s][0]._object, 'label'):
                    lbl = neuron_obj[s][0]._object.label

            txt = scene.visuals.Text(lbl,
                                     anchor_x='left',
                                     anchor_y='top',
                                     parent=self.overlay,
                                     method='gpu',
                                     font_size=self.legend_font_size)
            # Interactive so the label can be clicked to toggle the neuron
            txt.interactive = True
            txt.unfreeze()
            txt._object_id = s
            txt._id = neuron_obj[s][0]._id
            txt.freeze()

        # Position and color labels (re-fetch to include new ones)
        labels = {l._object_id: l for l in self.overlay.children if getattr(
            l, '_object_id', None)}
        for i, s in enumerate(sorted(neuron_obj)):
            # Grey out labels of hidden neurons
            if neuron_obj[s][0].visible:
                color = neuron_obj[s][0].color
            else:
                color = (.3, .3, .3)

            # Vertical spacing scales with font size (7 pt -> 10 px)
            offset = 10 * (self.legend_font_size / 7)

            labels[s].pos = (10, offset * (i + 1))
            labels[s].color = color
            labels[s].font_size = self.legend_font_size

    def toggle_overlay(self):
        """Toggle legend on and off."""
        overlay = self.overlay
        # Note: intentionally `is False` rather than `not`, preserving
        # the original strict-boolean toggle semantics
        overlay.visible = overlay.visible is False

    def center_camera(self):
        """Center camera on visuals."""
        visuals = self.visuals  # Get this only once to speed things up
        if not visuals:
            return

        # Collect (min, max) per axis across all visuals
        ranges = []
        for axis in range(3):
            extents = np.array([v.bounds(axis) for v in visuals]).flatten()
            ranges.append((extents.min(), extents.max()))

        self.camera3d.set_range(*ranges)

    def add(self, x, center=True, clear=False, combine=False, **kwargs):
        """Add objects to canvas.

        Parameters
        ----------
        x :         Neuron/List | Dotprops | Volumes | Points | vispy Visuals
                    Object(s) to add to the canvas.
        center :    bool, optional
                    If True, re-center camera to all objects on canvas.
        clear :     bool, optional
                    If True, clear canvas before adding new objects.
        combine :   bool, optional
                    If True, will try combining similar objects into a single
                    visual. This reduces the number of shader programs and
                    can greatly increase the frame rate. Downside: objects can
                    no longer be individually manipulated.
        **kwargs
                    Keyword arguments passed when generating visuals. See
                    [`navis.plot3d`][] for options.

        Returns
        -------
        None

        Raises
        ------
        ValueError
                    If no visuals could be generated from `x`.

        """
        from .visuals import neuron2vispy, volume2vispy, points2vispy, combine_visuals
        from ..settings import VispySettings

        # Merge user-provided kwargs into the default vispy settings
        settings = VispySettings().update_settings(**kwargs)

        # Sort the input into the four categories of objects we can plot
        (neurons, volumes, points, visuals) = utils.parse_objects(x)

        if neurons:
            visuals += neuron2vispy(neurons, settings)
        if volumes:
            visuals += volume2vispy(volumes, settings)
        if points:
            visuals += points2vispy(points, **settings.scatter_kws)

        if not visuals:
            raise ValueError('No visuals created.')

        if clear:
            self.clear()

        if combine:
            visuals = combine_visuals(visuals, settings.name)

        # If we're running in headless mode (primarily for tests on CI) we will
        # simply not add the objects. Not ideal but it turns out to be very
        # annoying to correctly setup on Github Actions.
        if getattr(config, 'headless', False):
            return

        for v in visuals:
            # Give visuals an _object_id if they don't already have one
            # (vispy visuals are frozen by default, hence unfreeze/freeze)
            if not hasattr(v, '_object_id'):
                v.unfreeze()
                v._object_id = uuid.uuid4()
                v.freeze()
            self.view3d.add(v)

        if center:
            self.center_camera()

        if self.show_legend:
            self.update_legend()

        if self.show_bounds:
            self.update_bounds()

    def show(self):
        """Show viewer."""
        # This is for e.g. headless testing
        if getattr(config, 'headless', False):
            logger.info("Viewer widget not shown - navis running in headless mode. ")
            return

        self.canvas.show()

        # To actually show the widget, we need to return the canvas
        # NOTE(review): in Jupyter the canvas is displayed explicitly via
        # IPython's `display` rather than returned - presumably so `show()`
        # behaves the same in terminal and notebook contexts.
        if utils.is_jupyter():
            from IPython.display import display
            display(self.canvas)

    def close(self):
        """Close viewer."""
        # Skip if this is headless mode
        if getattr(config, 'headless', False):
            return

        # Clear first to free all visuals
        self.clear()
        # If this viewer is registered as the global primary viewer,
        # deregister it so a new one can take its place
        if self == getattr(config, 'primary_viewer', None):
            del config.primary_viewer
        self.canvas.close()

    def hide_neurons(self, n):
        """Hide given neuron(s); pinned neurons are left untouched."""
        neurons = self.neurons  # single property lookup

        for nid in utils.eval_id(n):
            for vis in neurons[nid]:
                # Pinned neurons silently ignore visibility changes
                if getattr(vis, 'pinned', False):
                    continue
                if vis.visible:
                    vis.visible = False

        self.update_legend()

    def hide_selected(self):
        """Hide currently selected neuron(s)."""
        # Simple delegation to hide_neurons with the current selection
        self.hide_neurons(self.selected)

    def unhide_neurons(self, n=None, check_alpha=False):
        """Unhide given neuron(s).

        Parameters
        ----------
        n :             optional
                        Neuron(s) to unhide. If None, all neurons are unhidden.
        check_alpha :   bool, optional
                        If True, also force the neurons' colors to be fully
                        opaque (alpha = 1).

        """
        neurons = self.neurons  # grab once to speed things up
        # Idiomatic None check instead of `isinstance(n, type(None))`
        if n is not None:
            ids = utils.eval_id(n)
        else:
            ids = list(neurons.keys())

        for s in ids:
            for v in neurons[s]:
                # Pinned neurons silently ignore visibility changes
                if getattr(v, 'pinned', False):
                    continue
                if not v.visible:
                    v.visible = True
            if check_alpha:
                # Make sure color has an alpha channel
                c = to_rgba(neurons[s][0].color)
                # Force alpha to 1 if it isn't already
                if c.ndim == 1 and c[3] != 1:
                    c[3] = 1
                    self.set_colors({s: c})
                elif c.ndim == 2 and np.any(c[:, 3] != 1):
                    c[:, 3] = 1
                    self.set_colors({s: c})

        self.update_legend()

    def pin_neurons(self, n):
        """Pin given neuron(s).

        Changes to the color or visibility of pinned neurons are silently
        ignored. You can use this to keep specific neurons visible while
        cycling through the rest - useful for comparisons.

        """
        neurons = self.neurons  # single property lookup

        for nid in utils.eval_id(n):
            for vis in neurons[nid]:
                # Visuals are frozen by default - unfreeze briefly to
                # attach the custom `pinned` flag
                vis.unfreeze()
                vis.pinned = True
                vis.freeze()

    def unpin_neurons(self, n=None):
        """Unpin given neuron(s).

        Parameters
        ----------
        n :     optional
                Neuron(s) to unpin. If None, all neurons are unpinned.

        """
        neurons = self.neurons  # grab once to speed things up
        # Idiomatic None check instead of `isinstance(n, type(None))`
        if n is not None:
            ids = utils.eval_id(n)
        else:
            ids = list(neurons.keys())

        for s in ids:
            for v in neurons[s]:
                # Unfreeze briefly to modify the custom `pinned` flag
                v.unfreeze()
                v.pinned = False
                v.freeze()

    def toggle_neurons(self, n):
        """Toggle neuron(s) visibility.

        Parameters
        ----------
        n :     UUIDs or neuron IDs
                Neurons whose visibility to flip.

        """
        n = utils.make_iterable(n)

        # If we already got object UUIDs we can look visuals up directly;
        # otherwise translate input to neuron IDs first
        if all(isinstance(u, uuid.UUID) for u in n):
            obj = self._neuron_obj
        else:
            n = utils.eval_id(n)
            obj = self.neurons

        for s in n:
            for v in obj[s]:
                v.visible = not v.visible

        self.update_legend()

    def toggle_select(self, n):
        """Toggle selected of given neuron."""
        skids = utils.eval_id(n)

        neurons = self.neurons  # grab once to speed things up

        for s in skids:
            if self.selected != s:
                # Select: remember the current color, then apply highlight.
                # NOTE(review): `_selected_color` is overwritten for every
                # visual in the loop, so only the last visual's color is
                # remembered - deselecting applies that single color to all
                # visuals. Confirm this is intended for multi-visual neurons.
                self.selected = s
                for v in neurons[s]:
                    self._selected_color = v.color
                    v.set_data(color=self.highlight_color)
            else:
                # Deselect: restore the remembered color
                self.selected = None
                for v in neurons[s]:
                    v.set_data(color=self._selected_color)

        self.update_legend()

    @block_all
    def set_colors(self, c, include_connectors=False):
        """Set neuron color.

        Parameters
        ----------
        c :      tuple | dict
                 RGB color(s) to apply. Values must be 0-1. Accepted:
                   1. Tuple of single color. Applied to all visible neurons.
                   2. Dictionary mapping skeleton IDs to colors.
        include_connectors : bool, optional
                 If False (default), connector visuals keep their color.

        Raises
        ------
        TypeError
                 If `c` is neither a color-like sequence nor a dict.

        """
        neurons = self.neurons  # grab once to speed things up
        if isinstance(c, (tuple, list, np.ndarray, str)):
            cmap = {s: c for s in neurons}
        elif isinstance(c, dict):
            cmap = c
        else:
            raise TypeError(f'Unable to use colors of type "{type(c)}"')

        for n in neurons:
            if n in cmap:
                for v in neurons[n]:
                    # Pinned neurons silently ignore color changes
                    if getattr(v, 'pinned', False):
                        continue
                    if v._neuron_part == 'connectors' and not include_connectors:
                        continue
                    # Convert to RGBA once and reuse (previously recomputed
                    # in the non-Mesh branch)
                    new_c = mcl.to_rgba(cmap[n])
                    if isinstance(v, scene.visuals.Mesh):
                        v.color = new_c
                    else:
                        v.set_data(color=new_c)

        if self.show_legend:
            self.update_legend()

    @block_all
    def set_alpha(self, a, include_connectors=True):
        """Set neuron color alphas.

        Parameters
        ----------
        a :      tuple | dict
                 Alpha value(s) to apply. Values must be 0-1. Accepted:
                   1. Tuple of single alpha. Applied to all visible neurons.
                   2. Dictionary mapping skeleton IDs to alpha.
        include_connectors : bool, optional
                 If True (default), connector visuals are also updated.

        """
        neurons = self.neurons  # grab once to speed things up
        if isinstance(a, (tuple, list, np.ndarray, str)):
            amap = {s: a for s in neurons}
        elif isinstance(a, dict):
            amap = a
        else:
            # NOTE(review): message says "colors" although this is the alpha
            # setter - kept as-is since it is a runtime string
            raise TypeError(f'Unable to use colors of type "{type(a)}"')

        for n in neurons:
            if n in amap:
                for v in neurons[n]:
                    # Pinned neurons silently ignore changes
                    if getattr(v, 'pinned', False):
                        continue
                    if v._neuron_part == 'connectors' and not include_connectors:
                        continue
                    # Color may be a vispy Color object (-> .rgba) or a plain
                    # sequence - fall back to the raw value
                    try:
                        this_c = v.color.rgba
                    except BaseException:
                        this_c = v.color

                    this_c = np.asarray(this_c)

                    # For arrays of colors
                    if this_c.ndim == 2:
                        # If no alpha channel yet, add one
                        if this_c.shape[1] == 3:
                            this_c = np.insert(this_c,
                                               3,
                                               np.ones(this_c.shape[0]),
                                               axis=1)

                        # If already the correct alpha value
                        if np.all(this_c[:, 3] == amap[n]):
                            continue
                        else:
                            this_c[:, 3] = amap[n]
                    else:
                        # Single color: skip if alpha already matches
                        if len(this_c) == 4 and this_c[3] == amap[n]:
                            continue
                        else:
                            this_c = tuple([this_c[0], this_c[1], this_c[2], amap[n]])

                    # Meshes expose a `color` property; other visuals take
                    # colors via `set_data`
                    if isinstance(v, scene.visuals.Mesh):
                        v.color = this_c
                    else:
                        v.set_data(color=this_c)

        if self.show_legend:
            self.update_legend()

    def colorize(self, palette='hls', include_connectors=False):
        """Assign each neuron a distinct color from a seaborn palette."""
        neurons = self.neurons  # single property lookup
        palette_colors = sns.color_palette(palette, len(neurons))
        # Pair neuron IDs with palette colors in order of appearance
        cmap = dict(zip(neurons, palette_colors))

        self.set_colors(cmap, include_connectors=include_connectors)

    def set_bgcolor(self, c):
        """Set background color."""
        # There is no canvas to recolor when running headless
        if not getattr(config, 'headless', False):
            self.canvas.bgcolor = c

    def _cycle_neurons(self, increment):
        """Cycle through neurons.

        Parameters
        ----------
        increment : int
                    Step size and direction (e.g. +1 = next, -1 = previous).

        """
        self._cycle_index += increment

        # If mode is 'hide' cycle over all neurons
        neurons = self.neurons  # grab once to speed things up
        if self._cycle_mode == 'hide':
            to_cycle = neurons
        # If mode is 'alpha' ignore all hidden neurons
        elif self._cycle_mode == 'alpha':
            # Make sure to keep the order
            to_cycle = OrderedDict()
            for s in self.visible:
                to_cycle[s] = neurons[s]
        else:
            raise ValueError(f'Unknown cycle mode "{self._cycle_mode}".')

        # Wrap the index around at both ends
        if self._cycle_index < 0:
            self._cycle_index = len(to_cycle) - 1
        elif self._cycle_index > len(to_cycle) - 1:
            self._cycle_index = 0

        to_hide = [n for i, n in enumerate(to_cycle) if i != self._cycle_index]
        to_show = [list(to_cycle.keys())[self._cycle_index]]

        # Depending on background color, we have to use different alphas
        # (`v` is the brightness/value component of the background color)
        v = self.canvas.bgcolor.hsv[2]
        out_alpha = .05 + .2 * v

        if self._cycle_mode == 'hide':
            self.hide_neurons(to_hide)
            self.unhide_neurons(to_show)
        elif self._cycle_mode == 'alpha':
            # Get current colors
            new_amap = {}
            for n in to_cycle:
                this_c = np.asarray(to_cycle[n][0].color)

                # Extract the current alpha (1 if no alpha channel present)
                if this_c.ndim == 2:
                    if this_c.shape[1] == 4:
                        this_a = this_c[0, 3]
                    else:
                        this_a = 1
                else:
                    if this_c.shape[0] == 4:
                        this_a = this_c[3]
                    else:
                        this_a = 1

                # If neuron needs to be hidden, add to cmap
                if n in to_hide and this_a != out_alpha:
                    new_amap[n] = out_alpha
                elif n in to_show and this_a != 1:
                    new_amap[n] = 1
            self.set_alpha(new_amap)
        else:
            # Defensive only - unknown modes already raise above
            raise ValueError(f'Unknown cycle mode: "{self._cycle_mode}". Use '
                             '"hide" or "alpha"!')

        self.active_neuron = to_show

        # Generate names
        names = []
        for u in to_show:
            n = getattr(neurons[u][0], "name", "NA")
            # Append the ID unless it is an (unwieldy) UUID
            if not isinstance(u, uuid.UUID):
                n += f' ({u})'
            names.append(n)

        self._data_text.text = f'{"|".join(names)}' \
                               f' [{self._cycle_index + 1}/{len(neurons)}]'

    def _draw_fps(self, fps):
        """Callback for `canvas.measure_fps`."""
        self._fps_text.text = f'{fps:.2f} FPS'

    def _toggle_fps(self):
        """Switch FPS measurement on and off."""
        if not self._fps_text.visible:
            self.canvas.measure_fps(1, self._draw_fps)
            self._fps_text.visible = True
        else:
            self.canvas.measure_fps(1, None)
            self._fps_text.visible = False

    def _snap_cursor(self, pos, visual, open_browser=False):
        """Snap cursor to closest vertex of given visual.

        Parameters
        ----------
        pos :           tuple
                        (x, y) canvas coordinates (e.g. of a mouse click).
        visual :        scene.visuals.Line | scene.visuals.Mesh
                        Visual to snap to.
        open_browser :  bool, optional
                        Currently unused here.

        Raises
        ------
        TypeError
                        If `visual` is neither a Line nor a Mesh.

        """
        # Lazily create the cursor (an arrow) on first use
        if not getattr(self, '_cursor', None):
            self._cursor = scene.visuals.Arrow(pos=np.array([(0, 0, 0), (1000, 0, 0)]),
                                               color=(1, 0, 0, 1),
                                               arrow_color=(1, 0, 0, 1),
                                               arrow_size=10,
                                               arrows=np.array([[800, 0, 0, 1000, 0, 0]]))

        if not self._cursor.parent:
            self.add(self._cursor, center=False)

        # Get vertices for this visual
        if isinstance(visual, scene.visuals.Line):
            verts = visual.pos
        elif isinstance(visual, scene.visuals.Mesh):
            verts = visual.mesh_data.get_vertices()
        else:
            # Previously this fell through and raised a confusing NameError
            # on `verts` below
            raise TypeError(f'Unable to snap to visual of type "{type(visual)}"')

        # Map vertices to canvas
        tr = visual.get_transform(map_to='canvas')
        co_on_canvas = tr.map(verts)[:, [0, 1]]

        # Find the closest vertex to this mouse click pos
        tree = scipy.spatial.cKDTree(co_on_canvas)
        dist, ix = tree.query(pos)

        # Map canvas pos back to world coordinates
        self.cursor_pos = np.array(verts[ix])
        self.cursor_active_skeleton = getattr(visual, '_id', None)

        # Generate arrow coords: the arrow points from 10000 units away
        # (towards the camera center) at the snapped vertex
        vec_to_center = np.array(self.camera3d.center) - self.cursor_pos
        norm_to_center = vec_to_center / np.sqrt(np.sum(vec_to_center**2))
        start = self.cursor_pos - (norm_to_center * 10000)
        arrows = np.array([np.append(self.cursor_pos - (norm_to_center * 200),
                                     self.cursor_pos - (norm_to_center * 100))])

        self._cursor.set_data(pos=np.array([start, self.cursor_pos]),
                              arrows=arrows)

        logger.debug(f'World coordinates: {self.cursor_pos}')

    def screenshot(self, filename='screenshot.png', pixel_scale=2,
                   alpha=True, hide_overlay=True):
        """Save a screenshot of this viewer.

        Parameters
        ----------
        filename :      str, optional
                        Filename to save to.
        pixel_scale :   int, optional
                        Factor by which to scale canvas. Determines image
                        dimensions.
        alpha :         bool, optional
                        If True, will export transparent background.
        hide_overlay :  bool, optional
                        If True, will hide overlay for screenshot.

        """
        # Render the canvas off-screen ...
        m = self._screenshot(pixel_scale=pixel_scale,
                             alpha=alpha,
                             hide_overlay=hide_overlay)

        # ... and write the RGBA array out as a PNG
        im = png.from_array(m, mode='RGBA')
        im.save(filename)

    def _screenshot(self, pixel_scale=2, alpha=True, hide_overlay=True):
        """Return image array for screenshot."""
        if alpha:
            bgcolor = list(self.canvas.bgcolor.rgb) + [0]
        else:
            bgcolor = list(self.canvas.bgcolor.rgb)

        # region = (0, 0, self.canvas.size[0], self.canvas.size[1])
        size = tuple(np.array(self.canvas.size) * pixel_scale)

        if hide_overlay:
            prev_state = self.overlay.visible
            self.overlay.visible = False

        try:
            m = self.canvas.render(size=size, bgcolor=bgcolor)
        except BaseException:
            raise
        finally:
            if hide_overlay:
                self.overlay.visible = prev_state

        return m

    def visuals_at(self, pos):
        """List visuals at given canvas position.

        Parameters
        ----------
        pos :   tuple
                (x, y) canvas coordinates (e.g. from a mouse event).

        Returns
        -------
        list
                Visuals under the cursor, ordered by distance to `pos`.

        """
        # There appears to be some odd y offset - perhaps because of the
        # window's top bar? On OSX this is about 15px
        pos = (pos[0], pos[1] - 15)

        # Map mouse pos to framebuffer
        tr = self.canvas.transforms.get_transform(map_from='canvas',
                                                  map_to='framebuffer')
        pos = tr.map(pos)

        # Render framebuffer in picking mode
        # (crop is a square of side `_picking_radius` centered on the cursor)
        p = self._render_fb(crop=(pos[0] - self._picking_radius / 2,
                                  pos[1] - self._picking_radius / 2,
                                  self._picking_radius,
                                  self._picking_radius))

        logger.debug('Picking framebuffer:')
        logger.debug(p)

        # List visuals in order from distance to center: grow a square
        # window outwards from the center and record new IDs as they appear
        ids = []
        seen = set()
        center = (np.array(p.shape) / 2).astype(int)
        for i in range(self._picking_radius * self.canvas.pixel_scale):
            subr = p[center[0] - i: center[0] + i + 1,
                     center[1] - i: center[1] + i + 1]
            subr_ids = set(list(np.unique(subr)))
            ids.extend(list(subr_ids - seen))
            seen |= subr_ids
        # Translate picking IDs back to visuals; unknown IDs map to None
        visuals = [scene.visuals.VisualNode._visual_ids.get(x, None) for x in ids]

        return [v for v in visuals if v is not None]

    def set_view(self, view):
        """(Re-)set camera position.

        Parameters
        ----------
        view :      "XY" | "XZ" | "YZ" | Quaternion
                    Preset view or an explicit rotation quaternion.

        Raises
        ------
        TypeError
                    If `view` is neither a Quaternion nor a known preset.

        """
        if isinstance(view, Quaternion):
            q = view
        elif view == 'XY':
            q = Quaternion(w=0.707, x=0.707, y=0, z=0)
        elif view == 'XZ':
            q = Quaternion(w=1, x=0, y=0, z=0)
        elif view == 'YZ':
            q = Quaternion(w=.5, x=0.5, y=0.5, z=-.5)
        else:
            raise TypeError(f'Unable to set view from {type(view)}')

        self.camera3d._quaternion = q
        # This is necessary to force a redraw
        self.camera3d.set_range()

Bounds of all visuals currently on the canvas (visible and invisible).

from PyQt5.QtWidgets import QPushButton

button = QPushButton('PyQt5 button', self.canvas.native)
button.move(10, 10)
self.canvas.show()

List IDs of currently visible neurons.

Change legend's font size.

Return visible and invisible neuron visuals currently on the canvas.

RETURNS DESCRIPTION
OrderedDict

{id: [neurites, soma]}

Ordered dictionary {uuid->[visuals]} of all objects in order of addition.

Set to True to allow picking.

List IDs of currently pinned neurons.

Return IDs of or set selected neurons.

Set to True to show bounding box.

Set to True to hide neuron legend.

Size of canvas.

List IDs of currently visible neurons.

List of all 3D visuals on this canvas.

Add objects to canvas.

PARAMETER DESCRIPTION
x
    Object(s) to add to the canvas.

TYPE: Neuron/List | Dotprops | Volumes | Points | vispy Visuals

center
    If True, re-center camera to all objects on canvas.

TYPE: bool DEFAULT: True

clear
    If True, clear canvas before adding new objects.

TYPE: bool DEFAULT: False

combine
    If True, will try combining similar objects into a single
    visual. This reduces the number of shader programs and
    can greatly increase the frame rate. Downside: objects can
    no longer be individually manipulated.

TYPE: bool DEFAULT: False

**kwargs
    Keyword arguments passed when generating visuals. See
    [`navis.plot3d`][] for options.

DEFAULT: {}

RETURNS DESCRIPTION
None
Source code in navis/plotting/vispy/viewer.py
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
def add(self, x, center=True, clear=False, combine=False, **kwargs):
    """Add objects to canvas.

    Parameters
    ----------
    x :         Neuron/List | Dotprops | Volumes | Points | vispy Visuals
                Object(s) to add to the canvas.
    center :    bool, optional
                If True, re-center camera to all objects on canvas.
    clear :     bool, optional
                If True, clear canvas before adding new objects.
    combine :   bool, optional
                If True, will try combining similar objects into a single
                visual. This reduces the number of shader programs and
                can greatly increase the frame rate. Downside: objects can
                no longer be individually manipulated.
    **kwargs
                Keyword arguments passed when generating visuals. See
                [`navis.plot3d`][] for options.

    Returns
    -------
    None

    """
    from .visuals import neuron2vispy, volume2vispy, points2vispy, combine_visuals
    from ..settings import VispySettings

    settings = VispySettings().update_settings(**kwargs)

    (neurons, volumes, points, visuals) = utils.parse_objects(x)

    if neurons:
        visuals += neuron2vispy(neurons, settings)
    if volumes:
        visuals += volume2vispy(volumes, settings)
    if points:
        visuals += points2vispy(points, **settings.scatter_kws)

    if not visuals:
        raise ValueError('No visuals created.')

    if clear:
        self.clear()

    if combine:
        visuals = combine_visuals(visuals, settings.name)

    # If we're running in headless mode (primarily for tests on CI) we will
    # simply not add the objects. Not ideal but it turns out to be very
    # annoying to correctly setup on Github Actions.
    if getattr(config, 'headless', False):
        return

    for v in visuals:
        # Give visuals an _object_id if they don't already have one
        if not hasattr(v, '_object_id'):
            v.unfreeze()
            v._object_id = uuid.uuid4()
            v.freeze()
        self.view3d.add(v)

    if center:
        self.center_camera()

    if self.show_legend:
        self.update_legend()

    if self.show_bounds:
        self.update_bounds()

Center camera on visuals.

Source code in navis/plotting/vispy/viewer.py
718
719
720
721
722
723
724
725
726
727
728
729
730
def center_camera(self):
    """Center camera on visuals."""
    visuals = self.visuals  # Get this only once to speed things up
    if not visuals:
        return

    xbounds = np.array([v.bounds(0) for v in visuals]).flatten()
    ybounds = np.array([v.bounds(1) for v in visuals]).flatten()
    zbounds = np.array([v.bounds(2) for v in visuals]).flatten()

    self.camera3d.set_range((xbounds.min(), xbounds.max()),
                            (ybounds.min(), ybounds.max()),
                            (zbounds.min(), zbounds.max()))

Clear canvas.

Source code in navis/plotting/vispy/viewer.py
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
def clear(self):
    """Clear canvas."""
    # Skip if running in headless mode
    if getattr(config, 'headless', False):
        return

    for v in self.visuals:
        v.parent = None

    # `remove_bounds` set this to False but
    # here we want the current setting to persist
    show_bounds = self.show_bounds

    self.remove_bounds()
    self.clear_legend()

    self.show_bounds = show_bounds

Clear legend.

Source code in navis/plotting/vispy/viewer.py
534
535
536
537
538
def clear_legend(self):
    """Clear legend."""
    # Clear legend except for title
    for l in [l for l in self.overlay.children if isinstance(l, scene.visuals.Text) and l.name != 'permanent']:
        l.parent = None

Close viewer.

Source code in navis/plotting/vispy/viewer.py
817
818
819
820
821
822
823
824
825
826
827
def close(self):
    """Close viewer."""
    # Skip if this is headless mode
    if getattr(config, 'headless', False):
        return

    # Clear first to free all visuals
    self.clear()
    if self == getattr(config, 'primary_viewer', None):
        del config.primary_viewer
    self.canvas.close()

Colorize neurons using a seaborn color palette.

Source code in navis/plotting/vispy/viewer.py
1047
1048
1049
1050
1051
1052
1053
def colorize(self, palette='hls', include_connectors=False):
    """Colorize neurons using a seaborn color palette."""
    neurons = self.neurons  # grab once to speed things up
    colors = sns.color_palette(palette, len(neurons))
    cmap = {s: colors[i] for i, s in enumerate(neurons)}

    self.set_colors(cmap, include_connectors=include_connectors)

Hide given neuron(s).

Source code in navis/plotting/vispy/viewer.py
829
830
831
832
833
834
835
836
837
838
839
840
841
def hide_neurons(self, n):
    """Hide given neuron(s)."""
    ids = utils.eval_id(n)

    neurons = self.neurons   # grab once to speed things up
    for s in ids:
        for v in neurons[s]:
            if getattr(v, 'pinned', False):
                continue
            if v.visible:
                v.visible = False

    self.update_legend()

Hide currently selected neuron(s).

Source code in navis/plotting/vispy/viewer.py
843
844
845
def hide_selected(self):
    """Hide currently selected neuron(s)."""
    self.hide_neurons(self.selected)

Pin given neuron(s).

Changes to the color or visibility of pinned neurons are silently ignored. You can use this to keep specific neurons visible while cycling through the rest - useful for comparisons.

Source code in navis/plotting/vispy/viewer.py
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
def pin_neurons(self, n):
    """Pin given neuron(s).

    Changes to the color or visibility of pinned neurons are silently
    ignored. You can use this to keep specific neurons visible while
    cycling through the rest - useful for comparisons.

    """
    ids = utils.eval_id(n)

    neurons = self.neurons  # grab only once to speed things up

    for s in ids:
        for v in neurons[s]:
            v.unfreeze()
            v.pinned = True
            v.freeze()

Remove the most recently added N visuals.

Source code in navis/plotting/vispy/viewer.py
575
576
577
578
def pop(self, N=1):
    """Remove the most recently added N visuals."""
    for vis in list(self.objects.values())[-N:]:
        self.remove(vis)

Remove given neurons/visuals from canvas.

Source code in navis/plotting/vispy/viewer.py
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
def remove(self, to_remove):
    """Remove given neurons/visuals from canvas."""
    to_remove = utils.make_iterable(to_remove)

    neurons = self.neurons  # grab this only once to speed things up
    for vis in to_remove:
        if isinstance(vis, scene.visuals.VisualNode):
            vis.parent = None
        else:
            uuids = utils.eval_id(to_remove)
            for u in uuids:
                for v in neurons.get(u, []):
                    v.parent = None

    if self.show_bounds:
        self.update_bounds()

Remove bounding box visual.

Source code in navis/plotting/vispy/viewer.py
601
602
603
604
605
606
def remove_bounds(self):
    """Remove bounding box visual."""
    self._show_bounds = False
    for v in self.visuals:
        if getattr(v, '_object_type', '') == 'boundingbox':
            self.remove(v)

Save a screenshot of this viewer.

PARAMETER DESCRIPTION
filename
        Filename to save to.

TYPE: str DEFAULT: 'screenshot.png'

pixel_scale
        Factor by which to scale canvas. Determines image
        dimensions.

TYPE: int DEFAULT: 2

alpha
        If True, will export transparent background.

TYPE: bool DEFAULT: True

hide_overlay
        If True, will hide overlay for screenshot.

TYPE: bool DEFAULT: True

Source code in navis/plotting/vispy/viewer.py
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
def screenshot(self, filename='screenshot.png', pixel_scale=2,
               alpha=True, hide_overlay=True):
    """Save a screenshot of this viewer.

    Parameters
    ----------
    filename :      str, optional
                    Filename to save to.
    pixel_scale :   int, optional
                    Factor by which to scale canvas. Determines image
                    dimensions.
    alpha :         bool, optional
                    If True, will export transparent background.
    hide_overlay :  bool, optional
                    If True, will hide overlay for screenshot.

    """

    m = self._screenshot(pixel_scale=pixel_scale,
                         alpha=alpha,
                         hide_overlay=hide_overlay)

    im = png.from_array(m, mode='RGBA')
    im.save(filename)

Set neuron color alphas.

PARAMETER DESCRIPTION
a
 Alpha value(s) to apply. Values must be 0-1. Accepted:
   1. Tuple of single alpha. Applied to all visible neurons.
   2. Dictionary mapping skeleton IDs to alpha.

TYPE: tuple | dict

Source code in navis/plotting/vispy/viewer.py
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
@block_all
def set_alpha(self, a, include_connectors=True):
    """Set neuron color alphas.

    Parameters
    ----------
    a :      tuple | dict
             Alpha value(s) to apply. Values must be 0-1. Accepted:
               1. Tuple of single alpha. Applied to all visible neurons.
               2. Dictionary mapping skeleton IDs to alpha.
    include_connectors : bool, optional
             If True, connector visuals are updated as well.

    """
    neurons = self.neurons  # grab once to speed things up
    # Normalize input into a {neuron: alpha} mapping
    if isinstance(a, (tuple, list, np.ndarray, str)):
        amap = {s: a for s in neurons}
    elif isinstance(a, dict):
        amap = a
    else:
        raise TypeError(f'Unable to use colors of type "{type(a)}"')

    for n in neurons:
        if n in amap:
            for v in neurons[n]:
                # Pinned visuals are never modified
                if getattr(v, 'pinned', False):
                    continue
                if v._neuron_part == 'connectors' and not include_connectors:
                    continue
                # Color may be an object exposing `.rgba` or already raw values
                try:
                    this_c = v.color.rgba
                except BaseException:
                    this_c = v.color

                this_c = np.asarray(this_c)

                # For arrays of colors
                if this_c.ndim == 2:
                    # If no alpha channel yet, add one
                    if this_c.shape[1] == 3:
                        this_c = np.insert(this_c,
                                           3,
                                           np.ones(this_c.shape[0]),
                                           axis=1)

                    # If already the correct alpha value
                    if np.all(this_c[:, 3] == amap[n]):
                        continue
                    else:
                        this_c[:, 3] = amap[n]
                else:
                    # Single color: skip if alpha already matches
                    if len(this_c) == 4 and this_c[3] == amap[n]:
                        continue
                    else:
                        this_c = tuple([this_c[0], this_c[1], this_c[2], amap[n]])

                # Meshes take a plain attribute; other visuals need set_data()
                if isinstance(v, scene.visuals.Mesh):
                    v.color = this_c
                else:
                    v.set_data(color=this_c)

    if self.show_legend:
        self.update_legend()

Set background color.

Source code in navis/plotting/vispy/viewer.py
1055
1056
1057
1058
1059
def set_bgcolor(self, c):
    """Set background color."""
    # In headless mode there is no canvas to paint - do nothing
    if not getattr(config, 'headless', False):
        self.canvas.bgcolor = c

Set neuron color.

PARAMETER DESCRIPTION
c
 RGB color(s) to apply. Values must be 0-1. Accepted:
   1. Tuple of single color. Applied to all visible neurons.
   2. Dictionary mapping skeleton IDs to colors.

TYPE: tuple | dict

Source code in navis/plotting/vispy/viewer.py
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
@block_all
def set_colors(self, c, include_connectors=False):
    """Set neuron color.

    Parameters
    ----------
    c :      tuple | dict
             RGB color(s) to apply. Values must be 0-1. Accepted:
               1. Tuple of single color. Applied to all visible neurons.
               2. Dictionary mapping skeleton IDs to colors.
    include_connectors : bool, optional
             If True, connector visuals are recolored as well.

    """
    neurons = self.neurons  # grab once to speed things up
    # Normalize input into a {neuron: color} mapping
    if isinstance(c, (tuple, list, np.ndarray, str)):
        cmap = {s: c for s in neurons}
    elif isinstance(c, dict):
        cmap = c
    else:
        raise TypeError(f'Unable to use colors of type "{type(c)}"')

    for n in neurons:
        if n in cmap:
            # Convert once per neuron instead of once per visual
            # (the original re-ran mcl.to_rgba for the non-mesh branch)
            new_c = mcl.to_rgba(cmap[n])
            for v in neurons[n]:
                # Pinned visuals are never modified
                if getattr(v, 'pinned', False):
                    continue
                if v._neuron_part == 'connectors' and not include_connectors:
                    continue
                # Meshes take a plain attribute; other visuals need set_data()
                if isinstance(v, scene.visuals.Mesh):
                    v.color = new_c
                else:
                    v.set_data(color=new_c)

    if self.show_legend:
        self.update_legend()

(Re-)set camera position.

PARAMETER DESCRIPTION
view

TYPE: XY | XZ | YZ

Source code in navis/plotting/vispy/viewer.py
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
def set_view(self, view):
    """(Re-)set camera position.

    Parameters
    ----------
    view :      "XY" | "XZ" | "YZ" | Quaternion
                Canonical view plane (case-insensitive) or an explicit
                rotation quaternion.

    Raises
    ------
    TypeError
        If `view` is neither a recognized string nor a Quaternion.

    """
    if isinstance(view, Quaternion):
        q = view
    elif isinstance(view, str):
        # Normalize so e.g. 'xy' works as well as 'XY'
        view = view.upper()
        if view == 'XY':
            q = Quaternion(w=0.707, x=0.707, y=0, z=0)
        elif view == 'XZ':
            q = Quaternion(w=1, x=0, y=0, z=0)
        elif view == 'YZ':
            q = Quaternion(w=.5, x=0.5, y=0.5, z=-.5)
        else:
            # Show the offending value, not just its type
            raise TypeError(f'Unable to set view from {view!r}')
    else:
        raise TypeError(f'Unable to set view from {type(view)}')

    self.camera3d._quaternion = q
    # This is necessary to force a redraw
    self.camera3d.set_range()

Show viewer.

Source code in navis/plotting/vispy/viewer.py
803
804
805
806
807
808
809
810
811
812
813
814
815
def show(self):
    """Show viewer."""
    if getattr(config, 'headless', False):
        # Nothing to display when running headless (e.g. during testing)
        logger.info("Viewer widget not shown - navis running in headless mode. ")
        return

    self.canvas.show()

    if utils.is_jupyter():
        # In notebooks the canvas widget must be displayed explicitly
        from IPython.display import display as _display
        _display(self.canvas)

Toggle bounding box.

Source code in navis/plotting/vispy/viewer.py
585
586
587
def toggle_bounds(self):
    """Toggle bounding box."""
    # Flipping the flag triggers the `show_bounds` setter's side effects
    new_state = not self.show_bounds
    self.show_bounds = new_state

Toggle neuron(s) visibility.

Source code in navis/plotting/vispy/viewer.py
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
def toggle_neurons(self, n):
    """Toggle neuron(s) visibility.

    Parameters
    ----------
    n :     id(s) or UUID(s)
            Neuron(s) whose visibility to flip.

    """
    n = utils.make_iterable(n)

    # If *all* inputs are UUIDs address the internal object registry;
    # otherwise treat them as neuron IDs
    if False not in [isinstance(u, uuid.UUID) for u in n]:
        obj = self._neuron_obj
    else:
        n = utils.eval_id(n)
        obj = self.neurons

    for s in n:
        for v in obj[s]:
            # NOTE(review): `is False` (not `not`) means any non-bool
            # `visible` value ends up False - presumably `visible` is
            # strictly boolean; confirm before changing
            v.visible = v.visible is False

    self.update_legend()

Toggle legend on and off.

Source code in navis/plotting/vispy/viewer.py
714
715
716
def toggle_overlay(self):
    """Toggle legend on and off."""
    # `is False` (rather than `not`) preserves the original strict-bool flip
    current = self.overlay.visible
    self.overlay.visible = current is False

Toggle picking and overlay text.

Source code in navis/plotting/vispy/viewer.py
352
353
354
355
356
357
358
359
def toggle_picking(self):
    """Toggle picking and overlay text."""
    # Picking state and the overlay hint text are always flipped together
    new_state = not self.picking
    self.picking = new_state
    self._picking_text.visible = new_state

Toggle selected of given neuron.

Source code in navis/plotting/vispy/viewer.py
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
def toggle_select(self, n):
    """Toggle selected of given neuron.

    Selecting stores the current color and applies `highlight_color`;
    toggling again restores the stored color.

    """
    skids = utils.eval_id(n)

    neurons = self.neurons  # grab once to speed things up

    for s in skids:
        if self.selected != s:
            # Select: remember current color, then highlight
            self.selected = s
            for v in neurons[s]:
                # NOTE(review): only the LAST visual's color survives this
                # loop, so deselecting applies a single color to all of the
                # neuron's visuals - confirm this is intended
                self._selected_color = v.color
                v.set_data(color=self.highlight_color)
        else:
            # Deselect: restore the remembered color
            self.selected = None
            for v in neurons[s]:
                v.set_data(color=self._selected_color)

    self.update_legend()

Unhide given neuron(s).

Use n to unhide specific neurons.

Source code in navis/plotting/vispy/viewer.py
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
def unhide_neurons(self, n=None, check_alpha=False):
    """Unhide given neuron(s).

    Parameters
    ----------
    n :             optional
                    Neuron(s) to unhide. If None, all neurons are unhidden.
    check_alpha :   bool, optional
                    If True, also force the neurons' color alpha to 1
                    (fully opaque).

    """
    neurons = self.neurons  # grab once to speed things up
    # `n is not None` instead of `not isinstance(n, type(None))`
    if n is not None:
        ids = utils.eval_id(n)
    else:
        ids = list(neurons.keys())

    for s in ids:
        for v in neurons[s]:
            # Pinned visuals are never modified
            if getattr(v, 'pinned', False):
                continue
            if not v.visible:
                v.visible = True
        if check_alpha:
            # Make sure color has an alpha channel
            c = to_rgba(neurons[s][0].color)
            # Force alpha to 1 where necessary (single color vs color array)
            if c.ndim == 1 and c[3] != 1:
                c[3] = 1
                self.set_colors({s: c})
            elif c.ndim == 2 and np.any(c[:, 3] != 1):
                c[:, 3] = 1
                self.set_colors({s: c})

    self.update_legend()

Unpin given neuron(s).

Use n to unpin specific neurons.

Source code in navis/plotting/vispy/viewer.py
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
def unpin_neurons(self, n=None):
    """Unpin given neuron(s).

    Use `n` to unpin specific neurons. If `n` is None, all neurons
    are unpinned.

    """
    neurons = self.neurons  # grab once to speed things up
    # `n is not None` instead of `not isinstance(n, type(None))`
    if n is not None:
        ids = utils.eval_id(n)
    else:
        ids = list(neurons.keys())

    for s in ids:
        for v in neurons[s]:
            # Visuals are frozen; temporarily unfreeze to flip the flag
            v.unfreeze()
            v.pinned = False
            v.freeze()

Update bounding box visual.

Source code in navis/plotting/vispy/viewer.py
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
@block_canvas
def update_bounds(self, color='w', width=1):
    """Update bounding box visual.

    Parameters
    ----------
    color :     str | tuple, optional
                Line color for the bounding box.
    width :     int, optional
                Line width.

    """
    # Remove any existing visual
    self.remove_bounds()

    bounds = self.bounds
    self._show_bounds = True

    # Skip if no visual on canvas
    if isinstance(bounds, type(None)):
        return

    # Create box visual; oversize by 10% so it does not touch the data
    dims = bounds[:, 1] - bounds[:, 0]
    center = bounds.mean(axis=1)
    box = tm.primitives.Box(extents=dims).apply_scale(1.1)

    # Recenter vertices
    vertices = np.array(box.vertices) + center
    # Edge list connecting the 8 corners of the box
    connect = np.array([[0, 1], [0, 2], [0, 4],
                        [1, 3], [1, 5],
                        [2, 3], [2, 6],
                        [3, 7],
                        [4, 5], [4, 6],
                        [5, 7],
                        [6, 7]])

    box = scene.visuals.Line(pos=vertices,
                             color=mcl.to_rgb(color),
                             # Can only be used with method 'agg'
                             width=width,
                             connect=connect,
                             antialias=True,
                             name='BoundingBox',
                             method='gl')

    # Add custom attributes (requires a brief unfreeze)
    box.unfreeze()
    box._object_type = 'boundingbox'
    box._object_id = uuid.uuid4()
    box.freeze()

    self.view3d.add(box)

Update legend.

Source code in navis/plotting/vispy/viewer.py
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
@block_canvas
def update_legend(self):
    """Update legend.

    Syncs the overlay text labels with the currently registered neuron
    objects: hides/shows them, removes stale ones, adds missing ones and
    finally (re-)positions and (re-)colors all of them.

    """
    # Get existing labels
    labels = {l._object_id: l for l in self.overlay.children if getattr(l, '_object_id', None)}

    # If legend is not meant to be shown, make sure everything is hidden and return
    if not self.show_legend:
        for v in labels.values():
            if v.visible:
                v.visible = False
        return
    else:
        for v in labels.values():
            if not v.visible:
                v.visible = True

    # Labels to be removed
    neuron_obj = self._neuron_obj  # grab only once to speed things up
    to_remove = [s for s in labels if s not in neuron_obj]
    for s in to_remove:
        # Detaching from the parent removes the visual from the overlay
        labels[s].parent = None

    # Generate new labels
    to_add = [s for s in neuron_obj if s not in labels]
    for s in to_add:
        # Fallback is name or in lieu of that the object's type
        lbl = getattr(neuron_obj[s][0], '_name',
                      str(type(neuron_obj[s][0])))
        # See if we find a "label" property
        if hasattr(neuron_obj[s][0], '_object'):
            if hasattr(neuron_obj[s][0]._object, 'label'):
                lbl = neuron_obj[s][0]._object.label

        txt = scene.visuals.Text(lbl,
                                 anchor_x='left',
                                 anchor_y='top',
                                 parent=self.overlay,
                                 method='gpu',
                                 font_size=self.legend_font_size)
        txt.interactive = True
        # Custom attributes require a brief unfreeze
        txt.unfreeze()
        txt._object_id = s
        txt._id = neuron_obj[s][0]._id
        txt.freeze()

    # Position and color labels
    # (re-collect so freshly added labels are included)
    labels = {l._object_id: l for l in self.overlay.children if getattr(
        l, '_object_id', None)}
    for i, s in enumerate(sorted(neuron_obj)):
        if neuron_obj[s][0].visible:
            color = neuron_obj[s][0].color
        else:
            # Grey out labels of hidden neurons
            color = (.3, .3, .3)

        # Vertical spacing scales with the font size
        offset = 10 * (self.legend_font_size / 7)

        labels[s].pos = (10, offset * (i + 1))
        labels[s].color = color
        labels[s].font_size = self.legend_font_size

List visuals at given canvas position.

Source code in navis/plotting/vispy/viewer.py
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
def visuals_at(self, pos):
    """List visuals at given canvas position.

    Parameters
    ----------
    pos :   (x, y)
            Canvas position to probe.

    Returns
    -------
    list
            Visuals under/near `pos`, ordered from closest to farthest
            from the probed point.

    """
    # There appears to be some odd y offset - perhaps because of the
    # window's top bar? On OSX this is about 15px
    pos = (pos[0], pos[1] - 15)

    # Map mouse pos to framebuffer
    tr = self.canvas.transforms.get_transform(map_from='canvas',
                                              map_to='framebuffer')
    pos = tr.map(pos)

    # Render framebuffer in picking mode, cropped to a small square
    # around the probed position
    p = self._render_fb(crop=(pos[0] - self._picking_radius / 2,
                              pos[1] - self._picking_radius / 2,
                              self._picking_radius,
                              self._picking_radius))

    logger.debug('Picking framebuffer:')
    logger.debug(p)

    # List visuals in order from distance to center: grow a square
    # window around the center and collect newly seen IDs each step
    ids = []
    seen = set()
    center = (np.array(p.shape) / 2).astype(int)
    for i in range(self._picking_radius * self.canvas.pixel_scale):
        subr = p[center[0] - i: center[0] + i + 1,
                 center[1] - i: center[1] + i + 1]
        subr_ids = set(list(np.unique(subr)))
        ids.extend(list(subr_ids - seen))
        seen |= subr_ids
    # Resolve picking IDs back to visuals; unknown IDs map to None
    visuals = [scene.visuals.VisualNode._visual_ids.get(x, None) for x in ids]

    return [v for v in visuals if v is not None]

Mesh consisting of vertices and faces.

Subclass of trimesh.Trimesh with a few additional methods.

PARAMETER DESCRIPTION
vertices
    `(N, 3)` vertices coordinates or an object that has
    `.vertices` and `.faces` attributes in which case `faces`
    parameter will be ignored.

TYPE: list | array | mesh-like

faces
    `(M, 3)` array of indexed triangle faces.

TYPE: list | array DEFAULT: None

name
    A name for the volume.

TYPE: str DEFAULT: None

color
    RGB(A) color.

TYPE: tuple DEFAULT: (0.85, 0.85, 0.85, 0.2)

id
    If not provided, neuron will be assigned a random UUID as `.id`.

TYPE: int DEFAULT: None

**kwargs
    Keyword arguments passed through to `trimesh.Trimesh`

DEFAULT: {}

See Also

navis.example_volume Loads example volume(s).

Source code in navis/core/volumes.py
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
class Volume(UnitObject, trimesh.Trimesh):
    """Mesh consisting of vertices and faces.

    Subclass of `trimesh.Trimesh` with a few additional methods.

    Parameters
    ----------
    vertices :  list | array | mesh-like
                `(N, 3)` vertices coordinates or an object that has
                `.vertices` and `.faces` attributes in which case `faces`
                parameter will be ignored.
    faces :     list | array
                `(M, 3)` array of indexed triangle faces.
    name :      str, optional
                A name for the volume.
    color :     tuple, optional
                RGB(A) color.
    id :        int, optional
                If not provided, neuron will be assigned a random UUID as `.id`.
    **kwargs
                Keyword arguments passed through to `trimesh.Trimesh`

    See Also
    --------
    [`navis.example_volume`][]
                Loads example volume(s).

    """

    def __init__(
        self,
        vertices: Union[list, np.ndarray],
        faces: Union[list, np.ndarray] = None,
        name: Optional[str] = None,
        color: Union[str, Sequence[Union[int, float]]] = (0.85, 0.85, 0.85, 0.2),
        id: Optional[int] = None,
        units: Optional[str] = None,
        **kwargs,
    ):
        """Initialize Volume. See class docstring for parameters."""
        # Mesh-like input: pull vertices/faces off it; `faces` arg is ignored
        if hasattr(vertices, "vertices") and hasattr(vertices, "faces"):
            vertices, faces = vertices.vertices, vertices.faces

        super().__init__(vertices=vertices, faces=faces, **kwargs)

        self.name: Optional[str] = name
        self.color: Union[str, Sequence[Union[int, float]]] = color
        # `is None` (not truthiness) so falsy-but-valid IDs such as 0 are kept
        self.id: Optional[int] = id if id is not None else uuid.uuid4()
        self.units = units

        # This is very hackish but we want to make sure that parent methods of
        # Trimesh return a navis.Volume instead of a trimesh.Trimesh
        for f in dir(trimesh.Trimesh):
            # Don't mess with magic/private methods
            if f.startswith("_"):
                continue
            # Skip properties
            if not callable(getattr(trimesh.Trimesh, f)):
                continue
            setattr(self, f, _force_volume(getattr(self, f)))

    @property
    def name(self):
        """Name of this volume."""
        # Stored in the inherited `metadata` dict rather than an attribute
        return self.metadata.get("name")

    @name.setter
    def name(self, value):
        # Keep the name in `metadata` (see getter)
        self.metadata["name"] = value

    @property
    def color(self):
        """Color used for plotting."""
        # Stored in the inherited `metadata` dict rather than an attribute
        return self.metadata.get("color")

    @color.setter
    def color(self, value):
        # Keep the color in `metadata` (see getter)
        self.metadata["color"] = value

    @property
    def id(self):
        """ID of this volume."""
        # Stored in the inherited `metadata` dict rather than an attribute
        return self.metadata.get("id")

    @id.setter
    def id(self, value):
        # Keep the ID in `metadata` (see getter)
        self.metadata["id"] = value

    @classmethod
    def from_csv(
        cls,
        vertices: str,
        faces: str,
        name: Optional[str] = None,
        color: Union[str, Sequence[Union[int, float]]] = (0.85, 0.85, 0.85, 0.2),
        volume_id: Optional[int] = None,
        **kwargs,
    ) -> "Volume":
        """Load volume from csv files containing vertices and faces.

        Parameters
        ----------
        vertices :      filepath | file-like
                        CSV file containing vertices.
        faces :         filepath | file-like
                        CSV file containing faces.
        name :          str, optional
                        Name for the new volume.
        color :         tuple | str, optional
                        Color for the new volume.
        volume_id :     int, optional
                        ID for the new volume.
        **kwargs
                        Keyword arguments passed to `csv.reader`.

        Returns
        -------
        navis.Volume

        """
        if not os.path.isfile(vertices) or not os.path.isfile(faces):
            raise ValueError("File(s) not found.")

        with open(vertices, "r") as f:
            reader = csv.reader(f, **kwargs)
            vertices = np.array([r for r in reader]).astype(float)

        with open(faces, "r") as f:
            reader = csv.reader(f, **kwargs)
            faces = np.array([r for r in reader]).astype(int)

        # `__init__` expects the ID under `id` - passing `volume_id=...`
        # would silently end up in trimesh's **kwargs and be lost
        return cls(
            faces=faces, vertices=vertices, name=name, color=color, id=volume_id
        )

    def to_csv(self, filename: str, **kwargs) -> None:
        """Save volume as two separated csv files containing vertices and faces.

        Parameters
        ----------
        filename :      str
                        Filename to use. Will get a `_vertices.csv` and
                        `_faces.csv` suffix.
        **kwargs
                        Keyword arguments passed to `csv.writer`.

        """
        for data, suffix in zip(
            [self.faces, self.vertices], ["_faces.csv", "_vertices.csv"]
        ):
            with open(filename + suffix, "w") as csvfile:
                # Forward kwargs to the writer (previously accepted but ignored;
                # the docstring also wrongly said `csv.reader`)
                writer = csv.writer(csvfile, **kwargs)
                writer.writerows(data)

    @classmethod
    def from_json(
        cls, filename: str, import_kwargs: Optional[Dict] = None, **init_kwargs
    ) -> "Volume":
        """Load volume from json file containing vertices and faces.

        Parameters
        ----------
        filename :      str
                        JSON file to load. Must contain "vertices" and
                        "faces" entries.
        import_kwargs : dict, optional
                        Keyword arguments passed to `json.load`.
        **init_kwargs
                        Keyword arguments passed to navis.Volume upon
                        initialization.

        Returns
        -------
        navis.Volume

        """
        if not os.path.isfile(filename):
            raise ValueError("File not found.")

        # Avoid the mutable-default-argument pitfall
        import_kwargs = {} if import_kwargs is None else import_kwargs

        with open(filename, "r") as f:
            data = json.load(f, **import_kwargs)

        return cls(faces=data["faces"], vertices=data["vertices"], **init_kwargs)

    @classmethod
    def from_object(cls, obj: Any, **init_kwargs) -> "Volume":
        """Load volume from generic object that has `.vertices` and
        `.faces` attributes.

        Parameters
        ----------
        obj
                    Any mesh-like object exposing `.vertices` and `.faces`.
        **init_kwargs
                    Keyword arguments passed to navis.Volume upon
                    initialization.

        Returns
        -------
        navis.Volume

        """
        # Both attributes are required to build a mesh
        if not all(hasattr(obj, attr) for attr in ("vertices", "faces")):
            raise ValueError("Object must have faces and vertices attributes.")

        return cls(faces=obj.faces, vertices=obj.vertices, **init_kwargs)

    @classmethod
    def from_file(
        cls, filename: str, import_kwargs: Optional[Dict] = None, **init_kwargs
    ) -> "Volume":
        """Load volume from file.

        Parameters
        ----------
        filename :      str
                        File to load from.
        import_kwargs : dict, optional
                        Keyword arguments passed to importer:
                          - `json.load` for JSON file
                          - `trimesh.load_mesh` for OBJ and STL files
        **init_kwargs
                    Keyword arguments passed to navis.Volume upon
                    initialization.

        Returns
        -------
        navis.Volume

        """
        if not os.path.isfile(filename):
            raise ValueError("File not found.")

        # Avoid the mutable-default-argument pitfall
        import_kwargs = {} if import_kwargs is None else import_kwargs

        # Dispatch on the (case-insensitive) file extension
        _, ext = os.path.splitext(filename)

        if ext.lower() == ".json":
            return cls.from_json(
                filename=filename, import_kwargs=import_kwargs, **init_kwargs
            )

        try:
            import trimesh
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "Unable to import: trimesh missing - please "
                'install: "pip install trimesh"'
            )

        tm = trimesh.load_mesh(filename, **import_kwargs)

        return cls.from_object(tm, **init_kwargs)

    def to_json(self, filename: str) -> None:
        """Save volume as json file.

        Parameters
        ----------
        filename :      str
                        Filename to use.

        """
        # Arrays must be converted to plain lists for JSON serialization
        payload = {
            "vertices": self.vertices.tolist(),
            "faces": self.faces.tolist(),
        }
        with open(filename, "w") as f:
            json.dump(payload, f)

    @classmethod
    def combine(
        cls,
        x: Sequence["Volume"],
        name: str = "comb_vol",
        color: Union[str, Sequence[Union[int, float]]] = (0.85, 0.85, 0.85, 0.2),
    ) -> "Volume":
        """Merge multiple volumes into a single object.

        Parameters
        ----------
        x :     list or dict of Volumes
        name :  str, optional
                Name of the combined volume.
        color : tuple | str, optional
                Color of the combined volume.

        Returns
        -------
        [`navis.Volume`][]

        """
        # A single volume passes through unchanged
        if isinstance(x, Volume):
            return x

        if isinstance(x, dict):
            x = list(x.values())

        if not utils.is_iterable(x):
            x = [x]  # type: ignore

        if False in [isinstance(v, Volume) for v in x]:
            raise TypeError("Input must be list of volumes")

        # Collect vertex blocks and re-indexed faces, then concatenate once -
        # np.append in a loop copies the whole array on every iteration (O(n^2))
        vertex_blocks: List[np.ndarray] = []
        faces: List[List[int]] = []
        offs = 0
        for vol in x:
            vertex_blocks.append(np.asarray(vol.vertices))
            # Shift face indices by the number of vertices collected so far
            faces += [[f[0] + offs, f[1] + offs, f[2] + offs] for f in vol.faces]
            offs += len(vol.vertices)

        if vertex_blocks:
            # astype(float) matches the dtype the old np.append-based
            # accumulation produced
            vertices = np.concatenate(vertex_blocks, axis=0).astype(float, copy=False)
        else:
            vertices = np.empty((0, 3))

        return cls(vertices=vertices, faces=faces, name=name, color=color)

    @property
    def bbox(self) -> np.ndarray:
        """Bounding box of this volume."""
        # Delegates to the inherited `.bounds`
        return self.bounds

    @property
    def verts(self) -> np.ndarray:
        """Legacy access to `.vertices`."""
        return self.vertices

    @verts.setter
    def verts(self, v):
        # Kept for backwards compatibility
        self.vertices = v

    @property
    def center(self) -> np.ndarray:
        """Center of mass."""
        # Plain average of vertices - not area-weighted (cf. trimesh's
        # `.centroid`)
        return np.mean(self.vertices, axis=0)

    def __getstate__(self):
        """Get state (used e.g. for pickling)."""
        # Drop callables: __init__ attaches bound (wrapped) methods to the
        # instance via _force_volume and those cannot be pickled
        return {k: v for k, v in self.__dict__.items() if not callable(v)}

    def __setstate__(self, d):
        """Update state (used e.g. for pickling)."""
        self.__dict__.update(d)

    def __str__(self):
        # Same summary as __repr__
        return self.__repr__()

    def __repr__(self):
        """Return quick summary of the current geometry.

        Avoids computing properties.

        """
        elements = []
        # hasattr guards keep this working on partially initialized objects
        if hasattr(self, "name"):
            # for Trimesh
            elements.append(f"name={self.name}")
        if hasattr(self, "id") and not isinstance(self.id, uuid.UUID):
            # Auto-generated UUIDs are noise - only show explicit IDs
            elements.append(f"id={self.id}")
        elements.append(f"units={self.units}")
        if hasattr(self, "color"):
            # for Trimesh
            elements.append(f"color={self.color}")
        if hasattr(self, "vertices"):
            # for Trimesh and PointCloud
            elements.append(f"vertices.shape={self.vertices.shape}")
        if hasattr(self, "faces"):
            # for Trimesh
            elements.append(f"faces.shape={self.faces.shape}")
        return f'<navis.Volume({", ".join(elements)})>'

    def __truediv__(self, other):
        """Implement division for vertices."""
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            # Operate on a copy; `out=` + casting="unsafe" writes in place
            # and preserves the vertex array's dtype
            n = self.copy()
            _ = np.divide(n.vertices, other, out=n.vertices, casting="unsafe")
            return n
        return NotImplemented

    def __mul__(self, other):
        """Implement multiplication for vertices."""
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            # See __truediv__ for the in-place/unsafe-cast rationale
            n = self.copy()
            _ = np.multiply(n.vertices, other, out=n.vertices, casting="unsafe")
            return n
        return NotImplemented

    def __add__(self, other):
        """Implement addition for vertices."""
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            # See __truediv__ for the in-place/unsafe-cast rationale
            n = self.copy()
            _ = np.add(n.vertices, other, out=n.vertices, casting="unsafe")
            return n
        return NotImplemented

    def __sub__(self, other):
        """Implement subtraction for vertices."""
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            # See __truediv__ for the in-place/unsafe-cast rationale
            n = self.copy()
            _ = np.subtract(n.vertices, other, out=n.vertices, casting="unsafe")
            return n
        return NotImplemented

    def resize(
        self,
        x: Union[float, int],
        method: Union[
            Literal["center"],
            Literal["centroid"],
            Literal["normals"],
            Literal["origin"],
        ] = "center",
        inplace: bool = False,
    ) -> Optional["Volume"]:
        """Resize volume.

        Parameters
        ----------
        x :         int | float
                    Resizing factor. For methods "center", "centroid" and
                    "origin" this is the fraction of original size (e.g.
                    `.5` for half size). For method "normals", this is
                    is the absolute displacement (e.g. `-1000` to shrink
                    volume by that many units)!
        method :    "center" | "centroid" | "normals" | "origin"
                    Point in space to use for resizing.

                    .. list-table::
                        :widths: 15 75
                        :header-rows: 1

                        * - method
                          - explanation
                        * - center
                          - average of all vertices
                        * - centroid
                          - average of the triangle centroids weighted by the
                            area of each triangle.
                        * - origin
                          - resizes relative to origin of coordinate system
                            (0, 0, 0)
                        * - normals
                          - resize using face normals. Note that this method
                            uses absolute displacement for parameter `x`.

        inplace :   bool, optional
                    If False, will return resized copy.

        Returns
        -------
        [`navis.Volume`][]
                    Resized copy of original volume. Only if `inplace=False`.
        None
                    If `inplace=True`.

        """
        # NOTE(review): assert is stripped under `python -O`; a TypeError
        # would be more robust input validation
        assert isinstance(method, str)

        method = method.lower()

        perm_methods = ["center", "origin", "normals", "centroid"]
        if method not in perm_methods:
            raise ValueError(
                f'Unknown method "{method}". Allowed '
                f'methods: {", ".join(perm_methods)}'
            )

        # Work on a copy unless an in-place edit was requested
        if not inplace:
            v = self.copy()
        else:
            v = self

        if method == "normals":
            # Absolute displacement along each vertex normal
            v.vertices = v.vertices + (v.vertex_normals * x)
        else:
            # Get the center
            if method == "center":
                cn = np.mean(v.vertices, axis=0)
            elif method == "centroid":
                cn = v.centroid
            elif method == "origin":
                cn = np.array([0, 0, 0])

            # Get vector from center to each vertex
            vec = v.vertices - cn

            # Multiply vector by resize factor
            vec *= x

            # Recalculate vertex positions
            v.vertices = vec + cn

        # Make sure to reset any pyoctree data on this volume
        if hasattr(v, "pyoctree"):
            delattr(v, "pyoctree")

        if not inplace:
            return v

    def plot3d(self, **kwargs):
        """Plot volume using [`navis.plot3d`][].

        Parameters
        ----------
        **kwargs
                Keyword arguments. Will be passed to [`navis.plot3d`][].
                See `help(navis.plot3d)` for a list of keywords.

        See Also
        --------
        [`navis.plot3d`][]
                    Function called to generate 3d plot.

        Examples
        --------
        >>> import navis
        >>> vol = navis.example_volume('LH')
        >>> v = vol.plot3d(color = (255, 0, 0))

        """
        from .. import plotting

        # A `color` keyword also recolors the volume itself so the chosen
        # color sticks for subsequent plots.
        try:
            self.color = kwargs["color"]
        except KeyError:
            pass

        return plotting.plot3d(self, **kwargs)

    def show(self, **kwargs):
        """See `.plot3d`."""
        # Defined here chiefly to shadow the trimesh.Trimesh method of the
        # same name.
        plot_fn = self.plot3d
        return plot_fn(**kwargs)

    def _outlines_3d(self, view="xy", **kwargs):
        """Generate 3d outlines along a given view (see `.to_2d()`).

        Parameters
        ----------
        **kwargs
                    Keyword arguments passed to [`navis.Volume.to_2d`][].

        Returns
        -------
        list
                    Coordinates of 2d circumference.
                    e.g. `[(x1, y1, z1), (x2, y2, z2), (x3, y3, z3), ...]`
                    Third dimension is averaged.

        """
        co2d = np.array(self.to_2d(view=view, **kwargs))

        if view in ["xy", "yx"]:
            third = np.repeat(self.center[2], co2d.shape[0])
        elif view in ["xz", "zx"]:
            third = np.repeat(self.center[1], co2d.shape[0])
        elif view in ["yz", "zy"]:
            third = np.repeat(self.center[0], co2d.shape[0])

        return np.append(co2d, third.reshape(co2d.shape[0], 1), axis=1)

    def to_2d(
        self, alpha: float = 0.00017, view: tuple = ("x", "y"), invert_y: bool = False
    ) -> Sequence[Union[float, int]]:
        """Compute the 2d alpha shape (concave hull) of this volume.

        Uses Scipy Delaunay and shapely.

        Parameters
        ----------
        alpha:      float, optional
                    Alpha value to influence the gooeyness of the border.
                    Smaller numbers don't fall inward as much as larger
                    numbers. Too large, and you lose everything!
        view :      tuple
                    Determines axis.
        invert_y :  bool, optional
                    Currently only carried through to recursive retries.

        Returns
        -------
        list
                    Coordinates of 2d circumference
                    e.g. `[(x1, y1), (x2, y2), (x3, y3), ...]`

        """

        def add_edge(edges, edge_points, coords, i, j):
            """Add line between the i-th and j-th points."""
            if (i, j) in edges or (j, i) in edges:
                # already added
                return
            edges.add((i, j))
            edge_points.append(coords[[i, j]])

        accepted_views = ["x", "z", "y", "-x", "-z", "-y"]

        for ax in view:
            if ax not in accepted_views:
                raise ValueError(f'Unable to parse "{ax}" view')

        try:
            from shapely.ops import unary_union, polygonize  # type: ignore
            import shapely.geometry as geometry  # type: ignore
        except ModuleNotFoundError:
            raise ModuleNotFoundError("This function needs the shapely>=1.8.0")

        coords: np.ndarray

        # Renamed from `map` to avoid shadowing the builtin.
        ax_map = {"x": 0, "y": 1, "z": 2}

        x_ix = ax_map[view[0].replace("-", "").replace("+", "")]
        y_ix = ax_map[view[1].replace("-", "").replace("+", "")]

        coords = self.vertices[:, [x_ix, y_ix]]

        tri = scipy.spatial.Delaunay(coords)
        edges: set = set()
        edge_points: list = []
        # loop over triangles:
        # ia, ib, ic = indices of corner points of the triangle
        # Note that "vertices" property was renamed to "simplices"
        for ia, ib, ic in getattr(tri, "simplices", getattr(tri, "vertices", [])):
            pa: np.ndarray = coords[ia]  # type: ignore
            pb: np.ndarray = coords[ib]  # type: ignore
            pc: np.ndarray = coords[ic]  # type: ignore
            # Lengths of sides of triangle
            a = math.sqrt((pa[0] - pb[0]) ** 2 + (pa[1] - pb[1]) ** 2)  # type: ignore
            b = math.sqrt((pb[0] - pc[0]) ** 2 + (pb[1] - pc[1]) ** 2)  # type: ignore
            c = math.sqrt((pc[0] - pa[0]) ** 2 + (pc[1] - pa[1]) ** 2)  # type: ignore
            # Semiperimeter of triangle
            s = (a + b + c) / 2.0
            # Area of triangle by Heron's formula; clamp the radicand at zero
            # to guard against tiny negative values from floating-point error.
            area = math.sqrt(max(s * (s - a) * (s - b) * (s - c), 0.0))
            # Skip degenerate (zero-area) triangles which would otherwise
            # raise a ZeroDivisionError when computing the circumradius.
            if area == 0:
                continue
            circum_r = a * b * c / (4.0 * area)
            # Here's the radius filter.
            if circum_r < 1.0 / alpha:
                add_edge(edges, edge_points, coords, ia, ib)
                add_edge(edges, edge_points, coords, ib, ic)
                add_edge(edges, edge_points, coords, ic, ia)

        m = geometry.MultiLineString(edge_points)
        triangles = list(polygonize(m))
        concave_hull = unary_union(triangles)

        # Try with current settings, if this is not successful, try again
        # with lower alpha
        try:
            return list(concave_hull.exterior.coords)
        except AttributeError:
            return self.to_2d(alpha=alpha / 10, view=view, invert_y=invert_y)
        except BaseException:
            raise

    def validate(self):
        """Use trimesh to try and fix issues (holes/normals)."""
        # Nothing to do for an already-valid mesh.
        if self.is_volume:
            return

        logger.info("Mesh not valid, attempting to fix")
        self.fill_holes()
        self.fix_normals()

        if not self.is_volume:
            raise utils.VolumeError(
                "Mesh is not a volume "
                "(e.g. not watertight, incorrect "
                "winding) and could not be fixed."
            )

Bounding box of this volume.

Center of mass.

Color used for plotting.

ID of this volume.

Name of this volume.

Legacy access to .vertices.

Merge multiple volumes into a single object.

PARAMETER DESCRIPTION
x

TYPE: list or dict of Volumes

name
Name of the combined volume.

TYPE: str DEFAULT: 'comb_vol'

color
Color of the combined volume.

TYPE: tuple | str DEFAULT: (0.85, 0.85, 0.85, 0.2)

RETURNS DESCRIPTION
[`navis.Volume`][]
Source code in navis/core/volumes.py
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
@classmethod
def combine(
    cls,
    x: Sequence["Volume"],
    name: str = "comb_vol",
    color: Union[str, Sequence[Union[int, float]]] = (0.85, 0.85, 0.85, 0.2),
) -> "Volume":
    """Merge multiple volumes into a single object.

    Parameters
    ----------
    x :     list or dict of Volumes
    name :  str, optional
            Name of the combined volume.
    color : tuple | str, optional
            Color of the combined volume.

    Returns
    -------
    [`navis.Volume`][]

    """
    # A single volume needs no merging.
    if isinstance(x, Volume):
        return x

    # Dicts are reduced to their values.
    if isinstance(x, dict):
        x = list(x.values())

    if not utils.is_iterable(x):
        x = [x]  # type: ignore

    if not all(isinstance(v, Volume) for v in x):
        raise TypeError("Input must be list of volumes")

    # Concatenate vertices and faces, shifting the face indices of each
    # volume by the number of vertices already collected.
    merged_verts: np.ndarray = np.empty((0, 3))
    merged_faces: List[List[int]] = []
    for vol in x:
        shift = len(merged_verts)
        merged_verts = np.append(merged_verts, vol.vertices, axis=0)
        merged_faces += [[f[0] + shift, f[1] + shift, f[2] + shift] for f in vol.faces]

    return cls(vertices=merged_verts, faces=merged_faces, name=name, color=color)

Load volume from csv files containing vertices and faces.

PARAMETER DESCRIPTION
vertices
        CSV file containing vertices.

TYPE: filepath | file-like

faces
        CSV file containing faces.

TYPE: filepath | file-like

**kwargs
        Keyword arguments passed to `csv.reader`.

DEFAULT: {}

RETURNS DESCRIPTION
navis.Volume
Source code in navis/core/volumes.py
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
@classmethod
def from_csv(
    cls,
    vertices: str,
    faces: str,
    name: Optional[str] = None,
    color: Union[str, Sequence[Union[int, float]]] = (0.85, 0.85, 0.85, 0.2),
    volume_id: Optional[int] = None,
    **kwargs,
) -> "Volume":
    """Load volume from csv files containing vertices and faces.

    Parameters
    ----------
    vertices :      filepath | file-like
                    CSV file containing vertices.
    faces :         filepath | file-like
                    CSV file containing faces.
    name :          str, optional
                    Name for the new volume.
    color :         tuple | str, optional
                    Color for the new volume.
    volume_id :     int, optional
                    ID for the new volume.
    **kwargs
                    Keyword arguments passed to `csv.reader`.

    Returns
    -------
    navis.Volume

    """
    if not os.path.isfile(vertices) or not os.path.isfile(faces):
        raise ValueError("File(s) not found.")

    def _read(path, dtype):
        # Parse one CSV file into a numpy array of the requested dtype.
        with open(path, "r") as f:
            return np.array(list(csv.reader(f, **kwargs))).astype(dtype)

    verts = _read(vertices, float)
    fcs = _read(faces, int)

    return cls(
        faces=fcs, vertices=verts, name=name, color=color, volume_id=volume_id
    )

Load volume from file.

PARAMETER DESCRIPTION
filename
        File to load from.

TYPE: str

import_kwargs
        Keyword arguments passed to importer:
          - `json.load` for JSON file
          - `trimesh.load_mesh` for OBJ and STL files

TYPE: Dict DEFAULT: {}

**init_kwargs
    Keyword arguments passed to navis.Volume upon
    initialization.

DEFAULT: {}

RETURNS DESCRIPTION
navis.Volume
Source code in navis/core/volumes.py
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
@classmethod
def from_file(
    cls, filename: str, import_kwargs: Optional[Dict] = None, **init_kwargs
) -> "Volume":
    """Load volume from file.

    Parameters
    ----------
    filename :      str
                    File to load from.
    import_kwargs : dict, optional
                    Keyword arguments passed to importer:
                      - `json.load` for JSON file
                      - `trimesh.load_mesh` for OBJ and STL files
    **init_kwargs
                Keyword arguments passed to navis.Volume upon
                initialization.

    Returns
    -------
    navis.Volume

    Raises
    ------
    ValueError
                If `filename` does not point to an existing file.

    """
    if not os.path.isfile(filename):
        raise ValueError("File not found.")

    # Default to None instead of `{}` to avoid the shared mutable-default
    # argument pitfall.
    import_kwargs = {} if import_kwargs is None else import_kwargs

    ext = os.path.splitext(filename)[1]

    # JSON has a dedicated reader; everything else goes through trimesh
    # (an optional dependency).
    if ext == ".json":
        return cls.from_json(
            filename=filename, import_kwargs=import_kwargs, **init_kwargs
        )

    try:
        import trimesh
    except ModuleNotFoundError:
        raise ModuleNotFoundError(
            "Unable to import: trimesh missing - please "
            'install: "pip install trimesh"'
        )
    except BaseException:
        raise

    tm = trimesh.load_mesh(filename, **import_kwargs)

    return cls.from_object(tm, **init_kwargs)

Load volume from json file containing vertices and faces.

PARAMETER DESCRIPTION
filename

TYPE: str

import_kwargs
        Keyword arguments passed to `json.load`.

TYPE: Dict DEFAULT: {}

**init_kwargs
    Keyword arguments passed to navis.Volume upon
    initialization.

DEFAULT: {}

RETURNS DESCRIPTION
navis.Volume
Source code in navis/core/volumes.py
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
@classmethod
def from_json(
    cls, filename: str, import_kwargs: Optional[Dict] = None, **init_kwargs
) -> "Volume":
    """Load volume from json file containing vertices and faces.

    Parameters
    ----------
    filename :      str
                    JSON file with "vertices" and "faces" keys.
    import_kwargs : dict, optional
                    Keyword arguments passed to `json.load`.
    **init_kwargs
                Keyword arguments passed to navis.Volume upon
                initialization.

    Returns
    -------
    navis.Volume

    Raises
    ------
    ValueError
                If `filename` does not point to an existing file.

    """
    if not os.path.isfile(filename):
        raise ValueError("File not found.")

    # Default to None instead of `{}` to avoid the shared mutable-default
    # argument pitfall.
    import_kwargs = {} if import_kwargs is None else import_kwargs

    with open(filename, "r") as f:
        data = json.load(f, **import_kwargs)

    return cls(faces=data["faces"], vertices=data["vertices"], **init_kwargs)

Load volume from generic object that has .vertices and .faces attributes.

PARAMETER DESCRIPTION
obj

TYPE: Any

**init_kwargs
    Keyword arguments passed to navis.Volume upon
    initialization.

DEFAULT: {}

RETURNS DESCRIPTION
navis.Volume
Source code in navis/core/volumes.py
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
@classmethod
def from_object(cls, obj: Any, **init_kwargs) -> "Volume":
    """Load volume from generic object that has `.vertices` and
    `.faces` attributes.

    Parameters
    ----------
    obj
                Any object exposing `.vertices` and `.faces`.
    **init_kwargs
                Keyword arguments passed to navis.Volume upon
                initialization.

    Returns
    -------
    navis.Volume

    """
    # Duck-typed: anything exposing the two attributes will do.
    if any(not hasattr(obj, attr) for attr in ("vertices", "faces")):
        raise ValueError("Object must have faces and vertices attributes.")

    return cls(faces=obj.faces, vertices=obj.vertices, **init_kwargs)

Plot volume using navis.plot3d.

PARAMETER DESCRIPTION
**kwargs
Keyword arguments. Will be passed to [`navis.plot3d`][].
See `help(navis.plot3d)` for a list of keywords.

DEFAULT: {}

See Also

navis.plot3d Function called to generate 3d plot.

Examples:

>>> import navis
>>> vol = navis.example_volume('LH')
>>> v = vol.plot3d(color = (255, 0, 0))
Source code in navis/core/volumes.py
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
def plot3d(self, **kwargs):
    """Plot volume using [`navis.plot3d`][].

    Parameters
    ----------
    **kwargs
            Keyword arguments. Will be passed to [`navis.plot3d`][].
            See `help(navis.plot3d)` for a list of keywords.

    See Also
    --------
    [`navis.plot3d`][]
                Function called to generate 3d plot.

    Examples
    --------
    >>> import navis
    >>> vol = navis.example_volume('LH')
    >>> v = vol.plot3d(color = (255, 0, 0))

    """
    from .. import plotting

    # A `color` keyword also recolors the volume itself so the chosen
    # color sticks for subsequent plots.
    try:
        self.color = kwargs["color"]
    except KeyError:
        pass

    return plotting.plot3d(self, **kwargs)

Resize volume.

PARAMETER DESCRIPTION
x
    Resizing factor. For methods "center", "centroid" and
    "origin" this is the fraction of original size (e.g.
    `.5` for half size). For method "normals", this is
    the absolute displacement (e.g. `-1000` to shrink
    volume by that many units)!

TYPE: int | float

method
    Point in space to use for resizing.

    .. list-table::
        :widths: 15 75
        :header-rows: 1

        * - method
          - explanation
        * - center
          - average of all vertices
        * - centroid
          - average of the triangle centroids weighted by the
            area of each triangle.
        * - origin
          - resizes relative to origin of coordinate system
            (0, 0, 0)
        * - normals
          - resize using face normals. Note that this method
            uses absolute displacement for parameter `x`.

TYPE: "center" | "centroid" | "normals" | "origin" DEFAULT: 'center'

inplace
    If False, will return resized copy.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
[`navis.Volume`][]

Resized copy of original volume. Only if inplace=False.

None

If inplace=True.

Source code in navis/core/volumes.py
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
def resize(
    self,
    x: Union[float, int],
    method: Union[
        Literal["center"],
        Literal["centroid"],
        Literal["normals"],
        Literal["origin"],
    ] = "center",
    inplace: bool = False,
) -> Optional["Volume"]:
    """Resize volume.

    Parameters
    ----------
    x :         int | float
                Resizing factor. For methods "center", "centroid" and
                "origin" this is the fraction of original size (e.g.
                `.5` for half size). For method "normals", this is
                the absolute displacement (e.g. `-1000` to shrink
                volume by that many units)!
    method :    "center" | "centroid" | "normals" | "origin"
                Point in space to use for resizing.

                .. list-table::
                    :widths: 15 75
                    :header-rows: 1

                    * - method
                      - explanation
                    * - center
                      - average of all vertices
                    * - centroid
                      - average of the triangle centroids weighted by the
                        area of each triangle.
                    * - origin
                      - resizes relative to origin of coordinate system
                        (0, 0, 0)
                    * - normals
                      - resize using face normals. Note that this method
                        uses absolute displacement for parameter `x`.

    inplace :   bool, optional
                If False, will return resized copy.

    Returns
    -------
    [`navis.Volume`][]
                Resized copy of original volume. Only if `inplace=False`.
    None
                If `inplace=True`.

    Raises
    ------
    TypeError
                If `method` is not a string.
    ValueError
                If `method` is not one of the allowed methods.

    """
    # Explicit exception instead of `assert` so the check is not stripped
    # when Python runs with the -O flag.
    if not isinstance(method, str):
        raise TypeError(f'`method` must be str, got "{type(method)}"')

    method = method.lower()

    perm_methods = ["center", "origin", "normals", "centroid"]
    if method not in perm_methods:
        raise ValueError(
            f'Unknown method "{method}". Allowed '
            f'methods: {", ".join(perm_methods)}'
        )

    v = self if inplace else self.copy()

    if method == "normals":
        # Displace each vertex along its normal by the absolute amount `x`.
        v.vertices = v.vertices + (v.vertex_normals * x)
    else:
        # Get the reference point to scale around
        if method == "center":
            cn = np.mean(v.vertices, axis=0)
        elif method == "centroid":
            cn = v.centroid
        elif method == "origin":
            cn = np.array([0, 0, 0])

        # Scale the vector from the reference point to each vertex by `x`
        vec = (v.vertices - cn) * x

        # Recalculate vertex positions
        v.vertices = vec + cn

    # Make sure to reset any pyoctree data on this volume
    if hasattr(v, "pyoctree"):
        delattr(v, "pyoctree")

    if not inplace:
        return v

See .plot3d.

Source code in navis/core/volumes.py
550
551
552
553
def show(self, **kwargs):
    """See `.plot3d`."""
    # Defined here chiefly to shadow the trimesh.Trimesh method of the
    # same name.
    plot_fn = self.plot3d
    return plot_fn(**kwargs)

Compute the 2d alpha shape (concave hull) of this volume.

Uses Scipy Delaunay and shapely.

PARAMETER DESCRIPTION
alpha
    Alpha value to influence the gooeyness of the border.
    Smaller numbers don't fall inward as much as larger
    numbers. Too large, and you lose everything!

TYPE: float DEFAULT: 0.00017

view
    Determines axis.

TYPE: tuple DEFAULT: ('x', 'y')

RETURNS DESCRIPTION
list

Coordinates of 2d circumference e.g. [(x1, y1), (x2, y2), (x3, y3), ...]

Source code in navis/core/volumes.py
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
def to_2d(
    self, alpha: float = 0.00017, view: tuple = ("x", "y"), invert_y: bool = False
) -> Sequence[Union[float, int]]:
    """Compute the 2d alpha shape (concave hull) of this volume.

    Uses Scipy Delaunay and shapely.

    Parameters
    ----------
    alpha:      float, optional
                Alpha value to influence the gooeyness of the border.
                Smaller numbers don't fall inward as much as larger
                numbers. Too large, and you lose everything!
    view :      tuple
                Determines axis.
    invert_y :  bool, optional
                Currently only carried through to recursive retries.

    Returns
    -------
    list
                Coordinates of 2d circumference
                e.g. `[(x1, y1), (x2, y2), (x3, y3), ...]`

    """

    def add_edge(edges, edge_points, coords, i, j):
        """Add line between the i-th and j-th points."""
        if (i, j) in edges or (j, i) in edges:
            # already added
            return
        edges.add((i, j))
        edge_points.append(coords[[i, j]])

    accepted_views = ["x", "z", "y", "-x", "-z", "-y"]

    for ax in view:
        if ax not in accepted_views:
            raise ValueError(f'Unable to parse "{ax}" view')

    try:
        from shapely.ops import unary_union, polygonize  # type: ignore
        import shapely.geometry as geometry  # type: ignore
    except ModuleNotFoundError:
        raise ModuleNotFoundError("This function needs the shapely>=1.8.0")

    coords: np.ndarray

    # Renamed from `map` to avoid shadowing the builtin.
    ax_map = {"x": 0, "y": 1, "z": 2}

    x_ix = ax_map[view[0].replace("-", "").replace("+", "")]
    y_ix = ax_map[view[1].replace("-", "").replace("+", "")]

    coords = self.vertices[:, [x_ix, y_ix]]

    tri = scipy.spatial.Delaunay(coords)
    edges: set = set()
    edge_points: list = []
    # loop over triangles:
    # ia, ib, ic = indices of corner points of the triangle
    # Note that "vertices" property was renamed to "simplices"
    for ia, ib, ic in getattr(tri, "simplices", getattr(tri, "vertices", [])):
        pa: np.ndarray = coords[ia]  # type: ignore
        pb: np.ndarray = coords[ib]  # type: ignore
        pc: np.ndarray = coords[ic]  # type: ignore
        # Lengths of sides of triangle
        a = math.sqrt((pa[0] - pb[0]) ** 2 + (pa[1] - pb[1]) ** 2)  # type: ignore
        b = math.sqrt((pb[0] - pc[0]) ** 2 + (pb[1] - pc[1]) ** 2)  # type: ignore
        c = math.sqrt((pc[0] - pa[0]) ** 2 + (pc[1] - pa[1]) ** 2)  # type: ignore
        # Semiperimeter of triangle
        s = (a + b + c) / 2.0
        # Area of triangle by Heron's formula; clamp the radicand at zero
        # to guard against tiny negative values from floating-point error.
        area = math.sqrt(max(s * (s - a) * (s - b) * (s - c), 0.0))
        # Skip degenerate (zero-area) triangles which would otherwise
        # raise a ZeroDivisionError when computing the circumradius.
        if area == 0:
            continue
        circum_r = a * b * c / (4.0 * area)
        # Here's the radius filter.
        if circum_r < 1.0 / alpha:
            add_edge(edges, edge_points, coords, ia, ib)
            add_edge(edges, edge_points, coords, ib, ic)
            add_edge(edges, edge_points, coords, ic, ia)

    m = geometry.MultiLineString(edge_points)
    triangles = list(polygonize(m))
    concave_hull = unary_union(triangles)

    # Try with current settings, if this is not successful, try again
    # with lower alpha
    try:
        return list(concave_hull.exterior.coords)
    except AttributeError:
        return self.to_2d(alpha=alpha / 10, view=view, invert_y=invert_y)
    except BaseException:
        raise

Save volume as two separate csv files containing vertices and faces.

PARAMETER DESCRIPTION
filename
        Filename to use. Will get a `_vertices.csv` and
        `_faces.csv` suffix.

TYPE: str

**kwargs
        Keyword arguments passed to `csv.reader`.

DEFAULT: {}

Source code in navis/core/volumes.py
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
def to_csv(self, filename: str, **kwargs) -> None:
    """Save volume as two separate csv files containing vertices and faces.

    Parameters
    ----------
    filename :      str
                    Filename to use. Will get a `_vertices.csv` and
                    `_faces.csv` suffix.
    **kwargs
                    Keyword arguments passed to `csv.writer`.

    """
    # `**kwargs` used to be silently ignored (the old docstring claimed
    # they went to `csv.reader`); they are now forwarded to `csv.writer`.
    for data, suffix in zip(
        [self.faces, self.vertices], ["_faces.csv", "_vertices.csv"]
    ):
        # `newline=""` per the csv module docs - prevents spurious blank
        # rows on Windows.
        with open(filename + suffix, "w", newline="") as csvfile:
            writer = csv.writer(csvfile, **kwargs)
            writer.writerows(data)

Save volume as json file.

PARAMETER DESCRIPTION
filename
        Filename to use.

TYPE: str

Source code in navis/core/volumes.py
280
281
282
283
284
285
286
287
288
289
290
291
292
def to_json(self, filename: str) -> None:
    """Save volume as json file.

    Parameters
    ----------
    filename :      str
                    Filename to use.

    """
    # Arrays must be converted to plain lists to be JSON-serializable.
    payload = {"vertices": self.vertices.tolist(), "faces": self.faces.tolist()}
    with open(filename, "w") as f:
        json.dump(payload, f)

Use trimesh to try and fix issues (holes/normals).

Source code in navis/core/volumes.py
673
674
675
676
677
678
679
680
681
682
683
684
def validate(self):
    """Use trimesh to try and fix issues (holes/normals)."""
    # Nothing to do for an already-valid mesh.
    if self.is_volume:
        return

    logger.info("Mesh not valid, attempting to fix")
    self.fill_holes()
    self.fix_normals()

    if not self.is_volume:
        raise utils.VolumeError(
            "Mesh is not a volume "
            "(e.g. not watertight, incorrect "
            "winding) and could not be fixed."
        )

Neuron represented as voxels.

PARAMETER DESCRIPTION
x
        Data to construct neuron from:
         - a 2D (N, 3) array of voxel positions (x, y, z)
         - a 2D (N, 4) array of voxel positions + values (x, y, z, value)
         - a 3D (N, M, J) array representing the voxel grid

TYPE: Union[np.ndarray]

offset
        An (optional) offset in voxels. This is useful to keep the
        voxel grid small while still maintaining correct positioning
        e.g. for plotting.

TYPE: (3, ) array DEFAULT: None

cache
        Whether to cache different representations (i.e. grid
        and voxels) of the data. Set to False to save some memory.

TYPE: bool DEFAULT: True

units
        Units (scales) for voxels. Defaults to `1` (dimensionless).
        Strings must be parsable by pint: e.g. "nm", "um",
        "micrometer" or "8 nanometers".

TYPE: str | pint.Units | pint.Quantity DEFAULT: None

**metadata
        Any additional data to attach to neuron.

DEFAULT: {}

Source code in navis/core/voxel.py
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
class VoxelNeuron(BaseNeuron):
    """Neuron represented as voxels.

    Parameters
    ----------
    x
                    Data to construct neuron from:
                     - a 2D (N, 3) array of voxel positions (x, y, z)
                     - a 2D (N, 4) array of voxel positions + values (x, y, z, value)
                     - a 3D (N, M, J) array representing the voxel grid

    offset :        (3, ) array, optional
                    An (optional) offset in voxels. This is useful to keep the
                    voxel grid small while still maintaining correct positioning
                    e.g. for plotting.
    cache :         bool
                    Whether to cache different representations (i.e. grid
                    and voxels) of the data. Set to False to save some memory.
    units :         str | pint.Units | pint.Quantity
                    Units (scales) for voxels. Defaults to `1` (dimensionless).
                    Strings must be parsable by pint: e.g. "nm", "um",
                    "micrometer" or "8 nanometers".
    **metadata
                    Any additional data to attach to neuron.

    """

    connectors: Optional[pd.DataFrame]

    #: (N, 3) array of x/y/z voxels locations
    voxels: np.ndarray
    #: (N, ) array of values for each voxel
    values: np.ndarray
    # (N, M, K) voxel grid
    grid: np.ndarray
    # shape of voxel grid
    shape: tuple

    soma: Optional[Union[list, np.ndarray]]

    #: Attributes used for neuron summary
    SUMMARY_PROPS = ['type', 'name', 'units', 'shape', 'dtype']

    #: Attributes to be used when comparing two neurons.
    EQ_ATTRIBUTES = ['name', 'shape', 'dtype']

    #: Temporary attributes that need clearing when neuron data changes
    TEMP_ATTR = ['_memory_usage', '_shape', '_voxels', '_grid']

    #: Core data table(s) used to calculate hash
    CORE_DATA = ['_data']

    def __init__(self,
                 x: Union[np.ndarray],
                 offset: Optional[np.ndarray] = None,
                 cache: bool = True,
                 units: Optional[Union[pint.Unit, str]] = None,
                 **metadata
                 ):
        """Initialize Voxel Neuron."""
        super().__init__()

        if not isinstance(x, (np.ndarray, type(None))):
            raise utils.ConstructionError(f'Unable to construct VoxelNeuron from "{type(x)}".')

        if isinstance(x, np.ndarray):
            # Accept (N, 3) voxels, (N, 4) voxels + values, or a 3D grid
            if x.ndim == 2 and x.shape[1] in [3, 4]:
                # Contiguous arrays are required for hashing and we save a lot
                # of time by doing this once up-front
                self._data = np.ascontiguousarray(x)
            elif x.ndim == 3:
                self._data = np.ascontiguousarray(x)
            else:
                raise utils.ConstructionError(f'Unable to construct VoxelNeuron from {x.shape} array.')

        # Attach any additional metadata as attributes
        for k, v in metadata.items():
            try:
                setattr(self, k, v)
            except AttributeError:
                raise AttributeError(f"Unable to set neuron's `{k}` attribute.")

        self.cache = cache
        self.units = units
        # The offset setter normalizes `None` to (0, 0, 0)
        self.offset = offset

    def __getstate__(self):
        """Get state (used e.g. for pickling)."""
        state = {k: v for k, v in self.__dict__.items() if not callable(v)}

        SKIP = []
        for s in SKIP:
            if s in state:
                _ = state.pop(s)

        return state

    def __setstate__(self, d):
        """Update state (used e.g. for pickling)."""
        self.__dict__.update(d)

    def __truediv__(self, other, copy=True):
        """Implement division for coordinates (units, connectors, offset)."""
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            # If a number, consider this an offset for coordinates
            n = self.copy() if copy else self

            # Convert units
            # Note: .to_compact() throws a RuntimeWarning and returns unchanged
            # values when `units` is an iterable
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                n.units = (n.units / other).to_compact()

            n.offset = n.offset / other
            if n.has_connectors:
                n.connectors.loc[:, ['x', 'y', 'z']] /= other

            # Clear cached properties on the (possibly copied) neuron we
            # actually modified - not on `self`
            n._clear_temp_attr()

            return n
        return NotImplemented

    def __mul__(self, other, copy=True):
        """Implement multiplication for coordinates (units, connectors, offset)."""
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            # If a number, consider this an offset for coordinates
            n = self.copy() if copy else self

            # Convert units
            # Note: .to_compact() throws a RuntimeWarning and returns unchanged
            # values when `units` is an iterable
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                n.units = (n.units * other).to_compact()

            n.offset = n.offset * other
            if n.has_connectors:
                n.connectors.loc[:, ['x', 'y', 'z']] *= other

            # Clear caches on the modified neuron, not on `self`
            n._clear_temp_attr()

            return n
        return NotImplemented

    def __add__(self, other, copy=True):
        """Implement addition for coordinates (offset, connectors)."""
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            # If a number, consider this an offset for coordinates
            n = self.copy() if copy else self

            n.offset = n.offset + other
            if n.has_connectors:
                n.connectors.loc[:, ['x', 'y', 'z']] += other

            # Clear caches on the modified neuron, not on `self`
            n._clear_temp_attr()

            return n
        return NotImplemented

    def __sub__(self, other, copy=True):
        """Implement subtraction for coordinates (offset, connectors)."""
        if isinstance(other, numbers.Number) or utils.is_iterable(other):
            # If a number, consider this an offset for coordinates
            n = self.copy() if copy else self

            n.offset = n.offset - other
            if n.has_connectors:
                n.connectors.loc[:, ['x', 'y', 'z']] -= other

            # Clear caches on the modified neuron, not on `self`
            n._clear_temp_attr()

            return n
        return NotImplemented

    @property
    def _base_data_type(self) -> str:
        """Type of data (grid or voxels) underlying this neuron."""
        if self._data.ndim == 3:
            return 'grid'
        else:
            return 'voxels'

    @property
    def dtype(self) -> type:
        """Data type of voxel values."""
        return self._data.dtype

    @property
    def bbox(self) -> np.ndarray:
        """Bounding box (includes connectors) in units."""
        mn = self.offset
        if self._base_data_type == 'voxels':
            mx = np.max(self.voxels, axis=0) * self.units.magnitude + self.offset
        else:
            mx = np.array(self.grid.shape) * self.units.magnitude + self.offset

        if self.has_connectors:
            cn_mn = np.min(self.connectors[['x', 'y', 'z']].values, axis=0)
            cn_mx = np.max(self.connectors[['x', 'y', 'z']].values, axis=0)

            mn = np.min(np.vstack((mn, cn_mn)), axis=0)
            mx = np.max(np.vstack((mx, cn_mx)), axis=0)

        return np.vstack((mn, mx)).T

    @property
    @add_units(compact=True, power=3)
    def volume(self) -> float:
        """Volume of neuron."""
        # Get volume of a single voxel: x * y * z side lengths
        # (previously multiplied the z-scale twice instead of using y)
        voxel_volume = self.units_xyz[0] * self.units_xyz[1] * self.units_xyz[2]
        return (self.nnz * voxel_volume).to_compact()

    @property
    @temp_property
    def voxels(self):
        """Voxels making up the neuron."""
        if self._base_data_type == 'voxels':
            return self._data[:, :3]

        if hasattr(self, '_voxels'):
            return self._voxels

        voxels = np.dstack(np.where(self._data))[0]
        if self.cache:
            self._voxels = voxels
        return voxels

    @voxels.setter
    def voxels(self, voxels):
        if not isinstance(voxels, np.ndarray):
            raise TypeError(f'Voxels must be numpy array, got "{type(voxels)}"')
        if voxels.ndim != 2 or voxels.shape[1] != 3:
            raise ValueError('Voxels must be (N, 3) array')
        if 'float' in str(voxels.dtype):
            voxels = voxels.astype(np.int64)
        self._data = voxels
        self._clear_temp_attr()

    @property
    @temp_property
    def grid(self):
        """Voxel grid representation."""
        if self._base_data_type == 'grid':
            return self._data

        if hasattr(self, '_grid'):
            return self._grid

        grid = np.zeros(self.shape, dtype=self.values.dtype)
        grid[self._data[:, 0],
             self._data[:, 1],
             self._data[:, 2]] = self.values

        if self.cache:
            self._grid = grid
        return grid

    @grid.setter
    def grid(self, grid):
        if not isinstance(grid, np.ndarray):
            raise TypeError(f'Grid must be numpy array, got "{type(grid)}"')
        if grid.ndim != 3:
            raise ValueError('Grid must be 3D array')
        self._data = grid
        self._clear_temp_attr()

    @property
    @temp_property
    def values(self):
        """Values for each voxel (can be None)."""
        if self._base_data_type == 'grid':
            values = self._data.flatten()
            return values[values > 0]
        else:
            if getattr(self, '_values', None) is not None:
                return self._values
            else:
                # Without explicit values every voxel counts as 1
                return np.ones(self._data.shape[0])

    @values.setter
    def values(self, values):
        if self._base_data_type == 'grid':
            raise ValueError('Unable to set values for VoxelNeurons that were '
                             'initialized with a grid')

        if isinstance(values, type(None)):
            if hasattr(self, '_values'):
                delattr(self, '_values')
            return

        if not isinstance(values, np.ndarray):
            raise TypeError(f'Values must be numpy array, got "{type(values)}"')
        elif values.ndim != 1 or values.shape[0] != self.voxels.shape[0]:
            raise ValueError('Values must be (N, ) array of the same length as voxels')

        self._values = values
        self._clear_temp_attr()

    @property
    def offset(self) -> np.ndarray:
        """Offset (in voxels)."""
        return self._offset

    @offset.setter
    def offset(self, offset):
        if isinstance(offset, type(None)):
            self._offset = np.array((0, 0, 0))
        else:
            offset = np.asarray(offset)
            if offset.ndim != 1 or offset.shape[0] != 3:
                raise ValueError('Offset must be (3, ) array of x/y/z coordinates.')
            self._offset = offset

        self._clear_temp_attr()

    @property
    @temp_property
    def shape(self):
        """Shape of voxel grid."""
        if not hasattr(self, '_shape'):
            if self._base_data_type == 'voxels':
                self._shape = tuple(self.voxels.max(axis=0) + 1)
            else:
                self._shape = self._data.shape
        return self._shape

    @property
    def type(self) -> str:
        """Neuron type."""
        return 'navis.VoxelNeuron'

    @property
    def density(self) -> float:
        """Fraction of filled voxels."""
        # np.product was deprecated (and removed in NumPy 2.0) - use np.prod
        return self.nnz / np.prod(self.shape)

    @property
    def nnz(self) -> int:
        """Number of non-zero voxels."""
        return self.count_nonzero()

    def count_nonzero(self) -> int:
        """Count non-zero voxels."""
        if self._base_data_type == "grid":
            return np.count_nonzero(self.grid)
        elif self._base_data_type == "voxels":
            return np.count_nonzero(self.values)

        raise TypeError(f"Unexpected data type: {self._base_data_type}")

    def copy(self) -> 'VoxelNeuron':
        """Return a copy of the neuron."""
        no_copy = ['_lock']

        # Generate new neuron
        x = self.__class__(None)
        # Override with this neuron's data
        x.__dict__.update({k: copy.copy(v) for k, v in self.__dict__.items() if k not in no_copy})

        return x

    def strip(self, inplace=False) -> 'VoxelNeuron':
        """Strip empty voxels (leading/trailing planes of zeros)."""
        x = self
        if not inplace:
            x = x.copy()

        # Get offset until first filled voxel
        voxels = x.voxels
        mn = voxels.min(axis=0)
        x.offset = np.array(x.offset) + mn * x.units_xyz.magnitude

        # Drop empty planes
        if x._base_data_type == 'voxels':
            x._data = voxels - mn
        else:
            mx = voxels.max(axis=0)
            x._data = x._data[mn[0]: mx[0] + 1,
                              mn[1]: mx[1] + 1,
                              mn[2]: mx[2] + 1]

        if not inplace:
            return x

    def threshold(self, threshold, inplace=False) -> 'VoxelNeuron':
        """Drop below-threshold voxels."""
        x = self
        if not inplace:
            x = x.copy()

        if x._base_data_type == 'grid':
            x._data[x._data < threshold] = 0
        else:
            x._data = x._data[x.values >= threshold]

        x._clear_temp_attr()

        if not inplace:
            return x

    def min(self) -> Union[int, float]:
        """Minimum value (excludes zeros)."""
        return self.values.min()

    def max(self) -> Union[int, float]:
        """Maximum value (excludes zeros)."""
        return self.values.max()

Bounding box (includes connectors) in units.

Fraction of filled voxels.

Data type of voxel values.

Voxel grid representation.

Number of non-zero voxels.

Offset (in voxels).

Shape of voxel grid.

Neuron type.

Values for each voxel (can be None).

Volume of neuron.

Voxels making up the neuron.

Initialize Voxel Neuron.

Source code in navis/core/voxel.py
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
def __init__(self,
             x: Union[np.ndarray],
             offset: Optional[np.ndarray] = None,
             cache: bool = True,
             units: Union[pint.Unit, str] = None,
             **metadata
             ):
    """Initialize Voxel Neuron.

    `x` may be an (N, 3)/(N, 4) voxel array, a 3D grid array or None
    (the latter is used internally e.g. by `copy()`).
    """
    super().__init__()

    if not isinstance(x, (np.ndarray, type(None))):
        raise utils.ConstructionError(f'Unable to construct VoxelNeuron from "{type(x)}".')

    if isinstance(x, np.ndarray):
        # (N, 3) = voxel coordinates; (N, 4) = coordinates + values
        if x.ndim == 2 and x.shape[1] in [3, 4]:
            # Contiguous arrays are required for hashing and we save a lot
            # of time by doing this once up-front
            self._data = np.ascontiguousarray(x)
        elif x.ndim == 3:
            self._data = np.ascontiguousarray(x)
        else:
            raise utils.ConstructionError(f'Unable to construct VoxelNeuron from {x.shape} array.')

    # Attach any extra keyword arguments as attributes on the neuron
    for k, v in metadata.items():
        try:
            setattr(self, k, v)
        except AttributeError:
            raise AttributeError(f"Unable to set neuron's `{k}` attribute.")

    self.cache = cache
    self.units = units
    # The offset setter normalizes None to (0, 0, 0)
    self.offset = offset

Return a copy of the neuron.

Source code in navis/core/voxel.py
396
397
398
399
400
401
402
403
404
405
def copy(self) -> 'VoxelNeuron':
    """Return a copy of the neuron."""
    # Attributes that must NOT be shallow-copied into the new neuron
    no_copy = ['_lock']

    # Generate new neuron (None -> empty shell, no voxel data yet)
    x = self.__class__(None)
    # Override with this neuron's data (shallow copy of each attribute)
    x.__dict__.update({k: copy.copy(v) for k, v in self.__dict__.items() if k not in no_copy})

    return x

Count non-zero voxels.

Source code in navis/core/voxel.py
387
388
389
390
391
392
393
394
def count_nonzero(self) -> int:
    """Count non-zero voxels.

    For grid-backed neurons this counts non-zero grid entries; for
    voxel-backed neurons it counts voxels with a non-zero value.
    """
    if self._base_data_type == "grid":
        return np.count_nonzero(self.grid)
    elif self._base_data_type == "voxels":
        return np.count_nonzero(self.values)

    # Should be unreachable: _base_data_type only returns 'grid'/'voxels'
    raise TypeError(f"Unexpected data type: {self._base_data_type}")

Maximum value (excludes zeros).

Source code in navis/core/voxel.py
450
451
452
def max(self) -> Union[int, float]:
    """Maximum value (excludes zeros)."""
    # Delegates to the `values` property (grid zeros are filtered out there)
    return self.values.max()

Minimum value (excludes zeros).

Source code in navis/core/voxel.py
446
447
448
def min(self) -> Union[int, float]:
    """Minimum value (excludes zeros)."""
    # Delegates to the `values` property (grid zeros are filtered out there)
    return self.values.min()

Strip empty voxels (leading/trailing planes of zeros).

Source code in navis/core/voxel.py
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
def strip(self, inplace=False) -> 'VoxelNeuron':
    """Strip empty voxels (leading/trailing planes of zeros).

    Shifts `offset` so absolute positions are preserved. Returns the
    stripped neuron unless ``inplace=True`` (then returns None).
    """
    x = self
    if not inplace:
        x = x.copy()

    # Get offset until first filled voxel
    voxels = x.voxels
    mn = voxels.min(axis=0)
    # Move the trimmed margin into the offset (scaled to units)
    x.offset = np.array(x.offset) + mn * x.units_xyz.magnitude

    # Drop empty planes
    if x._base_data_type == 'voxels':
        x._data = voxels - mn
    else:
        mx = voxels.max(axis=0)
        x._data = x._data[mn[0]: mx[0] + 1,
                          mn[1]: mx[1] + 1,
                          mn[2]: mx[2] + 1]

    if not inplace:
        return x

Drop below-threshold voxels.

Source code in navis/core/voxel.py
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
def threshold(self, threshold, inplace=False) -> 'VoxelNeuron':
    """Drop below-threshold voxels.

    Returns the thresholded neuron unless ``inplace=True`` (then
    returns None).
    """
    x = self
    if not inplace:
        x = x.copy()

    if x._base_data_type == 'grid':
        # Zero out sub-threshold grid entries in place
        x._data[x._data < threshold] = 0
    else:
        # Keep only voxels whose value passes the threshold
        x._data = x._data[x.values >= threshold]

    x._clear_temp_attr()

    if not inplace:
        return x

Per arbor segregation index (SI).

The segregation index (SI) as established by Schneider-Mizell et al. (eLife, 2016) is a measure for how polarized a neuron is. SI of 1 indicates total segregation of inputs and outputs into dendrites and axon, respectively. SI of 0 indicates homogeneous distribution. Here, we apply this to each arbour within a neuron by asking "If we were to cut a neuron at this node, what would the SI of the two resulting fragments be?"

PARAMETER DESCRIPTION
x
    Neuron(s) to calculate segregation indices for. Must have
    connectors!

TYPE: TreeNeuron | MeshNeuron | NeuronList

RETURNS DESCRIPTION
neuron

Adds "segregation_index" as column in the node table (for TreeNeurons) or as .segregation_index property (for MeshNeurons).

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> n.reroot(n.soma, inplace=True)
>>> _ = navis.arbor_segregation_index(n)
>>> n.nodes.segregation_index.max().round(3)
0.277
See Also

navis.segregation_index Calculate segregation score (polarity) between two fragments of a neuron. navis.synapse_flow_centrality Calculate synapse flow centrality after Schneider-Mizell et al. navis.bending_flow Variation on the Schneider-Mizell et al. synapse flow. navis.split_axon_dendrite Split the neuron into axon, dendrite and primary neurite.

Source code in navis/morpho/mmetrics.py
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
@utils.map_neuronlist(desc="Calc. seg.", allow_parallel=True)
@utils.meshneuron_skeleton(method="node_properties", node_props=["segregation_index"])
def arbor_segregation_index(x: "core.NeuronObject") -> "core.NeuronObject":
    """Per arbor segregation index (SI).

    The segregation index (SI) as established by Schneider-Mizell et al. (eLife,
    2016) is a measure for how polarized a neuron is. SI of 1 indicates total
    segregation of inputs and outputs into dendrites and axon, respectively.
    SI of 0 indicates homogeneous distribution. Here, we apply this to
    each arbour within a neuron by asking "If we were to cut a neuron at this
    node, what would the SI of the two resulting fragments be?"

    Parameters
    ----------
    x :         TreeNeuron | MeshNeuron | NeuronList
                Neuron(s) to calculate segregation indices for. Must have
                connectors!

    Returns
    -------
    neuron
                Adds "segregation_index" as column in the node table (for
                TreeNeurons) or as `.segregation_index` property
                (for MeshNeurons).

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> n.reroot(n.soma, inplace=True)
    >>> _ = navis.arbor_segregation_index(n)
    >>> n.nodes.segregation_index.max().round(3)
    0.277

    See Also
    --------
    [`navis.segregation_index`][]
            Calculate segregation score (polarity) between two fragments of
            a neuron.
    [`navis.synapse_flow_centrality`][]
            Calculate synapse flow centrality after Schneider-Mizell et al.
    [`navis.bending_flow`][]
            Variation on the Schneider-Mizell et al. synapse flow.
    [`navis.split_axon_dendrite`][]
            Split the neuron into axon, dendrite and primary neurite.

    """
    if not isinstance(x, core.TreeNeuron):
        raise ValueError(f'Expected TreeNeuron(s), got "{type(x)}"')

    if not x.has_connectors:
        raise ValueError("Neuron must have connectors.")

    # Figure out how connector types are labeled
    cn_types = x.connectors.type.unique()
    if all(np.isin(["pre", "post"], cn_types)):
        pre, post = "pre", "post"
    elif all(np.isin([0, 1], cn_types)):
        pre, post = 0, 1
    else:
        raise ValueError(f"Unable to parse connector types for neuron {x.id}")

    # Get list of nodes with pre/postsynapses
    pre_node_ids = x.connectors[x.connectors.type == pre].node_id.values
    post_node_ids = x.connectors[x.connectors.type == post].node_id.values

    # Get list of points to calculate SI for:
    # branches points and their children plus nodes with connectors
    is_bp = x.nodes["type"].isin(["branch", "root"])
    is_bp_child = x.nodes.parent_id.isin(x.nodes.loc[is_bp, "node_id"].values)
    is_cn = x.nodes.node_id.isin(x.connectors.node_id)
    calc_node_ids = x.nodes[is_bp | is_bp_child | is_cn].node_id.values

    # We will be processing a super downsampled version of the neuron to speed
    # up calculations
    current_level = logger.level
    logger.setLevel("ERROR")
    y = x.downsample(factor=float("inf"), preserve_nodes=calc_node_ids, inplace=False)
    logger.setLevel(current_level)

    # Get number of pre/postsynapses distal to each branch's children
    # Note that we're using geodesic matrix here because it is much more
    # efficient than `distal_to` for larger queries/neurons
    dists = graph.geodesic_matrix(
        y, from_=np.append(pre_node_ids, post_node_ids), directed=True, weight=None
    )
    # Finite geodesic distance (directed, towards root) == distal
    distal = dists[calc_node_ids] < np.inf

    # Since nodes can have multiple pre-/postsynapses but they show up only
    # once in distal, we have to reindex to reflect the correct number of synapses
    distal_pre = distal.loc[pre_node_ids]
    distal_post = distal.loc[post_node_ids]

    # Sum up columns: now each row represents the number of pre/postsynapses
    # distal to that node
    distal_pre_sum = distal_pre.sum(axis=0)
    distal_post_sum = distal_post.sum(axis=0)

    # Now go over all branch points and check flow between branches
    # (centrifugal) vs flow from branches to root (centripetal)
    SI = {}
    total_pre = pre_node_ids.shape[0]
    total_post = post_node_ids.shape[0]
    for n in calc_node_ids:
        # Get the SI if we were to cut at this point
        post = distal_post_sum[n]
        pre = distal_pre_sum[n]
        # Two fragments: distal to the cut vs everything else
        n_syn = [
            {"presynapses": pre, "postsynapses": post},
            {"presynapses": total_pre - pre, "postsynapses": total_post - post},
        ]
        SI[n] = segregation_index(n_syn)

    # At this point there are only segregation indices for branch points and
    # their children. Let's complete that mapping by adding SI for the nodes
    # between branch points.
    for s in x.small_segments:
        # Segments' orientation goes from distal -> proximal
        # Each segment will have at least its last (branch point) and
        # second last (branch point's child) node mapped

        # Drop first (distal) node if it is not a leaf
        if s[0] in SI:
            s = s[1:]

        # If shorter than 3 nodes all nodes should already have an SI
        if len(s) <= 2:
            continue

        # Update remaining nodes with the SI of the first child
        this_SI = SI[s[-2]]
        SI.update({n: this_SI for n in s[:-2]})

    # Add segregation index to node table
    x.nodes["segregation_index"] = x.nodes.node_id.map(SI)

    return x

Compute an average from a list of skeletons.

This is a very simple implementation which may give odd results if used on complex neurons. Works fine on e.g. backbones or tracts.

PARAMETER DESCRIPTION
x
        Neurons to be averaged.

TYPE: NeuronList

limit
        Max distance for nearest neighbour search. If the neurons
        have `.units` set, you can also pass a string such as e.g.
        "2 microns".

TYPE: int | str DEFAULT: 10

base_neuron
        Neuron to use as template for averaging. If not provided,
        the first neuron in the list is used as template!

TYPE: neuron id | TreeNeuron DEFAULT: None

RETURNS DESCRIPTION
TreeNeuron

Examples:

>>> # Get a bunch of neurons
>>> import navis
>>> da2 = navis.example_neurons()
>>> # Prune down to longest neurite
>>> for n in da2:
...     if n.has_soma:
...         n.reroot(n.soma, inplace=True)
>>> da2_pr = da2.prune_by_longest_neurite(inplace=False)
>>> # Make average
>>> da2_avg = navis.average_skeletons(da2_pr, limit=10e3)
>>> # Plot
>>> da2.plot3d()
>>> da2_avg.plot3d()
Source code in navis/morpho/manipulation.py
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
def average_skeletons(
    x: "core.NeuronList",
    limit: Union[int, str] = 10,
    base_neuron: Optional[Union[int, "core.TreeNeuron"]] = None,
) -> "core.TreeNeuron":
    """Compute an average from a list of skeletons.

    This is a very simple implementation which may give odd results if used
    on complex neurons. Works fine on e.g. backbones or tracts.

    Note that this attaches a `.tree` KDTree to every neuron in `x` as a
    side effect.

    Parameters
    ----------
    x :             NeuronList
                    Neurons to be averaged.
    limit :         int | str
                    Max distance for nearest neighbour search. If the neurons
                    have `.units` set, you can also pass a string such as e.g.
                    "2 microns".
    base_neuron :   neuron id | TreeNeuron, optional
                    Neuron to use as template for averaging. If not provided,
                    the first neuron in the list is used as template!

    Returns
    -------
    TreeNeuron

    Examples
    --------
    >>> # Get a bunch of neurons
    >>> import navis
    >>> da2 = navis.example_neurons()
    >>> # Prune down to longest neurite
    >>> for n in da2:
    ...     if n.has_soma:
    ...         n.reroot(n.soma, inplace=True)
    >>> da2_pr = da2.prune_by_longest_neurite(inplace=False)
    >>> # Make average
    >>> da2_avg = navis.average_skeletons(da2_pr, limit=10e3)
    >>> # Plot
    >>> da2.plot3d() # doctest: +SKIP
    >>> da2_avg.plot3d() # doctest: +SKIP

    """
    if not isinstance(x, core.NeuronList):
        raise TypeError(f'Need NeuronList, got "{type(x)}"')

    if len(x) < 2:
        raise ValueError("Need at least 2 neurons to average!")

    # Map limit into unit space, if applicable
    limit = x[0].map_units(limit, on_error="raise")

    # Generate KDTrees for each neuron
    for n in x:
        n.tree = graph.neuron2KDTree(n, tree_type="c", data="nodes")  # type: ignore  # TreeNeuron has no tree

    # Set base for average: we will use this neurons nodes to query
    # the KDTrees
    if isinstance(base_neuron, core.TreeNeuron):
        bn = base_neuron.copy()
    elif isinstance(base_neuron, int):
        bn = x[base_neuron].copy()
    elif isinstance(base_neuron, type(None)):
        bn = x[0].copy()
    else:
        raise ValueError(
            f'Unable to interpret base_neuron of type "{type(base_neuron)}"'
        )

    base_nodes = bn.nodes[["x", "y", "z"]].values
    other_neurons = x[[n != bn for n in x]]

    # Make sure these stay 2-dimensional arrays -> will add a column for each
    # "other" neuron
    base_x = base_nodes[:, 0:1]
    base_y = base_nodes[:, 1:2]
    base_z = base_nodes[:, 2:3]

    # For each "other" neuron, collect nearest neighbour coordinates
    for n in other_neurons:
        # `query` returns inf distance when no neighbour is within `limit`
        nn_dist, nn_ix = n.tree.query(base_nodes, k=1, distance_upper_bound=limit)

        # Translate indices into coordinates
        # First, make empty array
        this_coords = np.zeros((len(nn_dist), 3))
        # Set coords without a nearest neighbour within distances to "None"
        # (assigning None to a float array turns these rows into NaN)
        this_coords[nn_dist == float("inf")] = None
        # Fill in coords of nearest neighbours
        this_coords[nn_dist != float("inf")] = n.tree.data[
            nn_ix[nn_dist != float("inf")]
        ]
        # Add coords to base coords
        base_x = np.append(base_x, this_coords[:, 0:1], axis=1)
        base_y = np.append(base_y, this_coords[:, 1:2], axis=1)
        base_z = np.append(base_z, this_coords[:, 2:3], axis=1)

    # Calculate means
    mean_x = np.mean(base_x, axis=1)
    mean_y = np.mean(base_y, axis=1)
    mean_z = np.mean(base_z, axis=1)

    # If any of the base coords has NO nearest neighbour within limit
    # whatsoever, the average of that row will be "NaN" -> in this case we
    # will fall back to the base coordinate
    mean_x[np.isnan(mean_x)] = base_nodes[np.isnan(mean_x), 0]
    mean_y[np.isnan(mean_y)] = base_nodes[np.isnan(mean_y), 1]
    mean_z[np.isnan(mean_z)] = base_nodes[np.isnan(mean_z), 2]

    # Change coordinates accordingly
    bn.nodes["x"] = mean_x
    bn.nodes["y"] = mean_y
    bn.nodes["z"] = mean_z

    return bn

Calculate synapse "bending" flow.

This is a variation of the algorithm for calculating synapse flow from Schneider-Mizell et al. (eLife, 2016).

The way this implementation works is by iterating over each branch point and counting the number of pre->post synapse paths that "flow" from one child branch to the other(s).

Notes

This algorithm appears to be more reliable than synapse flow centrality for identifying the main branch point of neurons that have incompletely annotated synapses.

PARAMETER DESCRIPTION
x
    Neuron(s) to calculate bending flow for. Must have connectors!

TYPE: TreeNeuron | MeshNeuron | NeuronList

RETURNS DESCRIPTION
neuron

Adds "bending_flow" as column in the node table (for TreeNeurons) or as .bending_flow property (for MeshNeurons).

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> n.reroot(n.soma, inplace=True)
>>> _ = navis.bending_flow(n)
>>> n.nodes.bending_flow.max()
785645
See Also

navis.synapse_flow_centrality Calculate synapse flow centrality after Schneider-Mizell et al. navis.segregation_index Calculate segregation score (polarity). navis.arbor_segregation_index Calculate the a by-arbor segregation index. navis.split_axon_dendrite Split the neuron into axon, dendrite and primary neurite.

Source code in navis/morpho/mmetrics.py
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
@utils.map_neuronlist(desc="Calc. flow", allow_parallel=True)
@utils.meshneuron_skeleton(
    method="node_properties",
    include_connectors=True,
    heal=True,
    node_props=["bending_flow"],
)
def bending_flow(x: "core.NeuronObject") -> "core.NeuronObject":
    """Calculate synapse "bending" flow.

    This is a variation of the algorithm for calculating synapse flow from
    Schneider-Mizell et al. (eLife, 2016).

    The way this implementation works is by iterating over each branch point
    and counting the number of pre->post synapse paths that "flow" from one
    child branch to the other(s).

    Notes
    -----
    This algorithm appears to be more reliable than synapse flow
    centrality for identifying the main branch point for neurons that have
    incompletely annotated synapses.

    Parameters
    ----------
    x :         TreeNeuron | MeshNeuron | NeuronList
                Neuron(s) to calculate bending flow for. Must have connectors!

    Returns
    -------
    neuron
                Adds "bending_flow" as column in the node table (for
                TreeNeurons) or as `.bending_flow` property
                (for MeshNeurons).

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> n.reroot(n.soma, inplace=True)
    >>> _ = navis.bending_flow(n)
    >>> n.nodes.bending_flow.max()
    785645

    See Also
    --------
    [`navis.synapse_flow_centrality`][]
            Calculate synapse flow centrality after Schneider-Mizell et al.
    [`navis.segregation_index`][]
            Calculate segregation score (polarity).
    [`navis.arbor_segregation_index`][]
            Calculate the by-arbor segregation index.
    [`navis.split_axon_dendrite`][]
            Split the neuron into axon, dendrite and primary neurite.

    """
    if not isinstance(x, core.TreeNeuron):
        raise ValueError(f'Expected TreeNeuron(s), got "{type(x)}"')

    if not x.has_connectors:
        raise ValueError("Neuron must have connectors.")

    # Bending flow is only meaningful relative to the soma; warn (but proceed)
    # if the neuron is not rooted there.
    if np.any(x.soma) and not np.all(np.isin(x.soma, x.root)):
        logger.warning(f"Neuron {x.id} is not rooted to its soma!")

    # We will be processing a super downsampled version of the neuron to speed
    # up calculations. Connector-bearing nodes (and topology) are preserved,
    # so synapse counts are unaffected. Temporarily silence the logger to
    # suppress downsampling chatter.
    current_level = logger.level
    logger.setLevel("ERROR")
    y = x.downsample(factor=float("inf"), preserve_nodes="connectors", inplace=False)
    logger.setLevel(current_level)

    # Figure out how connector types are labeled: either "pre"/"post" strings
    # or the 0/1 integer convention.
    cn_types = y.connectors.type.unique()
    if all(np.isin(["pre", "post"], cn_types)):
        pre, post = "pre", "post"
    elif all(np.isin([0, 1], cn_types)):
        pre, post = 0, 1
    else:
        raise ValueError(f"Unable to parse connector types for neuron {y.id}")

    # Get list of nodes with pre/postsynapses (one entry per synapse, so node
    # IDs can appear multiple times)
    pre_node_ids = y.connectors[y.connectors.type == pre].node_id.values
    post_node_ids = y.connectors[y.connectors.type == post].node_id.values

    # Get list of branch_points
    bp_node_ids = y.nodes[y.nodes.type == "branch"].node_id.values.tolist()
    # Add root if it is also a branch point
    for root in y.root:
        if y.graph.degree(root) > 1:
            bp_node_ids += [root]

    # Get a list of childs of each branch point (in-edges point child -> parent)
    bp_childs = {t: [e[0] for e in y.graph.in_edges(t)] for t in bp_node_ids}
    childs = [tn for l in bp_childs.values() for tn in l]

    # Get number of pre/postsynapses distal to each branch's childs
    # Note that we're using geodesic matrix here because it is much more
    # efficient than `distal_to` for larger queries/neurons
    dists = graph.geodesic_matrix(
        y, from_=np.append(pre_node_ids, post_node_ids), directed=True, weight=None
    )
    # Finite directed distance from synapse node to child => synapse is distal
    distal = dists[childs] < np.inf

    # Since nodes can have multiple pre-/postsynapses but they show up only
    # once in distal, we have to reindex to reflect the correct
    # number of synapses
    distal_pre = distal.loc[pre_node_ids]
    distal_post = distal.loc[post_node_ids]

    # Sum up columns: now each row represents the number of pre/postsynapses
    # distal to that node
    distal_pre_sum = distal_pre.sum(axis=0)
    distal_post_sum = distal_post.sum(axis=0)

    # Now go over all branch points and check flow between branches
    # (centrifugal) vs flow from branches to root (centripetal)
    flow = {bp: 0 for bp in bp_childs}
    for bp in bp_childs:
        # We will use left/right to label the different branches here
        # (even if there is more than two). `permutations` yields each
        # ordered pair, so flow is counted in both directions.
        for left, right in itertools.permutations(bp_childs[bp], r=2):
            flow[bp] += distal_post_sum.loc[left] * distal_pre_sum.loc[right]

    # At this point there are only flows for the childs of branch points.
    # Let's complete that mapping by adding flow for the nodes
    # between branch points. Note: segments come from the original (full
    # resolution) neuron `x`, not the downsampled `y`.
    for s in x.small_segments:
        # Segments' orientation goes from distal -> proximal
        # Drop first (distal) node if it is not a leaf
        if s[0] in flow:
            s = s[1:]

        # Update remaining nodes with the flow of the first child
        this_flow = flow.get(s[-1], 0)
        flow.update({n: this_flow for n in s})

    # Map the computed flow onto the original neuron's node table
    # (nodes without a flow entry end up as NaN)
    x.nodes["bending_flow"] = x.nodes.node_id.map(flow)

    return x

Calculate vertex/node betweenness.

Betweenness is (roughly) defined by the number of shortest paths going through a vertex or an edge.

PARAMETER DESCRIPTION
x

TYPE: TreeNeuron | MeshNeuron | NeuronList

from_
        If provided will only consider paths from given nodes to
        root(s):
          - `leafs` will only use paths from leafs to the root
          - `branch_points` will only use paths from branch points
            to the root
          - `from_` can also be a list/array of node IDs
        Only implemented for `directed=True`!

TYPE: "leafs" | "branch_points" | iterable DEFAULT: None

directed
        Whether to use the directed or undirected graph.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
neuron

Adds "betweenness" as column in the node table (for TreeNeurons) or as .betweenness property (for MeshNeurons).

Examples:

>>> import navis
>>> n = navis.example_neurons(2, kind='skeleton')
>>> n.reroot(n.soma, inplace=True)
>>> _ = navis.betweeness_centrality(n)
>>> n[0].nodes.betweenness.max()
436866
>>> m = navis.example_neurons(1, kind='mesh')
>>> _ = navis.betweeness_centrality(m)
>>> m.betweenness.max()
59637
Source code in navis/morpho/mmetrics.py
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
@utils.map_neuronlist(desc="Calc. betweeness", allow_parallel=True)
@utils.meshneuron_skeleton(
    method="node_properties", reroot_soma=True, node_props=["betweenness"]
)
def betweeness_centrality(
    x: "core.NeuronObject",
    from_: Optional[Union[Literal["leafs"], Literal["branch_points"], Sequence]] = None,
    directed: bool = True,
) -> "core.NeuronObject":
    """Calculate vertex/node betweenness.

    Betweenness is (roughly) defined by the number of shortest paths going
    through a vertex or an edge.

    Parameters
    ----------
    x :             TreeNeuron | MeshNeuron | NeuronList
    from_ :         "leafs" | "branch_points" | iterable, optional
                    If provided will only consider paths from given nodes to
                    root(s):
                      - `leafs` will only use paths from leafs to the root
                      - `branch_points` will only use paths from branch points
                        to the root
                      - `from_` can also be a list/array of node IDs
                    Only implemented for `directed=True`!
    directed :      bool
                    Whether to use the directed or undirected graph.

    Returns
    -------
    neuron
                Adds "betweenness" as column in the node table (for
                TreeNeurons) or as `.betweenness` property
                (for MeshNeurons).

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(2, kind='skeleton')
    >>> n.reroot(n.soma, inplace=True)
    >>> _ = navis.betweeness_centrality(n)
    >>> n[0].nodes.betweenness.max()
    436866
    >>> m = navis.example_neurons(1, kind='mesh')
    >>> _ = navis.betweeness_centrality(m)
    >>> m.betweenness.max()
    59637

    """
    utils.eval_param(x, name="x", allowed_types=(core.TreeNeuron,))

    # `from_` can be a keyword ("leafs"/"branch_points") or a collection of
    # explicit node IDs - validate accordingly.
    if isinstance(from_, str):
        utils.eval_param(from_, name="from_", allowed_values=("leafs", "branch_points"))
    else:
        utils.eval_param(
            from_,
            name="from_",
            allowed_types=(type(None), np.ndarray, list, tuple, set),
        )

    G = x.igraph
    if isinstance(from_, type(None)):
        # Full betweenness over all vertex pairs, straight from igraph
        bc = dict(
            zip(G.vs.get_attribute_values("node_id"), G.betweenness(directed=directed))
        )
    else:
        if not directed:
            raise ValueError("`from_!=None` only implemented for `directed=True`")
        paths = []

        # NOTE(review): the degree-based selections below assume edges point
        # child -> parent (leafs: indegree 0; branch points: indegree >= 2;
        # roots: outdegree 0) - consistent with the root selection further down.
        if from_ == "leafs":
            sources = G.vs.select(_indegree=0)
        elif from_ == "branch_points":
            sources = G.vs.select(_indegree_ge=2)
        else:
            sources = G.vs.select(node_id_in=from_)

        roots = G.vs.select(_outdegree=0)
        for r in roots:
            # mode="in" walks against edge direction, i.e. from root out to
            # the sources
            paths += G.get_shortest_paths(r, to=sources, mode="in")
        # Drop too short paths (root -> direct child contributes nothing)
        paths = [p for p in paths if len(p) > 2]
        # Count how often each vertex appears on a path, excluding the
        # source endpoint itself
        flat_ix = [i for p in paths for i in p[:-1]]
        ix, counts = np.unique(flat_ix, return_counts=True)
        ids = [G.vs[i]["node_id"] for i in ix]
        # Default every node to 0 so that nodes on no path still get a value
        bc = {i: 0 for i in x.nodes.node_id.values}
        bc.update(dict(zip(ids, counts)))

    x.nodes["betweenness"] = x.nodes.node_id.map(bc).astype(int)

    return x

Break neuron into its connected components.

Neurons can consist of several disconnected fragments. This function turns these fragments into separate neurons.

PARAMETER DESCRIPTION
x
        Fragmented neuron.

TYPE: TreeNeuron | MeshNeuron

labels_only
        If True, will only label each node/vertex by which
        fragment it belongs to. For TreeNeurons, this adds a
        `"fragment"` column and for MeshNeurons, it adds a
        `.fragments` property.

TYPE: bool DEFAULT: False

min_size
        Fragments smaller than this (# of nodes/vertices) will be
        dropped. Ignored if `labels_only=True`.

TYPE: int DEFAULT: None

RETURNS DESCRIPTION
NeuronList
See Also

navis.heal_skeleton Use to heal fragmentation instead of breaking it up.

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> # Artificially disconnect parts of the neuron
>>> n.nodes.loc[100, 'parent_id'] = -1
>>> # Break into fragments
>>> frags = navis.break_fragments(n)
>>> len(frags)
2
Source code in navis/morpho/manipulation.py
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
def break_fragments(
    x: Union["core.TreeNeuron", "core.MeshNeuron"],
    labels_only: bool = False,
    min_size: Optional[int] = None,
) -> "core.NeuronList":
    """Break neuron into its connected components.

    Neurons can consist of several disconnected fragments. This function
    turns these fragments into separate neurons.

    Parameters
    ----------
    x :             TreeNeuron | MeshNeuron
                    Fragmented neuron.
    labels_only :   bool
                    If True, will only label each node/vertex by which
                    fragment it belongs to. For TreeNeurons, this adds a
                    `"fragment"` column and for MeshNeurons, it adds a
                    `.fragments` property.
    min_size :      int, optional
                    Fragments smaller than this (# of nodes/vertices) will be
                    dropped. Ignored if `labels_only=True`.

    Returns
    -------
    NeuronList

    See Also
    --------
    [`navis.heal_skeleton`][]
                Use to heal fragmentation instead of breaking it up.


    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> # Artificially disconnect parts of the neuron
    >>> n.nodes.loc[100, 'parent_id'] = -1
    >>> # Break into fragments
    >>> frags = navis.break_fragments(n)
    >>> len(frags)
    2

    """
    # Unwrap single-neuron lists
    if isinstance(x, core.NeuronList) and len(x) == 1:
        x = x[0]

    if not isinstance(x, (core.TreeNeuron, core.MeshNeuron)):
        raise TypeError(f'Expected Tree- or MeshNeuron, got "{type(x)}"')

    # Connected components, ordered largest first
    components = sorted(graph._connected_components(x), key=len, reverse=True)

    if labels_only:
        # Build node/vertex -> fragment index lookup
        labels = {}
        for frag_ix, cc in enumerate(components):
            for member in cc:
                labels[member] = frag_ix
        if isinstance(x, core.TreeNeuron):
            x.nodes["fragment"] = x.nodes.node_id.map(labels).astype(str)
        elif isinstance(x, core.MeshNeuron):
            x.fragments = np.array([labels[v] for v in range(x.n_vertices)]).astype(str)
        return x

    # Drop fragments below the size threshold
    if min_size:
        components = [cc for cc in components if len(cc) >= min_size]

    # Turn each remaining component into its own neuron
    progress = config.tqdm(
        components, desc="Breaking", disable=config.pbar_hide, leave=config.pbar_leave
    )
    return core.NeuronList(
        [subset.subset_neuron(x, list(cc), inplace=False) for cc in progress]
    )

Calculate the amount of cable of neuron A within distance of neuron B.

PARAMETER DESCRIPTION
a
    Neuron(s) for which to compute cable within distance. It is
    highly recommended to resample neurons to guarantee an even
    sampling rate.

TYPE: NeuronObject

dist
    Maximum distance. If the neurons have their `.units` set, you
    can also provide this as a string such as "2 microns".

TYPE: int | float DEFAULT: 2

method
    Method by which to calculate the overlapping cable between
    two cables:

      Assuming that neurons A and B have 300 and 150 um of cable
      within given distances, respectively:

        1. 'min' returns 150
        2. 'max' returns 300
        3. 'mean' returns 225
        4. 'forward' returns 300 (i.e. A->B)
        5. 'reverse' returns 150 (i.e. B->A)

TYPE: 'min' | 'max' | 'mean' | 'forward' | 'reverse' DEFAULT: 'min'

RETURNS DESCRIPTION
pandas.DataFrame

Matrix in which neurons A are rows, neurons B are columns. Cable within distance is given in the neuron's native units:

            neuronD  neuronE   neuronF  ...
neuronA         5        1         0
neuronB        10       20         5
neuronC         4        3        15
...

See Also

navis.resample_skeleton Use to resample neurons before calculating overlap.

Examples:

>>> import navis
>>> nl = navis.example_neurons(4)
>>> # Cable overlap is given in the neurons' units
>>> # Converting the example neurons from 8x8x8 voxel space into microns
>>> # make the results easier to interpret
>>> nl = nl.convert_units('um')
>>> # Resample to half a micron
>>> nl_res = nl.resample('.5 micron', inplace=False)
>>> # Get overlapping cable within 2 microns
>>> ol = navis.cable_overlap(nl_res[:2], nl_res[2:], dist='2 microns')
Source code in navis/connectivity/predict.py
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
def cable_overlap(a: NeuronObject,
                  b: NeuronObject,
                  dist: Union[float, str] = 2,
                  method: Union[Literal['min'], Literal['max'], Literal['mean'],
                                Literal['forward'], Literal['reverse']] = 'min'
                  ) -> pd.DataFrame:
    """Calculate the amount of cable of neuron A within distance of neuron B.

    Parameters
    ----------
    a,b :       TreeNeuron | NeuronList
                Neuron(s) for which to compute cable within distance. It is
                highly recommended to resample neurons to guarantee an even
                sampling rate.
    dist :      int | float, optional
                Maximum distance. If the neurons have their `.units` set, you
                can also provide this as a string such as "2 microns".
    method :    'min' | 'max' | 'mean' | 'forward' | 'reverse'
                Method by which to calculate the overlapping cable between
                two cables:

                  Assuming that neurons A and B have 300 and 150 um of cable
                  within given distances, respectively:

                    1. 'min' returns 150
                    2. 'max' returns 300
                    3. 'mean' returns 225
                    4. 'forward' returns 300 (i.e. A->B)
                    5. 'reverse' returns 150 (i.e. B->A)

    Returns
    -------
    pandas.DataFrame
            Matrix in which neurons A are rows, neurons B are columns. Cable
            within distance is given in the neuron's native units:
            ```
                        neuronD  neuronE   neuronF  ...
            neuronA         5        1         0
            neuronB        10       20         5
            neuronC         4        3        15
            ...
            ```

    See Also
    --------
    [`navis.resample_skeleton`][]
                Use to resample neurons before calculating overlap.

    Examples
    --------
    >>> import navis
    >>> nl = navis.example_neurons(4)
    >>> # Cable overlap is given in the neurons' units
    >>> # Converting the example neurons from 8x8x8 voxel space into microns
    >>> # make the results easier to interpret
    >>> nl = nl.convert_units('um')
    >>> # Resample to half a micron
    >>> nl_res = nl.resample('.5 micron', inplace=False)
    >>> # Get overlapping cable within 2 microns
    >>> ol = navis.cable_overlap(nl_res[:2], nl_res[2:], dist='2 microns')

    """
    if not isinstance(a, (TreeNeuron, NeuronList)) \
       or not isinstance(b, (TreeNeuron, NeuronList)):
        raise TypeError(f'Expected `TreeNeurons`, got "{type(a)}" and "{type(b)}"')

    if not isinstance(a, NeuronList):
        a = NeuronList(a)

    if not isinstance(b, NeuronList):
        b = NeuronList(b)

    # Make sure neurons have the same units
    # Do not use np.unique here because unit_str can be `None`
    units = set(np.append(a._unit_str, b._unit_str))
    units = np.array(list(units)).astype(str)
    if len(units) > 1:
        logger.warning('Neurons appear to have different units: '
                       f'{", ".join(units)}. If that is the case, cable '
                       'matrix overlap results will be garbage.')

    allowed_methods = ['min', 'max', 'mean', 'forward', 'reverse']
    if method not in allowed_methods:
        raise ValueError(f'Unknown method "{method}". Allowed methods: '
                         f'"{", ".join(allowed_methods)}"')

    # Convert e.g. "2 microns" into the neurons' native units
    dist = a[0].map_units(dist, on_error='raise')

    matrix = pd.DataFrame(np.zeros((a.shape[0], b.shape[0])),
                          index=a.id, columns=b.id)

    # Pre-compute KD trees and per-point cable lengths for both sets of
    # neurons so we don't rebuild them inside the pairwise loop
    treesA = []
    lengthsA = []
    for nA in a:
        points, vect, length = graph.neuron2tangents(nA)
        treesA.append(scipy.spatial.cKDTree(points))
        lengthsA.append(length)

    treesB = []
    lengthsB = []
    for nB in b:
        points, vect, length = graph.neuron2tangents(nB)
        treesB.append(scipy.spatial.cKDTree(points))
        lengthsB.append(length)

    with config.tqdm(total=len(a), desc='Calc. overlap',
                     disable=config.pbar_hide,
                     leave=config.pbar_leave) as pbar:
        for i, nA in enumerate(a):
            # Get cKDTree for nA
            tA = treesA[i]

            for k, nB in enumerate(b):
                # Get cKDTree for nB
                tB = treesB[k]

                # Query nB -> nA (nearest neighbour within `dist`; points
                # with no neighbour get distance inf)
                distA, ixA = tA.query(tB.data,
                                      k=1,
                                      distance_upper_bound=dist,
                                      workers=-1
                                      )
                # Query nA -> nB
                distB, ixB = tB.query(tA.data,
                                      k=1,
                                      distance_upper_bound=dist,
                                      workers=-1
                                      )

                # Sum up the cable associated with points that do have a
                # neighbour within `dist`
                nA_lengths = lengthsA[i][ixA[distA != float('inf')]]
                nB_lengths = lengthsB[k][ixB[distB != float('inf')]]

                if method == 'mean':
                    overlap = (nA_lengths.sum() + nB_lengths.sum()) / 2
                elif method == 'max':
                    overlap = max(nA_lengths.sum(), nB_lengths.sum())
                elif method == 'min':
                    overlap = min(nA_lengths.sum(), nB_lengths.sum())
                elif method == 'forward':
                    overlap = nA_lengths.sum()
                elif method == 'reverse':
                    overlap = nB_lengths.sum()

                matrix.iloc[i, k] = overlap

            pbar.update(1)

    return matrix

Prune neuron to its cell body fiber.

Here, "cell body fiber" (CBF) refers to the tract connecting the soma to the backbone in unipolar neurons (common in e.g. insects). This function works best for typical neurons with clean skeletons.

PARAMETER DESCRIPTION
x

TYPE: TreeNeuron | MeshNeuron | NeuronList

method
        The method to use:
          - "longest_neurite" assumes that the main branch point
            is where the two largest branches converge
          - "betweenness" uses centrality to determine the point
            which most shortest paths traverse

TYPE: "longest_neurite" | "betweenness" DEFAULT: 'betweenness'

reroot_soma
        If True (recommended) and neuron has a soma, it will be
        rerooted to its soma.

TYPE: bool DEFAULT: True

heal
        If True (recommended), will heal fragmented neurons.
        Fragmented neurons are not guaranteed to have correct CBFs.

TYPE: bool DEFAULT: True

threshold
        For method "betweenness" only: threshold at which to cut the
        cell body fiber. Lower thresholds produce longer CBFs.

TYPE: float [0-1] DEFAULT: 0.95

inverse
        If True, will instead *remove* the cell body fiber.

TYPE: bool DEFAULT: False

inplace
        If False, pruning is performed on copy of original neuron
        which is then returned.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
TreeNeuron / List

Pruned neuron(s). Neurons without branches (i.e. w/ a single long segment) will be returned unaltered.

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> cbf = navis.cell_body_fiber(n, inplace=False)
>>> # Neuron now has only a single segment from the soma to the main fork
>>> len(cbf.segments)
1
See Also

navis.find_main_branchpoint Find the main branch point.

navis.betweeness_centrality Calculate the per-node betweenness centrality. This is used under the hood for method='betweenness'.

Source code in navis/morpho/manipulation.py
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
@utils.map_neuronlist(desc="Pruning", allow_parallel=True)
@utils.meshneuron_skeleton(method="subset")
def cell_body_fiber(
    x: NeuronObject,
    method: Union[Literal["longest_neurite"], Literal["betweenness"]] = "betweenness",
    reroot_soma: bool = True,
    heal: bool = True,
    threshold: float = 0.95,
    inverse: bool = False,
    inplace: bool = False,
):
    """Prune neuron to its cell body fiber.

    Here, "cell body fiber" (CBF) refers to the tract connecting the soma to
    the backbone in unipolar neurons (common in e.g. insects). This function
    works best for typical neurons with clean skeletons.

    Parameters
    ----------
    x :             TreeNeuron | MeshNeuron | NeuronList
    method :        "longest_neurite" | "betweenness"
                    The method to use:
                      - "longest_neurite" assumes that the main branch point
                        is where the two largest branches converge
                      - "betweenness" uses centrality to determine the point
                        which most shortest paths traverse
    reroot_soma :   bool
                    If True (recommended) and neuron has a soma, it will be
                    rerooted to its soma.
    heal :          bool
                    If True (recommended), will heal fragmented neurons.
                    Fragmented neurons are not guaranteed to have correct CBFs.
    threshold :     float [0-1]
                    For method "betweenness" only: threshold at which to cut the
                    cell body fiber. Lower thresholds produce longer CBFs.
    inverse :       bool
                    If True, will instead *remove* the cell body fiber.
    inplace :       bool, optional
                    If False, pruning is performed on copy of original neuron
                    which is then returned.

    Returns
    -------
    TreeNeuron/List
                    Pruned neuron(s). Neurons without branches (i.e. w/ a single
                    long segment) will be returned unaltered.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> cbf = navis.cell_body_fiber(n, inplace=False)
    >>> # Neuron now has only a single segment from the soma to the main fork
    >>> len(cbf.segments)
    1

    See Also
    --------
    [`navis.find_main_branchpoint`][]
                    Find the main branch point.

    [`navis.betweeness_centrality`][]
                    Calculate the per-node betweenness centrality. This is used
                    under the hood for `method='betweenness'`.

    """
    utils.eval_param(
        method, "method", allowed_values=("longest_neurite", "betweenness")
    )

    # The decorator makes sure that at this point we have single neurons
    if not isinstance(x, core.TreeNeuron):
        raise TypeError(f"Expected TreeNeuron(s), got {type(x)}")

    if not inplace:
        x = x.copy()

    if x.n_trees > 1 and heal:
        _ = heal_skeleton(x, method="LEAFS", inplace=True)

    # If no branches, just return the neuron
    if "branch" not in x.nodes.type.values:
        return x

    if reroot_soma and not isinstance(x.soma, type(None)):
        x.reroot(x.soma, inplace=True)

    # Find main branch point
    cut = graph.find_main_branchpoint(
        x, method=method, threshold=threshold, reroot_soma=False
    )

    # Find the path from the main branch point to the root (account for
    # multiple roots, e.g. in fragmented neurons)
    path = None
    for r in x.root:
        try:
            path = nx.shortest_path(x.graph, target=r, source=cut)
            break
        except nx.NetworkXNoPath:
            # `cut` is not connected to this root - try the next one
            continue

    # Previously an unconnected branch point would surface as a confusing
    # NameError on `path`; fail with an informative error instead.
    if path is None:
        raise ValueError(
            f"Unable to find a path from main branch point {cut} to any root."
        )

    if not inverse:
        keep = path
    else:
        # Keep everything *except* the CBF
        keep = x.nodes.node_id.values[~np.isin(x.nodes.node_id, path)]

    _ = subset.subset_neuron(x, keep, inplace=True)

    return x

Clear viewer 3D canvas.

Source code in navis/plotting/vispy/vputils.py
45
46
47
48
49
50
def clear3d():
    """Clear viewer 3D canvas."""
    # Only clear if a viewer actually exists
    active = get_viewer()
    if active:
        active.clear()

Close existing 3D viewer (wipes memory).

Source code in navis/plotting/vispy/vputils.py
53
54
55
56
57
58
59
60
61
def close3d():
    """Close existing 3D viewer (wipes memory)."""
    # Best-effort teardown: silently do nothing if no viewer exists or
    # closing fails partway through.
    try:
        active = get_viewer()
        active.close()
        # Drop the module-level reference so the viewer can be collected
        globals().pop('viewer')
        del active
    except BaseException:
        pass

Combine multiple neurons into one.

PARAMETER DESCRIPTION
x
            Neurons to combine. Must all be of the same type. Does
            not yet work with VoxelNeurons. The combined neuron will
            inherit its name, id, units, etc. from the first neuron
            in `x`.

TYPE: NeuronList | Neuron/List DEFAULT: ()

RETURNS DESCRIPTION
Neuron

Combined neuron.

See Also

navis.stitch_skeletons Stitches multiple skeletons together to create one continuous neuron.

Examples:

Combine skeletons:

>>> import navis
>>> nl = navis.example_neurons(3)
>>> comb = navis.combine_neurons(nl)

Combine meshes:

>>> import navis
>>> nl = navis.example_neurons(3, kind='mesh')
>>> comb = navis.combine_neurons(nl)

Combine dotprops:

>>> import navis
>>> nl = navis.example_neurons(3)
>>> dp = navis.make_dotprops(nl)
>>> comb = navis.combine_neurons(dp)
Source code in navis/morpho/manipulation.py
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
def combine_neurons(
    *x: Union[Sequence[NeuronObject], "core.NeuronList"],
) -> "core.NeuronObject":
    """Combine multiple neurons into one.

    Parameters
    ----------
    x :                 NeuronList | Neuron/List
                        Neurons to combine. Must all be of the same type. Does
                        not yet work with VoxelNeurons. The combined neuron will
                        inherit its name, id, units, etc. from the first neuron
                        in `x`.

    Returns
    -------
    Neuron
                        Combined neuron.

    Raises
    ------
    TypeError
                        If neurons are of mixed or unsupported types.

    See Also
    --------
    [`navis.stitch_skeletons`][]
                        Stitches multiple skeletons together to create one
                        continuous neuron.

    Examples
    --------
    Combine skeletons:

    >>> import navis
    >>> nl = navis.example_neurons(3)
    >>> comb = navis.combine_neurons(nl)

    Combine meshes:

    >>> import navis
    >>> nl = navis.example_neurons(3, kind='mesh')
    >>> comb = navis.combine_neurons(nl)

    Combine dotprops:

    >>> import navis
    >>> nl = navis.example_neurons(3)
    >>> dp = navis.make_dotprops(nl)
    >>> comb = navis.combine_neurons(dp)

    """
    # Flatten input (neurons, lists of neurons, NeuronLists) into a NeuronList
    nl = utils.unpack_neurons(x)
    nl = core.NeuronList(nl)

    # Mixing types (e.g. skeletons and meshes) is not supported
    if len(nl.types) > 1:
        raise TypeError("Unable to combine neurons of different types")

    if isinstance(nl[0], core.TreeNeuron):
        # method="NONE" merges node tables without adding connecting edges
        x = stitch_skeletons(*nl, method="NONE", master="FIRST")
    elif isinstance(nl[0], core.MeshNeuron):
        # The first neuron provides name/id/units for the combined neuron
        x = nl[0].copy()
        comb = tm.util.concatenate([n.trimesh for n in nl])
        x._vertices = comb.vertices
        x._faces = comb.faces

        if any(nl.has_connectors):
            x._connectors = pd.concat(
                [n.connectors for n in nl],  # type: ignore  # no stubs for concat
                ignore_index=True,
            )
    elif isinstance(nl[0], core.Dotprops):
        x = nl[0].copy()
        x._points = np.vstack(nl._points)

        x._vect = np.vstack(nl.vect)

        # Only keep alpha values if every single neuron has them
        # (idiomatic `is None` check instead of isinstance(..., type(None)))
        if not any(n._alpha is None for n in nl):
            x._alpha = np.hstack(nl.alpha)

        if any(nl.has_connectors):
            x._connectors = pd.concat(
                [n.connectors for n in nl],  # type: ignore  # no stubs for concat
                ignore_index=True,
            )
    elif isinstance(nl[0], core.VoxelNeuron):
        raise TypeError("Combining VoxelNeuron not (yet) supported")
    else:
        raise TypeError(f"Unable to combine {type(nl[0])}")

    return x

Calculate connectivity similarity.

This functions offers a selection of metrics to compare connectivity:

  • cosine: Cosine similarity (see here)
  • rank_index: Normalized difference in rank of synaptic partners.
  • matching_index: Number of shared partners divided by total number of partners.
  • matching_index_synapses: Number of shared synapses (i.e. number of connections from/onto the same partners) divided by total number of synapses.

    Attention

    This metric is tricky when there is a disparity of total number of connections between neuron A and B. For example, consider 100/200 and 1/50 shared/total synapse: 101/250 results in a fairly high matching index of 0.404.

  • matching_index_weighted_synapses: Similar to matching_index_synapses but slightly less prone to above mentioned error as it uses the percentage of shared synapses:

    \[ S = \frac{\mathrm{NeuronA}_{\mathrm{sharedSynapses}}}{\mathrm{NeuronA}_{\mathrm{totalSynapses}}} \times \frac{\mathrm{NeuronB}_{\mathrm{sharedSynapses}}}{\mathrm{NeuronB}_{\mathrm{totalSynapses}}} \]
  • vertex: Matching index that rewards shared and punishes non-shared partners. Based on Jarrell et al., 2012: $$ f(x,y) = min(x,y) - C1 \times max(x,y) \times \exp(-C2 \times min(x,y)) $$ The final score is the sum of \(f(x,y)\) over all edges x, y between neurons A+B and their partners. C1 determines how negatively a case where one edge is much stronger than another is punished. C2 determines the point where the similarity switches from negative to positive. C1 and C2 default to 0.5 and 1, respectively, but can be changed by passing them in a dictionary as **kwargs.

  • vertex_normalized: This is vertex similarity normalized by the lowest (hypothetical total dissimilarity) and highest (all edge weights the same) achievable score.
PARAMETER DESCRIPTION
adjacency
            (N, M) observation vector with M observations for N
            neurons - e.g. an adjacency matrix. Will calculate
            similarity for all rows using the columns as observations.

TYPE: pandas DataFrame | numpy array

metric
            Metric used to compare connectivity. See notes for
            detailed explanation.

TYPE: 'cosine' | 'rank_index'| 'matching_index' | 'matching_index_synapses' | 'matching_index_weighted_synapses' | 'vertex' | 'vertex_normalized' DEFAULT: 'vertex_normalized'

threshold
            Connections weaker than this will be set to zero.

TYPE: int DEFAULT: None

n_cores
            Number of parallel processes to use. Defaults to half
            the available cores.

TYPE: int DEFAULT: max(1, os.cpu_count() // 2)

**kwargs
            Additional keyword arguments to pass to the metric function.
            See notes above for details.

DEFAULT: {}

RETURNS DESCRIPTION
DataFrame

Pandas DataFrame with similarity scores. Neurons without any connectivity will show up with np.nan for scores.

Source code in navis/connectivity/similarity.py
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
def connectivity_similarity(adjacency: Union[pd.DataFrame, np.ndarray],
                            metric: Union[Literal['rank_index'],
                                          Literal['matching_index'],
                                          Literal['matching_index_synapses'],
                                          Literal['matching_index_weighted_synapses'],
                                          Literal['vertex'],
                                          Literal['vertex_normalized'],
                                          Literal['cosine'],
                                          ] = 'vertex_normalized',
                            threshold: Optional[int] = None,
                            n_cores: int = max(1, os.cpu_count() // 2),
                            **kwargs) -> pd.DataFrame:
    r"""Calculate connectivity similarity.

    This functions offers a selection of metrics to compare connectivity:

    - **cosine**: Cosine similarity (see [here](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cosine.html))
    - **rank_index**: Normalized difference in rank of synaptic partners.
    - **matching_index**: Number of shared partners divided by total number of partners.
    - **matching_index_synapses**: Number of shared synapses (i.e. number of connections from/onto the same partners)
      divided by total number of synapses.

        !!! info "Attention"
            This metric is tricky when there is a disparity of total number of connections between neuron A and B.
            For example, consider 100/200 and 1/50 shared/total synapse: 101/250 results in a fairly high matching
            index of 0.404.

    - **matching_index_weighted_synapses**: Similar to *matching_index_synapses* but slightly less prone to above
      mentioned error as it uses the percentage of shared synapses:

        $$
        S = \frac{\mathrm{NeuronA}_{\mathrm{sharedSynapses}}}{\mathrm{NeuronA}_{\mathrm{totalSynapses}}} \times \frac{\mathrm{NeuronB}_{\mathrm{sharedSynapses}}}{\mathrm{NeuronB}_{\mathrm{totalSynapses}}}
        $$

    - **vertex**: Matching index that rewards shared and punishes non-shared partners. Based on
      [Jarrell et al., 2012](http://science.sciencemag.org/content/337/6093/437):
       $$
       f(x,y) = min(x,y) - C1 \times max(x,y) \times \exp(-C2 * min(x,y))
       $$
       The final score is the sum of $f(x,y)$ over all edges x, y between neurons A+B and their partners. C1 determines
       how negatively a case where one edge is much stronger than another is punished. C2 determines the point where the
       similarity switches from negative to positive. C1 and C2 default to 0.5 and 1, respectively, but can be changed
       by passing them in a dictionary as `**kwargs`.
    - **vertex_normalized**: This is *vertex* similarity normalized by the lowest (hypothetical total dissimilarity)
      and highest (all edge weights the same) achievable score.

    Parameters
    ----------
    adjacency :         pandas DataFrame | numpy array
                        (N, M) observation vector with M observations for N
                        neurons - e.g. an adjacency matrix. Will calculate
                        similarity for all rows using the columns as observations.
    metric :            'cosine' | 'rank_index'| 'matching_index' | 'matching_index_synapses' | 'matching_index_weighted_synapses' | 'vertex' | 'vertex_normalized'
                        Metric used to compare connectivity. See notes for
                        detailed explanation.
    threshold :         int, optional
                        Connections weaker than this will be set to zero.
    n_cores :           int
                        Number of parallel processes to use. Defaults to half
                        the available cores.
    **kwargs
                        Additional keyword arguments to pass to the metric function.
                        See notes above for details.

    Returns
    -------
    DataFrame
                        Pandas DataFrame with similarity scores. Neurons without
                        any connectivity will show up with `np.nan` for scores.

    """
    # NOTE: 'rank_index' was documented and implemented but missing from the
    # `metric` type annotation above - it has been added for consistency.
    FUNC_MAP = {'rank_index': _calc_rank_index,
                'matching_index': _calc_matching_index,
                'matching_index_synapses': _calc_matching_index_synapses,
                'matching_index_weighted_synapses': partial(_calc_matching_index_synapses, weighted=True),
                'vertex': _calc_vertex_similarity,
                'vertex_normalized': partial(_calc_vertex_similarity, normalize=True),
                'cosine': _calc_cosine_similarity
                }

    if not isinstance(metric, str) or metric.lower() not in FUNC_MAP:
        raise ValueError(f'"metric" must be either: {", ".join(FUNC_MAP.keys())}')

    score_func = FUNC_MAP[metric.lower()]

    if isinstance(adjacency, np.ndarray):
        adjacency = pd.DataFrame(adjacency)
    elif not isinstance(adjacency, pd.DataFrame):
        raise TypeError(f'Expected DataFrame, got "{type(adjacency)}"')

    if threshold:
        # Do not manipulate original
        adjacency = adjacency.copy()
        adjacency[adjacency < threshold] = 0

    # Skip expensive checks if no empty vectors
    if (adjacency.max(axis=1) == 0).any():
        kwargs['validate'] = True
    else:
        kwargs['validate'] = False

    # Prepare combinations matching scores
    comb = combinations_generator(score_func, adjacency, **kwargs)

    # Note that while we are mapping from a generator (`comb`), the pool will
    # unfortunately not evaluate this lazily. This is a "bug" in the standard
    # library that might get fixed at some point.
    if n_cores > 1:
        with ProcessPoolExecutor(max_workers=n_cores) as e:
            futures = e.map(_distributor, comb, chunksize=50000)

            matching_indices = [n for n in config.tqdm(futures,
                                                       total=adjacency.shape[0]**2,
                                                       desc='Calc. similarity',
                                                       disable=config.pbar_hide,
                                                       leave=config.pbar_leave)]
    else:
        matching_indices = []
        for c in config.tqdm(comb,
                             total=adjacency.shape[0]**2,
                             desc='Calc. similarity',
                             disable=config.pbar_hide,
                             leave=config.pbar_leave):
            matching_indices.append(_distributor(c))

    # Create empty scores matrix
    neurons = adjacency.index.values
    matching_scores = pd.DataFrame(np.zeros((len(neurons), len(neurons))),
                                   index=neurons, columns=neurons)
    # Populate scores matrix - `product` yields pairs in the same order the
    # combinations generator produced them, so indices line up
    comb_id = product(neurons, neurons)
    for i, v in enumerate(comb_id):
        matching_scores.at[v[0], v[1]] = matching_indices[i]

    return matching_scores

Calculate sparseness.

Sparseness comes in three flavors:

Lifetime kurtosis (LTK) quantifies the widths of tuning curves (according to Muench & Galizia, 2016):

\[ S = \Bigg\{ \frac{1}{N} \sum^N_{i=1} \Big[ \frac{r_i - \overline{r}}{\sigma_r} \Big] ^4 \Bigg\} - 3 \]

where \(N\) is the number of observations, \(r_i\) the value of observation \(i\), and \(\overline{r}\) and \(\sigma_r\) the mean and the standard deviation of the observations' values, respectively. LTK is assuming a normal, or at least symmetric distribution.

Lifetime sparseness (LTS) quantifies selectivity (Bhandawat et al., 2007):

\[ S = \frac{1}{1-1/N} \Bigg[1- \frac{\big(\sum^N_{j=1} r_j / N\big)^2}{\sum^N_{j=1} r_j^2 / N} \Bigg] \]

where \(N\) is the number of observations, and \(r_j\) is the value of an observation.

Activity ratio describes distributions with heavy tails (Rolls and Tovee, 1995).

Notes

NaN values will be ignored. You can use that to e.g. ignore zero values in a large connectivity matrix by changing these values to NaN before passing it to navis.sparseness.

PARAMETER DESCRIPTION
x
    (N, M) dataset with N (rows) observations for M (columns)
    neurons. One-dimensional data will be converted to two
    dimensions (N rows, 1 column).

TYPE: DataFrame | array-like

which
    Determines whether lifetime sparseness (LTS) or lifetime
    kurtosis (LTK) is returned.

TYPE: "LTS" | "LTK" | "activity_ratio" DEFAULT: 'LTS'

RETURNS DESCRIPTION
sparseness

pandas.Series if input was pandas DataFrame, else numpy.array.

Examples:

Calculate sparseness of olfactory inputs to group of neurons:

>>> import navis
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> # Get ORN response matrix from DoOR database
>>> url = 'https://raw.githubusercontent.com/ropensci/DoOR.data/master/data/door_response_matrix.csv'
>>> adj = pd.read_csv(url, delimiter=';')
>>> # Calculate lifetime sparseness
>>> S = navis.connectivity_sparseness(adj, which='LTS')
>>> # Plot distribution
>>> ax = S.plot.hist(bins=np.arange(0, 1, .1))
>>> _ = ax.set_xlabel('LTS')
>>> plt.show()
Source code in navis/connectivity/cnmetrics.py
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
def connectivity_sparseness(x: Union[pd.DataFrame, np.ndarray],
                            which: Union[Literal['LTS'],
                                         Literal['LTK'],
                                         Literal['activity_ratio']] = 'LTS') -> Union[pd.Series, np.ndarray]:
    r"""Calculate sparseness.

    Three flavors are available:

    - **LTK** (lifetime kurtosis) quantifies the widths of tuning curves
      (Muench & Galizia, 2016):

      $$
      S = \Bigg\{ \frac{1}{N} \sum^N_{i=1} \Big[ \frac{r_i - \overline{r}}{\sigma_r} \Big] ^4  \Bigg\} - 3
      $$

      with $N$ observations $r_i$, mean $\overline{r}$ and standard
      deviation $\sigma_r$. Assumes a normal (or at least symmetric)
      distribution.
    - **LTS** (lifetime sparseness) quantifies selectivity
      (Bhandawat et al., 2007):

      $$
      S = \frac{1}{1-1/N} \Bigg[1- \frac{\big(\sum^N_{j=1} r_j / N\big)^2}{\sum^N_{j=1} r_j^2 / N} \Bigg]
      $$

    - **activity_ratio** describes distributions with heavy tails
      (Rolls and Tovee, 1995).

    Notes
    -----
    `NaN` values are ignored - e.g. set zeros in a large connectivity
    matrix to `NaN` beforehand to exclude them.

    Parameters
    ----------
    x :         DataFrame | array-like
                (N, M) dataset: N observations (rows) for M neurons
                (columns). One-dimensional input is treated as a single
                column.
    which :     "LTS" | "LTK" | "activity_ratio"
                Which flavor of sparseness to compute.

    Returns
    -------
    sparseness
                `pandas.Series` for DataFrame input, else `numpy.array`.

    Examples
    --------
    Calculate sparseness of olfactory inputs to group of neurons:

    >>> import navis
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> # Get ORN response matrix from DoOR database
    >>> url = 'https://raw.githubusercontent.com/ropensci/DoOR.data/master/data/door_response_matrix.csv'
    >>> adj = pd.read_csv(url, delimiter=';')
    >>> # Calculate lifetime sparseness
    >>> S = navis.connectivity_sparseness(adj, which='LTS')
    >>> # Plot distribution
    >>> ax = S.plot.hist(bins=np.arange(0, 1, .1))
    >>> _ = ax.set_xlabel('LTS')
    >>> plt.show()                                              # doctest: +SKIP

    """
    # Coerce arbitrary array-likes (lists, tuples, ...) into an array
    if not isinstance(x, (pd.DataFrame, np.ndarray)):
        x = np.array(x)

    # Guarantee a 2D (observations x neurons) layout
    if isinstance(x, np.ndarray) and x.ndim == 1:
        x = x.reshape(x.shape[0], 1)

    # Per-column count of non-NaN observations
    N = np.sum(~np.isnan(x), axis=0)

    if which == 'LTK':
        # Kurtosis of the z-scored observations, minus 3 (excess kurtosis)
        z = (x - np.nanmean(x, axis=0)) / np.nanstd(x, axis=0)
        return np.nansum(z ** 4, axis=0) / N - 3
    elif which == 'LTS':
        ratio = np.nansum(x / N, axis=0) ** 2 / np.nansum(x ** 2 / N, axis=0)
        return 1 / (1 - (1 / N)) * (1 - ratio)
    elif which == 'activity_ratio':
        mean_sq_over_sq_mean = (np.nansum(x, axis=0) / N) ** 2 / (np.nansum(x, axis=0) ** 2 / N)
        return 1 - mean_sq_over_sq_mean
    else:
        raise ValueError('Parameter "which" must be either "LTS", "LTK" or '
                         '"activity_ratio"')

Split skeleton at given point and returns two new neurons.

Split is performed between cut node and its parent node. The cut node itself will still be present in both resulting neurons.

PARAMETER DESCRIPTION
x
   Must be a single skeleton.

TYPE: TreeNeuron | NeuronList

where
   Node ID(s) or tag(s) of the node(s) to cut. The edge that is
   cut is the one between this node and its parent. So cut node
   must not be a root node! Multiple cuts are performed in the
   order of `cut_node`. Fragments are ordered distal -> proximal.

TYPE: int | str | list

ret
   Define which parts of the neuron to return. Use this to speed
   up processing when you need only parts of the neuron.

TYPE: 'proximal' | 'distal' | 'both' DEFAULT: 'both'

RETURNS DESCRIPTION
split

Fragments of the input neuron after cutting sorted such that distal parts come before proximal parts. For example, with a single cut you can expect to return a NeuronList containing two neurons: the first contains the part distal and the second the part proximal to the cut node.

The distal->proximal order of fragments is tried to be maintained for multiple cuts but this is not guaranteed.

TYPE: NeuronList

Examples:

Cut skeleton at a (somewhat random) branch point

>>> import navis
>>> n = navis.example_neurons(1)
>>> bp = n.nodes[n.nodes.type=='branch'].node_id.values
>>> dist, prox = navis.cut_skeleton(n, bp[0])

Make cuts at multiple branch points

>>> import navis
>>> n = navis.example_neurons(1)
>>> bp = n.nodes[n.nodes.type=='branch'].node_id.values
>>> splits = navis.cut_skeleton(n, bp[:10])
See Also

navis.TreeNeuron.prune_distal_to and navis.TreeNeuron.prune_proximal_to — TreeNeuron/List shorthands to this function. navis.subset_neuron — Returns a neuron consisting of a subset of its nodes.

Source code in navis/graph/graph_utils.py
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
def cut_skeleton(
    x: "core.NeuronObject",
    where: Union[int, str, List[Union[int, str]]],
    ret: Union[Literal["both"], Literal["proximal"], Literal["distal"]] = "both",
) -> "core.NeuronList":
    """Split skeleton at given point and returns two new neurons.

    Split is performed between cut node and its parent node. The cut node itself
    will still be present in both resulting neurons.

    Parameters
    ----------
    x :        TreeNeuron | NeuronList
               Must be a single skeleton.
    where :    int | str | list
               Node ID(s) or tag(s) of the node(s) to cut. The edge that is
               cut is the one between this node and its parent. So cut node
               must not be a root node! Multiple cuts are performed in the
               order of `cut_node`. Fragments are ordered distal -> proximal.
    ret :      'proximal' | 'distal' | 'both', optional
               Define which parts of the neuron to return. Use this to speed
               up processing when you need only parts of the neuron.

    Returns
    -------
    split :    NeuronList
               Fragments of the input neuron after cutting sorted such that
               distal parts come before proximal parts. For example, with a
               single cut you can expect to return a NeuronList containing two
               neurons: the first contains the part distal and the second the
               part proximal to the cut node.

               The distal->proximal order of fragments is tried to be maintained
               for multiple cuts but this is not guaranteed.

    Raises
    ------
    ValueError
               If the cut node is a root, unknown, a missing tag, or if the
               neuron consists of multiple disconnected trees.
    TypeError
               If `x` is not a (single) TreeNeuron.

    Examples
    --------
    Cut skeleton at a (somewhat random) branch point

    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> bp = n.nodes[n.nodes.type=='branch'].node_id.values
    >>> dist, prox = navis.cut_skeleton(n, bp[0])

    Make cuts at multiple branch points

    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> bp = n.nodes[n.nodes.type=='branch'].node_id.values
    >>> splits = navis.cut_skeleton(n, bp[:10])

    See Also
    --------
    [`navis.TreeNeuron.prune_distal_to`][]
    [`navis.TreeNeuron.prune_proximal_to`][]
            `TreeNeuron/List` shorthands to this function.
    [`navis.subset_neuron`][]
            Returns a neuron consisting of a subset of its nodes.

    """
    # Validate `ret` early so we fail before doing any expensive work
    utils.eval_param(ret, name="ret", allowed_values=("proximal", "distal", "both"))

    # Unwrap single-neuron NeuronLists
    if isinstance(x, core.NeuronList):
        if len(x) == 1:
            x = x[0]
        else:
            raise Exception(f"Expected a single TreeNeuron, got {len(x)}")

    if not isinstance(x, core.TreeNeuron):
        raise TypeError(f'Expected a single TreeNeuron, got "{type(x)}"')

    # Cutting requires a single connected tree
    if x.n_trees != 1:
        raise ValueError(
            f"Unable to cut: neuron {x.id} consists of multiple "
            "disconnected trees. Use navis.heal_skeleton()"
            " to fix."
        )

    # At this point x is TreeNeuron
    x: core.TreeNeuron

    # Turn cut node into iterable
    if not utils.is_iterable(where):
        where = [where]

    # Process cut nodes (i.e. if tag)
    cn_ids: List[int] = []
    for cn in where:
        # If cut_node is a tag (rather than an ID), try finding that node
        if isinstance(cn, str):
            if x.tags is None:
                raise ValueError(f"Neuron {x.id} has no tags")
            if cn not in x.tags:
                raise ValueError(
                    f"#{x.id}: Found no node with tag {cn}" " - please double check!"
                )
            # A tag can map to multiple node IDs -> add them all
            cn_ids += x.tags[cn]
        elif cn not in x.nodes.node_id.values:
            raise ValueError(f'No node with ID "{cn}" found.')
        elif cn in x.root:
            # Root nodes have no parent edge that could be cut
            raise ValueError(f'Unable to cut at node "{cn}" - node is root')
        else:
            cn_ids.append(cn)

    # Remove duplicates while retaining order - set() would mess that up
    # (set.add() returns None, so the `or` both records the ID and keeps
    # only its first occurrence)
    seen: Set[int] = set()
    cn_ids = [cn for cn in cn_ids if not (cn in seen or seen.add(cn))]

    # Warn if not all returned
    if len(cn_ids) > 1 and ret != "both":
        logger.warning('Multiple cuts should use `ret = "both"`.')

    # Go over all cut_nodes -> order matters!
    res = [x]
    for cn in cn_ids:
        # First, find out in which neuron the cut node is
        # (earlier cuts may have moved it into one of the fragments)
        to_cut = [n for n in res if cn in n.nodes.node_id.values][0]
        to_cut_ix = res.index(to_cut)

        # Remove this neuron from results (will be cut into two)
        res.remove(to_cut)

        # Cut neuron
        if x.igraph and config.use_igraph:
            cut = _cut_igraph(to_cut, cn, ret)
        else:
            cut = _cut_networkx(to_cut, cn, ret)

        # If ret != 'both', we will get only a single neuron - therefore
        # make sure cut is iterable
        cut = utils.make_iterable(cut)

        # Add results back to results at same index, proximal first
        # (iterating in reverse keeps the distal fragment first in `res`)
        for c in cut[::-1]:
            res.insert(to_cut_ix, c)

    return core.NeuronList(res)

Remove spikes in skeleton (e.g. from jumps in image data).

For each node A, the Euclidean distance to its next successor (parent) B and that node's successor C (i.e. A->B->C) is computed. If \(\frac{dist(A,B)}{dist(A,C)} > \sigma\), node B is considered a spike and realigned between A and C.

PARAMETER DESCRIPTION
x
            Neuron(s) to be processed.

TYPE: TreeNeuron | NeuronList

sigma
            Threshold for spike detection. Smaller sigma = more
            aggressive spike detection.

TYPE: float | int DEFAULT: 5

max_spike_length
            Determines how long (# of nodes) a spike can be.

TYPE: int DEFAULT: 1

inplace
            If False, a copy of the neuron is returned.

TYPE: bool DEFAULT: False

reverse
            If True, will **also** walk the segments from proximal
            to distal. Use this to catch spikes on e.g. terminal
            nodes.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
TreeNeuron / List

Despiked neuron(s). Only if inplace=False.

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> despiked = navis.despike_skeleton(n)
Source code in navis/morpho/manipulation.py
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
@utils.map_neuronlist(desc="Despiking", allow_parallel=True)
def despike_skeleton(
    x: NeuronObject,
    sigma: int = 5,
    max_spike_length: int = 1,
    inplace: bool = False,
    reverse: bool = False,
) -> Optional[NeuronObject]:
    r"""Remove spikes in skeleton (e.g. from jumps in image data).

    For each node A, the Euclidean distance to its next successor (parent)
    B and that node's successor C (i.e A->B->C) is computed. If
    $\frac{dist(A,B)}{dist(A,C)}>sigma$, node B is considered a spike
    and realigned between A and C.

    Parameters
    ----------
    x :                 TreeNeuron | NeuronList
                        Neuron(s) to be processed.
    sigma :             float | int, optional
                        Threshold for spike detection. Smaller sigma = more
                        aggressive spike detection.
    max_spike_length :  int, optional
                        Determines how long (# of nodes) a spike can be.
    inplace :           bool, optional
                        If False, a copy of the neuron is returned.
    reverse :           bool, optional
                        If True, will **also** walk the segments from proximal
                        to distal. Use this to catch spikes on e.g. terminal
                        nodes.

    Returns
    -------
    TreeNeuron/List
                Despiked neuron(s). Only if `inplace=False`.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> despiked = navis.despike_skeleton(n)

    """
    # TODO:
    # - flattening all segments first before Spike detection should speed up
    #   quite a lot
    # -> as intermediate step: assign all new positions at once

    # The decorator makes sure that we have single neurons at this point
    if not isinstance(x, core.TreeNeuron):
        raise TypeError(f"Can only process TreeNeurons, not {type(x)}")

    if not inplace:
        x = x.copy()

    # Index nodes table by node ID for fast positional lookups via .loc
    this_nodes = x.nodes.set_index("node_id", inplace=False)

    segs_to_walk = x.segments

    if reverse:
        # NOTE(review): `x.segments[::-1]` reverses the *order of segments*,
        # not the node order within each segment - verify this matches the
        # documented intent of also walking proximal -> distal.
        segs_to_walk += x.segments[::-1]

    # For each spike length do -> do this in reverse to correct the long
    # spikes first
    for l in list(range(1, max_spike_length + 1))[::-1]:
        # Go over all segments
        for seg in segs_to_walk:
            # Get nodes A, B and C of this segment
            # The three slices are aligned such that row i holds the triplet
            # (A, B, C) = (seg[i], seg[i + l], seg[i + l + 1]), i.e. B lies
            # `l` nodes after A and C is B's immediate successor
            this_A = this_nodes.loc[seg[: -l - 1]]
            this_B = this_nodes.loc[seg[l:-1]]
            this_C = this_nodes.loc[seg[l + 1 :]]

            # Get coordinates
            A = this_A[["x", "y", "z"]].values
            B = this_B[["x", "y", "z"]].values
            C = this_C[["x", "y", "z"]].values

            # Calculate euclidean distances A->B and A->C
            dist_AB = np.linalg.norm(A - B, axis=1)
            dist_AC = np.linalg.norm(A - C, axis=1)

            # Get the spikes
            # `where=dist_AC != 0` guards against division by zero when A
            # and C coincide
            spikes_ix = np.where(
                np.divide(dist_AB, dist_AC, where=dist_AC != 0) > sigma
            )[0]
            spikes = this_B.iloc[spikes_ix]

            if not spikes.empty:
                # Interpolate new position(s) between A and C
                # (moves each spike node B to the A-C midpoint)
                new_positions = A[spikes_ix] + (C[spikes_ix] - A[spikes_ix]) / 2

                this_nodes.loc[spikes.index, ["x", "y", "z"]] = new_positions

    # Reassign node table
    x.nodes = this_nodes.reset_index(drop=False, inplace=False)

    # The weights in the graph have changed, we need to update that
    # (topology is untouched, so segment caches can be kept)
    x._clear_temp_attr(exclude=["segments", "small_segments", "classify_nodes"])

    return x

Get the geodesic distance between nodes in nanometers.

PARAMETER DESCRIPTION
x
        If NeuronList must contain only a single neuron.

TYPE: TreeNeuron | MeshNeuron | NeuronList

a
        Node IDs (for TreeNeurons) or vertex indices (MeshNeurons)
        to check the distance between.

TYPE: int

RETURNS DESCRIPTION
int

distance in nm

See Also

navis.distal_to Check if a node A is distal to node B. navis.geodesic_matrix Get all-by-all geodesic distance matrix. navis.segment_length Much faster if you have a linear segment and know all node IDs.

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> d = navis.dist_between(n,
...                        n.nodes.node_id.values[0],
...                        n.nodes.node_id.values[1])
Source code in navis/graph/graph_utils.py
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
@utils.lock_neuron
def dist_between(x: "core.NeuronObject", a: int, b: int) -> float:
    """Get the geodesic distance between nodes in nanometers.

    Parameters
    ----------
    x :             TreeNeuron | MeshNeuron | NeuronList
                    If NeuronList must contain only a single neuron.
    a,b :           int
                    Node IDs (for TreeNeurons) or vertex indices (MeshNeurons)
                    to check the distance between.

    Returns
    -------
    float
                    Distance in nm.

    See Also
    --------
    [`navis.distal_to`][]
        Check if a node A is distal to node B.
    [`navis.geodesic_matrix`][]
        Get all-by-all geodesic distance matrix.
    [`navis.segment_length`][]
        Much faster if you have a linear segment and know all node IDs.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> d = navis.dist_between(n,
    ...                        n.nodes.node_id.values[0],
    ...                        n.nodes.node_id.values[1])

    """
    if isinstance(x, core.NeuronList):
        if len(x) == 1:
            x = x[0]
        else:
            raise ValueError(f"Need a single TreeNeuron, got {len(x)}")

    if isinstance(x, (core.TreeNeuron, core.MeshNeuron)):
        # Prefer the (typically faster) igraph representation if available
        G: Union[
            "igraph.Graph",  # noqa
            "nx.DiGraph",
        ] = x.igraph if (x.igraph and config.use_igraph) else x.graph
    elif isinstance(x, nx.DiGraph):
        G = x
    elif "igraph" in str(type(x)):
        # We can't use isinstance here because the igraph library might not be
        # installed. Note that we check the type of `x` itself (not `x.igraph`):
        # a raw igraph.Graph has no `.igraph` attribute, so the old check could
        # only ever raise an AttributeError.
        G = x
    else:
        raise ValueError(f"Unable to process data of type {type(x)}")

    if (
        (utils.is_iterable(a) and len(a) > 1)  # type: ignore  # this is just a check
        or (utils.is_iterable(b) and len(b) > 1)
    ):  # type: ignore  # this is just a check
        raise ValueError(
            "Can only process single nodes/vertices. Use "
            "navis.geodesic_matrix instead."
        )

    a = utils.make_non_iterable(a)
    b = utils.make_non_iterable(b)

    # Node IDs / vertex indices must be numeric
    try:
        _ = int(a)
        _ = int(b)
    except BaseException:
        raise ValueError("a, b need to be node IDs or vertex indices!")

    # If we're working with a networkX DiGraph
    if isinstance(G, nx.DiGraph):
        # Return a float to match the annotated return type - wrapping in
        # int() (as previously done) silently truncated weighted geodesic
        # distances, which are generally floats.
        return float(
            nx.algorithms.shortest_path_length(
                G.to_undirected(as_view=True), a, b, weight="weight"
            )
        )
    else:
        if isinstance(x, core.TreeNeuron):
            # For TreeNeurons, map node IDs to igraph vertices first
            a = G.vs.find(node_id=a)
            b = G.vs.find(node_id=b)

        # If not, we're assuming G is an iGraph object
        return G.distances(a, b, weights="weight", mode="ALL")[0][0]

Calculate distance to root for each node.

PARAMETER DESCRIPTION
x

TYPE: TreeNeuron

weight
            Use "weight" if you want geodesic distance and `None`
            if you want node count.

TYPE: str DEFAULT: None

igraph_indices
            Whether to return igraph node indices instead of node
            IDs. This is mainly used for internal functions.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
dist

Dictionary with root distances.

TYPE: dict

Examples:

For doctest only

>>> import navis
>>> n = navis.example_neurons(1)
>>> seg = navis.graph.dist_to_root(n)
See Also

navis.geodesic_matrix For distances between all points.

Source code in navis/graph/graph_utils.py
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
@utils.lock_neuron
def dist_to_root(
    x: "core.TreeNeuron", weight=None, igraph_indices: bool = False
) -> dict:
    """Calculate distance to root for each node.

    Parameters
    ----------
    x :                 TreeNeuron
    weight :            str, optional
                        Use "weight" if you want geodesic distance and `None`
                        if you want node count.
    igraph_indices :    bool
                        Whether to return igraph node indices instead of node
                        IDs. This is mainly used for internal functions.

    Returns
    -------
    dist :              dict
                        Dictionary with root distances.

    Examples
    --------
    For doctest only

    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> seg = navis.graph.dist_to_root(n)

    See Also
    --------
    [`navis.geodesic_matrix`][]
                        For distances between all points.

    """
    if not isinstance(x, core.TreeNeuron):
        raise TypeError(f"Expected TreeNeuron, got {type(x)}")

    # Collect path lengths towards each root (one root per connected tree)
    distances: dict = {}
    for r in x.root:
        distances.update(nx.shortest_path_length(x.graph, target=r, weight=weight))

    if igraph_indices:
        # Translate node IDs into igraph vertex indices
        if not x.igraph:
            raise ValueError("Neuron does not have an igraph representation.")
        id2ix = {node: ix for ix, node in enumerate(x.igraph.vs["node_id"])}
        distances = {id2ix[node]: d for node, d in distances.items()}

    return distances
distal_to
distal_to
distal_to

Check if nodes A are distal to nodes B.

Important

Please note that if node A is not distal to node B, this does not automatically mean it is proximal instead: if nodes are on different branches, they are neither distal nor proximal to one another! To test for this case run a->b and b->a - if both return False, nodes are on different branches.

Also: if a and b are the same node, this function will return True!

PARAMETER DESCRIPTION
x

TYPE: TreeNeuron

a
If no node IDs are provided, will consider all nodes. Note that for
large sets of nodes it might be more efficient to use
[`navis.geodesic_matrix`][] (see examples).

TYPE: Optional[Union[str, int, List[Union[str, int]]]] DEFAULT: None

RETURNS DESCRIPTION
bool

If a and b are single node IDs respectively.

pd.DataFrame

If a and/or b are lists of node IDs. Columns and rows (index) represent node IDs. Neurons a are rows, neurons b are columns.

Examples:

>>> import navis
>>> # Get a neuron
>>> x = navis.example_neurons(1)
>>> # Get a random node
>>> n = x.nodes.iloc[100].node_id
>>> # Check all nodes if they are distal or proximal to that node
>>> df = navis.distal_to(x, n)
>>> # Get the IDs of the nodes that are distal
>>> dist = df.loc[n, df.loc[n]].index.values
>>> len(dist)
101

For large neurons and/or large sets of a/b it can be much faster to use geodesic_matrix instead:

>>> import navis
>>> import numpy as np
>>> x = navis.example_neurons(1)
>>> # Get an all-by-all distal_to
>>> df = navis.geodesic_matrix(x, weight=None, directed=True) < np.inf
>>> # Get distal_to for specific nodes
>>> a, b = x.nodes.node_id.values[:100], x.nodes.node_id.values[-100:]
>>> dist = navis.geodesic_matrix(x, weight=None, directed=True, from_=a)
>>> distal_to = dist[b] < np.inf
See Also

navis.geodesic_matrix Depending on your neuron and how many nodes you're asking for, this function can be considerably faster! See examples.

Source code in navis/graph/graph_utils.py
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
@utils.lock_neuron
def distal_to(
    x: "core.TreeNeuron",
    a: Optional[Union[str, int, List[Union[str, int]]]] = None,
    b: Optional[Union[str, int, List[Union[str, int]]]] = None,
) -> Union[bool, pd.DataFrame]:
    """Check if nodes A are distal to nodes B.

    Important
    ---------
    Please note that if node A is not distal to node B, this does **not**
    automatically mean it is proximal instead: if nodes are on different
    branches, they are neither distal nor proximal to one another! To test
    for this case run a->b and b->a - if both return `False`, nodes are on
    different branches.

    Also: if a and b are the same node, this function will return `True`!

    Parameters
    ----------
    x :     TreeNeuron
    a,b :   single node ID | list of node IDs | None, optional
            If no node IDs are provided, will consider all nodes. Note that
            for large sets of nodes it might be more efficient to use
            [`navis.geodesic_matrix`][] (see examples).

    Returns
    -------
    bool
            If `a` and `b` are single node IDs respectively.
    pd.DataFrame
            If `a` and/or `b` are lists of node IDs. Columns and rows
            (index) represent node IDs. Neurons `a` are rows, neurons
            `b` are columns.

    Examples
    --------
    >>> import navis
    >>> # Get a neuron
    >>> x = navis.example_neurons(1)
    >>> # Get a random node
    >>> n = x.nodes.iloc[100].node_id
    >>> # Check all nodes if they are distal or proximal to that node
    >>> df = navis.distal_to(x, n)
    >>> # Get the IDs of the nodes that are distal
    >>> dist = df.loc[n, df.loc[n]].index.values
    >>> len(dist)
    101

    For large neurons and/or large sets of `a`/`b` it can be much faster to use
    `geodesic_matrix` instead:

    >>> import navis
    >>> import numpy as np
    >>> x = navis.example_neurons(1)
    >>> # Get an all-by-all distal_to
    >>> df = navis.geodesic_matrix(x, weight=None, directed=True) < np.inf
    >>> # Get distal_to for specific nodes
    >>> a, b = x.nodes.node_id.values[:100], x.nodes.node_id.values[-100:]
    >>> dist = navis.geodesic_matrix(x, weight=None, directed=True, from_=a)
    >>> distal_to = dist[b] < np.inf

    See Also
    --------
    [`navis.geodesic_matrix`][]
            Depending on your neuron and how many nodes you're asking for,
            this function can be considerably faster! See examples.

    """
    # Unwrap single-neuron NeuronLists
    if isinstance(x, core.NeuronList) and len(x) == 1:
        x = x[0]

    if not isinstance(x, core.TreeNeuron):
        raise ValueError(f"Please pass a single TreeNeuron, got {type(x)}")

    # At this point x is TreeNeuron
    x: core.TreeNeuron

    # Sources: provided node IDs, or all nodes if `a` is None
    if not isinstance(a, type(None)):
        tnA = utils.make_iterable(a)
        # Make sure we're dealing with integers
        tnA = np.unique(tnA).astype(int)
    else:
        tnA = x.nodes.node_id.values

    # Targets: provided node IDs, or all nodes if `b` is None
    if not isinstance(b, type(None)):
        tnB = utils.make_iterable(b)
        # Make sure we're dealing with integers
        tnB = np.unique(tnB).astype(int)
    else:
        tnB = x.nodes.node_id.values

    if x.igraph and config.use_igraph:
        # Map node ID to index
        id2ix = {n: v for v, n in zip(x.igraph.vs.indices, x.igraph.vs["node_id"])}

        # Convert node IDs to indices
        tnA = [id2ix[n] for n in tnA]  # type: ignore
        tnB = [id2ix[n] for n in tnB]  # type: ignore

        # Get directed path lengths; a finite A->B distance means A is
        # distal to B (see the equivalent networkx logic below)
        le = x.igraph.distances(tnA, tnB, mode="OUT")

        # Converting to numpy array first is ~2X as fast
        le = np.asarray(le)

        # Convert to True/False: finite distance = reachable = distal
        le = le != float("inf")

        df = pd.DataFrame(
            le, index=x.igraph.vs[tnA]["node_id"], columns=x.igraph.vs[tnB]["node_id"]
        )
    else:
        # Generate empty DataFrame (rows = sources `a`, columns = targets `b`)
        df = pd.DataFrame(
            np.zeros((len(tnA), len(tnB)), dtype=bool), columns=tnB, index=tnA
        )

        # Iterate over all targets
        # Grab graph once to avoid overhead from stale checks
        g = x.graph
        for nB in config.tqdm(
            tnB,
            desc="Querying paths",
            disable=(len(tnB) < 1000) | config.pbar_hide,
            leave=config.pbar_leave,
        ):
            # Get all paths TO this target. This function returns a dictionary:
            # { source1 : path_length, source2 : path_length, ... } containing
            # all nodes distal to this node.
            paths = nx.shortest_path_length(g, source=None, target=nB)
            # Check if sources are among our targets
            df[nB] = [nA in paths for nA in tnA]

    # Collapse to a scalar bool if both `a` and `b` were single nodes
    if df.shape == (1, 1):
        return df.values[0][0]
    else:
        # Return boolean
        return df

Downsample neuron(s) by a given factor.

For skeletons: preserves root, leafs, branchpoints by default. Preservation of nodes with synapses can be toggled - see preserve_nodes parameter. Use downsampling_factor=float('inf') to get a skeleton consisting only of root, branch and end points.

PARAMETER DESCRIPTION
x
                Neuron(s) to downsample. Note that for MeshNeurons
                we use the first available backend.

TYPE: single neuron | NeuronList

downsampling_factor
                Factor by which downsample. For TreeNeuron, Dotprops
                and MeshNeurons this reduces the node, point
                and face count, respectively. For VoxelNeurons it
                reduces the dimensions by given factor.

TYPE: int | float('inf')

preserve_nodes
                Can be either list of node IDs to exclude from
                downsampling or a string to a DataFrame attached
                to the neuron (e.g. "connectors"). DataFrame must
                have `node_id` column. Only relevant for
                TreeNeurons.

TYPE: str | list DEFAULT: None

inplace
                If True, will modify original neuron. If False, we
                will operate on and return a copy.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
TreeNeuron / Dotprops / VoxelNeurons / NeuronList

Same datatype as input.

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> n_ds = navis.downsample_neuron(n,
...                                downsampling_factor=5,
...                                inplace=False)
>>> n.n_nodes > n_ds.n_nodes
True
See Also

navis.resample_skeleton This function resamples a neuron to given resolution. This will change node IDs! navis.simplify_mesh This is the function used for MeshNeurons. Use directly for more control of the simplification.

Source code in navis/sampling/downsampling.py
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
@utils.map_neuronlist(desc="Downsampling", allow_parallel=True)
def downsample_neuron(
    x: "core.NeuronObject",
    downsampling_factor: Union[int, float],
    inplace: bool = False,
    preserve_nodes: Optional[List[int]] = None,
) -> Optional["core.NeuronObject"]:
    """Downsample neuron(s) by a given factor.

    For skeletons this preserves root, leaf and branch points by default;
    preservation of additional nodes (e.g. those with synapses) can be
    requested via `preserve_nodes`. Passing `downsampling_factor=float('inf')`
    reduces a skeleton to just its root, branch and end points.

    Parameters
    ----------
    x :                     single neuron | NeuronList
                            Neuron(s) to downsample. Note that for MeshNeurons
                            we use the first available backend.
    downsampling_factor :   int | float('inf')
                            Factor by which to downsample. For TreeNeuron,
                            Dotprops and MeshNeurons this reduces the node,
                            point and face count, respectively. For
                            VoxelNeurons it reduces the dimensions by the
                            given factor.
    preserve_nodes :        str | list, optional
                            Either a list of node IDs to exclude from
                            downsampling or the name of a DataFrame attached
                            to the neuron (e.g. "connectors") which must have
                            a `node_id` column. Only relevant for TreeNeurons.
    inplace :               bool, optional
                            If True, will modify original neuron. If False,
                            will operate on and return a copy.

    Returns
    -------
    TreeNeuron/Dotprops/VoxelNeurons/NeuronList
                            Same datatype as input.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> n_ds = navis.downsample_neuron(n,
    ...                                downsampling_factor=5,
    ...                                inplace=False)
    >>> n.n_nodes > n_ds.n_nodes
    True

    See Also
    --------
    [`navis.resample_skeleton`][]
                             This function resamples a neuron to given
                             resolution. This will change node IDs!
    [`navis.simplify_mesh`][]
                             This is the function used for `MeshNeurons`. Use
                             directly for more control of the simplification.

    """
    # A factor <= 1 would mean no (or inverse) downsampling
    if downsampling_factor <= 1:
        raise ValueError('Downsampling factor must be greater than 1.')

    # Work on a copy unless asked to modify in place
    if not inplace:
        x = x.copy()

    # Dispatch to the type-specific implementation
    if isinstance(x, core.TreeNeuron):
        _downsample_treeneuron(
            x,
            downsampling_factor=downsampling_factor,
            preserve_nodes=preserve_nodes,
        )
    elif isinstance(x, core.Dotprops):
        _downsample_dotprops(x, downsampling_factor=downsampling_factor)
    elif isinstance(x, core.VoxelNeuron):
        _downsample_voxels(x, downsampling_factor=downsampling_factor)
    elif isinstance(x, core.MeshNeuron):
        # For meshes, the factor translates into the fraction of faces to keep
        meshes.simplify_mesh(x, F=1 / downsampling_factor, inplace=True)
    else:
        raise TypeError(f'Unable to downsample data of type "{type(x)}"')

    return x

Remove small disconnected pieces of "fluff".

By default, this function will remove all but the largest connected component from the neuron. You can change that behavior using the keep_size and n_largest parameters. Connectors (if present) will be remapped to the closest surviving vertex/node.

PARAMETER DESCRIPTION
x
    The neuron(s) to remove fluff from.

TYPE: TreeNeuron | MeshNeuron | Dotprops | NeuronList

keep_size
    Use this to set a size (in number of nodes/vertices) for small
    bits to keep. If `keep_size` < 1 it will be interpreted as
    fraction of total nodes/vertices/points.

TYPE: float DEFAULT: None

n_largest
    If set, will keep the `n_largest` connected components. Note:
    if provided, `keep_size` will be applied first!

TYPE: int DEFAULT: None

epsilon
    For Dotprops: distance at which to consider two points to be
    connected. If `None`, will use the default value of 5 times
    the average node distance (`x.sampling_resolution`).

TYPE: float DEFAULT: None

inplace
    If False, pruning is performed on copy of original neuron
    which is then returned.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
Neuron / List

Neuron(s) without fluff.

Examples:

>>> import navis
>>> m = navis.example_neurons(1, kind='mesh')
>>> m.n_vertices
6309
>>> # Remove all but the largest connected component
>>> top = navis.drop_fluff(m)
>>> top.n_vertices
5951
>>> # Keep the ten largest connected components
>>> two = navis.drop_fluff(m, n_largest=10)
>>> two.n_vertices
6069
>>> # Keep all fragments with at least 100 vertices
>>> clean = navis.drop_fluff(m, keep_size=100)
>>> clean.n_vertices
5951
>>> # Keep the two largest fragments with at least 50 vertices each
>>> # (for this neuron the result is just the largest fragment)
>>> clean2 = navis.drop_fluff(m, keep_size=50, n_largest=2)
>>> clean2.n_vertices
6037
Source code in navis/morpho/manipulation.py
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
@utils.map_neuronlist(desc="Removing fluff", allow_parallel=True)
def drop_fluff(
    x: Union["core.TreeNeuron", "core.MeshNeuron", "core.NeuronList"],
    keep_size: Optional[float] = None,
    n_largest: Optional[int] = None,
    epsilon: Optional[float] = None,
    inplace: bool = False,
):
    """Remove small disconnected pieces of "fluff".

    By default, this function keeps only the largest connected component of
    the neuron. Use `keep_size` and/or `n_largest` to adjust that behavior.
    Connectors (if present) will be remapped to the closest surviving
    vertex/node.

    Parameters
    ----------
    x :         TreeNeuron | MeshNeuron | Dotprops | NeuronList
                The neuron(s) to remove fluff from.
    keep_size : float, optional
                Size threshold (in number of nodes/vertices) for small bits
                to keep. Values < 1 are interpreted as a fraction of the
                total node/vertex/point count.
    n_largest : int, optional
                If set, will keep the `n_largest` connected components. Note:
                if provided, `keep_size` will be applied first!
    epsilon :   float, optional
                For Dotprops: distance at which to consider two points to be
                connected. If `None`, will use the default value of 5 times
                the average node distance (`x.sampling_resolution`).
    inplace :   bool, optional
                If False, pruning is performed on copy of original neuron
                which is then returned.

    Returns
    -------
    Neuron/List
                Neuron(s) without fluff.

    Examples
    --------
    >>> import navis
    >>> m = navis.example_neurons(1, kind='mesh')
    >>> m.n_vertices
    6309
    >>> # Remove all but the largest connected component
    >>> top = navis.drop_fluff(m)
    >>> top.n_vertices
    5951
    >>> # Keep the ten largest connected components
    >>> two = navis.drop_fluff(m, n_largest=10)
    >>> two.n_vertices
    6069
    >>> # Keep all fragments with at least 100 vertices
    >>> clean = navis.drop_fluff(m, keep_size=100)
    >>> clean.n_vertices
    5951
    >>> # Keep the two largest fragments with at least 50 vertices each
    >>> # (for this neuron the result is just the largest fragment)
    >>> clean2 = navis.drop_fluff(m, keep_size=50, n_largest=2)
    >>> clean2.n_vertices
    6037

    """
    utils.eval_param(x, name="x", allowed_types=(core.TreeNeuron, core.MeshNeuron, core.Dotprops))

    # Build an undirected graph representation of the neuron
    if isinstance(x, core.Dotprops):
        G = graph.neuron2nx(x, epsilon=epsilon)
    else:
        G = x.graph
        if G.is_directed():
            # Skeleton graphs are directed -> flatten for connectivity
            G = G.to_undirected()

    # Connected components, largest first
    comps = sorted(nx.connected_components(G), key=len, reverse=True)

    # A fractional keep_size is relative to the total node count
    if keep_size and keep_size < 1:
        keep_size = len(G.nodes) * keep_size

    if keep_size:
        # First drop components below the size threshold ...
        comps = [c for c in comps if len(c) >= keep_size]
        # ... then (optionally) restrict to the n largest survivors
        if n_largest:
            comps = comps[:n_largest]
        keep = [node for comp in comps for node in comp]
    elif n_largest:
        keep = [node for comp in comps[:n_largest] for node in comp]
    else:
        # Default: just the single largest component
        keep = comps[0]

    # Subset neuron to the surviving nodes/vertices
    x = subset.subset_neuron(x, subset=keep, inplace=inplace, keep_disc_cn=True)

    # Re-attach connectors that lost their anchor node/vertex
    if x.has_connectors:
        id_col = [c for c in ('node_id', 'vertex_id', 'point_id') if c in x.connectors.columns]
        if id_col:
            id_col = id_col[0]
            disc = ~x.connectors[id_col].isin(x.graph.nodes).values
            if any(disc):
                xyz = x.connectors.loc[disc, ["x", "y", "z"]].values
                x.connectors.loc[disc, id_col] = x.snap(xyz)[0]

    return x

Create TreeNeuron from edges and (optional) vertex coordinates.

PARAMETER DESCRIPTION
edges
        Edges between vertices.

TYPE: (N, 2) array

vertices
        Vertex positions. If not provided, will position
        all vertices at (0, 0, 0).

TYPE: (N, 3) array DEFAULT: None

validate
        If True (default) will fix issues with cycles
        and edges orientation. Only skip this if
        you are absolutely sure your data are good.

TYPE: bool DEFAULT: True

**kwargs
        Additional keyword arguments are passed to
        initialization of the TreeNeuron.

DEFAULT: {}

RETURNS DESCRIPTION
TreeNeuron

Examples:

>>> import navis
>>> import numpy as np
>>> verts = np.random.rand(5, 3)
>>> edges = np.array([(0, 1), (1, 2), (2, 3), (2, 4)])
>>> sk = navis.edges2neuron(edges, vertices=verts)
Source code in navis/graph/converters.py
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
def edges2neuron(edges, vertices=None, validate=True, **kwargs):
    """Create TreeNeuron from edges and (optional) vertex coordinates.

    Parameters
    ----------
    edges :         (N, 2) array
                    Edges between vertices.
    vertices :      (N, 3) array, optional
                    Vertex positions. If not provided, will position
                    all vertices at (0, 0, 0).
    validate :      bool
                    If True (default) will fix issues with cycles
                    and edges orientation. Only skip this if
                    you are absolutely sure your data are good.
    **kwargs
                    Additional keyword arguments are passed to
                    initialization of the TreeNeuron.

    Returns
    -------
    TreeNeuron

    Examples
    --------

    >>> import navis
    >>> import numpy as np
    >>> verts = np.random.rand(5, 3)
    >>> edges = np.array([(0, 1), (1, 2), (2, 3), (2, 4)])
    >>> sk = navis.edges2neuron(edges, vertices=verts)

    """
    # Make sure we're dealing with arrays
    edges = np.asarray(edges)

    if vertices is not None:
        vertices = np.asarray(vertices)
    else:
        # No coordinates given -> place all vertices at the origin
        vertices = np.zeros((edges.max() + 1, 3))

    if vertices.ndim != 2 or vertices.shape[1] != 3:
        raise ValueError(
            f"Expected `vertices` to be of shape (N, 3), got {vertices.shape}"
        )
    if edges.ndim != 2 or edges.shape[1] != 2:
        raise ValueError(
            f"Expected `edges` to be of shape (N, 2), got {edges.shape}"
        )

    if edges.max() > (len(vertices) - 1):
        raise IndexError("vertex index out of range")

    G = nx.Graph()
    G.add_nodes_from(np.arange(len(vertices)))
    G.add_edges_from(edges)

    # Note: at this point we could just pass the graph to nx2neuron
    # But because we know it came from from vertices and edges, we
    # can skip certain checks and make the process a bit faster

    if validate and not nx.is_forest(G):
        # Break cycles by repeatedly removing the cycle edge whose first
        # node has the lowest degree until no cycles remain
        while True:
            try:
                cycle = nx.find_cycle(G)
            except nx.exception.NetworkXNoCycle:
                break

            # Sort by degree
            cycle = sorted(cycle, key=lambda e: G.degree[e[0]])

            # Remove the edge with the lowest degree
            G.remove_edge(cycle[0][0], cycle[0][1])

    # Infer a parent for every node. This must happen regardless of
    # `validate` - previously it only ran when validate=True, which made
    # `edges2neuron(..., validate=False)` fail with a NameError below.
    parents = {}
    for cc in nx.connected_components(G):
        # If this is a disconnected node
        if len(cc) == 1:
            parents[cc.pop()] = -1
            continue

        sg = nx.subgraph(G, cc)
        # Pick a random root
        r = cc.pop()
        # Generate parent->child dictionary
        this = nx.predecessor(sg, r)

        # Update overall parent dictionary
        # (note that we assign -1 as root's parent)
        parents.update({k: v[0] if v else -1 for k, v in this.items()})

    nodes = pd.DataFrame(vertices, columns=['x', 'y', 'z'])
    nodes.insert(0, 'node_id', nodes.index)
    nodes.insert(1, 'parent_id', nodes.index.map(parents))

    return core.TreeNeuron(nodes, **kwargs)

Load example neuron(s).

These example neurons are skeletons and meshes of the same olfactory projection neurons from the DA1 glomerulus which have been automatically segmented in the Janelia hemibrain data set [1]. See also https://neuprint.janelia.org.

Coordinates are in voxels which equal 8 x 8 x 8 nanometers.

PARAMETER DESCRIPTION
n
    Number of neurons to return. If None, will return all available
    example neurons. Can never return more than the maximum number
    of available example neurons.

TYPE: int | None DEFAULT: None

kind
    What kind of neurons to return.

TYPE: "skeleton" | "mesh" | "mix" DEFAULT: 'skeleton'

synapses
    If True, will also load synapses.

TYPE: bool, DEFAULT: True

source
    Only relevant for skeletons. Skeletons can be generated from SWC
    files or GML graphs (this is really only used for testing).

TYPE: 'swc' | 'gml' DEFAULT: 'swc'

RETURNS DESCRIPTION
TreeNeuron

If n=1 and kind='skeleton'.

MeshNeuron

If n=1 and kind='mesh'.

NeuronList

List of the above neuron types if n>1.

References

[1] Louis K. Scheffer et al., bioRxiv. 2020. doi: https://doi.org/10.1101/2020.04.07.030213 A Connectome and Analysis of the Adult Drosophila Central Brain.

Examples:

Load a single neuron

>>> import navis
>>> n = navis.example_neurons(n=1)

Load all example neurons

>>> nl = navis.example_neurons()
Source code in navis/data/load_data.py
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
def example_neurons(n: Optional[int] = None,
                    kind:  Union[Literal['mesh'],
                                 Literal['skeleton'],
                                 Literal['mix']] = 'skeleton',
                    synapses: bool = True,
                    source: Union[Literal['swc'],
                                  Literal['gml']] = 'swc',
                    ) -> NeuronObject:
    """Load example neuron(s).

    These example neurons are skeletons and meshes of the same olfactory
    projection neurons from the DA1 glomerulus which have been automatically
    segmented in the Janelia hemibrain data set [1]. See also
    `https://neuprint.janelia.org`.

    Coordinates are in voxels which equal 8 x 8 x 8 nanometers.

    Parameters
    ----------
    n :         int | None, optional
                Number of neurons to return. If None, will return all available
                example neurons. Can never return more than the maximum number
                of available example neurons.
    kind :      "skeleton" | "mesh" | "mix"
                What kind of neurons to return.
    synapses :  bool
                If True, will also load synapses.
    source :    'swc' | 'gml', optional
                Only relevant for skeletons. Skeletons can be generated from SWC
                files or GML graphs (this is really only used for testing).

    Returns
    -------
    TreeNeuron
                If `n=1` and `kind='skeleton'`.
    MeshNeuron
                If `n=1` and `kind='mesh'`.
    NeuronList
                List of the above neuron types if `n>1`.

    Raises
    ------
    ValueError
                If `kind` is unknown, `n < 1`, or (for skeletons) `source`
                is neither 'swc' nor 'gml'.
    TypeError
                If `n` is neither int nor None.

    References
    ----------
    [1] Louis K. Scheffer et al., bioRxiv. 2020. doi: https://doi.org/10.1101/2020.04.07.030213
    A Connectome and Analysis of the Adult Drosophila Central Brain.

    Examples
    --------
    Load a single neuron

    >>> import navis
    >>> n = navis.example_neurons(n=1)

    Load all example neurons

    >>> nl = navis.example_neurons()

    """
    if kind not in ('skeleton', 'mesh', 'mix'):
        raise ValueError(f'Unknown value for `kind`: "{kind}"')

    if n is None:
        # Default to all available example neurons
        n = len(swc) + len(obj) if kind == 'mix' else len(swc)
    elif not isinstance(n, int):
        raise TypeError(f'Expected int or None, got "{type(n)}"')

    if n < 1:
        raise ValueError("Unable to return less than 1 neuron.")

    if kind == 'mix':
        # Split the requested number roughly evenly between meshes and skeletons
        n_mesh = round(n / 2)
        n_skel = n - n_mesh
    else:
        n_mesh = n_skel = n

    nl = []
    if kind in ('skeleton', 'mix'):
        if source == 'gml':
            graphs = [nx.read_gml(os.path.join(gml_path, g)) for g in gml[:n_skel]]
            # `zip` truncates to the (shorter) `graphs`, so only n_skel
            # filenames are consumed; the file stem doubles as neuron ID
            nl += [nx2neuron(g,
                             units='8 nm',
                             id=int(f.split('.')[0])) for f, g in zip(gml, graphs)]
        elif source == 'swc':
            nl += [read_swc(os.path.join(swc_path, f),
                            units='8 nm',
                            id=int(f.split('.')[0])) for f in swc[:n_skel]]
        else:
            raise ValueError(f'Source must be "swc" or "gml", not "{source}"')

    if kind in ('mesh', 'mix'):
        files = [os.path.join(obj_path, f) for f in obj[:n_mesh]]
        nl += [MeshNeuron(fpath,
                          units='8 nm',
                          name=f.split('.')[0],
                          id=int(f.split('.')[0])) for f, fpath in zip(obj, files)]
        # NOTE(review): this assigns a soma position to *all* neurons loaded so
        # far - including the skeletons when kind='mix' - which matches the
        # original behavior.
        # Do not shadow the parameter `n` here (it previously doubled as the
        # loop variable).
        for neuron in nl:
            neuron.soma_pos = SOMA_POS[neuron.id]

    if synapses:
        for neuron in nl:
            neuron.connectors = pd.read_csv(os.path.join(syn_path, f'{neuron.id}.csv'))

            # Meshes have no skeleton nodes to map connectors onto
            if isinstance(neuron, MeshNeuron):
                neuron._connectors.drop('node_id', axis=1, inplace=True)

    # `fp` is presumably the module-level data directory (defined elsewhere
    # in this module) - TODO confirm
    with open(os.path.join(fp, 'meta.json'), 'r') as f:
        meta = json.load(f)

    for neuron in nl:
        neuron.name = meta[str(neuron.id)]['instance']

    if len(nl) == 1:
        return nl[0]
    return NeuronList(nl)

Load an example volume.

Volumes are in hemibrain space which means coordinates are in voxels at 8 x 8 x 8 nanometers/voxel.

PARAMETER DESCRIPTION
name
    Name of available volume. Currently available::

      "LH" = lateral horn in hemibrain space
      "neuropil" = neuropil in hemibrain space

TYPE: str

RETURNS DESCRIPTION
navis.Volume

Examples:

Load LH volume

>>> import navis
>>> lh = navis.example_volume('LH')
Source code in navis/data/load_data.py
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
def example_volume(name: str) -> Volume:
    """Load an example volume.

    Volumes are in hemibrain space which means coordinates are in voxels
    at 8 x 8 x 8 nanometers/voxel.

    Parameters
    ----------
    name :      str
                Name of available volume. Currently available::

                  "LH" = lateral horn in hemibrain space
                  "neuropil" = neuropil in hemibrain space

    Returns
    -------
    navis.Volume

    Examples
    --------
    Load LH volume

    >>> import navis
    >>> lh = navis.example_volume('LH')

    """
    if not isinstance(name, str):
        raise TypeError(f'Expected string, got "{type(name)}"')

    # Normalise the requested name: lookup is case-insensitive and the
    # files on disk carry a ".obj" extension
    name = name.lower()
    if not name.endswith(".obj"):
        name += ".obj"

    if name not in vols:
        raise ValueError(
            f'No volume named "{name}". Available volumes: {",".join(vols)}'
        )

    # Load the mesh from the bundled data directory; strip the extension
    # again for the volume's name
    return Volume.from_file(
        os.path.join(vols_path, name), name=name.split(".")[0], units="nm"
    )

Find main branch point of unipolar (e.g. insect) neurons.

Note that this might produce garbage if the neuron is fragmented.

PARAMETER DESCRIPTION
x
        May contain multiple neurons.

TYPE: TreeNeuron | NeuronList

method
        The method to use:
          - "longest_neurite" assumes that the main branch point
            is where the two largest branches converge
          - "betweenness" uses centrality to determine the point
            which most shortest paths traverse

TYPE: "longest_neurite" | "betweenness" DEFAULT: 'betweenness'

threshold
        Sets the cutoff for method "betweenness". Decrease threshold
        to be more inclusive (useful if the cell body fiber has
        little bristles), increase to be more stringent (i.e. when
        the skeleton is very clean).

TYPE: float [0-1] DEFAULT: 0.95

reroot_soma
        If True, neuron will be rerooted to soma.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
branch_point

Node ID or list of node IDs of the main branch point(s).

TYPE: int | list of int

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> navis.find_main_branchpoint(n, reroot_soma=True)
110
>>> # Cut neuron into axon, dendrites and primary neurite tract:
>>> # for this we need to cut twice - once at the main branch point
>>> # and once at one of its children
>>> child = n.nodes[n.nodes.parent_id == 2066].node_id.values[0]
>>> split = navis.cut_skeleton(n, [2066, child])
>>> split
<class 'navis.core.neuronlist.NeuronList'> of 3 neurons
          type  n_nodes  n_connectors  n_branches  n_leafs   cable_length    soma
0  TreeNeuron     2572             0         170      176  475078.177926    None
1  TreeNeuron      139             0           1        3   89983.511392  [3490]
2  TreeNeuron     3656             0          63       66  648285.745750    None
Source code in navis/graph/graph_utils.py
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
@utils.map_neuronlist(desc="Searching", allow_parallel=True)
@utils.meshneuron_skeleton(method="node_to_vertex")
def find_main_branchpoint(
    x: "core.NeuronObject",
    method: Union[Literal["longest_neurite"], Literal["betweenness"]] = "betweenness",
    threshold: float = 0.95,
    reroot_soma: bool = False,
) -> Union[int, List[int]]:
    """Find main branch point of unipolar (e.g. insect) neurons.

    Note that this might produce garbage if the neuron is fragmented.

    Parameters
    ----------
    x :             TreeNeuron | NeuronList
                    May contain multiple neurons.
    method :        "longest_neurite" | "betweenness"
                    The method to use:
                      - "longest_neurite" assumes that the main branch point
                        is where the two largest branches converge
                      - "betweenness" uses centrality to determine the point
                        which most shortest paths traverse
    threshold :     float [0-1]
                    Sets the cutoff for method "betweenness". Decrease threshold
                    to be more inclusive (useful if the cell body fiber has
                    little bristles), increase to be more stringent (i.e. when
                    the skeleton is very clean).
    reroot_soma :   bool, optional
                    If True, neuron will be rerooted to soma.

    Returns
    -------
    branch_point :  int | list of int
                    Node ID or list of node IDs of the main branch point(s).

    Raises
    ------
    TypeError
                    If `x` is not a TreeNeuron (after NeuronList/MeshNeuron
                    unwrapping by the decorators).
    ValueError
                    If `method` is unknown or the neuron has no branch points.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> navis.find_main_branchpoint(n, reroot_soma=True)
    110
    >>> # Cut neuron into axon, dendrites and primary neurite tract:
    >>> # for this we need to cut twice - once at the main branch point
    >>> # and once at one of its children
    >>> child = n.nodes[n.nodes.parent_id == 2066].node_id.values[0]
    >>> split = navis.cut_skeleton(n, [2066, child])
    >>> split                                                   # doctest: +SKIP
    <class 'navis.core.neuronlist.NeuronList'> of 3 neurons
              type  n_nodes  n_connectors  n_branches  n_leafs   cable_length    soma
    0  TreeNeuron     2572             0         170      176  475078.177926    None
    1  TreeNeuron      139             0           1        3   89983.511392  [3490]
    2  TreeNeuron     3656             0          63       66  648285.745750    None

    """
    utils.eval_param(
        method, name="method", allowed_values=("longest_neurite", "betweenness")
    )

    if not isinstance(x, core.TreeNeuron):
        raise TypeError(f'Expected TreeNeuron(s), got "{type(x)}"')

    # At this point x is TreeNeuron
    x: core.TreeNeuron

    # If no branches, there is no (main) branch point to find
    if x.nodes[x.nodes.type == "branch"].empty:
        raise ValueError("Neuron has no branch points.")

    if reroot_soma and not isinstance(x.soma, type(None)):
        # Reroot on a copy so the input neuron is left untouched
        x = x.reroot(x.soma, inplace=False)

    if method == "longest_neurite":
        G = x.graph

        # First, find longest path
        longest = nx.dag_longest_path(G, weight="weight")

        # Remove longest path
        # (use subgraph to avoid editing original or copying the graph)
        keep = ~np.isin(G.nodes, longest)
        G = G.subgraph(np.array(G.nodes)[keep])

        # Find second longest path
        sc_longest = nx.dag_longest_path(G, weight="weight")

        # Parent of the last node in sc_longest is the common branch point
        # (NOTE(review): this relies on graph edges pointing child -> parent,
        # so `successors` yields the parent - confirm against x.graph)
        bp = list(x.graph.successors(sc_longest[-1]))[0]
    else:
        # Get betweenness for each node
        x = morpho.betweeness_centrality(x, directed=True, from_="branch_points")
        # Get branch points with centrality within `threshold` of the maximum
        high_between = (
            x.branch_points.betweenness >= x.branch_points.betweenness.max() * threshold
        )
        candidates = x.branch_points[high_between]

        # If only one candidate just go with it
        if candidates.shape[0] == 1:
            bp = candidates.node_id.values[0]
        else:
            # If multiple points get the farthest one from the root
            root_dists = dist_to_root(x)
            bp = sorted(candidates.node_id.values, key=lambda x: root_dists[x])[-1]

    # This makes sure we get the same data type as in the node table
    # -> Network X seems to sometimes convert integers to floats
    return x.nodes.node_id.dtype.type(bp)

Try finding a neuron's soma.

Will use the .soma_detection_radius and .soma_detection_label attribute of a neuron to search for the soma in the node table.

If these attributes don't exist, will fall back to defaults: None and 1, respectively.

PARAMETER DESCRIPTION
x

TYPE: Neuron

RETURNS DESCRIPTION
Node ID(s) of potential somata.

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> navis.find_soma(n)
array([4177], dtype=int32)
Source code in navis/morpho/analyze.py
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
def find_soma(x: 'core.TreeNeuron') -> Sequence[int]:
    """Try finding a neuron's soma.

    Will use the `.soma_detection_radius` and `.soma_detection_label`
    attribute of a neuron to search for the soma in the node table.

    If attributes don't exist, will fall back to defaults: `None` and
    `1`, respectively.

    Parameters
    ----------
    x :         Neuron

    Returns
    -------
    Node ID(s) of potential somata.

    Raises
    ------
    TypeError
                If `x` is not a TreeNeuron.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> navis.find_soma(n)
    array([4177], dtype=int32)

    """
    if not isinstance(x, core.TreeNeuron):
        raise TypeError(f'Input must be TreeNeuron, not "{type(x)}"')

    soma_radius = getattr(x, 'soma_detection_radius', None)
    soma_label = getattr(x, 'soma_detection_label', None)

    # Label check requires both a detection label AND a `label` column
    check_labels = not isinstance(soma_label, type(None)) and 'label' in x.nodes.columns
    check_radius = not isinstance(soma_radius, type(None))

    # If no label or radius is given, return empty array
    if not check_labels and not check_radius:
        return np.array([], dtype=x.nodes.node_id.values.dtype)

    # Note to self: I've optimised the s**t out of this function
    # The reason why we're using a mask and this somewhat
    # convoluted logic is to avoid having to subset the node table
    # because that's really slow.

    # Start with a mask that includes all nodes
    mask = np.ones(len(x.nodes), dtype=bool)

    if check_radius:
        # When checking for radii, we use an empty mask and fill it
        # with nodes that have a large enough radius
        mask[:] = False

        # Drop nodes that don't have a radius
        radii = x.nodes.radius.values
        has_radius = ~np.isnan(radii)

        # Filter further to nodes that have a large enough radius
        if has_radius.any():
            if isinstance(soma_radius, pint.Quantity):
                if isinstance(x.units, (pint.Quantity, pint.Unit)) and \
                   not x.units.dimensionless and \
                   not isinstance(x.units._magnitude, np.ndarray) \
                   and x.units != soma_radius:  # only convert if units are different
                    # Do NOT remove the .values here -> otherwise conversion to units won't work
                    is_large = radii * x.units >= soma_radius
                else:
                    # If neurons has no units or if units are non-isotropic,
                    # assume they are the same as the soma radius
                    is_large = radii >= soma_radius._magnitude
            else:
                # Plain number -> compare magnitudes directly
                is_large = radii >= soma_radius

            # Mark nodes that have a large enough radius
            mask[is_large] = True

    # See if we (also) need to check for a specific label
    if check_labels:
        # Important: we need to use np.asarray here because the `label` column
        # can be categorical in which case a `soma_nodes.label.astype(str)` might
        # throw annoying runtime warnings
        soma_node_ids = x.nodes.node_id.values[mask]
        soma_node_labels = np.asarray(x.nodes.label.values[mask]).astype(str)

        # Labels are compared as strings to be robust to dtype differences
        return soma_node_ids[soma_node_labels == str(soma_label)]
    # If no labels to check we can return the mask directly
    else:
        return x.nodes.node_id.values[mask]

Try to fix some common problems with mesh.

  1. Remove infinite values
  2. Merge duplicate vertices
  3. Remove duplicate and degenerate faces
  4. Fix normals
  5. Remove unreferenced vertices
  6. Remove disconnected fragments (Optional)
  7. Fill holes (Optional)
PARAMETER DESCRIPTION
mesh

TYPE: trimesh.Trimesh | navis.MeshNeuron

fill_holes
            If True will try to fix holes in the mesh.

TYPE: bool DEFAULT: False

remove_fragments
            If a number is given, will iterate over the mesh's
            connected components and remove those consisting of less
            than the given number of vertices. For example,
            `remove_fragments=5` will drop parts of the mesh
            that consist of five or less connected vertices.

TYPE: False | int DEFAULT: False

inplace
            If True, will perform fixes on the input mesh. If False,
            will make a copy and leave the original untouched.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
fixed object : trimesh.Trimesh or navis.MeshNeuron
Source code in navis/meshes/mesh_utils.py
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
def fix_mesh(mesh: Union[tm.Trimesh, 'core.MeshNeuron'],
             fill_holes: bool = False,
             remove_fragments: bool = False,
             inplace: bool = False):
    """Try to fix some common problems with mesh.

     1. Remove infinite values
     2. Merge duplicate vertices
     3. Remove duplicate and degenerate faces
     4. Fix normals
     5. Remove unreferenced vertices
     6. Remove disconnected fragments (Optional)
     7. Fill holes (Optional)

    Parameters
    ----------
    mesh :              trimesh.Trimesh | navis.MeshNeuron
    fill_holes :        bool
                        If True will try to fix holes in the mesh.
    remove_fragments :  False | int
                        If a number is given, will iterate over the mesh's
                        connected components and remove those consisting of less
                        than the given number of vertices. For example,
                        `remove_fragments=5` will drop parts of the mesh
                        that consist of five or less connected vertices.
    inplace :           bool
                        If True, will perform fixes on the input mesh. If False,
                        will make a copy and leave the original untouched.

    Returns
    -------
    fixed object :      trimesh.Trimesh or navis.MeshNeuron

    """
    if not inplace:
        mesh = mesh.copy()

    # Operate on the underlying trimesh representation
    m = mesh.trimesh if isinstance(mesh, core.MeshNeuron) else mesh

    assert isinstance(m, tm.Trimesh)

    if remove_fragments:
        # Collect vertices belonging to components at/below the size cutoff
        small_verts = []
        for component in nx.connected_components(m.vertex_adjacency_graph):
            if len(component) <= remove_fragments:
                small_verts.extend(component)

        # Keep only vertices outside those small components
        keep_mask = ~np.isin(np.arange(m.vertices.shape[0]), small_verts)
        m.update_vertices(keep_mask)

    if fill_holes:
        m.fill_holes()

    # Standard cleanup pass
    m.remove_infinite_values()
    m.merge_vertices()
    m.remove_duplicate_faces()
    m.remove_degenerate_faces()
    m.fix_normals()
    m.remove_unreferenced_vertices()

    # If we started with a MeshNeuron, write the cleaned geometry back and
    # invalidate cached properties
    if isinstance(mesh, core.MeshNeuron):
        mesh.vertices, mesh.faces = m.vertices, m.faces
        mesh._clear_temp_attr()

    return mesh

Calculate flow between leaf nodes.

PARAMETER DESCRIPTION
x
    Neuron(s) to calculate flow centrality for.

TYPE: TreeNeuron | MeshNeuron | NeuronList

RETURNS DESCRIPTION
neuron

Adds "flow_centrality" as column in the node table (for TreeNeurons) or as .flow_centrality property (for MeshNeurons).

Examples:

>>> import navis
>>> n = navis.example_neurons(2)
>>> n.reroot(n.soma, inplace=True)
>>> _ = navis.flow_centrality(n)
>>> n[0].nodes.flow_centrality.max()
91234
See Also

navis.synapse_flow_centrality Synapse-based flow centrality.

Source code in navis/morpho/mmetrics.py
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
@utils.map_neuronlist(desc="Calc. flow", allow_parallel=True)
@utils.meshneuron_skeleton(
    method="node_properties",
    include_connectors=True,
    heal=True,
    node_props=["flow_centrality"],
)
def flow_centrality(x: "core.NeuronObject") -> "core.NeuronObject":
    """Calculate flow between leaf nodes.

    Parameters
    ----------
    x :         TreeNeuron | MeshNeuron | NeuronList
                Neuron(s) to calculate flow centrality for.

    Returns
    -------
    neuron
                Adds "flow_centrality" as column in the node table (for
                TreeNeurons) or as `.flow_centrality` property
                (for MeshNeurons).

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(2)
    >>> n.reroot(n.soma, inplace=True)
    >>> _ = navis.flow_centrality(n)
    >>> n[0].nodes.flow_centrality.max()
    91234

    See Also
    --------
    [`navis.synapse_flow_centrality`][]
            Synapse-based flow centrality.

    """
    # Quick disclaimer:
    # This function may look unnecessarily complicated. I did also try out an
    # implementation using igraph + shortest paths which works like a charm and
    # causes less headaches. It is, however, about >10X slower than this version!
    # Note to self: do not go down that rabbit hole again!
    msg = (
        "Synapse-based flow centrality has been moved to "
        "`navis.synapse_flow_centrality` in navis "
        "version 1.4.0. `navis.flow_centrality` now calculates "
        "morphology-only flow. "
        "This warning will be removed in a future version of navis."
    )
    warnings.warn(msg, DeprecationWarning)
    logger.warning(msg)

    if not isinstance(x, core.TreeNeuron):
        raise ValueError(f'Expected TreeNeuron(s), got "{type(x)}"')

    # Flow only makes sense relative to the root; warn if root isn't the soma
    if np.any(x.soma) and not np.all(np.isin(x.soma, x.root)):
        logger.warning(f"Neuron {x.id} is not rooted to its soma!")

    # Get list of leafs
    leafs = x.leafs.node_id.values
    total_leafs = len(leafs)

    # Get list of points to calculate flow centrality for:
    calc_node_ids = x.branch_points.node_id.values

    # We will be processing a super downsampled version of the neuron to
    # speed up calculations; temporarily silence logging/progress bars while
    # downsampling, then restore the previous state
    current_level = logger.level
    current_state = config.pbar_hide
    logger.setLevel("ERROR")
    config.pbar_hide = True
    y = sampling.downsample_neuron(
        x=x,
        downsampling_factor=float("inf"),
        inplace=False,
        preserve_nodes=calc_node_ids,
    )
    logger.setLevel(current_level)
    config.pbar_hide = current_state

    # Get number of leafs distal to each branch's childs
    # Note that we're using geodesic matrix here because it is much more
    # efficient than for `distal_to` for larger queries/neurons
    dists = graph.geodesic_matrix(y, from_=leafs, directed=True, weight=None)
    # A finite distance from a leaf means that leaf is distal to the node
    distal = (dists[calc_node_ids] < np.inf).sum(axis=0)

    # Calculate the flow: leaves on one side times leaves on the other
    flow = {n: (total_leafs - distal[n]) * distal[n] for n in calc_node_ids}

    # At this point there is only flow for branch points and connector nodes.
    # Let's complete that mapping by adding flow for the nodes between branch points.
    for s in x.small_segments:
        # Segments' orientation goes from distal -> proximal

        # If first node in the segment has no flow, set to 0
        flow[s[0]] = flow.get(s[0], 0)

        # For each node get the flow of its child
        # (nodes within a segment all carry the same flow as the segment's
        # distal end)
        for i in range(1, len(s)):
            if s[i] not in flow:
                flow[s[i]] = flow[s[i - 1]]

    # Nodes without mapped flow (e.g. the root) default to 0
    x.nodes["flow_centrality"] = x.nodes.node_id.map(flow).fillna(0).astype(int)

    # We need to add a restriction: a branchpoint cannot have a lower
    # flow than its highest child -> this happens at the main branch point to
    # the cell body fiber because the flow doesn't go "through" it in
    # child -> parent direction but rather "across" it from one child to the
    # other
    is_bp = x.nodes["type"] == "branch"
    bp = x.nodes.loc[is_bp, "node_id"].values
    bp_childs = x.nodes[x.nodes.parent_id.isin(bp)]
    max_flow = bp_childs.groupby("parent_id").flow_centrality.max()
    x.nodes.loc[is_bp, "flow_centrality"] = max_flow.loc[bp].values
    x.nodes["flow_centrality"] = x.nodes.flow_centrality.astype(int)

    return x

Calculate form factor for given neuron.

The form factor F(q) is a Fourier transform of density-density correlation of particles used to classify objects in polymer physics. Based on Choi et al., 2022 (bioRxiv). Code adapted from github.com/kirichoi/FqClustering.

PARAMETER DESCRIPTION
x
    Neurons to calculate form factor for. A few notes:
      - data should be in micron - if not, you might want to adjust
        start/stop/min!
      - since this is all about density, it may make sense to
        resample neurons

TYPE: TreeNeuron | Meshneuron | Dotprops | NeuronList

start
    Start/stop/num describe the (log) space over which to calculate
    the form factor. Effectively determining the resolution.
    Assuming `x` is in microns the defaults mean we pay attention
    to densities between 1 nm (1e-3 microns) and 1 mm (1e+3 microns).
    The x-value corresponding to the form factor(s) in `Fq` will
    be `np.logspace(start, stop, num)`.

TYPE: int DEFAULT: -3

parallel
    Whether to use multiple cores when `x` is a NeuronList.

TYPE: bool DEFAULT: False

n_cores
    Number of cores to use when `x` is a NeuronList and
    `parallel=True`. Even on a single core this function makes
    heavy use of numpy which itself uses multiple threads - it is
    therefore not advisable to use all your cores as this would
    create a bottleneck.

TYPE: int DEFAULT: os.cpu_count() // 2

progress
    Whether to show a progress bar.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
Fq

For single neurons: (num,) array For Neuronlists: (len(x), num) array

TYPE: np.ndarray

References

Polymer physics-based classification of neurons Kiri Choi, Won Kyu Kim, Changbong Hyeon bioRxiv 2022.04.07.487455; doi: https://doi.org/10.1101/2022.04.07.487455

Examples:

>>> import navis
>>> nl = navis.example_neurons(3)
>>> nl = nl.convert_units('microns')
>>> # Resample to 1 node / micron
>>> rs = navis.resample_skeleton(nl, '1 micron')
>>> # Calculate form factor
>>> Fq = navis.form_factor(rs, start=-3, stop=3, num=301,
...                        parallel=True, n_cores=3)
>>> # Plot
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> x = np.logspace(-3, 3,  301)
>>> fig, ax = plt.subplots()
>>> for i in range(len(Fq)):
...     _ = ax.plot(x, Fq[i])
>>> # Make log-log
>>> ax.set_xscale('log')
>>> ax.set_yscale('log')
>>> plt.show()
>>> # Cluster
>>> from scipy.spatial.distance import pdist
>>> from scipy.cluster.hierarchy import dendrogram, linkage
>>> dists = pdist(Fq)
>>> Z = linkage(dists, method='ward')
>>> dn = dendrogram(Z)
Source code in navis/morpho/fq.py
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
def form_factor(x: Union['core.TreeNeuron', 'core.MeshNeuron'],
                start: int = -3,
                stop: int = 3,
                num: int = 601,
                parallel: bool = False,
                n_cores: int = os.cpu_count() // 2,
                progress=True):
    """Calculate form factor for given neuron.

    The form factor F(q) is a Fourier transform of the density-density
    correlation of particles used in polymer physics to classify objects.
    Based on Choi et al., 2022 (bioRxiv). Code adapted from
    github.com/kirichoi/FqClustering.

    Parameters
    ----------
    x :         TreeNeuron | MeshNeuron | Dotprops | NeuronList
                Neurons to calculate form factor for. A few notes:
                  - data should be in micron - if not, you might want to adjust
                    start/stop/min!
                  - since this is all about density, it may make sense to
                    resample neurons
    start/stop/num : int
                Start/stop/num describe the (log) space over which to calculate
                the form factor. Effectively determining the resolution.
                Assuming `x` is in microns the defaults mean we pay attention
                to densities between 1 nm (1e-3 microns) and 1 mm (1e+3 microns).
                The x-value corresponding to the form factor(s) in `Fq` will
                be `np.logspace(start, stop, num)`.
    parallel :  bool
                Whether to use multiple cores when `x` is a NeuronList.
    n_cores :   int
                Number of cores to use when `x` is a NeuronList and
                `parallel=True`. Even on a single core this function makes
                heavy use of numpy which itself uses multiple threads - it is
                therefore not advisable to use all your cores as this would
                create a bottleneck.
    progress :  bool
                Whether to show a progress bar.

    Returns
    -------
    Fq :        np.ndarray
                For single neurons: `(num,)` array
                For NeuronLists: `(len(x), num)` array

    References
    ----------
    Polymer physics-based classification of neurons
    Kiri Choi, Won Kyu Kim, Changbong Hyeon
    bioRxiv 2022.04.07.487455; doi: https://doi.org/10.1101/2022.04.07.487455

    Examples
    --------
    >>> import navis
    >>> nl = navis.example_neurons(3)
    >>> nl = nl.convert_units('microns')
    >>> # Resample to 1 node / micron
    >>> rs = navis.resample_skeleton(nl, '1 micron')
    >>> # Calculate form factor
    >>> Fq = navis.form_factor(rs, start=-3, stop=3, num=301,
    ...                        parallel=True, n_cores=3)
    >>> # Plot
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> x = np.logspace(-3, 3,  301)
    >>> fig, ax = plt.subplots()
    >>> for i in range(len(Fq)):
    ...     _ = ax.plot(x, Fq[i])
    >>> # Make log-log
    >>> ax.set_xscale('log')
    >>> ax.set_yscale('log')
    >>> plt.show()                                              # doctest: +SKIP
    >>> # Cluster
    >>> from scipy.spatial.distance import pdist
    >>> from scipy.cluster.hierarchy import dendrogram, linkage
    >>> dists = pdist(Fq)
    >>> Z = linkage(dists, method='ward')
    >>> dn = dendrogram(Z)                                      # doctest: +SKIP

    """
    # NeuronList: compute one form factor per neuron and stack into 2D array
    if isinstance(x, core.NeuronList):
        wrap_pbar = partial(config.tqdm,
                            desc='Calc. form factor',
                            total=len(x),
                            disable=config.pbar_hide or not progress,
                            leave=config.pbar_leave)
        # Per-neuron worker with fixed settings (inner progress bars disabled)
        single = partial(form_factor, progress=False,
                         start=start, stop=stop, num=num)

        if parallel:
            with mp.Pool(processes=n_cores) as pool:
                stack = list(wrap_pbar(pool.imap(single, x)))
        else:
            stack = [single(n) for n in wrap_pbar(x)]

        return np.vstack(stack)

    utils.eval_param(x, name='x', allowed_types=(core.TreeNeuron,
                                                 core.Dotprops,
                                                 core.MeshNeuron))

    # Grab the point cloud representing this neuron
    if isinstance(x, core.TreeNeuron):
        pts = x.nodes[['x', 'y', 'z']].values
    elif isinstance(x, core.MeshNeuron):
        pts = x.vertices
    elif isinstance(x, core.Dotprops):
        pts = x.points

    # Drop duplicate positions so the pairwise distances below contain no zeros
    pts = np.unique(pts, axis=0)
    n_pts = len(pts)

    # Wavenumbers at which to evaluate F(q) + condensed pairwise distances
    q_range = np.logspace(start, stop, num)
    pair_dists = scipy.spatial.distance.pdist(pts)

    Fq = np.empty(len(q_range))
    for i in config.trange(len(q_range),
                           desc='Calc. form factor',
                           disable=config.pbar_hide or not progress,
                           leave=config.pbar_leave):
        qr = q_range[i] * pair_dists
        # Sum of sin(qr)/qr over all (unordered) pairs, doubled and
        # normalised by N twice, plus the 1/N self-term
        Fq[i] = np.divide(np.divide(2 * np.sum(np.sin(qr) / qr), n_pts), n_pts) + 1 / n_pts

    return Fq

Generate geodesic ("along-the-arbor") distance matrix between nodes/vertices.

PARAMETER DESCRIPTION
x
    If list, must contain a SINGLE neuron.

TYPE: TreeNeuron | MeshNeuron | NeuronList

from_
    Node IDs (for TreeNeurons) or vertex indices (for MeshNeurons).
    If provided, will compute distances only FROM this subset to
    all other nodes/vertices.

TYPE: list | numpy.ndarray DEFAULT: None

directed
    For TreeNeurons only: if True, pairs without a child->parent
    path will be returned with `distance = "inf"`.

TYPE: bool DEFAULT: False

weight
    If "weight" distances are given as physical length.
    If `None` distance is the number of nodes.

TYPE: 'weight' | None DEFAULT: 'weight'

limit
    Use to limit distance calculations. Nodes that are not within
    `limit` will have distance `np.inf`. If neuron has its
    `.units` set, you can also pass a string such as "10 microns".

TYPE: int | float DEFAULT: np.inf

RETURNS DESCRIPTION
pd.DataFrame

Geodesic distance matrix. If the neuron is fragmented or directed=True, unreachable node pairs will have distance np.inf.

See Also

navis.distal_to Check if a node A is distal to node B. navis.dist_between Get point-to-point geodesic distances. navis.dist_to_root Distances from all skeleton node to their root(s). navis.graph.skeleton_adjacency_matrix Generate adjacency matrix for a skeleton.

Examples:

Find average geodesic distance between all leaf nodes

>>> import navis
>>> n = navis.example_neurons(1)
>>> # Generate distance matrix
>>> m = navis.geodesic_matrix(n)
>>> # Subset matrix to leaf nodes
>>> leafs = n.nodes[n.nodes.type=='end'].node_id.values
>>> l_dist = m.loc[leafs, leafs]
>>> # Get mean
>>> round(l_dist.mean().mean())
12983
Source code in navis/graph/graph_utils.py
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
def geodesic_matrix(
    x: "core.NeuronObject",
    from_: Optional[Iterable[int]] = None,
    directed: bool = False,
    weight: Optional[str] = "weight",
    limit: Union[float, int] = np.inf,
) -> pd.DataFrame:
    """Generate geodesic ("along-the-arbor") distance matrix between nodes/vertices.

    Parameters
    ----------
    x :         TreeNeuron | MeshNeuron | NeuronList
                If list, must contain a SINGLE neuron.
    from_ :     list | numpy.ndarray, optional
                Node IDs (for TreeNeurons) or vertex indices (for MeshNeurons).
                If provided, will compute distances only FROM this subset to
                all other nodes/vertices.
    directed :  bool, optional
                For TreeNeurons only: if True, pairs without a child->parent
                path will be returned with `distance = "inf"`.
    weight :    'weight' | None, optional
                If "weight" distances are given as physical length.
                If `None` distance is the number of nodes.
    limit :     int | float, optional
                Use to limit distance calculations. Nodes that are not within
                `limit` will have distance `np.inf`. If neuron has its
                `.units` set, you can also pass a string such as "10 microns".

    Returns
    -------
    pd.DataFrame
                Geodesic distance matrix. If the neuron is fragmented or
                `directed=True`, unreachable node pairs will have distance `np.inf`.

    See Also
    --------
    [`navis.distal_to`][]
        Check if a node A is distal to node B.
    [`navis.dist_between`][]
        Get point-to-point geodesic distances.
    [`navis.dist_to_root`][]
        Distances from all skeleton node to their root(s).
    [`navis.graph.skeleton_adjacency_matrix`][]
        Generate adjacency matrix for a skeleton.

    Examples
    --------
    Find average geodesic distance between all leaf nodes

    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> # Generate distance matrix
    >>> m = navis.geodesic_matrix(n)
    >>> # Subset matrix to leaf nodes
    >>> leafs = n.nodes[n.nodes.type=='end'].node_id.values
    >>> l_dist = m.loc[leafs, leafs]
    >>> # Get mean
    >>> round(l_dist.mean().mean())
    12983

    """
    # Unwrap NeuronLists - only a single neuron makes sense here
    if isinstance(x, core.NeuronList):
        if len(x) != 1:
            raise ValueError("Input must be a single neuron.")
        x = x[0]

    if not isinstance(x, (core.TreeNeuron, core.MeshNeuron)):
        raise ValueError(f'Unable to process data of type "{type(x)}"')

    # Convert e.g. "10 microns" into a number in the neuron's units
    limit = x.map_units(limit, on_error="raise")

    # Use fastcore if available
    if utils.fastcore and isinstance(x, core.TreeNeuron):
        # Calculate node distances
        if weight == "weight":
            # Replace the "weight" flag with the actual per-node
            # child->parent (physical) distances fastcore expects
            weight = utils.fastcore.dag.parent_dist(
                x.nodes.node_id.values,
                x.nodes.parent_id.values,
                x.nodes[["x", "y", "z"]].values,
                root_dist=0,
            )

        # Check for missing sources
        if not isinstance(from_, type(None)):
            from_ = np.unique(utils.make_iterable(from_))

            miss = from_[~np.isin(from_, x.nodes.node_id.values)]
            if len(miss):
                raise ValueError(
                    f"Node/vertex IDs not present: {', '.join(miss.astype(str))}"
                )
            ix = from_  # row labels = requested sources
        else:
            ix = x.nodes.node_id.values  # all nodes are sources

        dmat = utils.fastcore.geodesic_matrix(
            x.nodes.node_id.values,
            x.nodes.parent_id.values,
            weights=weight,
            directed=directed,
            sources=from_,
        )

        # Fastcore returns -1 for unreachable node pairs
        dmat[dmat < 0] = np.inf

        # Fastcore has no built-in `limit` -> enforce it post hoc
        if limit is not None and limit is not np.inf:
            dmat[dmat > limit] = np.inf

        return pd.DataFrame(dmat, index=ix, columns=x.nodes.node_id.values)

    # Makes no sense to use directed for MeshNeurons
    if isinstance(x, core.MeshNeuron):
        directed = False

    # Build a sparse adjacency matrix from either igraph or networkx
    if x.igraph and config.use_igraph:
        if isinstance(x, core.TreeNeuron):
            nodeList = np.array(x.igraph.vs.get_attribute_values("node_id"))
        else:
            # MeshNeurons: vertices are simply indexed 0..N-1
            nodeList = np.arange(len(x.igraph.vs))

        # Matrix is ordered by vertex number
        m = _igraph_to_sparse(x.igraph, weight_attr=weight)
    else:
        nodeList = np.array(x.graph.nodes())

        # `to_scipy_sparse_matrix` was removed in newer networkx versions
        if hasattr(nx, "to_scipy_sparse_matrix"):
            m = nx.to_scipy_sparse_matrix(x.graph, nodeList, weight=weight)
        else:
            m = nx.to_scipy_sparse_array(x.graph, nodeList, weight=weight)

    if not isinstance(from_, type(None)):
        from_ = np.unique(utils.make_iterable(from_))

        miss = from_[~np.isin(from_, nodeList)].astype(str)
        if len(miss):
            raise ValueError(f'Node/vertex IDs not present: {", ".join(miss)}')

        # Translate node IDs into positional indices into the sparse matrix
        indices = np.where(np.isin(nodeList, from_))[0]
        ix = nodeList[indices]
    else:
        indices = None  # dijkstra treats None as "all nodes"
        ix = nodeList

    # For some reason csgraph.dijkstra expects indices/indptr as int32
    # igraph seems to do that by default but networkx uses int64 for indices
    m.indptr = m.indptr.astype("int32", copy=False)
    m.indices = m.indices.astype("int32", copy=False)
    dmat = csgraph.dijkstra(m, directed=directed, indices=indices, limit=limit)

    return pd.DataFrame(dmat, columns=nodeList, index=ix)  # type: ignore  # no stubs

Grab active 3D viewer.

RETURNS DESCRIPTION
[`navis.Viewer`][]

Examples:

>>> import navis
>>> from vispy import scene
>>> # Get and plot neuron in 3d
>>> n = navis.example_neurons(1)
>>> _ = n.plot3d(color='red', backend='vispy')
>>> # Grab active viewer and add custom text
>>> viewer = navis.get_viewer()
>>> text = scene.visuals.Text(text='TEST',
...                           pos=(0, 0, 0))
>>> viewer.add(text)
>>> # Close viewer
>>> viewer.close()
Source code in navis/plotting/vispy/vputils.py
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
def get_viewer():
    """Grab active 3D viewer.

    Returns
    -------
    [`navis.Viewer`][]

    Examples
    --------
    >>> import navis
    >>> from vispy import scene
    >>> # Get and plot neuron in 3d
    >>> n = navis.example_neurons(1)
    >>> _ = n.plot3d(color='red', backend='vispy')
    >>> # Grab active viewer and add custom text
    >>> viewer = navis.get_viewer()
    >>> text = scene.visuals.Text(text='TEST',
    ...                           pos=(0, 0, 0))
    >>> viewer.add(text)
    >>> # Close viewer
    >>> viewer.close()

    """
    # The most recently opened viewer registers itself on `config`;
    # fall back to None if no viewer has been created yet.
    viewer = getattr(config, 'primary_viewer', None)
    return viewer

Guess radii for skeleton nodes.

Uses distance between connectors and nodes to guess radii. Interpolate for nodes without connectors. Fills in radius column in node table.

PARAMETER DESCRIPTION
x
        Neuron(s) to be processed.

TYPE: TreeNeuron | NeuronList

method
        Method to be used to interpolate unknown radii. See
        `pandas.DataFrame.interpolate` for details.

TYPE: str DEFAULT: 'linear'

limit
        Maximum number of consecutive missing radii to fill.
        Must be greater than 0.

TYPE: int DEFAULT: None

smooth
        If True, will smooth radii after interpolation using a
        rolling window. If `int`, will use to define size of
        window.

TYPE: bool | int DEFAULT: True

inplace
        If False, will use and return copy of original neuron(s).

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
TreeNeuron / List

Examples:

>>> import navis
>>> nl = navis.example_neurons(2)
>>> nl_radius = navis.guess_radius(nl)
Source code in navis/morpho/manipulation.py
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
@utils.map_neuronlist(desc="Guessing", allow_parallel=True)
def guess_radius(
    x: NeuronObject,
    method: str = "linear",
    limit: Optional[int] = None,
    smooth: Union[bool, int] = True,
    inplace: bool = False,
) -> Optional[NeuronObject]:
    """Guess radii for skeleton nodes.

    Uses distance between connectors and nodes to guess radii. Interpolate for
    nodes without connectors. Fills in `radius` column in node table.

    Parameters
    ----------
    x :             TreeNeuron | NeuronList
                    Neuron(s) to be processed.
    method :        str, optional
                    Method to be used to interpolate unknown radii. See
                    `pandas.DataFrame.interpolate` for details.
    limit :         int, optional
                    Maximum number of consecutive missing radii to fill.
                    Must be greater than 0.
    smooth :        bool | int, optional
                    If True, will smooth radii after interpolation using a
                    rolling window. If `int`, will use to define size of
                    window.
    inplace :       bool, optional
                    If False, will use and return copy of original neuron(s).

    Returns
    -------
    TreeNeuron/List

    Examples
    --------
    >>> import navis
    >>> nl = navis.example_neurons(2)
    >>> nl_radius = navis.guess_radius(nl)

    """
    # The decorator makes sure that at this point we have single neurons
    if not isinstance(x, core.TreeNeuron):
        raise TypeError(f"Can only process TreeNeurons, not {type(x)}")

    # Radii are inferred from connector-to-node distances, so we need connectors
    if not hasattr(x, "connectors") or x.connectors.empty:
        raise ValueError("Neuron must have connectors!")

    if not inplace:
        x = x.copy()

    # Set default rolling window size
    if isinstance(smooth, bool) and smooth:
        smooth = 5

    # We will be using the index as distance to interpolate. For this we have
    # to change method 'linear' to 'index'
    method = "index" if method == "linear" else method

    # Collect connectors and calc distances
    cn = x.connectors.copy()

    # Prepare nodes (add parent_dist for later, set index)
    x.nodes["parent_dist"] = mmetrics.parent_dist(x, root_dist=0)
    nodes = x.nodes.set_index("node_id", inplace=False)

    # For each connector (pre and post), get the X/Y distance to its node
    # NOTE(review): only x/y are used (z is ignored) and `.astype(int)`
    # truncates the squared distance *before* the sqrt - confirm this is
    # intentional rather than a misplaced cast.
    cn_locs = cn[["x", "y"]].values
    tn_locs = nodes.loc[cn.node_id.values, ["x", "y"]].values
    dist = np.sqrt(np.sum((tn_locs - cn_locs) ** 2, axis=1).astype(int))
    cn["dist"] = dist

    # Get max distance per node (in case of multiple connectors per
    # node)
    cn_grouped = cn.groupby("node_id").dist.max()

    # Set undefined radii to None so that they are ignored for interpolation
    nodes.loc[nodes.radius <= 0, "radius"] = None

    # Assign radii to nodes
    nodes.loc[cn_grouped.index, "radius"] = cn_grouped.values.astype(
        nodes.radius.dtype, copy=False
    )

    # Go over each segment and interpolate radii
    for s in config.tqdm(
        x.segments, desc="Interp.", disable=config.pbar_hide, leave=config.pbar_leave
    ):
        # Get this segments radii and parent dist
        this_radii = nodes.loc[s, ["radius", "parent_dist"]]
        this_radii["parent_dist_cum"] = this_radii.parent_dist.cumsum()

        # Set cumulative distance as index and drop parent_dist
        # (interpolation with method='index' uses the index as x-values)
        this_radii = this_radii.set_index("parent_dist_cum", drop=True).drop(
            "parent_dist", axis=1
        )

        # Interpolate missing radii
        interp = this_radii.interpolate(
            method=method, limit_direction="both", limit=limit
        )

        if smooth:
            # Rolling max smooths while keeping radii from shrinking locally
            interp = interp.rolling(smooth, min_periods=1).max()

        nodes.loc[s, "radius"] = interp.values

    # Set non-interpolated radii back to -1
    nodes.loc[nodes.radius.isnull(), "radius"] = -1

    # Reassign nodes
    x.nodes = nodes.reset_index(drop=False, inplace=False)

    return x

Heal fragmented skeleton(s).

Tries to heal a fragmented skeleton (i.e. a neuron with multiple roots) using a minimum spanning tree.

PARAMETER DESCRIPTION
x
    Fragmented skeleton(s).

TYPE: TreeNeuron/List

method
    Method used to heal fragments:
     - 'LEAFS': Only leaf (including root) nodes will be used to
       heal gaps. This can be much faster depending on the size of
       the neuron
     - 'ALL': All nodes can be used to reconnect fragments.

TYPE: 'LEAFS' | 'ALL' DEFAULT: 'ALL'

max_dist
    This effectively sets the max length for newly added edges. Use
    it to prevent far away fragments to be forcefully connected.
    If the neurons have `.units` set, you can also pass a string
    such as e.g. "2 microns".

TYPE: float | str DEFAULT: None

min_size
    Minimum size in nodes for fragments to be reattached. Fragments
    smaller than `min_size` will be ignored during stitching and
    hence remain disconnected.

TYPE: int DEFAULT: None

drop_disc
    If True and the neuron remains fragmented after healing (i.e.
    `max_dist` or `min_size` prevented a full connect), we will
    keep only the largest (by number of nodes) connected component
    and discard all other fragments.

TYPE: bool DEFAULT: False

mask
    Either a boolean mask or a list of node IDs. If provided will
    only heal breaks between these nodes.

TYPE: list-like DEFAULT: None

inplace
    If False, will perform healing on and return a copy.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
None

If inplace=True.

CatmaidNeuron / List

If inplace=False.

See Also

navis.stitch_skeletons Use to stitch multiple skeletons together. navis.break_fragments Use to produce individual neurons from disconnected fragments.

Examples:

>>> import navis
>>> n = navis.example_neurons(1, kind='skeleton')
>>> # Disconnect parts of the neuron
>>> n.nodes.loc[100, 'parent_id'] = -1
>>> len(n.root)
2
>>> # Heal neuron
>>> healed = navis.heal_skeleton(n)
>>> len(healed.root)
1
Source code in navis/morpho/manipulation.py
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
@utils.map_neuronlist(desc="Healing", allow_parallel=True)
def heal_skeleton(
    x: "core.NeuronList",
    method: Union[Literal["LEAFS"], Literal["ALL"]] = "ALL",
    max_dist: Optional[float] = None,
    min_size: Optional[int] = None,
    drop_disc: bool = False,
    mask: Optional[Sequence] = None,
    inplace: bool = False,
) -> Optional[NeuronObject]:
    """Heal fragmented skeleton(s).

    Tries to heal a fragmented skeleton (i.e. a neuron with multiple roots)
    using a minimum spanning tree.

    Parameters
    ----------
    x :         TreeNeuron/List
                Fragmented skeleton(s).
    method :    'LEAFS' | 'ALL', optional
                Method used to heal fragments:
                 - 'LEAFS': Only leaf (including root) nodes will be used to
                   heal gaps. This can be much faster depending on the size of
                   the neuron
                 - 'ALL': All nodes can be used to reconnect fragments.
    max_dist :  float | str, optional
                This effectively sets the max length for newly added edges. Use
                it to prevent far away fragments to be forcefully connected.
                If the neurons have `.units` set, you can also pass a string
                such as e.g. "2 microns".
    min_size :  int, optional
                Minimum size in nodes for fragments to be reattached. Fragments
                smaller than `min_size` will be ignored during stitching and
                hence remain disconnected.
    drop_disc : bool
                If True and the neuron remains fragmented after healing (i.e.
                `max_dist` or `min_size` prevented a full connect), we will
                keep only the largest (by number of nodes) connected component
                and discard all other fragments.
    mask :      list-like, optional
                Either a boolean mask or a list of node IDs. If provided will
                only heal breaks between these nodes.
    inplace :   bool, optional
                If False, will perform healing on and return a copy.

    Returns
    -------
    None
                If `inplace=True`.
    CatmaidNeuron/List
                If `inplace=False`.


    See Also
    --------
    [`navis.stitch_skeletons`][]
                Use to stitch multiple skeletons together.
    [`navis.break_fragments`][]
                Use to produce individual neurons from disconnected fragments.


    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1, kind='skeleton')
    >>> # Disconnect parts of the neuron
    >>> n.nodes.loc[100, 'parent_id'] = -1
    >>> len(n.root)
    2
    >>> # Heal neuron
    >>> healed = navis.heal_skeleton(n)
    >>> len(healed.root)
    1

    """
    # Normalise method so e.g. 'leafs' also works
    method = str(method).upper()

    if method not in ("LEAFS", "ALL"):
        raise ValueError(f'Unknown method "{method}"')

    # The decorator makes sure that at this point we have single neurons
    if not isinstance(x, core.TreeNeuron):
        raise TypeError(f'Expected TreeNeuron(s), got "{type(x)}"')

    # Convert e.g. "2 microns" into a number in the neuron's units
    if not isinstance(max_dist, type(None)):
        max_dist = x.map_units(max_dist, on_error="raise")

    if not inplace:
        x = x.copy()

    # Reconnect fragments via a minimum spanning tree (modifies x in place)
    _ = _stitch_mst(
        x, nodes=method, max_dist=max_dist, min_size=min_size, mask=mask, inplace=True
    )

    # See if we need to drop remaining disconnected fragments
    if drop_disc:
        # Compute this property only once
        trees = x.subtrees
        if len(trees) > 1:
            # Tree is sorted such that the largest component is the first
            _ = subset.subset_neuron(x, subset=trees[0], inplace=True)

    return x

Run a health check on TreeNeurons and flag potential issues.

PARAMETER DESCRIPTION
x
    Neuron(s) to run the health check on.

TYPE: TreeNeuron | NeuronList

verbose
    If True, will print errors in addition to returning them.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
list of issues or None

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> navis.health_check(n)
Neuron 1734350788 seems perfectly fine.
Source code in navis/graph/clinic.py
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
def health_check(x: 'core.NeuronObject', verbose: bool = True) -> "list | None":
    """Run a health check on TreeNeurons and flag potential issues.

    Parameters
    ----------
    x :         TreeNeuron | NeuronList
                Neuron(s) to run the health check on.
    verbose :   bool
                If True, will print issues in addition to returning them.

    Returns
    -------
    list of issues or None

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> navis.health_check(n)
    Neuron 1734350788 seems perfectly fine.

    """
    if isinstance(x, core.NeuronList):
        for n in x:
            # Bugfix: this used to call `health_check(x)` - i.e. pass the
            # whole NeuronList again - causing infinite recursion.
            _ = health_check(n, verbose=verbose)
        return
    elif not isinstance(x, core.TreeNeuron):
        raise TypeError(f'Expected TreeNeuron/List, got "{type(x)}"')

    issues = []

    # Check if neuron is not a tree
    if not x.is_tree:
        issues.append('is not a tree (networkx.is_forest)')
    # See if there are any cycles
    if x.cycles:
        issues.append(f'has cycles (networkx.find_cycles): {str(x.cycles)}')
    # See if any node has more than one parent
    od = [n[0] for n in x.graph.out_degree if n[1] > 1]
    if od:
        # Node IDs are typically integers -> cast to str before joining
        # (plain `", ".join(od)` would raise a TypeError)
        issues.append('has nodes with multiple parents (graph.out_degree): '
                      + ", ".join(str(n) for n in od))

    # Flag x/y/z positions shared by more than one node
    locs, counts = np.unique(x.nodes[['x', 'y', 'z']].values,
                             axis=0,
                             return_counts=True)
    dupl = counts > 1
    if any(dupl):
        issues.append(f'has {sum(dupl)} node positions that are occupied by multiple nodes')

    if verbose:
        if issues:
            print(f'Neuron {str(x.id)} has issues:')
            for i in issues:
                print(f' - {i}')
        else:
            print(f'Neuron {str(x.id)} seems perfectly fine.')

    return issues if issues else None
in_volume
in_volume
in_volume
in_volume
in_volume

Test if points/neurons are within a given volume.

Notes

This function requires ncollpyde (recommended and installed with navis) or pyoctree as backends for raycasting. If neither is installed, we can fall back to using scipy's ConvexHull instead. This is, however, slower and will give wrong positives for concave meshes!

PARAMETER DESCRIPTION
x
            Object(s) to intersect with the volume.
             - Neuron(s) will be subset to parts within the volume
             - Array-like is treated as list of x/y/z coordinates;
               has to be of shape `(N, 3)`
             - `pandas.DataFrame` needs to have `x, y, z`
               columns

TYPE: (N, 3) array-like | pandas.DataFrame | Neuron/List

volume
            Multiple volumes can be given as list
            (`[volume1, volume2, ...]`) or dict
            (`{'label1': volume1, ...}`).

TYPE: Volume | mesh-like | dict or list thereof

mode
            If 'IN', parts of the neuron that are within the volume
            are kept.

TYPE: 'IN' | 'OUT' DEFAULT: 'IN'

backend
            Which backend so be used (see Notes). If multiple
            backends are given, will use the first backend that is
            available.

TYPE: 'ncollpyde' | 'pyoctree' | 'scipy' | iterable thereof DEFAULT: ('ncollpyde', 'pyoctree')

n_rays
            Number of rays used to determine if a point is inside
            a volume. More rays give more reliable results but are
            slower (especially with pyoctree backend). If `None`
            will use default number of rays (3 for ncollpyde, 1 for
            pyoctree).

TYPE: int | None DEFAULT: None

prevent_fragments
            Only relevant if input is TreeNeuron(s). If True, will
            attempt to keep neuron from fragmenting.

TYPE: bool DEFAULT: False

validate
            If True, validate `volume` and try to fix issues using
            trimesh. Will raise ValueError if issue could not be
            fixed.

TYPE: bool DEFAULT: False

inplace
            Only relevant if input is Neuron/List. Ignored
            if multiple volumes are provided.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
Neuron

If input is a single neuron or NeuronList, will return subset of the neuron(s) (nodes and connectors) that are within given volume.

list of bools

If input is (N, 3) array of coordinates, returns a (N, ) boolean array, in the same order as the input: True if in volume, False if not.

dict

If multiple volumes are provided, results will be returned in dictionary with volumes as keys::

{'volume1': in_volume(x, volume1), 'volume2': in_volume(x, volume2), ... }

Examples:

Prune neuron to volume

>>> import navis
>>> n = navis.example_neurons(1)
>>> lh = navis.example_volume('LH')
>>> n_lh = navis.in_volume(n, lh, inplace=False)
>>> n_lh
type            navis.TreeNeuron
name                  1734350788
id                    1734350788
n_nodes                      344
n_connectors                None
n_branches                    49
n_leafs                       50
cable_length             32313.5
soma                        None
units                8 nanometer
dtype: object

Find out which points are inside a volume

>>> in_v = navis.in_volume(n.nodes[['x', 'y', 'z']].values, lh)
>>> in_v
array([False, False, False, ..., False, False, False])
>>> in_v.sum()
344
Source code in navis/intersection/intersect.py
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
def in_volume(x: Union['core.NeuronObject', Sequence, pd.DataFrame],
              volume: Union[core.Volume,
                            Dict[str, core.Volume],
                            Sequence[core.Volume]],
              mode: Modes = 'IN',
              backend: Backends = ('ncollpyde', 'pyoctree'),
              n_rays: Optional[int] = None,
              prevent_fragments: bool = False,
              validate: bool = False,
              inplace: bool = False,) -> Optional[Union['core.NeuronObject',
                                                        Sequence[bool],
                                                        Dict[str, Union[Sequence[bool],
                                                                        'core.NeuronObject']]
                                                        ]]:
    """Test if points/neurons are within a given volume.

    Notes
    -----
    This function requires [ncollpyde](https://github.com/clbarnes/ncollpyde)
    (recommended and installed with `navis`) or
    [pyoctree](https://github.com/mhogg/pyoctree) as backends for raycasting.
    If neither is installed, we can fall back to using scipy's ConvexHull
    instead. This is, however, slower and will give false positives for concave
    meshes!

    Parameters
    ----------
    x :                 (N, 3) array-like | pandas.DataFrame | Neuron/List
                        Object(s) to intersect with the volume.
                         - Neuron(s) will be subset to parts within the volume
                         - Array-like is treated as list of x/y/z coordinates;
                           has to be of shape `(N, 3)`
                         - `pandas.DataFrame` needs to have `x, y, z`
                           columns

    volume :            Volume | mesh-like | dict or list thereof
                        Multiple volumes can be given as list
                        (`[volume1, volume2, ...]`) or dict
                        (`{'label1': volume1, ...}`).
    mode :              'IN' | 'OUT', optional
                        If 'IN', parts of the neuron that are within the volume
                        are kept.
    backend :           'ncollpyde' | 'pyoctree' | 'scipy' | iterable thereof
                        Which backend should be used (see Notes). If multiple
                        backends are given, will use the first backend that is
                        available.
    n_rays :            int | None, optional
                        Number of rays used to determine if a point is inside
                        a volume. More rays give more reliable results but are
                        slower (especially with pyoctree backend). If `None`
                        will use default number of rays (3 for ncollpyde, 1 for
                        pyoctree).
    prevent_fragments : bool, optional
                        Only relevant if input is TreeNeuron(s). If True, will
                        attempt to keep neuron from fragmenting.
    validate :          bool, optional
                        If True, validate `volume` and try to fix issues using
                        trimesh. Will raise ValueError if issue could not be
                        fixed.
    inplace :           bool, optional
                        Only relevant if input is Neuron/List. Ignored
                        if multiple volumes are provided.

    Returns
    -------
    Neuron
                      If input is a single neuron or NeuronList, will return
                      subset of the neuron(s) (nodes and connectors) that are
                      within given volume.
    list of bools
                      If input is `(N, 3)` array of coordinates, returns a `(N, )`
                      boolean array: `True` if in volume, `False` if not.
    dict
                      If multiple volumes are provided, results will be
                      returned in dictionary with volumes as keys::

                        {'volume1': in_volume(x, volume1),
                         'volume2': in_volume(x, volume2),
                         ... }

    Raises
    ------
    TypeError
                      If `x` is not a neuron, NeuronList, DataFrame or
                      (N, 3) array-like.

    Examples
    --------
    Prune neuron to volume

    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> lh = navis.example_volume('LH')
    >>> n_lh = navis.in_volume(n, lh, inplace=False)
    >>> n_lh                                                    # doctest: +SKIP
    type            navis.TreeNeuron
    name                  1734350788
    id                    1734350788
    n_nodes                      344
    n_connectors                None
    n_branches                    49
    n_leafs                       50
    cable_length             32313.5
    soma                        None
    units                8 nanometer
    dtype: object

    Find out which points are inside a volume

    >>> in_v = navis.in_volume(n.nodes[['x', 'y', 'z']].values, lh)
    >>> in_v
    array([False, False, False, ..., False, False, False])
    >>> in_v.sum()
    344

    """
    allowed_backends = ('ncollpyde', 'pyoctree', 'scipy')

    if not utils.is_iterable(backend):
        backend = [backend]

    # Note: testing the set difference directly (instead of `any(...)`)
    # also catches falsy-but-invalid backend names such as an empty string.
    if set(backend) - set(allowed_backends):
        raise ValueError(f'Unknown backend in "{backend}". Allowed backends: '
                         f'{allowed_backends}')

    if mode not in ('IN', 'OUT'):
        raise ValueError(f'`mode` must be "IN" or "OUT", not "{mode}"')

    # If we are given multiple volumes
    if isinstance(volume, (list, dict, np.ndarray)):
        # Force into dict
        if not isinstance(volume, dict):
            # Make sure all Volumes can be uniquely indexed
            vnames = [getattr(v, 'name', i) for i, v in enumerate(volume)]
            dupli = [str(v) for v in set(vnames) if vnames.count(v) > 1]
            if dupli:
                raise ValueError('Duplicate Volume names detected: '
                                 f'{", ".join(dupli)}. Volume.name must be '
                                 'unique.')

            volume = {getattr(v, 'name', i): v for i, v in enumerate(volume)}

        # Make sure everything is a volume
        volume = {k: utils.make_volume(v) for k, v in volume.items()}

        # Validate now - this might save us troubles later
        if validate:
            for v in volume.values():
                msg = 'Mesh is not a volume ' \
                      '(e.g. not watertight, incorrect ' \
                      'winding) and could not be fixed. ' \
                      'Use `validate=False` to skip validation and ' \
                      'perform intersection regardless.'
                try:
                    v.validate()
                except utils.VolumeError as e:
                    raise utils.VolumeError(f'{v}: {msg}') from e
                except BaseException:
                    raise

        # Recurse for each volume; `inplace=False` because results for
        # multiple volumes have to go into separate copies anyway.
        data: Dict[str, Any] = dict()
        for v in config.tqdm(volume, desc='Volumes', disable=config.pbar_hide,
                             leave=config.pbar_leave):
            data[v] = in_volume(x,
                                volume=volume[v],
                                inplace=False,
                                n_rays=n_rays,
                                mode=mode,
                                validate=False,
                                backend=backend)
        return data

    # Coerce volume into navis.Volume
    volume = utils.make_volume(volume)

    if not isinstance(volume, core.Volume):
        raise TypeError(f'Expected navis.Volume, got "{type(volume)}"')

    # From here on out volume is a single core.Volume
    vol: 'core.Volume' = volume  # type: ignore

    if validate:
        msg = 'Mesh is not a volume ' \
              '(e.g. not watertight, incorrect ' \
              'winding) and could not be fixed. ' \
              'Use `validate=False` to skip validation and ' \
              'perform intersection regardless.'
        try:
            vol.validate()
        except utils.VolumeError as e:
            raise utils.VolumeError(f'{vol}: {msg}') from e
        except BaseException:
            raise

    # Make copy if necessary
    if isinstance(x, (core.NeuronList, core.BaseNeuron)):
        if inplace is False:
            x = x.copy()

    if isinstance(x, (core.BaseNeuron)):
        # Extract the neuron's point data depending on its type
        if isinstance(x, core.TreeNeuron):
            data = x.nodes[['x', 'y', 'z']].values
        elif isinstance(x, core.Dotprops):
            data = x.points
        elif isinstance(x, core.MeshNeuron):
            data = x.vertices
        elif isinstance(x, core.VoxelNeuron):
            # Convert voxel indices to coordinates at the voxel centers
            data = x.voxels * x.units_xyz.magnitude + x.units_xyz.magnitude / 2
            data += x.offset

        in_v = in_volume(data,
                         vol,
                         mode='IN',
                         n_rays=n_rays,
                         validate=False,
                         backend=backend)

        # If mode is OUT, invert selection
        if mode == 'OUT':
            in_v = ~np.array(in_v)

        # Only subset if there are actually nodes to remove
        if not all(in_v):
            if isinstance(x, core.TreeNeuron):
                _ = morpho.subset_neuron(x,
                                         subset=x.nodes[in_v].node_id.values,
                                         inplace=True,
                                         prevent_fragments=prevent_fragments)
            elif isinstance(x, (core.MeshNeuron, core.Dotprops)):
                _ = morpho.subset_neuron(x,
                                         subset=in_v,
                                         inplace=True,
                                         prevent_fragments=prevent_fragments)
            elif isinstance(x, core.VoxelNeuron):
                values = x.values[in_v]
                x._data = x.voxels[in_v]
                x.values = values
                x._clear_temp_attr()

        return x
    elif isinstance(x, core.NeuronList):
        for n in config.tqdm(x, desc='Subsetting',
                             leave=config.pbar_leave,
                             disable=config.pbar_hide):
            in_volume(n, vol, inplace=True, mode=mode, backend=backend,
                      validate=False, n_rays=n_rays,
                      prevent_fragments=prevent_fragments)

        return x
    elif isinstance(x, pd.DataFrame):
        points = x[['x', 'y', 'z']].values
    elif isinstance(x, np.ndarray):
        points = x
    elif isinstance(x, (list, tuple)):
        points = np.array(x)
    else:
        # Without this we would hit a confusing NameError on `points` below
        raise TypeError('Unable to perform intersection with data of type '
                        f'"{type(x)}"')

    if points.ndim != 2 or points.shape[1] != 3:  # type: ignore  # does not know about numpy
        raise ValueError('Points must be array of shape (N,3).')

    # Use the first backend that is actually importable
    for b in backend:
        if b == 'ncollpyde' and ncollpyde:
            return in_volume_ncoll(points, vol,
                                   n_rays=n_rays)
        elif b == 'pyoctree' and pyoctree:
            return in_volume_pyoc(points, vol,
                                  n_rays=n_rays)
        elif b == 'scipy':
            return in_volume_convex(points, vol, approximate=False)

    raise ValueError(f'None of the specified backends were available: {backend}')

Insert new nodes between existing nodes.

PARAMETER DESCRIPTION
x
    Neuron to insert new nodes into.

TYPE: TreeNeuron

where
    Must be a list of node ID pairs. A new node will be added
    between the nodes of each pair (see examples).

TYPE: list of node pairs

coords
    Can be:
     - `None`: new nodes will be inserted exactly between the two
                 nodes
     - (N, 3) array of coordinates for the newly inserted nodes
     - (N, ) array of fractional distances [0-1]: e.g. 0.25 means
       that a new node will be inserted a quarter of the way between
       the two nodes (from the child's perspective)

TYPE: None | list of (x, y, z) coordinates | list of fractions DEFAULT: None

validate
    If True, will make sure that pairs in `where` are always
    in (parent, child) order. If you know this to already be the
    case, set `validate=False` to save some time.

TYPE: bool DEFAULT: True

inplace
    If True, will rewire the neuron inplace. If False, will return
    a rewired copy of the neuron.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
TreeNeuron

Examples:

Insert new nodes between some random points

>>> import navis
>>> n = navis.example_neurons(1)
>>> n.n_nodes
4465
>>> where = n.nodes[['parent_id', 'node_id']].values[100:200]
>>> _ = navis.insert_nodes(n, where=where, inplace=True)
>>> n.n_nodes
4565
Source code in navis/graph/graph_utils.py
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
def insert_nodes(
    x: "core.TreeNeuron",
    where: List[tuple],
    coords: List[tuple] = None,
    validate: bool = True,
    inplace: bool = False,
) -> Optional["core.TreeNeuron"]:
    """Insert new nodes between existing nodes.

    Parameters
    ----------
    x :         TreeNeuron
                Neuron to insert new nodes into.
    where :     list of node pairs
                Must be a list of node ID pairs. A new node will be added
                between the nodes of each pair (see examples).
    coords :    None | list of (x, y, z) coordinates | list of fractions
                Can be:
                 - `None`: new nodes will be inserted exactly between the two
                             nodes
                 - (N, 3) array of coordinates for the newly inserted nodes
                 - (N, ) array of fractional distances [0-1]: e.g. 0.25 means
                   that a new node will be inserted a quarter of the way between
                   the two nodes (from the child's perspective)
    validate :  bool
                If True, will make sure that pairs in `where` are always
                in (parent, child) order. If you know this to already be the
                case, set `validate=False` to save some time.
    inplace :   bool
                If True, will rewire the neuron inplace. If False, will return
                a rewired copy of the neuron.

    Returns
    -------
    TreeNeuron

    Raises
    ------
    ValueError
                If `where` is not (N, 2), a pair is not actually connected,
                or the number of `coords` does not match the number of pairs.

    Examples
    --------
    Insert new nodes between some random points

    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> n.n_nodes
    4465
    >>> where = n.nodes[['parent_id', 'node_id']].values[100:200]
    >>> _ = navis.insert_nodes(n, where=where, inplace=True)
    >>> n.n_nodes
    4565

    """
    utils.eval_param(x, name="x", allowed_types=(core.TreeNeuron,))

    # Use np.array (not np.asarray) to force a copy: we may swap pairs
    # in-place below and must not mutate the caller's array.
    where = np.array(where)
    if where.ndim != 2 or where.shape[1] != 2:
        raise ValueError(
            "Expected `where` to be a (N, 2) list of pairs. " f"Got {where.shape}"
        )

    # Validate if that's desired
    if validate:
        # Setup to get parents
        parent = x.nodes.set_index("node_id").parent_id

        # Get parents of the left and the right nodes of each pair
        parent_left = parent.loc[where[:, 0]].values
        parent_right = parent.loc[where[:, 1]].values

        # Check if the right node is parent of the left or the other way around
        correct_order = where[:, 0] == parent_right
        swapped = where[:, 1] == parent_left
        not_connected = ~(correct_order | swapped)

        if np.any(not_connected):
            raise ValueError(
                "The following pairs are not connected: " f"{where[not_connected]}"
            )

        # Flip nodes where necessary to sure we have (parent, child) order
        if np.any(swapped):
            where[swapped, :] = where[swapped][:, [1, 0]]

    # If not provided, generate coordinates in the center between each node pair
    if coords is None:
        node_locs = x.nodes.set_index("node_id")[["x", "y", "z"]]
        left_loc = node_locs.loc[where[:, 0]].values
        right_loc = node_locs.loc[where[:, 1]].values

        # Find center between each node
        coords = left_loc + (right_loc - left_loc) / 2

    coords = np.asarray(coords)
    # Make sure we have correct coordinates
    if coords.shape[0] != where.shape[0]:
        raise ValueError(
            f"Expected {where.shape[0]} coordinates or distances, "
            f"got {coords.shape[0]}"
        )

    # If array of fractional distances translate to coordinates
    if coords.ndim == 1:
        node_locs = x.nodes.set_index("node_id")[["x", "y", "z"]]
        left_loc = node_locs.loc[where[:, 0]].values
        right_loc = node_locs.loc[where[:, 1]].values

        # Interpolate between parent and child by the given fraction
        coords = left_loc + (right_loc - left_loc) * coords.reshape(-1, 1)

    # For the moment, we will interpolate the radius
    rad = x.nodes.set_index("node_id").radius
    new_rad = (rad.loc[where[:, 0]].values + rad.loc[where[:, 1]].values) / 2

    # Generate table for new nodes
    new_nodes = pd.DataFrame()
    max_id = x.nodes.node_id.max() + 1
    new_nodes["node_id"] = np.arange(max_id, max_id + where.shape[0]).astype(int)
    new_nodes["parent_id"] = where[:, 0]
    new_nodes["x"] = coords[:, 0]
    new_nodes["y"] = coords[:, 1]
    new_nodes["z"] = coords[:, 2]
    new_nodes["radius"] = new_rad

    # Merge tables
    nodes = pd.concat(
        [x.nodes, new_nodes], join="outer", axis=0, sort=True, ignore_index=True
    )

    # Remap nodes: each former child now points at the newly inserted node
    new_parents = dict(zip(where[:, 1], new_nodes.node_id.values))
    to_rewire = nodes.node_id.isin(new_parents)
    nodes.loc[to_rewire, "parent_id"] = nodes.loc[to_rewire, "node_id"].map(new_parents).values.astype(
        nodes.dtypes["parent_id"], copy=False
    )

    if not inplace:
        x = x.copy()

    x._nodes = nodes

    return x

Extract basic info from Hdf5 file.

PARAMETER DESCRIPTION
filepath
                Path to HDF5 file.

TYPE: str

inspect_neurons
                If True, will return info about the neurons contained.

TYPE: bool DEFAULT: True

inspect_annotations
                If True, include info about annotations associated
                with each neuron.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
dict

Returns a dictionary with basic info about the file. An example::

    {'format_spec': 'hnf_v1',  # format specifier
     'neurons': {'someID': {'skeleton': True, 'mesh': False, 'dotprops': True,
                            'annotations': ['connectors']},
                 'someID2': {'skeleton': False, 'mesh': False, 'dotprops': True,
                             'annotations': ['connectors']}}}

Source code in navis/io/hdf_io.py
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
def inspect_h5(filepath, inspect_neurons=True, inspect_annotations=True):
    """Extract basic info from Hdf5 file.

    Parameters
    ----------
    filepath :              str | os.PathLike
                            Path to HDF5 file.
    inspect_neurons :       bool
                            If True, will return info about the neurons contained.
    inspect_annotations :   bool
                            If True, include info about annotations associated
                            with each neuron.

    Returns
    -------
    dict
                        Returns a dictionary with basic info about the file.
                        An example::

                         {
                          'format_spec': 'hnf_v1', # format specifier
                          'neurons': {
                                      'someID': {'skeleton': True,
                                                 'mesh': False,
                                                 'dotprops': True,
                                                 'annotations': ['connectors']},
                                      'someID2': {'skeleton': False,
                                                  'mesh': False,
                                                  'dotprops': True,
                                                  'annotations': ['connectors']}
                                     }
                            }


    """
    # Accept both plain strings and path-like objects (e.g. pathlib.Path)
    if not isinstance(filepath, (str, os.PathLike)):
        raise TypeError(f'`filepath` must be str or path-like, got "{type(filepath)}"')
    filepath = os.fspath(filepath)

    if not os.path.isfile(filepath):
        raise ValueError(f'{filepath} does not exist')

    info = dict()
    with h5py.File(filepath, 'r') as f:
        info['format_spec'] = f.attrs.get('format_spec')
        # R strings are automatically stored as vectors
        info['format_spec'] = utils.make_non_iterable(info['format_spec'])

        if inspect_neurons:
            info['neurons'] = {}
            # Go over all top level groups
            for id, grp in f.items():
                # Skip if not a group
                if not isinstance(grp, h5py.Group):
                    continue

                # Do not change this test
                this = {}
                if 'skeleton' in grp:
                    this['skeleton'] = True
                if 'mesh' in grp:
                    this['mesh'] = True
                if 'dotprops' in grp:
                    this['dotprops'] = True

                # Only record this ID if it contains at least one representation
                if this:
                    info['neurons'][id] = this
                    if inspect_annotations:
                        annotations = grp.get('annotations', None)
                        if annotations:
                            info['neurons'][id]['annotations'] = list(annotations.keys())

    return info

Compute intersection matrix between a set of neurons and volumes.

PARAMETER DESCRIPTION
x
          Neuron(s) to intersect.

TYPE: NeuronList | single neuron

volumes
          Volumes to intersect the neurons with.

TYPE: list or dict of navis.Volume

attr
          Attribute to return for intersected neurons (e.g.
          'cable_length' for TreeNeurons). If None, will return
          the neuron subset to the volumes.

TYPE: str | None DEFAULT: None

**kwargs
          Keyword arguments passed to [`navis.in_volume`][].

DEFAULT: {}

RETURNS DESCRIPTION
pandas DataFrame

Examples:

>>> import navis
>>> # Grab neurons
>>> nl = navis.example_neurons(3)
>>> # Grab a single volume
>>> lh = navis.example_volume("LH")
>>> # Re-use for testing
>>> vols = {'lh1': lh, 'lh2': lh}
>>> # Generate intersection matrix with cable length
>>> m = navis.intersection_matrix(nl, vols, attr='cable_length')
Source code in navis/intersection/intersect.py
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
def intersection_matrix(x: 'core.NeuronObject',
                        volumes: Union[List[core.Volume],
                                       Dict[str, core.Volume]],
                        attr: Optional[str] = None,
                        **kwargs
                        ) -> pd.DataFrame:
    """Compute intersection matrix between a set of neurons and volumes.

    Parameters
    ----------
    x :               NeuronList | single neuron
                      Neuron(s) to intersect.
    volumes :         list or dict of navis.Volume
                      Volumes to intersect the neurons with.
    attr :            str | None, optional
                      Attribute to return for intersected neurons (e.g.
                      'cable_length' for TreeNeurons). If None, will return
                      the neuron subset to the volumes.
    **kwargs
                      Keyword arguments passed to [`navis.in_volume`][].

    Returns
    -------
    pandas DataFrame
                      One row per volume, one column per neuron.

    Examples
    --------
    >>> import navis
    >>> # Grab neurons
    >>> nl = navis.example_neurons(3)
    >>> # Grab a single volume
    >>> lh = navis.example_volume("LH")
    >>> # Re-use for testing
    >>> vols = {'lh1': lh, 'lh2': lh}
    >>> # Generate intersection matrix with cable length
    >>> m = navis.intersection_matrix(nl, vols, attr='cable_length')

    """
    # Volumes should be a dict at some point
    volumes_dict: Dict[str, core.Volume]

    if isinstance(x, core.BaseNeuron):
        x = core.NeuronList(x)

    if not isinstance(x, core.NeuronList):
        raise TypeError(f'x must be Neuron/List, not "{type(x)}"')

    if not isinstance(volumes, (list, dict)):
        raise TypeError('Volumes must be given as list or dict, not '
                        f'"{type(volumes)}"')

    if isinstance(volumes, list):
        volumes_dict = {v.name: v for v in volumes}
    else:
        volumes_dict = volumes

    for v in volumes_dict.values():
        if not isinstance(v, core.Volume):
            raise TypeError(f'Wrong data type found in volumes: "{type(v)}"')

    # `data` maps volume name -> NeuronList subset to that volume
    data = in_volume(x, volumes_dict, inplace=False, **kwargs)

    # Cells are either the subset neurons themselves or a given attribute
    if attr:
        rows = [[getattr(n, attr) for n in data[v]] for v in data]
    else:
        rows = [list(data[v]) for v in data]

    return pd.DataFrame(rows,
                        index=list(data.keys()),
                        columns=x.id)

Calculate IVSCC features for neuron(s).

Please see the IVSCC tutorial for details.

PARAMETER DESCRIPTION
x
                Neuron(s) to calculate IVSCC for.

TYPE: TreeNeuron | NeuronList

features
                Provide specific features to calculate.
                Must be subclasses of `BasicFeatures`.
                If `None`, will use default features.

TYPE: Sequence[Features] DEFAULT: None

missing_compartments
                What to do if a neuron is missing a compartment
                (e.g. no axon or basal dendrite):
                 - "ignore" (default): ignore that compartment
                 - "skip": skip the entire neuron
                 - "raise": raise an exception

TYPE: ignore | skip | 'raise' DEFAULT: 'ignore'

RETURNS DESCRIPTION
ivscc

IVSCC features for the neuron(s).

TYPE: pd.DataFrame

Source code in navis/morpho/ivscc.py
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
def ivscc_features(
    x: "core.TreeNeuron",
    features=None,
    missing_compartments="ignore",
    verbose=False,
    progress=True,
) -> Union[float, pd.DataFrame]:
    """Calculate IVSCC features for neuron(s).

    Please see the `IVSCC` tutorial for details.

    Parameters
    ----------
    x :                     TreeNeuron | NeuronList
                            Neuron(s) to calculate IVSCC for.
    features :              Sequence[Features], optional
                            Provide specific features to calculate.
                            Must be subclasses of `BasicFeatures`.
                            If `None`, will use default features.
    missing_compartments : "ignore" | "skip" | "raise"
                            What to do if a neuron is missing a compartment
                            (e.g. no axon or basal dendrite):
                             - "ignore" (default): ignore that compartment
                             - "skip": skip the entire neuron
                             - "raise": raise an exception
    verbose :               bool, optional
                            If True, print info - e.g. which neurons were
                            skipped - and pass `verbose=True` to each feature.
    progress :              bool, optional
                            Whether to show a progress bar.

    Returns
    -------
    ivscc :                 pd.DataFrame
                            IVSCC features for the neuron(s); one column per
                            neuron ID, one row per feature.

    """

    # Wrap single neurons so we can iterate uniformly below
    if isinstance(x, core.TreeNeuron):
        x = core.NeuronList([x])

    if features is None:
        features = DEFAULT_FEATURES

    data = {}
    for n in config.tqdm(
        x, desc="Calculating IVSCC features", disable=not progress or config.pbar_hide
    ):
        data[n.id] = {}
        for feat in features:
            try:
                f = feat(n, verbose=verbose)
            except CompartmentNotFoundError as e:
                if missing_compartments == "ignore":
                    # Skip just this feature; other features may still work
                    continue
                elif missing_compartments == "skip":
                    if verbose:
                        print(f"Skipping neuron {n.id}: {e}")
                    # Drop the partial results collected for this neuron
                    data.pop(n.id)
                    break
                else:
                    # NOTE: any value other than "ignore"/"skip" re-raises
                    raise e

            data[n.id].update(f.extract_features())

    return pd.DataFrame(data)

Return a neuron consisting of only the longest neurite(s).

Based on geodesic distances.

PARAMETER DESCRIPTION
x
            Neuron(s) to prune.

TYPE: TreeNeuron | NeuronList

n
            Number of longest neurites to preserve. For example:
             - `n=1` keeps the longest neurites
             - `n=2` keeps the two longest neurites
             - `n=slice(1, None)` removes the longest neurite

TYPE: int | slice DEFAULT: 1

reroot_soma
            If True, neuron will be rerooted to soma.

TYPE: bool DEFAULT: False

from_root
            If True, will look for longest neurite from root.
            If False, will look for the longest neurite between any
            two tips.

TYPE: bool DEFAULT: True

inverse
            If True, will instead *remove* the longest neurite.

TYPE: bool DEFAULT: False

inplace
            If False, copy of the neuron will be trimmed down to
            longest neurite and returned.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
TreeNeuron / List

Pruned neuron.

See Also

navis.split_into_fragments Split neuron into fragments based on longest neurites.

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> # Keep only the longest neurite
>>> ln1 = navis.longest_neurite(n, n=1, reroot_soma=True)
>>> # Keep the two longest neurites
>>> ln2 = navis.longest_neurite(n, n=2, reroot_soma=True)
>>> # Keep everything but the longest neurite
>>> ln3 = navis.longest_neurite(n, n=slice(1, None), reroot_soma=True)
Source code in navis/graph/graph_utils.py
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
@utils.map_neuronlist(desc="Pruning", allow_parallel=True)
@utils.meshneuron_skeleton(method="subset")
def longest_neurite(
    x: "core.NeuronObject",
    n: int = 1,
    reroot_soma: bool = False,
    from_root: bool = True,
    inverse: bool = False,
    inplace: bool = False,
) -> "core.NeuronObject":
    """Return a neuron consisting of only the longest neurite(s).

    Based on geodesic distances.

    Parameters
    ----------
    x :                 TreeNeuron | NeuronList
                        Neuron(s) to prune.
    n :                 int | slice
                        Number of longest neurites to preserve. For example:
                         - `n=1` keeps the longest neurite
                         - `n=2` keeps the two longest neurites
                         - `n=slice(1, None)` removes the longest neurite
    reroot_soma :       bool
                        If True, neuron will be rerooted to soma.
    from_root :         bool
                        If True, will look for longest neurite from root.
                        If False, will look for the longest neurite between any
                        two tips.
    inverse :           bool
                        If True, will instead *remove* the longest neurite.
    inplace :           bool
                        If False, copy of the neuron will be trimmed down to
                        longest neurite and returned.

    Returns
    -------
    TreeNeuron/List
                        Pruned neuron.

    See Also
    --------
    [`navis.split_into_fragments`][]
            Split neuron into fragments based on longest neurites.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> # Keep only the longest neurite
    >>> ln1 = navis.longest_neurite(n, n=1, reroot_soma=True)
    >>> # Keep the two longest neurites
    >>> ln2 = navis.longest_neurite(n, n=2, reroot_soma=True)
    >>> # Keep everything but the longest neurite
    >>> ln3 = navis.longest_neurite(n, n=slice(1, None), reroot_soma=True)

    """
    if not isinstance(x, core.TreeNeuron):
        raise TypeError(f'Expected TreeNeuron(s), got "{type(x)}"')

    if isinstance(n, numbers.Number) and n < 1:
        raise ValueError("Number of longest neurites to preserve must be >=1")

    # The decorators guarantee a single TreeNeuron at this point
    x: core.TreeNeuron

    if not inplace:
        x = x.copy()

    if not from_root:
        # Collect all terminal nodes (roots can also be "ends")
        is_tip = x.nodes.type.isin(("root", "end"))
        tips = x.nodes.loc[is_tip, 'node_id'].values

        # Pairwise geodesic distances between all tips
        geo = geodesic_matrix(x, from_=tips)[tips]

        # Fragmented neurons yield infinite distances -> mask those out
        geo[geo == np.inf] = -1

        # There may be ties; any node of a maximally distant pair will do
        rows, _ = np.where(geo == np.max(geo.values))
        new_root = geo.columns[rows[0]]  # translate index to node ID

        # Reroot to one end of the longest tip-to-tip path
        x.reroot(new_root, inplace=True)
    elif reroot_soma and x.soma is not None:
        x.reroot(x.soma, inplace=True)

    # Segments come back ordered longest-first (by geodesic length)
    segs = _generate_segments(x, weight="weight")

    if isinstance(n, (int, np.integer)):
        keep: List[int] = [node for seg in segs[:n] for node in seg]
    elif isinstance(n, slice):
        keep = [node for seg in segs[n] for node in seg]
    else:
        raise TypeError(f'Unable to use `n` of type "{type(n)}"')

    if inverse:
        # Keep everything *except* the selected neurite(s)
        _ = morpho.subset_neuron(
            x, ~np.isin(x.nodes.node_id.values, keep), inplace=True
        )
    else:
        _ = morpho.subset_neuron(x, keep, inplace=True)

    return x

Produce dotprops from neurons or point clouds.

This is following the implementation in R's nat library.

PARAMETER DESCRIPTION
x
    Data/object to generate dotprops from. DataFrame must have
    'x', 'y' and 'z' columns.

TYPE: Neuron | NeuronList | pandas.DataFrame | numpy.ndarray

k
    Number of nearest neighbours to use for tangent vector
    calculation:

      - `k=0` or `k=None` is possible but only for
        `TreeNeurons` where we then use the midpoints between
        child -> parent nodes and their vectors
      - `k` is only guaranteed if the input has at least `k`
        points
      - `k` includes self-hits and while `k=1` is not
        strictly forbidden, it makes little sense and will
        likely produce nonsense dotprops

TYPE: int (> 1) DEFAULT: 20

resample
    If provided will resample neurons to the given resolution:

      - for `MeshNeurons`, `VoxelNeurons` and point clouds, we are using
        `trimesh.points.remove_close` to remove surface vertices
        closer than the given resolution. Note that this is only
        approximate and also means that `Mesh/VoxelNeurons`
        can not be up-sampled!
      - if the neuron has `.units` set you can also provide this
        as string, e.g. "1 micron".

TYPE: float | int | str DEFAULT: False

threshold
    Only for `VoxelNeurons`: determines which voxels will be
    converted to dotprops points.

TYPE: float DEFAULT: None

RETURNS DESCRIPTION
navis.Dotprops

If input is multiple neurons, will return a navis.NeuronList of navis.Dotprops.

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> dp = navis.make_dotprops(n)
>>> dp
type        navis.Dotprops
name             DA1_lPN_R
id              1734350788
k                       20
units          8 nanometer
n_points              4465
dtype: object
Source code in navis/core/core_utils.py
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
@utils.map_neuronlist(desc='Dotprops', allow_parallel=True)
# NOTE(review): `make_using` is accepted but never referenced in this body -
# presumably consumed elsewhere (e.g. by a decorator or to select a Dotprops
# subclass); confirm before documenting it.
def make_dotprops(x: Union[pd.DataFrame, np.ndarray,
                           'core.TreeNeuron', 'core.MeshNeuron',
                           'core.VoxelNeuron', 'core.NeuronList'],
                  k: int = 20,
                  resample: Union[float, int, bool, str] = False,
                  threshold: Optional[float] = None,
                  make_using: Optional = None) -> Union['core.Dotprops', 'core.NeuronList']:
    """Produce dotprops from neurons or point clouds.

    This is following the implementation in R's `nat` library.

    Parameters
    ----------
    x :         Neuron | NeuronList | pandas.DataFrame | numpy.ndarray
                Data/object to generate dotprops from. DataFrame must have
                'x', 'y' and 'z' columns.
    k :         int (> 1), optional
                Number of nearest neighbours to use for tangent vector
                calculation:

                  - `k=0` or `k=None` is possible but only for
                    `TreeNeurons` where we then use the midpoints between
                    child -> parent nodes and their vectors
                  - `k` is only guaranteed if the input has at least `k`
                    points
                  - `k` includes self-hits and while `k=1` is not
                    strictly forbidden, it makes little sense and will
                    likely produce nonsense dotprops

    resample :  float | int | str, optional
                If provided will resample neurons to the given resolution:

                  - for `MeshNeurons`, `VoxelNeurons` and point clouds, we are using
                    `trimesh.points.remove_close` to remove surface vertices
                    closer than the given resolution. Note that this is only
                    approximate and also means that `Mesh/VoxelNeurons`
                    can not be up-sampled!
                  - if the neuron has `.units` set you can also provide this
                    as string, e.g. "1 micron".

    threshold : float, optional
                Only for `VoxelNeurons`: determines which voxels will be
                converted to dotprops points.

    Returns
    -------
    navis.Dotprops

                If input is multiple neurons, will return a
                [`navis.NeuronList`][] of [`navis.Dotprops`][].

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> dp = navis.make_dotprops(n)
    >>> dp
    type        navis.Dotprops
    name             DA1_lPN_R
    id              1734350788
    k                       20
    units          8 nanometer
    n_points              4465
    dtype: object

    """
    # k=1 means each point's neighbourhood is just itself -> degenerate tangents
    if k and k == 1:
        logger.warning('`k=1` is likely to produce nonsense dotprops')

    utils.eval_param(resample, name='resample',
                     allowed_types=(numbers.Number, type(None), str))

    # Meta data (units, name, id) carried over onto the resulting Dotprops
    properties = {}
    if isinstance(x, pd.DataFrame):
        if not all(np.isin(['x', 'y', 'z'], x.columns)):
            raise ValueError('DataFrame must contain "x", "y" and "z" columns.')
        x = x[['x', 'y', 'z']].values
    elif isinstance(x, core.TreeNeuron):
        if resample:
            x = x.resample(resample_to=resample, inplace=False)
        properties.update({'units': x.units, 'name': x.name, 'id': x.id})

        # k<=0/None: derive tangents directly from child->parent edges instead
        # of a nearest-neighbour fit - only possible for skeletons
        if isinstance(k, type(None)) or k <= 0:
            points, vect, length = graph.neuron2tangents(x)
            return core.Dotprops(points=points, vect=vect, length=length, alpha=None,
                                 k=None, **properties)

        x = x.nodes[['x', 'y', 'z']].values
    elif isinstance(x, core.MeshNeuron):
        properties.update({'units': x.units, 'name': x.name, 'id': x.id})
        x = x.vertices
        # "Resampling" a mesh = thinning its vertex cloud (downsample only)
        if resample:
            x, _ = tm.points.remove_close(x, resample)
    elif isinstance(x, core.Dotprops):
        properties.update({'units': x.units, 'name': x.name, 'id': x.id})
        x = x.points
        if resample:
            x, _ = tm.points.remove_close(x, resample)
    elif isinstance(x, core.VoxelNeuron):
        properties.update({'name': x.name, 'id': x.id})
        if not x.units.dimensionless:
            # We are scaling the units - hence all are set to 1
            properties['units'] = [f'1 {u.units}' for u in x.units_xyz]

        # Convert voxel grid to a point cloud, optionally keeping only voxels
        # at/above `threshold`
        if threshold:
            x = x.voxels[x.values >= threshold] * x.units.magnitude
        else:
            x = x.voxels * x.units.magnitude

        if resample:
            x, _ = tm.points.remove_close(x, resample)
    elif isinstance(x, np.ndarray) and resample:
        x, _ = tm.points.remove_close(x, resample)

    # By now every accepted input type has been reduced to an (N, 3) array
    if not isinstance(x, np.ndarray):
        raise TypeError(f'Unable to generate dotprops from data of type "{type(x)}"')

    if x.ndim != 2 or x.shape[1] != 3:
        raise ValueError(f'Expected input of shape (N, 3), got {x.shape}')

    # Only TreeNeurons (handled above) support the k=None/k<=0 code path
    if isinstance(k, type(None)) or k <= 0:
        raise ValueError('`k` must be > 0 when converting non-TreeNeurons to '
                         'Dotprops.')

    # Drop rows with NAs
    x = x[~np.any(np.isnan(x), axis=1)]

    # Checks and balances
    n_points = x.shape[0]

    # Make sure we don't ask for more nearest neighbors than we have points
    k = min(n_points, k)

    properties['k'] = k

    # Create the KDTree and get the k-nearest neighbors for each point
    tree = cKDTree(x)
    dist, ix = tree.query(x, k=k)
    # This makes sure we have (N, k) shaped array even if k = 1
    ix = ix.reshape(x.shape[0], k)

    # Get points: array of (N, k, 3)
    pt = x[ix]

    # Generate centers for each cloud of k nearest neighbors
    centers = np.mean(pt, axis=1)

    # Generate vector from center
    cpt = pt - centers.reshape((pt.shape[0], 1, 3))

    # Get inertia (N, 3, 3)
    inertia = cpt.transpose((0, 2, 1)) @ cpt

    # Extract vector and alpha:
    # the first right-singular vector of each neighbourhood's inertia matrix
    # is the local tangent direction; alpha measures how line-like the
    # neighbourhood is (1 = perfectly linear, 0 = isotropic)
    u, s, vh = np.linalg.svd(inertia)
    vect = vh[:, 0, :]
    alpha = (s[:, 0] - s[:, 1]) / np.sum(s, axis=1)

    return core.Dotprops(points=x, alpha=alpha, vect=vect, **properties)

Generate mesh from object(s).

VoxelNeurons or (N, 3) arrays of voxel coordinates will be meshed using a marching cubes algorithm. TreeNeurons will be meshed by creating cylinders using the radii.

PARAMETER DESCRIPTION
x
        Object to mesh. See notes above.

TYPE: VoxelNeuron | (N, 3) np.array | TreeNeuron

**kwargs
        Keyword arguments are passed through to the respective
        converters: [`navis.conversion.voxels2mesh`][] and
        [`navis.conversion.tree2meshneuron`][], respectively.

DEFAULT: {}

RETURNS DESCRIPTION
mesh

Returns a trimesh or MeshNeuron depending on the input. Data tables (e.g. connectors) are not carried over from input neuron.

TYPE: trimesh.Trimesh | MeshNeuron

Source code in navis/conversion/wrappers.py
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
@utils.map_neuronlist(desc='Meshing', allow_parallel=True)
def mesh(x: Union['core.VoxelNeuron', np.ndarray, 'core.TreeNeuron'],
         **kwargs) -> Union[tm.Trimesh, 'core.MeshNeuron']:
    """Generate mesh from object(s).

    VoxelNeurons or (N, 3) arrays of voxel coordinates will be meshed using
    a marching cubes algorithm. TreeNeurons will be meshed by creating
    cylinders using the radii.

    Parameters
    ----------
    x :             VoxelNeuron | (N, 3) np.array | TreeNeuron
                    Object to mesh. See notes above.
    **kwargs
                    Keyword arguments are passed through to the respective
                    converters: [`navis.conversion.voxels2mesh`][] and
                    [`navis.conversion.tree2meshneuron`][], respectively.

    Returns
    -------
    mesh :          trimesh.Trimesh | MeshNeuron
                    Returns a trimesh or MeshNeuron depending on the input.
                    Data tables (e.g. `connectors`) are not carried over from
                    input neuron.

    """
    # Fix: numpy arrays expose `.ndim`, not `.ndims` - the old check raised
    # AttributeError for any array input instead of meshing it.
    is_coord_array = (isinstance(x, np.ndarray)
                      and x.ndim == 2
                      and x.shape[1] == 3)

    if isinstance(x, core.VoxelNeuron) or is_coord_array:
        return voxels2mesh(x, **kwargs)
    elif isinstance(x, core.TreeNeuron):
        return tree2meshneuron(x, **kwargs)

    raise TypeError(f'Unable to create mesh from data of type {type(x)}')

Mirror 3D coordinates about given axis.

This is a lower level version of navis.mirror_brain that: 1. Flips object along midpoint of axis using an affine transformation. 2. (Optional) Applies a warp transform that corrects asymmetries.

PARAMETER DESCRIPTION
points
            3D coordinates to mirror.

TYPE: (N, 3) numpy array

mirror_axis_size
            A single number specifying the size of the mirror axis.
            This is used to find the midpoint to mirror about.

TYPE: int | float

mirror_axis
            Axis to mirror. Defaults to `x`.

TYPE: 'x' | 'y' | 'z' DEFAULT: 'x'

warp
            If provided, will apply this warp transform after the
            affine flipping. Typically this will be a mirror
            registration to compensate for left/right asymmetries.

TYPE: Transform DEFAULT: None

RETURNS DESCRIPTION
points_mirrored

Mirrored coordinates.

See Also

navis.mirror_brain Higher level function that uses meta data from registered template brains to transform data for you.

Source code in navis/transforms/xfm_funcs.py
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
def mirror(points: np.ndarray, mirror_axis_size: float,
           mirror_axis: str = 'x',
           warp: Optional['BaseTransform'] = None) -> np.ndarray:
    """Mirror 3D coordinates about given axis.

    This is a lower level version of `navis.mirror_brain` that:
     1. Flips object along midpoint of axis using an affine transformation.
     2. (Optional) Applies a warp transform that corrects asymmetries.

    Parameters
    ----------
    points :            (N, 3) numpy array
                        3D coordinates to mirror.
    mirror_axis_size :  int | float
                        A single number specifying the size of the mirror axis.
                        This is used to find the midpoint to mirror about.
    mirror_axis :       'x' | 'y' | 'z', optional
                        Axis to mirror. Defaults to `x`.
    warp :              Transform, optional
                        If provided, will apply this warp transform after the
                        affine flipping. Typically this will be a mirror
                        registration to compensate for left/right asymmetries.

    Returns
    -------
    points_mirrored
                        Mirrored coordinates.

    See Also
    --------
    [`navis.mirror_brain`][]
                    Higher level function that uses meta data from registered
                    template brains to transform data for you.

    """
    utils.eval_param(mirror_axis, name='mirror_axis',
                     allowed_values=('x', 'y', 'z'), on_error='raise')

    # Coerce to array and validate shape
    points = np.asarray(points)
    if points.ndim != 2 or points.shape[1] != 3:
        raise ValueError('Array must be of shape (N, 3).')

    # Map axis label to column index
    axis_ix = 'xyz'.index(mirror_axis)

    # Homogeneous affine that negates the axis and shifts by its full
    # length - i.e. a reflection about the axis midpoint
    flip_mat = np.eye(4)
    flip_mat[axis_ix, axis_ix] = -1
    flip_mat[axis_ix, 3] = mirror_axis_size

    # Apply the flip
    mirrored = AffineTransform(flip_mat).xform(points)

    # Optionally correct left/right asymmetries with the provided warp
    if isinstance(warp, (BaseTransform, TransformSequence)):
        mirrored = warp.xform(mirrored)

    # Note that we are enforcing the same data type as the input data here.
    # This is unlike in `xform` or `xform_brain` where data might genuinely
    # end up in a space that requires higher precision (e.g. going from
    # nm to microns).
    return mirrored.astype(points.dtype)

Mirror 3D object (neuron, coordinates) about given axis.

The way this works is: 1. Look up the length of the template space along the given axis. For this, the template space has to be registered (see docs for details). 2. Flip object along midpoint of axis using an affine transformation. 3. (Optional) Apply a warp transform that corrects asymmetries.

PARAMETER DESCRIPTION
x
        Data to transform. Dataframe must contain `['x', 'y', 'z']`
        columns. Numpy array must be shape `(N, 3)`.

TYPE: Neuron/List | Volume/trimesh | numpy.ndarray | pandas.DataFrame

template
        Source template brain space that the data is in. If string
        will be searched against registered template brains.
        Alternatively check out [`navis.transforms.mirror`][]
        for a lower level interface.

TYPE: str | TemplateBrain

mirror_axis
        Axis to mirror. Defaults to `x`.

TYPE: 'x' | 'y' | 'z' DEFAULT: 'x'

warp
        If 'auto', will check if a non-rigid mirror transformation
        exists for the given `template` and apply it after the
        flipping. Alternatively, you can also pass a Transform or
        TransformSequence directly.

TYPE: bool | "auto" | Transform DEFAULT: 'auto'

via
        If provided, (e.g. "FCWB") will first transform coordinates
        into that space, then mirror and transform back.
        Use this if there is no mirror registration for the original
        template, or to transform to a symmetrical template in which
        flipping is sufficient.

TYPE: str | None DEFAULT: None

verbose
        If True, will print some useful info on the transform(s).

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
xf

Same object type as input (array, neurons, etc) but with transformed coordinates.

Examples:

This example requires the flybrains library to be installed: pip3 install flybrains

Also, if you haven't already, you will need to have the optional Saalfeld lab (Janelia Research Campus) transforms installed (this is a one-off):

>>> import flybrains
>>> flybrains.download_jrc_transforms()

Once flybrains is installed and you have downloaded the registrations, you can run this:

>>> import navis
>>> import flybrains
>>> # navis example neurons are in raw hemibrain (JRCFIB2018Fraw) space
>>> n = navis.example_neurons(1)
>>> # Mirror about x axis (this is a simple flip in this case)
>>> mirrored = navis.mirror_brain(n * 8 / 1000, template='JRCFIB2018F', via='JRC2018F')
>>> # We also need to get back to raw coordinates
>>> mirrored = mirrored / 8 * 1000
See Also

navis.mirror Lower level function for mirroring. You can use this if you want to mirror data without having a registered template for it.

Source code in navis/transforms/templates.py
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
def mirror_brain(x: Union['core.NeuronObject', 'pd.DataFrame', 'np.ndarray'],
                 template: Union[str, 'TemplateBrain'],
                 mirror_axis: Union[Literal['x'],
                                    Literal['y'],
                                    Literal['z']] = 'x',
                 warp: Union[Literal['auto'], bool] = 'auto',
                 via: Optional[str] = None,
                 verbose: bool = False) -> Union['core.NeuronObject',
                                                 'pd.DataFrame',
                                                 'np.ndarray']:
    """Mirror 3D object (neuron, coordinates) about given axis.

    The way this works is:
     1. Look up the length of the template space along the given axis. For this,
        the template space has to be registered (see docs for details).
     2. Flip object along midpoint of axis using an affine transformation.
     3. (Optional) Apply a warp transform that corrects asymmetries.

    Parameters
    ----------
    x :             Neuron/List | Volume/trimesh | numpy.ndarray | pandas.DataFrame
                    Data to transform. Dataframe must contain `['x', 'y', 'z']`
                    columns. Numpy array must be shape `(N, 3)`.
    template :      str | TemplateBrain
                    Source template brain space that the data is in. If string
                    will be searched against registered template brains.
                    Alternatively check out [`navis.transforms.mirror`][]
                    for a lower level interface.
    mirror_axis :   'x' | 'y' | 'z', optional
                    Axis to mirror. Defaults to `x`.
    warp :          bool | "auto" | Transform, optional
                    If 'auto', will check if a non-rigid mirror transformation
                    exists for the given `template` and apply it after the
                    flipping. Alternatively, you can also pass a Transform or
                    TransformSequence directly.
    via :           str | None
                    If provided, (e.g. "FCWB") will first transform coordinates
                    into that space, then mirror and transform back.
                    Use this if there is no mirror registration for the original
                    template, or to transform to a symmetrical template in which
                    flipping is sufficient.
    verbose :       bool
                    If True, will print some useful info on the transform(s).

    Returns
    -------
    xf
                    Same object type as input (array, neurons, etc) but with
                    transformed coordinates.

    Raises
    ------
    TypeError
                    If `x` or `template` are of unsupported type.
    ValueError
                    If `mirror_axis`/`warp` are invalid, if the input array has
                    the wrong shape, or if no usable bounding box / mirror
                    transform can be found for `template`.

    Examples
    --------
    This example requires the
    [flybrains](https://github.com/navis-org/navis-flybrains)
    library to be installed: `pip3 install flybrains`

    Also, if you haven't already, you will need to have the optional Saalfeld
    lab (Janelia Research Campus) transforms installed (this is a one-off):

    >>> import flybrains                                        # doctest: +SKIP
    >>> flybrains.download_jrc_transforms()                     # doctest: +SKIP

    Once `flybrains` is installed and you have downloaded the registrations,
    you can run this:

    >>> import navis
    >>> import flybrains
    >>> # navis example neurons are in raw hemibrain (JRCFIB2018Fraw) space
    >>> n = navis.example_neurons(1)
    >>> # Mirror about x axis (this is a simple flip in this case)
    >>> mirrored = navis.mirror_brain(n * 8 / 1000, template='JRCFIB2018F', via='JRC2018F') # doctest: +SKIP
    >>> # We also need to get back to raw coordinates
    >>> mirrored = mirrored / 8 * 1000                          # doctest: +SKIP

    See Also
    --------
    [`navis.mirror`][]
                    Lower level function for mirroring. You can use this if
                    you want to mirror data without having a registered
                    template for it.

    """
    utils.eval_param(mirror_axis, name='mirror_axis',
                     allowed_values=('x', 'y', 'z'), on_error='raise')
    # `warp` may also be an actual transform - only validate the string/bool case
    if not isinstance(warp, (BaseTransform, TransformSequence)):
        utils.eval_param(warp, name='warp',
                         allowed_values=('auto', True, False), on_error='raise')

    # If we go via another brain space: transform there, mirror, transform back
    if via and via != template:
        # Xform to "via" space
        xf = xform_brain(x, source=template, target=via, verbose=verbose)
        # Mirror
        xfm = mirror_brain(xf,
                           template=via,
                           mirror_axis=mirror_axis,
                           warp=warp,
                           via=None)
        # Xform back to original template space
        xfm_inv = xform_brain(xfm, source=via, target=template, verbose=verbose)
        return xfm_inv

    if isinstance(x, core.NeuronList):
        if len(x) == 1:
            # Single neuron: unwrap and fall through to the BaseNeuron branch
            x = x[0]
        else:
            xf = []
            for n in config.tqdm(x, desc='Mirroring',
                                 disable=config.pbar_hide,
                                 leave=config.pbar_leave):
                xf.append(mirror_brain(n,
                                       template=template,
                                       mirror_axis=mirror_axis,
                                       warp=warp))
            return core.NeuronList(xf)

    if isinstance(x, core.BaseNeuron):
        x = x.copy()
        if isinstance(x, core.TreeNeuron):
            x.nodes = mirror_brain(x.nodes,
                                   template=template,
                                   mirror_axis=mirror_axis,
                                   warp=warp)
        elif isinstance(x, core.Dotprops):
            if x.k is None or x.k <= 0:
                # If no k, we need to mirror vectors too. Note that this is less
                # than ideal though! Here, we are scaling the vector by the
                # dotprop's sampling resolution (i.e. ideally a representative
                # distance between the points) because if the vectors are too
                # small any warping transform will make them go haywire
                hp = mirror_brain(x.points + x.vect * x.sampling_resolution * 2,
                                  template=template,
                                  mirror_axis=mirror_axis,
                                  warp=warp)

            x.points = mirror_brain(x.points,
                                    template=template,
                                    mirror_axis=mirror_axis,
                                    warp=warp)

            if x.k is None or x.k <= 0:
                # Re-generate vectors from the mirrored helper points
                vect = x.points - hp
                vect = vect / np.linalg.norm(vect, axis=1).reshape(-1, 1)
                x._vect = vect
            else:
                # Set tangent vectors and alpha to None so they will be
                # regenerated on demand
                x._vect = x._alpha = None
        elif isinstance(x, core.MeshNeuron):
            x.vertices = mirror_brain(x.vertices,
                                      template=template,
                                      mirror_axis=mirror_axis,
                                      warp=warp)
            # We also need to flip the normals (mirroring inverts winding order)
            x.faces = x.faces[:, ::-1]
        else:
            raise TypeError(f"Don't know how to transform neuron of type '{type(x)}'")

        if x.has_connectors:
            x.connectors = mirror_brain(x.connectors,
                                        template=template,
                                        mirror_axis=mirror_axis,
                                        warp=warp)
        return x
    elif isinstance(x, tm.Trimesh):
        x = x.copy()
        x.vertices = mirror_brain(x.vertices,
                                  template=template,
                                  mirror_axis=mirror_axis,
                                  warp=warp)

        # We also need to flip the normals (mirroring inverts winding order)
        x.faces = x.faces[:, ::-1]
        return x
    elif isinstance(x, pd.DataFrame):
        if any([c not in x.columns for c in ['x', 'y', 'z']]):
            raise ValueError('DataFrame must have x, y and z columns.')
        x = x.copy()
        x[["x", "y", "z"]] = mirror_brain(
            x[["x", "y", "z"]].values,
            template=template,
            mirror_axis=mirror_axis,
            warp=warp,
        )
        return x
    else:
        try:
            # At this point we expect numpy arrays
            x = np.asarray(x)
        except BaseException:
            raise TypeError(f'Unable to transform data of type "{type(x)}"')

        if not x.ndim == 2 or x.shape[1] != 3:
            raise ValueError('Array must be of shape (N, 3).')

    # NOTE: the original code constructed this TypeError without raising it;
    # it also rejected TemplateBrain which the signature (and the code below)
    # explicitly supports. Both fixed here.
    if not isinstance(template, (str, TemplateBrain)):
        raise TypeError('Expected template of type str or TemplateBrain, '
                        f'got "{type(template)}"')

    if isinstance(warp, (BaseTransform, TransformSequence)):
        mirror_trans = warp
    elif warp:
        # See if there is a mirror registration
        mirror_trans = registry.find_mirror_reg(template, non_found='ignore')

        # Get actual transform from tuple
        if mirror_trans:
            mirror_trans = mirror_trans.transform
        # If warp was not "auto" and we didn't find a registration, raise
        # (this branch is only reached when mirror_trans is falsy)
        elif warp != 'auto':
            raise ValueError(f'No mirror transform found for "{template}"')
    else:
        mirror_trans = None

    # Now find the meta info about the template brain
    if isinstance(template, TemplateBrain):
        tb = template
    else:
        tb = registry.find_template(template, non_found='raise')

    # Get the bounding box
    if not hasattr(tb, 'boundingbox'):
        raise ValueError(f'Template "{tb.label}" has no bounding box info.')

    if not isinstance(tb.boundingbox, (list, tuple, np.ndarray)):
        raise TypeError("Expected the template brain's bounding box to be a "
                        f"list, tuple or array - got '{type(tb.boundingbox)}'")

    # Get bounding box of template brain
    bbox = np.asarray(tb.boundingbox)

    # Reshape if flat array
    if bbox.ndim == 1:
        bbox = bbox.reshape(3, 2)

    # Index of mirror axis
    ix = {'x': 0, 'y': 1, 'z': 2}[mirror_axis]

    if bbox.shape == (3, 2):
        # In nat.templatebrains this is using the sum (min+max) but have a
        # suspicion that this should be the difference (max-min)
        mirror_axis_size = bbox[ix, :].sum()
    elif bbox.shape == (2, 3):
        mirror_axis_size = bbox[:, ix].sum()
    else:
        raise ValueError('Expected bounding box to be of shape (3, 2) or (2, 3)'
                         f' got {bbox.shape}')

    return mirror(x, mirror_axis=mirror_axis, mirror_axis_size=mirror_axis_size,
                  warp=mirror_trans)

NBLAST query against target neurons.

This implements the NBLAST algorithm from Costa et al. (2016) (see references) and mirrors the implementation in R's nat.nblast (https://github.com/natverse/nat.nblast).

PARAMETER DESCRIPTION
query
        Query neuron(s) to NBLAST against the targets. Neurons
        should be in microns as NBLAST is optimized for that and
        have similar sampling resolutions.

TYPE: Dotprops | NeuronList

target
        Target neuron(s) to NBLAST against. Neurons should be in
        microns as NBLAST is optimized for that and have
        similar sampling resolutions. If not provided, will NBLAST
        queries against themselves.

TYPE: Dotprops | NeuronList DEFAULT: None

scores
        Determines the final scores:

          - 'forward' (default) returns query->target scores
          - 'mean' returns the mean of query->target and
            target->query scores
          - 'min' returns the minimum between query->target and
            target->query scores
          - 'max' returns the maximum between query->target and
            target->query scores
          - 'both' will return forward and reverse scores as
            multi-index DataFrame

TYPE: 'forward' | 'mean' | 'min' | 'max' | 'both' DEFAULT: 'forward'

use_alpha
        Emphasizes neurons' straight parts (backbone) over parts
        that have lots of branches.

TYPE: bool DEFAULT: False

normalized
        Whether to return normalized NBLAST scores.

TYPE: bool DEFAULT: True

smat
        Score matrix. If 'auto' (default), will use scoring matrices
        from FCWB. Same behaviour as in R's nat.nblast
        implementation.
        If `smat='v1'` it uses the analytic formulation of the
        NBLAST scoring from Kohl et. al (2013). You can adjust parameter
        `sigma_scaling` (default to 10) using `smat_kwargs`.
        If `Callable` given, it passes distance and dot products as
        first and second argument respectively.
        If `smat=None` the scores will be
        generated as the product of the distances and the dotproduct
        of the vectors of nearest-neighbor pairs.

TYPE: str | pd.DataFrame | Callable DEFAULT: 'auto'

limit_dist
        Sets the max distance for the nearest neighbor search
        (`distance_upper_bound`). Typically this should be the
        highest distance considered by the scoring function. If
        "auto", will extract that value from the scoring matrix.
        While this can give a ~2X speed up, it will introduce slight
        inaccuracies because we won't have a vector component for
        points without a nearest neighbour within the distance
        limits. The impact depends on the scoring function but with
        the default FCWB `smat`, this is typically limited to the
        third decimal (0.0086 +/- 0.0027 for an all-by-all of 1k
        neurons).

TYPE: float | "auto" | None DEFAULT: None

approx_nn
        If True, will use approximate nearest neighbors. This gives
        a >2X speed up but also produces only approximate scores.
        Impact depends on the use case - testing highly recommended!

TYPE: bool DEFAULT: False

n_cores
        Max number of cores to use for nblasting. Default is
        `os.cpu_count() // 2`. This should ideally be an even
        number as that allows optimally splitting queries onto
        individual processes.

TYPE: int DEFAULT: os.cpu_count() // 2

precision
        Precision for scores. Defaults to 64 bit (double) floats.
        This is useful to reduce the memory footprint for very large
        matrices. In real-world scenarios 32 bit (single)- and
        depending on the purpose even 16 bit (half) - are typically
        sufficient.

TYPE: int [16, 32, 64] | str [e.g. "float64"] | np.dtype DEFAULT: 64

progress
        Whether to show progress bars. This may cause some overhead,
        so switch off if you don't really need it.

TYPE: bool DEFAULT: True

smat_kwargs
        Dictionary with additional parameters passed to scoring
        functions.

TYPE: Optional[Dict] DEFAULT: dict()

RETURNS DESCRIPTION
scores

Matrix with NBLAST scores. Rows are query neurons, columns are targets. The order is the same as in query/target and the labels are based on the neurons' .id property.

TYPE: pandas.DataFrame

References

Costa M, Manton JD, Ostrovsky AD, Prohaska S, Jefferis GS. NBLAST: Rapid, Sensitive Comparison of Neuronal Structure and Construction of Neuron Family Databases. Neuron. 2016 Jul 20;91(2):293-311. doi: 10.1016/j.neuron.2016.06.012.

Examples:

>>> import navis
>>> nl = navis.example_neurons(n=5)
>>> nl.units
<Quantity([8 8 8 8 8], 'nanometer')>
>>> # Convert to microns
>>> nl_um = nl * (8 / 1000)
>>> # Convert to dotprops
>>> dps = navis.make_dotprops(nl_um)
>>> # Run the nblast
>>> scores = navis.nblast(dps[:3], dps[3:])
See Also

navis.nblast_allbyall A more efficient way than nblast(query=x, target=x). navis.nblast_smart A smart(er) NBLAST suited for very large NBLAST. navis.synblast A synapse-based variant of NBLAST.

Source code in navis/nbl/nblast_funcs.py
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
def nblast(query: Union[Dotprops, NeuronList],
           target: Optional[str] = None,
           scores: Union[Literal['forward'],
                         Literal['mean'],
                         Literal['min'],
                         Literal['max']] = 'forward',
           normalized: bool = True,
           use_alpha: bool = False,
           smat: Optional[Union[str, pd.DataFrame, Callable]] = 'auto',
           limit_dist: Optional[Union[Literal['auto'], int, float]] = None,
           approx_nn: bool = False,
           precision: Union[int, str, np.dtype] = 64,
           n_cores: int = os.cpu_count() // 2,
           progress: bool = True,
           smat_kwargs: Optional[Dict] = None) -> pd.DataFrame:
    """NBLAST query against target neurons.

    This implements the NBLAST algorithm from Costa et al. (2016) (see
    references) and mirrors the implementation in R's `nat.nblast`
    (https://github.com/natverse/nat.nblast).

    Parameters
    ----------
    query :         Dotprops | NeuronList
                    Query neuron(s) to NBLAST against the targets. Neurons
                    should be in microns as NBLAST is optimized for that and
                    have similar sampling resolutions.
    target :        Dotprops | NeuronList, optional
                    Target neuron(s) to NBLAST against. Neurons should be in
                    microns as NBLAST is optimized for that and have
                    similar sampling resolutions. If not provided, will NBLAST
                    queries against themselves.
    scores :        'forward' | 'mean' | 'min' | 'max' | 'both'
                    Determines the final scores:

                      - 'forward' (default) returns query->target scores
                      - 'mean' returns the mean of query->target and
                        target->query scores
                      - 'min' returns the minimum between query->target and
                        target->query scores
                      - 'max' returns the maximum between query->target and
                        target->query scores
                      - 'both' will return forward and reverse scores as
                        multi-index DataFrame

    use_alpha :     bool, optional
                    Emphasizes neurons' straight parts (backbone) over parts
                    that have lots of branches.
    normalized :    bool, optional
                    Whether to return normalized NBLAST scores.
    smat :          str | pd.DataFrame | Callable
                    Score matrix. If 'auto' (default), will use scoring matrices
                    from FCWB. Same behaviour as in R's nat.nblast
                    implementation.
                    If `smat='v1'` it uses the analytic formulation of the
                    NBLAST scoring from Kohl et. al (2013). You can adjust parameter
                    `sigma_scaling` (default to 10) using `smat_kwargs`.
                    If `Callable` given, it passes distance and dot products as
                    first and second argument respectively.
                    If `smat=None` the scores will be
                    generated as the product of the distances and the dotproduct
                    of the vectors of nearest-neighbor pairs.
    limit_dist :    float | "auto" | None
                    Sets the max distance for the nearest neighbor search
                    (`distance_upper_bound`). Typically this should be the
                    highest distance considered by the scoring function. If
                    "auto", will extract that value from the scoring matrix.
                    While this can give a ~2X speed up, it will introduce slight
                    inaccuracies because we won't have a vector component for
                    points without a nearest neighbour within the distance
                    limits. The impact depends on the scoring function but with
                    the default FCWB `smat`, this is typically limited to the
                    third decimal (0.0086 +/- 0.0027 for an all-by-all of 1k
                    neurons).
    approx_nn :     bool
                    If True, will use approximate nearest neighbors. This gives
                    a >2X speed up but also produces only approximate scores.
                    Impact depends on the use case - testing highly recommended!
    n_cores :       int, optional
                    Max number of cores to use for nblasting. Default is
                    `os.cpu_count() // 2`. This should ideally be an even
                    number as that allows optimally splitting queries onto
                    individual processes.
    precision :     int [16, 32, 64] | str [e.g. "float64"] | np.dtype
                    Precision for scores. Defaults to 64 bit (double) floats.
                    This is useful to reduce the memory footprint for very large
                    matrices. In real-world scenarios 32 bit (single)- and
                    depending on the purpose even 16 bit (half) - are typically
                    sufficient.
    progress :      bool
                    Whether to show progress bars. This may cause some overhead,
                    so switch off if you don't really need it.
    smat_kwargs:    Dictionary with additional parameters passed to scoring
                    functions. Defaults to an empty dict.

    Returns
    -------
    scores :        pandas.DataFrame
                    Matrix with NBLAST scores. Rows are query neurons, columns
                    are targets. The order is the same as in `query`/`target`
                    and the labels are based on the neurons' `.id` property.

    References
    ----------
    Costa M, Manton JD, Ostrovsky AD, Prohaska S, Jefferis GS. NBLAST: Rapid,
    Sensitive Comparison of Neuronal Structure and Construction of Neuron
    Family Databases. Neuron. 2016 Jul 20;91(2):293-311.
    doi: 10.1016/j.neuron.2016.06.012.

    Examples
    --------
    >>> import navis
    >>> nl = navis.example_neurons(n=5)
    >>> nl.units
    <Quantity([8 8 8 8 8], 'nanometer')>
    >>> # Convert to microns
    >>> nl_um = nl * (8 / 1000)
    >>> # Convert to dotprops
    >>> dps = navis.make_dotprops(nl_um)
    >>> # Run the nblast
    >>> scores = navis.nblast(dps[:3], dps[3:])

    See Also
    --------
    [`navis.nblast_allbyall`][]
                A more efficient way than `nblast(query=x, target=x)`.
    [`navis.nblast_smart`][]
                A smart(er) NBLAST suited for very large NBLAST.
    [`navis.synblast`][]
                A synapse-based variant of NBLAST.

    """
    utils.eval_param(scores, name='scores', allowed_values=ALLOWED_SCORES)

    # NOTE: the original signature used a mutable default (`smat_kwargs=dict()`)
    # which is shared across calls; we use a None sentinel instead.
    if smat_kwargs is None:
        smat_kwargs = dict()

    if target is None:
        target = query

    # Make sure we're working on NeuronLists
    query_dps = NeuronList(query)
    target_dps = NeuronList(target)

    # Run NBLAST preflight checks
    nblast_preflight(query_dps, target_dps, n_cores,
                     req_unique_ids=True,
                     req_microns=isinstance(smat, str) and smat == 'auto')

    # Find a partition that produces batches that each run in approximately
    # 10 seconds
    if n_cores and n_cores > 1:
        if progress:
            # If progress bar, we need to make smaller mini batches.
            # These mini jobs must not be too small - otherwise the overhead
            # from spawning and sending results between processes slows things
            # down dramatically. Hence we want to make sure that each job runs
            # for >10s. The run time depends on the system and how big the neurons
            # are. Here, we run a quick test and try to extrapolate from there
            n_rows, n_cols = find_batch_partition(query_dps, target_dps,
                                                  T=10 * JOB_SIZE_MULTIPLIER)
        else:
            # If no progress bar needed, we could just split neurons evenly across
            # all available cores but that can lead to one core lagging behind
            # and finishing much later than all the others. To avoid this, we
            # should aim for each batch to finish in a certain amount of time
            n_rows, n_cols = find_batch_partition(query_dps, target_dps,
                                                  T=JOB_MAX_TIME_SECONDS)
            if (n_rows * n_cols) < n_cores:
                n_rows, n_cols = find_optimal_partition(n_cores, query_dps, target_dps)
    else:
        n_rows = n_cols = 1

    # Calculate self-hits once for all neurons
    nb = NBlaster(use_alpha=use_alpha,
                  normalized=normalized,
                  smat=smat,
                  limit_dist=limit_dist,
                  dtype=precision,
                  approx_nn=approx_nn,
                  progress=progress,
                  smat_kwargs=smat_kwargs)
    query_self_hits = np.array([nb.calc_self_hit(n) for n in query_dps])
    target_self_hits = np.array([nb.calc_self_hit(n) for n in target_dps])

    # This makes sure we don't run into multiple layers of concurrency
    with set_omp_flag(limits=OMP_NUM_THREADS_LIMIT if n_cores and (n_cores > 1) else None):
        # Initialize a pool of workers
        # Note that we're forcing "spawn" instead of "fork" (default on linux)!
        # This is to reduce the memory footprint since "fork" appears to inherit all
        # variables (including all neurons) while "spawn" appears to get only
        # what's required to run the job?
        with ProcessPoolExecutor(max_workers=n_cores,
                                 mp_context=mp.get_context('spawn')) as pool:
            with config.tqdm(desc='Preparing',
                             total=n_rows * n_cols,
                             leave=False,
                             disable=not progress) as pbar:
                futures = {}
                nblasters = []
                for qix in np.array_split(np.arange(len(query_dps)), n_rows):
                    for tix in np.array_split(np.arange(len(target_dps)), n_cols):
                        # Initialize NBlaster
                        this = NBlaster(use_alpha=use_alpha,
                                        normalized=normalized,
                                        smat=smat,
                                        limit_dist=limit_dist,
                                        dtype=precision,
                                        approx_nn=approx_nn,
                                        progress=progress,
                                        smat_kwargs=smat_kwargs)

                        # Add queries and targets
                        for ix in qix:
                            this.append(query_dps[ix], query_self_hits[ix])
                        for ix in tix:
                            this.append(target_dps[ix], target_self_hits[ix])

                        # Keep track of indices of queries and targets
                        this.queries = np.arange(len(qix))
                        this.targets = np.arange(len(tix)) + len(qix)
                        this.queries_ix = qix  # this facilitates filling in the big matrix later
                        this.targets_ix = tix  # this facilitates filling in the big matrix later
                        this.pbar_position = len(nblasters) if not utils.is_jupyter() else None

                        nblasters.append(this)
                        pbar.update()

                        # If multiple cores requested, submit job to the pool right away
                        if n_cores and n_cores > 1 and (n_cols > 1 or n_rows > 1):
                            this.progress = False  # no progress bar for individual NBLASTERs
                            futures[pool.submit(this.multi_query_target,
                                                q_idx=this.queries,
                                                t_idx=this.targets,
                                                scores=scores)] = this

            # Collect results
            if futures and len(futures) > 1:
                # Prepare empty score matrix
                scores = pd.DataFrame(np.empty((len(query_dps), len(target_dps)),
                                               dtype=this.dtype),
                                      index=query_dps.id, columns=target_dps.id)
                scores.index.name = 'query'
                scores.columns.name = 'target'

                # Collect results
                # We're dropping the "N / N_total" bit from the progress bar because
                # it's not helpful here
                fmt = ('{desc}: {percentage:3.0f}%|{bar}| [{elapsed}<{remaining}]')
                for f in config.tqdm(as_completed(futures),
                                     desc='NBLASTing',
                                     bar_format=fmt,
                                     total=len(futures),
                                     smoothing=0,
                                     disable=not progress,
                                     leave=False):
                    res = f.result()
                    this = futures[f]
                    # Fill-in big score matrix
                    scores.iloc[this.queries_ix, this.targets_ix] = res.values
            else:
                # Single batch: run it in this process
                scores = this.multi_query_target(this.queries,
                                                 this.targets,
                                                 scores=scores)

    return scores

Run NBLAST on pairwise-aligned neurons.

Requires the pycpd library at least version 2.0.1 which at the time of writing is only available from Github (not PyPI):

https://github.com/siavashk/pycpd

PARAMETER DESCRIPTION
query
        Query neuron(s) to NBLAST against the targets. Neurons
        should be in microns as NBLAST is optimized for that and
        have similar sampling resolutions.

TYPE: Neuron | NeuronList

target
        Target neuron(s) to NBLAST against. Neurons should be in
        microns as NBLAST is optimized for that and have
        similar sampling resolutions. If not provided, will NBLAST
        queries against themselves.

TYPE: Neuron | NeuronList DEFAULT: None

align_method
        Which method to use for alignment. Maps to the respective
        `navis.align_{method}` function.

TYPE: "rigid" | "deform" | "pca" | "rigid+deform" DEFAULT: 'rigid'

two_way_align
        If True, will run the alignment + NBLAST in both,
        query->target as well as target->query direction. This is
        highly recommended because it reduces the chance that a
        single bad alignment will mess up your scores.

TYPE: bool DEFAULT: True

sample_align
        If provided, will calculate an initial alignment on just a
        fraction of the points followed by a landmark transform
        to transform the rest. Use this to speed things up.

TYPE: float [0-1] DEFAULT: None

scores
        Determines the final scores:

          - 'forward' (default) returns query->target scores
          - 'mean' returns the mean of query->target and
            target->query scores
          - 'min' returns the minimum between query->target and
            target->query scores
          - 'max' returns the maximum between query->target and
            target->query scores
          - 'both' will return forward and reverse scores as
            multi-index DataFrame

TYPE: 'forward' | 'mean' | 'min' | 'max' | 'both' DEFAULT: 'mean'

use_alpha
        Emphasizes neurons' straight parts (backbone) over parts
        that have lots of branches for the NBLAST.

TYPE: bool DEFAULT: False

normalized
        Whether to return normalized NBLAST scores.

TYPE: bool DEFAULT: True

smat
        Score matrix. If 'auto' (default), will use scoring matrices
        from FCWB. Same behaviour as in R's nat.nblast
        implementation.
        If `smat='v1'` it uses the analytic formulation of the
        NBLAST scoring from Kohl et. al (2013). You can adjust parameter
        `sigma_scaling` (default to 10) using `smat_kwargs`.
        If `Callable` given, it passes distance and dot products as
        first and second argument respectively.
        If `smat=None` the scores will be
        generated as the product of the distances and the dotproduct
        of the vectors of nearest-neighbor pairs.

TYPE: str | pd.DataFrame | Callable DEFAULT: 'auto'

limit_dist
        Sets the max distance for the nearest neighbor search
        (`distance_upper_bound`). Typically this should be the
        highest distance considered by the scoring function. If
        "auto", will extract that value from the scoring matrix.
        While this can give a ~2X speed up, it will introduce slight
        inaccuracies because we won't have a vector component for
        points without a nearest neighbour within the distance
        limits. The impact depends on the scoring function but with
        the default FCWB `smat`, this is typically limited to the
        third decimal (0.0086 +/- 0.0027 for an all-by-all of 1k
        neurons).

TYPE: float | "auto" | None DEFAULT: None

approx_nn
        If True, will use approximate nearest neighbors. This gives
        a >2X speed up but also produces only approximate scores.
        Impact depends on the use case - testing highly recommended!

TYPE: bool DEFAULT: False

n_cores
        Max number of cores to use for nblasting. Default is
        `os.cpu_count() // 2`. This should ideally be an even
        number as that allows optimally splitting queries onto
        individual processes. Also note that due to multiple layers
        of concurrency using all available cores might not be the
        fastest option.

TYPE: int DEFAULT: os.cpu_count() // 2

precision
        Precision for scores. Defaults to 64 bit (double) floats.
        This is useful to reduce the memory footprint for very large
        matrices. In real-world scenarios 32 bit (single)- and
        depending on the purpose even 16 bit (half) - are typically
        sufficient.

TYPE: int [16, 32, 64] | str [e.g. "float64"] | np.dtype DEFAULT: 64

progress
        Whether to show progress bars. This may cause some overhead,
        so switch off if you don't really need it.

TYPE: bool DEFAULT: True

smat_kwargs
        Dictionary with additional parameters passed to scoring
        functions.

TYPE: dict DEFAULT: dict()

align_kwargs
        Dictionary with additional parameters passed to alignment
        function.

TYPE: dict DEFAULT: dict()

dotprop_kwargs
        Dictionary with additional parameters passed to
        `navis.make_dotprops`. Only relevant if inputs aren't
        already dotprops.

TYPE: dict DEFAULT: dict()

RETURNS DESCRIPTION
scores

Matrix with NBLAST scores. Rows are query neurons, columns are targets. The order is the same as in query/target and the labels are based on the neurons' .id property. Important to note that even when q == t and with scores=mean the matrix will not be symmetrical because we run separate alignments for the forward and the reverse comparisons.

TYPE: pandas.DataFrame

References

Costa M, Manton JD, Ostrovsky AD, Prohaska S, Jefferis GS. NBLAST: Rapid, Sensitive Comparison of Neuronal Structure and Construction of Neuron Family Databases. Neuron. 2016 Jul 20;91(2):293-311. doi: 10.1016/j.neuron.2016.06.012.

Examples:

>>> import navis
>>> nl = navis.example_neurons(n=5)
>>> nl.units
<Quantity([8 8 8 8 8], 'nanometer')>
>>> # Convert to microns
>>> nl_um = nl * (8 / 1000)
>>> # Run the align nblast
>>> scores = navis.nblast_align(nl_um[:3], nl_um[3:],
...                             dotprop_kwargs=dict(k=5),
...                             sample_align=.2)
See Also

navis.nblast The vanilla version of NBLAST. navis.nblast_allbyall A more efficient way than nblast(query=x, target=x). navis.nblast_smart A smart(er) NBLAST suited for very large NBLAST. navis.synblast A synapse-based variant of NBLAST.

Source code in navis/nbl/ablast_funcs.py
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
def nblast_align(query: Union[core.BaseNeuron, core.NeuronList],
                 target: Optional[Union[core.BaseNeuron, core.NeuronList]] = None,
                 align_method: Union[Literal['rigid'],
                                     Literal['deform'],
                                     Literal['pca'],
                                     Literal['rigid+deform'],
                                     Callable] = 'rigid',
                 two_way_align: bool = True,
                 sample_align: Optional[float] = None,
                 scores: Union[Literal['forward'],
                               Literal['mean'],
                               Literal['min'],
                               Literal['max'],
                               Literal['both']] = 'mean',
                 normalized: bool = True,
                 use_alpha: bool = False,
                 smat: Optional[Union[str, pd.DataFrame, Callable]] = 'auto',
                 limit_dist: Optional[Union[Literal['auto'], int, float]] = None,
                 approx_nn: bool = False,
                 precision: Union[int, str, np.dtype] = 64,
                 n_cores: int = os.cpu_count() // 2,
                 progress: bool = True,
                 dotprop_kwargs: Optional[Dict] = None,
                 align_kwargs: Optional[Dict] = None,
                 smat_kwargs: Optional[Dict] = None) -> pd.DataFrame:
    """Run NBLAST on pairwise-aligned neurons.

    Requires the `pycpd` library at least version 2.0.1 which at the time of
    writing is only available from Github (not PyPI):

      https://github.com/siavashk/pycpd

    Parameters
    ----------
    query :         Neuron | NeuronList
                    Query neuron(s) to NBLAST against the targets. Neurons
                    should be in microns as NBLAST is optimized for that and
                    have similar sampling resolutions.
    target :        Neuron | NeuronList, optional
                    Target neuron(s) to NBLAST against. Neurons should be in
                    microns as NBLAST is optimized for that and have
                    similar sampling resolutions. If not provided, will NBLAST
                    queries against themselves.
    align_method :  "rigid" | "deform" | "pca" | "rigid+deform" | callable
                    Which method to use for alignment. Maps to the respective
                    `navis.align_{method}` function. A callable is used as-is
                    and must have the same signature as those functions.
    two_way_align : bool
                    If True, will run the alignment + NBLAST in both,
                    query->target as well as target->query direction. This is
                    highly recommended because it reduces the chance that a
                    single bad alignment will mess up your scores.
    sample_align :  float [0-1], optional
                    If provided, will calculate an initial alignment on just a
                    fraction of the points followed by a landmark transform
                    to transform the rest. Use this to speed things up.
    scores :        'forward' | 'mean' | 'min' | 'max' | 'both'
                    Determines the final scores:

                      - 'forward' returns query->target scores
                      - 'mean' (default) returns the mean of query->target
                        and target->query scores
                      - 'min' returns the minimum between query->target and
                        target->query scores
                      - 'max' returns the maximum between query->target and
                        target->query scores
                      - 'both' will return forward and reverse scores as
                        multi-index DataFrame

    use_alpha :     bool, optional
                    Emphasizes neurons' straight parts (backbone) over parts
                    that have lots of branches for the NBLAST.
    normalized :    bool, optional
                    Whether to return normalized NBLAST scores.
    smat :          str | pd.DataFrame | Callable
                    Score matrix. If 'auto' (default), will use scoring matrices
                    from FCWB. Same behaviour as in R's nat.nblast
                    implementation.
                    If `smat='v1'` it uses the analytic formulation of the
                    NBLAST scoring from Kohl et. al (2013). You can adjust parameter
                    `sigma_scaling` (default to 10) using `smat_kwargs`.
                    If `Callable` given, it passes distance and dot products as
                    first and second argument respectively.
                    If `smat=None` the scores will be
                    generated as the product of the distances and the dotproduct
                    of the vectors of nearest-neighbor pairs.
    limit_dist :    float | "auto" | None
                    Sets the max distance for the nearest neighbor search
                    (`distance_upper_bound`). Typically this should be the
                    highest distance considered by the scoring function. If
                    "auto", will extract that value from the scoring matrix.
                    While this can give a ~2X speed up, it will introduce slight
                    inaccuracies because we won't have a vector component for
                    points without a nearest neighbour within the distance
                    limits. The impact depends on the scoring function but with
                    the default FCWB `smat`, this is typically limited to the
                    third decimal (0.0086 +/- 0.0027 for an all-by-all of 1k
                    neurons).
    approx_nn :     bool
                    If True, will use approximate nearest neighbors. This gives
                    a >2X speed up but also produces only approximate scores.
                    Impact depends on the use case - testing highly recommended!
    n_cores :       int, optional
                    Max number of cores to use for nblasting. Default is
                    `os.cpu_count() // 2`. This should ideally be an even
                    number as that allows optimally splitting queries onto
                    individual processes. Also note that due to multiple layers
                    of concurrency using all available cores might not be the
                    fastest option.
    precision :     int [16, 32, 64] | str [e.g. "float64"] | np.dtype
                    Precision for scores. Defaults to 64 bit (double) floats.
                    This is useful to reduce the memory footprint for very large
                    matrices. In real-world scenarios 32 bit (single)- and
                    depending on the purpose even 16 bit (half) - are typically
                    sufficient.
    progress :      bool
                    Whether to show progress bars. This may cause some overhead,
                    so switch off if you don't really need it.
    smat_kwargs :   dict, optional
                    Dictionary with additional parameters passed to scoring
                    functions.
    align_kwargs :  dict, optional
                    Dictionary with additional parameters passed to alignment
                    function.
    dotprop_kwargs : dict, optional
                    Dictionary with additional parameters passed to
                    `navis.make_dotprops`. Only relevant if inputs aren't
                    already dotprops.


    Returns
    -------
    scores :        pandas.DataFrame
                    Matrix with NBLAST scores. Rows are query neurons, columns
                    are targets. The order is the same as in `query`/`target`
                    and the labels are based on the neurons' `.id` property.
                    Important to note that even when `q == t` and with
                    `scores=mean` the matrix will not be symmetrical because
                    we run separate alignments for the forward and the reverse
                    comparisons.

    References
    ----------
    Costa M, Manton JD, Ostrovsky AD, Prohaska S, Jefferis GS. NBLAST: Rapid,
    Sensitive Comparison of Neuronal Structure and Construction of Neuron
    Family Databases. Neuron. 2016 Jul 20;91(2):293-311.
    doi: 10.1016/j.neuron.2016.06.012.

    Examples
    --------
    >>> import navis
    >>> nl = navis.example_neurons(n=5)
    >>> nl.units
    <Quantity([8 8 8 8 8], 'nanometer')>
    >>> # Convert to microns
    >>> nl_um = nl * (8 / 1000)
    >>> # Run the align nblast
    >>> scores = navis.nblast_align(nl_um[:3], nl_um[3:],
    ...                             dotprop_kwargs=dict(k=5),
    ...                             sample_align=.2)

    See Also
    --------
    [`navis.nblast`][]
                The vanilla version of NBLAST.
    [`navis.nblast_allbyall`][]
                A more efficient way than `nblast(query=x, target=x)`.
    [`navis.nblast_smart`][]
                A smart(er) NBLAST suited for very large NBLAST.
    [`navis.synblast`][]
                A synapse-based variant of NBLAST.

    """
    # Avoid the shared-mutable-default pitfall: these used to default to
    # `dict()` which is evaluated once at definition time. Normalize `None`
    # to a fresh dict per call instead.
    dotprop_kwargs = {} if dotprop_kwargs is None else dotprop_kwargs
    align_kwargs = {} if align_kwargs is None else align_kwargs
    smat_kwargs = {} if smat_kwargs is None else smat_kwargs

    # No targets = all-by-all comparison of the queries
    if isinstance(target, type(None)):
        target = query

    # Make sure we're working on NeuronLists
    query = core.NeuronList(query)
    target = core.NeuronList(target)

    # Resolve the alignment function: strings map to navis.align_{method},
    # callables are used as-is
    if not callable(align_method):
        align_func = {'rigid': align_rigid,
                      'deform': align_deform,
                      'pca': align_pca,
                      'rigid+deform': _align_rigid_deform}[align_method]
    else:
        align_func = align_method

    # Run NBLAST preflight checks
    nblast_preflight(query, target, n_cores,
                     req_dotprops=False,
                     req_unique_ids=True,
                     req_microns=isinstance(smat, str) and smat=='auto')

    # Find a partition that produces batches that each run in approximately
    # 10 seconds
    if n_cores and n_cores > 1:
        n_rows, n_cols = find_optimal_partition(n_cores, query, target)
        if progress:
            # If progress bar, we need to make smaller mini batches.
            # These mini jobs must not be too small - otherwise the overhead
            # from spawning and sending results between processes slows things
            # down dramatically. Here we hardcode such that we get updates
            # at most every 1%
            n_rows = max(n_rows, len(query) // 10)
            n_cols = max(n_cols, len(target) // 10)
    else:
        n_rows = n_cols = 1

    # This makes sure we don't run into multiple layers of concurrency
    # Note that it doesn't do anything for the parent process (which is great
    # if we end up not actually using multiple cores)
    with set_omp_flag(limits=1):
        # Initialize a pool of workers
        # Note that we're forcing "spawn" instead of "fork" (default on linux)!
        # This is to reduce the memory footprint since "fork" appears to inherit all
        # variables (including all neurons) while "spawn" appears to get only
        # what's required to run the job?
        with ProcessPoolExecutor(max_workers=n_cores,
                                 mp_context=mp.get_context('spawn')) as pool:
            with config.tqdm(desc='Preparing',
                             total=n_rows * n_cols,
                             leave=False,
                             disable=not progress) as pbar:
                futures = {}
                nblasters = []
                for qix in np.array_split(np.arange(len(query)), n_rows):
                    for tix in np.array_split(np.arange(len(target)), n_cols):
                        # Initialize NBlaster
                        this = NBlasterAlign(align_func=align_func,
                                             two_way_align=two_way_align,
                                             sample_align=sample_align,
                                             use_alpha=use_alpha,
                                             normalized=normalized,
                                             smat=smat,
                                             limit_dist=limit_dist,
                                             dtype=precision,
                                             approx_nn=approx_nn,
                                             progress=progress,
                                             align_kwargs=align_kwargs,
                                             dotprop_kwargs=dotprop_kwargs,
                                             smat_kwargs=smat_kwargs)

                        # Add queries and targets
                        for i, ix in enumerate(qix):
                            this.append(query[ix])
                        for i, ix in enumerate(tix):
                            this.append(target[ix])

                        # Keep track of indices of queries and targets
                        this.queries = np.arange(len(qix))
                        this.targets = np.arange(len(tix)) + len(qix)
                        this.queries_ix = qix  # this facilitates filling in the big matrix later
                        this.targets_ix = tix  # this facilitates filling in the big matrix later
                        this.pbar_position = len(nblasters) if not utils.is_jupyter() else None

                        nblasters.append(this)
                        pbar.update()

                        # If multiple cores requested, submit job to the pool right away
                        if n_cores and n_cores > 1 and (n_cols > 1 or n_rows > 1):
                            this.progress=False  # no progress bar for individual NBLASTERs
                            futures[pool.submit(this.multi_query_target,
                                                q_idx=this.queries,
                                                t_idx=this.targets,
                                                scores=scores)] = this

            # Collect results
            if futures and len(futures) > 1:
                # Prepare empty score matrix
                scores = pd.DataFrame(np.empty((len(query), len(target)),
                                               dtype=this.dtype),
                                      index=query.id, columns=target.id)
                scores.index.name = 'query'
                scores.columns.name = 'target'

                # Collect results
                # We're dropping the "N / N_total" bit from the progress bar because
                # it's not helpful here
                fmt = ('{desc}: {percentage:3.0f}%|{bar}| [{elapsed}<{remaining}]')
                for f in config.tqdm(as_completed(futures),
                                     desc='NBLASTing',
                                     bar_format=fmt,
                                     total=len(futures),
                                     smoothing=0,
                                     disable=not progress,
                                     leave=False):
                    res = f.result()
                    this = futures[f]
                    # Fill-in big score matrix
                    scores.iloc[this.queries_ix, this.targets_ix] = res.values
            else:
                # Single batch: run it in this process (`this` is the one and
                # only NBlaster from the loop above)
                scores = this.multi_query_target(this.queries,
                                                 this.targets,
                                                 scores=scores)

    return scores

All-by-all NBLAST of input neurons.

A more efficient way than running nblast(query=x, target=x).

PARAMETER DESCRIPTION
x
        Neuron(s) to NBLAST against each other. Neurons should
        be in microns as NBLAST is optimized for that and have
        similar sampling resolutions.

TYPE: Dotprops | NeuronList

n_cores
        Max number of cores to use for nblasting. Default is
        `os.cpu_count() // 2`. This should ideally be an even
        number as that allows optimally splitting queries onto
        individual processes.

TYPE: int DEFAULT: os.cpu_count() // 2

use_alpha
        Emphasizes neurons' straight parts (backbone) over parts
        that have lots of branches.

TYPE: bool DEFAULT: False

normalized
        Whether to return normalized NBLAST scores.

TYPE: bool DEFAULT: True

smat
        Score matrix/function:
         - If `smat='auto'` (default), will use scoring matrices
           based on flycircuit data. Same behaviour as in R's
           nat.nblast implementation.
         - For `smat='v1'`, uses the analytic formulation of the
           NBLAST scoring from Kohl et. al (2013). You can adjust
           parameter `sigma_scaling` (default to 10) using `smat_kwargs`.
         - For `smat=None` the scores will be generated as the product
           of the distances and the dotproduct of the vectors of
           nearest-neighbor pairs.
         - If function, must consume distance and dot products as
           first and second argument, respectively and return float.

TYPE: str | pd.DataFrame | Callable DEFAULT: 'auto'

limit_dist
        Sets the max distance for the nearest neighbor search
        (`distance_upper_bound`). Typically this should be the
        highest distance considered by the scoring function. If
        "auto", will extract that value from the scoring matrix.
        While this can give a ~2X speed up, it will introduce slight
        inaccuracies because we won't have a vector component for
        points without a nearest neighbour within the distance
        limits. The impact depends on the scoring function but with
        the default FCWB `smat`, this is typically limited to the
        third decimal (0.0086 +/- 0.0027 for an all-by-all of 1k
        neurons).

TYPE: float | "auto" | None DEFAULT: None

approx_nn
        If True, will use approximate nearest neighbors. This gives
        a >2X speed up but also produces only approximate scores.
        Impact depends on the use case - testing highly recommended!

TYPE: bool DEFAULT: False

precision
        Precision for scores. Defaults to 64 bit (double) floats.
        This is useful to reduce the memory footprint for very large
        matrices. In real-world scenarios 32 bit (single)- and
        depending on the purpose even 16 bit (half) - are typically
        sufficient.

TYPE: int [16, 32, 64] | str [e.g. "float64"] | np.dtype DEFAULT: 64

progress
        Whether to show progress bars. This may cause some overhead,
        so switch off if you don't really need it.

TYPE: bool DEFAULT: True

smat_kwargs
        Dictionary with additional parameters passed to scoring
        functions.

TYPE: Optional[Dict] DEFAULT: dict()

RETURNS DESCRIPTION
scores

Matrix with NBLAST scores. Rows are query neurons, columns are targets. The order is the same as in x and the labels are based on the neurons' .id property.

TYPE: pandas.DataFrame

References

Costa M, Manton JD, Ostrovsky AD, Prohaska S, Jefferis GS. NBLAST: Rapid, Sensitive Comparison of Neuronal Structure and Construction of Neuron Family Databases. Neuron. 2016 Jul 20;91(2):293-311. doi: 10.1016/j.neuron.2016.06.012.

Examples:

>>> import navis
>>> nl = navis.example_neurons(n=5)
>>> nl.units
<Quantity([8 8 8 8 8], 'nanometer')>
>>> # Convert to microns
>>> nl_um = nl * (8 / 1000)
>>> # Make dotprops
>>> dps = navis.make_dotprops(nl_um)
>>> # Run the nblast
>>> scores = navis.nblast_allbyall(dps)
See Also

navis.nblast For generic query -> target nblasts.

Source code in navis/nbl/nblast_funcs.py
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
def nblast_allbyall(x: NeuronList,
                    normalized: bool = True,
                    use_alpha: bool = False,
                    smat: Optional[Union[str, pd.DataFrame, Callable]] = 'auto',
                    limit_dist: Optional[Union[Literal['auto'], int, float]] = None,
                    approx_nn: bool = False,
                    precision: Union[int, str, np.dtype] = 64,
                    n_cores: int = os.cpu_count() // 2,
                    progress: bool = True,
                    smat_kwargs: Optional[Dict] = None) -> pd.DataFrame:
    """All-by-all NBLAST of input neurons.

    A more efficient way than running `nblast(query=x, target=x)`.

    Parameters
    ----------
    x :             Dotprops | NeuronList
                    Neuron(s) to NBLAST against each other. Neurons should
                    be in microns as NBLAST is optimized for that and have
                    similar sampling resolutions.
    n_cores :       int, optional
                    Max number of cores to use for nblasting. Default is
                    `os.cpu_count() // 2`. This should ideally be an even
                    number as that allows optimally splitting queries onto
                    individual processes.
    use_alpha :     bool, optional
                    Emphasizes neurons' straight parts (backbone) over parts
                    that have lots of branches.
    normalized :    bool, optional
                    Whether to return normalized NBLAST scores.
    smat :          str | pd.DataFrame | Callable, optional
                    Score matrix/function:
                     - If `smat='auto'` (default), will use scoring matrices
                       based on flycircuit data. Same behaviour as in R's
                       nat.nblast implementation.
                     - For `smat='v1'`, uses the analytic formulation of the
                       NBLAST scoring from Kohl et. al (2013). You can adjust
                       parameter `sigma_scaling` (default to 10) using `smat_kwargs`.
                     - For `smat=None` the scores will be generated as the product
                       of the distances and the dotproduct of the vectors of
                       nearest-neighbor pairs.
                     - If function, must consume distance and dot products as
                       first and second argument, respectively and return float.
    limit_dist :    float | "auto" | None
                    Sets the max distance for the nearest neighbor search
                    (`distance_upper_bound`). Typically this should be the
                    highest distance considered by the scoring function. If
                    "auto", will extract that value from the scoring matrix.
                    While this can give a ~2X speed up, it will introduce slight
                    inaccuracies because we won't have a vector component for
                    points without a nearest neighbour within the distance
                    limits. The impact depends on the scoring function but with
                    the default FCWB `smat`, this is typically limited to the
                    third decimal (0.0086 +/- 0.0027 for an all-by-all of 1k
                    neurons).
    approx_nn :     bool
                    If True, will use approximate nearest neighbors. This gives
                    a >2X speed up but also produces only approximate scores.
                    Impact depends on the use case - testing highly recommended!
    precision :     int [16, 32, 64] | str [e.g. "float64"] | np.dtype
                    Precision for scores. Defaults to 64 bit (double) floats.
                    This is useful to reduce the memory footprint for very large
                    matrices. In real-world scenarios 32 bit (single)- and
                    depending on the purpose even 16 bit (half) - are typically
                    sufficient.
    progress :      bool
                    Whether to show progress bars. This may cause some overhead,
                    so switch off if you don't really need it.
    smat_kwargs :   dict, optional
                    Dictionary with additional parameters passed to scoring
                    functions. Defaults to an empty dict if not provided.

    Returns
    -------
    scores :        pandas.DataFrame
                    Matrix with NBLAST scores. Rows are query neurons, columns
                    are targets. The order is the same as in `x`
                    and the labels are based on the neurons' `.id` property.

    References
    ----------
    Costa M, Manton JD, Ostrovsky AD, Prohaska S, Jefferis GS. NBLAST: Rapid,
    Sensitive Comparison of Neuronal Structure and Construction of Neuron
    Family Databases. Neuron. 2016 Jul 20;91(2):293-311.
    doi: 10.1016/j.neuron.2016.06.012.

    Examples
    --------
    >>> import navis
    >>> nl = navis.example_neurons(n=5)
    >>> nl.units
    <Quantity([8 8 8 8 8], 'nanometer')>
    >>> # Convert to microns
    >>> nl_um = nl * (8 / 1000)
    >>> # Make dotprops
    >>> dps = navis.make_dotprops(nl_um)
    >>> # Run the nblast
    >>> scores = navis.nblast_allbyall(dps)

    See Also
    --------
    [`navis.nblast`][]
                For generic query -> target nblasts.

    """
    # Avoid the mutable-default-argument pitfall: a `dict()` default would be
    # shared across all calls; normalize the `None` sentinel to a fresh dict.
    if smat_kwargs is None:
        smat_kwargs = {}

    # Check if pykdtree flag needed to be set
    #if n_cores and n_cores > 1:
    #    check_pykdtree_flag()

    # Make sure we're working on NeuronLists
    dps = NeuronList(x)

    # Run NBLAST preflight checks
    # Note that we are passing the same dotprops twice to avoid having to
    # change the function's signature. Should have little to no overhead.
    nblast_preflight(dps, dps, n_cores,
                     req_unique_ids=True,
                     req_microns=isinstance(smat, str) and smat=='auto')

    # Find a partition that produces batches that each run in approximately
    # 10 seconds
    if n_cores and n_cores > 1:
        if progress:
            # If progress bar, we need to make smaller mini batches.
            # These mini jobs must not be too small - otherwise the overhead
            # from spawning and sending results between processes slows things
            # down dramatically. Hence we want to make sure that each job runs
            # for >10s. The run time depends on the system and how big the neurons
            # are. Here, we run a quick test and try to extrapolate from there:
            n_rows, n_cols = find_batch_partition(dps, dps,
                                                  T=10 * JOB_SIZE_MULTIPLIER)
        else:
            # If no progress bar needed, we can just split neurons evenly across
            # all available cores
            n_rows, n_cols = find_optimal_partition(n_cores, dps, dps)
    else:
        n_rows = n_cols = 1

    # Calculate self-hits once for all neurons
    nb = NBlaster(use_alpha=use_alpha,
                  normalized=normalized,
                  smat=smat,
                  limit_dist=limit_dist,
                  dtype=precision,
                  approx_nn=approx_nn,
                  progress=progress,
                  smat_kwargs=smat_kwargs)
    self_hits = np.array([nb.calc_self_hit(n) for n in dps])

    # This makes sure we don't run into multiple layers of concurrency
    with set_omp_flag(limits=OMP_NUM_THREADS_LIMIT if n_cores and (n_cores > 1) else None):
        # Initialize a pool of workers
        # Note that we're forcing "spawn" instead of "fork" (default on linux)!
        # This is to reduce the memory footprint since "fork" appears to inherit all
        # variables (including all neurons) while "spawn" appears to get only
        # what's required to run the job?
        with ProcessPoolExecutor(max_workers=n_cores,
                                 mp_context=mp.get_context('spawn')) as pool:
            with config.tqdm(desc='Preparing',
                             total=n_rows * n_cols,
                             leave=False,
                             disable=not progress) as pbar:
                futures = {}
                nblasters = []
                for qix in np.array_split(np.arange(len(dps)), n_rows):
                    for tix in np.array_split(np.arange(len(dps)), n_cols):
                        # Initialize NBlaster
                        this = NBlaster(use_alpha=use_alpha,
                                        normalized=normalized,
                                        smat=smat,
                                        limit_dist=limit_dist,
                                        dtype=precision,
                                        approx_nn=approx_nn,
                                        progress=progress,
                                        smat_kwargs=smat_kwargs)

                        # Make sure we don't add the same neuron twice
                        # Map indices to neurons
                        to_add = list(set(qix) | set(tix))

                        # Add neurons
                        ixmap = {}
                        for i, ix in enumerate(to_add):
                            this.append(dps[ix], self_hits[ix])
                            ixmap[ix] = i

                        # Keep track of indices of queries and targets
                        this.queries = [ixmap[ix] for ix in qix]
                        this.targets = [ixmap[ix] for ix in tix]
                        this.queries_ix = qix  # this facilitates filling in the big matrix later
                        this.targets_ix = tix  # this facilitates filling in the big matrix later
                        this.pbar_position = len(nblasters) if not utils.is_jupyter() else None

                        nblasters.append(this)
                        pbar.update()

                        # If multiple cores requested, submit job to the pool right away
                        if n_cores and n_cores > 1 and (n_cols > 1 or n_rows > 1):
                            this.progress = False  # no progress bar for individual NBLASTERs
                            futures[pool.submit(this.multi_query_target,
                                                q_idx=this.queries,
                                                t_idx=this.targets,
                                                scores='forward')] = this

            # Collect results
            if futures and len(futures) > 1:
                # Prepare empty score matrix
                scores = pd.DataFrame(np.empty((len(dps), len(dps)),
                                               dtype=this.dtype),
                                      index=dps.id, columns=dps.id)
                scores.index.name = 'query'
                scores.columns.name = 'target'

                # Collect results
                # We're dropping the "N / N_total" bit from the progress bar because
                # it's not helpful here
                fmt = ('{desc}: {percentage:3.0f}%|{bar}| [{elapsed}<{remaining}]')
                for f in config.tqdm(as_completed(futures),
                                     desc='NBLASTing',
                                     bar_format=fmt,
                                     total=len(futures),
                                     smoothing=0,
                                     disable=not progress,
                                     leave=False):
                    res = f.result()
                    this = futures[f]
                    # Fill-in big score matrix
                    scores.iloc[this.queries_ix, this.targets_ix] = res.values
            else:
                scores = this.all_by_all()

    return scores

Smart(er) NBLAST query against target neurons.

In contrast to navis.nblast, this function will first run a "pre-NBLAST" in which only 10% of the query dotprops' points are used. Using those initial scores we select, for each query, the highest scoring targets and run the full NBLAST only on those query-target pairs (see t and criterion for fine-tuning).

PARAMETER DESCRIPTION
query
        Query neuron(s) to NBLAST against the targets. Neurons
        should be in microns as NBLAST is optimized for that and
        have similar sampling resolutions.

TYPE: Dotprops | NeuronList

target
        Target neuron(s) to NBLAST against. Neurons should be in
        microns as NBLAST is optimized for that and have
        similar sampling resolutions. If not provided, will NBLAST
        queries against themselves.

TYPE: Dotprops | NeuronList DEFAULT: None

t
        Determines for which pairs we will run a full NBLAST. See
        `criterion` parameter for details.

TYPE: int | float DEFAULT: 90

criterion
        Criterion for selecting query-target pairs for full NBLAST:
          - "percentile" runs full NBLAST on the `t`-th percentile
          - "score" runs full NBLAST on all scores above `t`
          - "N" runs full NBLAST on top `t` targets

TYPE: "percentile" | "score" | "N" DEFAULT: 'percentile'

return_mask
        If True, will also return a boolean mask that shows which
        scores are based on a full NBLAST and which ones only on
        the pre-NBLAST.

TYPE: bool DEFAULT: False

scores
        Determines the final scores:
          - 'forward' (default) returns query->target scores
          - 'mean' returns the mean of query->target and
            target->query scores
          - 'min' returns the minimum between query->target and
            target->query scores
          - 'max' returns the maximum between query->target and
            target->query scores

TYPE: 'forward' | 'mean' | 'min' | 'max' DEFAULT: 'forward'

use_alpha
        Emphasizes neurons' straight parts (backbone) over parts
        that have lots of branches.

TYPE: bool DEFAULT: False

normalized
        Whether to return normalized NBLAST scores.

TYPE: bool DEFAULT: True

smat
        Score matrix. If 'auto' (default), will use scoring matrices
        from FCWB. Same behaviour as in R's nat.nblast
        implementation.
        If `smat='v1'` it uses the analytic formulation of the
        NBLAST scoring from Kohl et. al (2013). You can adjust parameter
        `sigma_scaling` (default to 10) using `smat_kwargs`.
        If `smat=None` the scores will be
        generated as the product of the distances and the dotproduct
        of the vectors of nearest-neighbor pairs.
        If `Callable` given, it passes distance and dot products as
        first and second argument respectively.

TYPE: str | pd.DataFrame | Callable DEFAULT: 'auto'

smat_kwargs
        Dictionary with additional parameters passed to scoring
        functions.

TYPE: Optional[Dict] DEFAULT: dict()

limit_dist
        Sets the max distance for the nearest neighbor search
        (`distance_upper_bound`). Typically this should be the
        highest distance considered by the scoring function. If
        "auto", will extract that value from the scoring matrix.
        While this can give a ~2X speed up, it will introduce slight
        inaccuracies because we won't have a vector component for
        points without a nearest neighbour within the distance
        limits. The impact depends on the scoring function but with
        the default FCWB `smat`, this is typically limited to the
        third decimal (0.0086 +/- 0.0027 for an all-by-all of 1k
        neurons).

TYPE: float | "auto" | None DEFAULT: 'auto'

approx_nn
        If True, will use approximate nearest neighbors. This gives
        a >2X speed up but also produces only approximate scores.
        Impact depends on the use case - testing highly recommended!

TYPE: bool DEFAULT: False

precision
        Precision for scores. Defaults to 64 bit (double) floats.
        This is useful to reduce the memory footprint for very large
        matrices. In real-world scenarios 32 bit (single)- and
        depending on the purpose even 16 bit (half) - are typically
        sufficient.

TYPE: int [16, 32, 64] | str [e.g. "float64"] | np.dtype DEFAULT: 64

n_cores
        Max number of cores to use for nblasting. Default is
        `os.cpu_count() // 2`. This should ideally be an even
        number as that allows optimally splitting queries onto
        individual processes.

TYPE: int DEFAULT: os.cpu_count() // 2

progress
        Whether to show progress bars.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
scores

Matrix with NBLAST scores. Rows are query neurons, columns are targets. The order is the same as in query/target and the labels are based on the neurons' .id property.

TYPE: pandas.DataFrame

mask

Only if return_mask=True: a boolean mask with same shape as scores that shows which scores are based on a full NBLAST and which ones only on the pre-NBLAST.

TYPE: np.ndarray

References

Costa M, Manton JD, Ostrovsky AD, Prohaska S, Jefferis GS. NBLAST: Rapid, Sensitive Comparison of Neuronal Structure and Construction of Neuron Family Databases. Neuron. 2016 Jul 20;91(2):293-311. doi: 10.1016/j.neuron.2016.06.012.

Examples:

>>> import navis
>>> nl = navis.example_neurons(n=5)
>>> nl.units
<Quantity([8 8 8 8 8], 'nanometer')>
>>> # Convert to microns
>>> nl_um = nl * (8 / 1000)
>>> # Convert to dotprops
>>> dps = navis.make_dotprops(nl_um)
>>> # Run a NBLAST where only the top target from the pre-NBLAST is run
>>> # through a full NBLAST
>>> scores = navis.nblast_smart(dps[:3], dps[3:], t=1, criterion='N')
See Also

navis.nblast The conventional full NBLAST. navis.nblast_allbyall A more efficient way than nblast(query=x, target=x). navis.synblast A synapse-based variant of NBLAST.

Source code in navis/nbl/nblast_funcs.py
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
def nblast_smart(query: Union[Dotprops, NeuronList],
                 target: Optional[str] = None,
                 t: Union[int, float] = 90,
                 criterion: Union[Literal['percentile'],
                                  Literal['score'],
                                  Literal['N']] = 'percentile',
                 scores: Union[Literal['forward'],
                               Literal['mean'],
                               Literal['min'],
                               Literal['max']] = 'forward',
                 return_mask: bool = False,
                 normalized: bool = True,
                 use_alpha: bool = False,
                 smat: Optional[Union[str, pd.DataFrame]] = 'auto',
                 limit_dist: Optional[Union[Literal['auto'], int, float]] = 'auto',
                 approx_nn: bool = False,
                 precision: Union[int, str, np.dtype] = 64,
                 n_cores: int = os.cpu_count() // 2,
                 progress: bool = True,
                 smat_kwargs: Optional[Dict] = None) -> pd.DataFrame:
    """Smart(er) NBLAST query against target neurons.

    In contrast to [`navis.nblast`][], this function will first run a
    "pre-NBLAST" in which only 10% of the query dotprops' points are used.
    Using those initial scores we select, for each query, the highest scoring
    targets and run the full NBLAST only on those query-target pairs (see
    `t` and `criterion` for fine-tuning).

    Parameters
    ----------
    query :         Dotprops | NeuronList
                    Query neuron(s) to NBLAST against the targets. Neurons
                    should be in microns as NBLAST is optimized for that and
                    have similar sampling resolutions.
    target :        Dotprops | NeuronList, optional
                    Target neuron(s) to NBLAST against. Neurons should be in
                    microns as NBLAST is optimized for that and have
                    similar sampling resolutions. If not provided, will NBLAST
                    queries against themselves.
    t :             int | float
                    Determines for which pairs we will run a full NBLAST. See
                    `criterion` parameter for details.
    criterion :     "percentile" | "score" | "N"
                    Criterion for selecting query-target pairs for full NBLAST:
                      - "percentile" runs full NBLAST on the `t`-th percentile
                      - "score" runs full NBLAST on all scores above `t`
                      - "N" runs full NBLAST on top `t` targets
    return_mask :   bool
                    If True, will also return a boolean mask that shows which
                    scores are based on a full NBLAST and which ones only on
                    the pre-NBLAST.
    scores :        'forward' | 'mean' | 'min' | 'max'
                    Determines the final scores:
                      - 'forward' (default) returns query->target scores
                      - 'mean' returns the mean of query->target and
                        target->query scores
                      - 'min' returns the minimum between query->target and
                        target->query scores
                      - 'max' returns the maximum between query->target and
                        target->query scores
    use_alpha :     bool, optional
                    Emphasizes neurons' straight parts (backbone) over parts
                    that have lots of branches.
    normalized :    bool, optional
                    Whether to return normalized NBLAST scores.
    smat :          str | pd.DataFrame | Callable
                    Score matrix. If 'auto' (default), will use scoring matrices
                    from FCWB. Same behaviour as in R's nat.nblast
                    implementation.
                    If `smat='v1'` it uses the analytic formulation of the
                    NBLAST scoring from Kohl et. al (2013). You can adjust parameter
                    `sigma_scaling` (default to 10) using `smat_kwargs`.
                    If `smat=None` the scores will be
                    generated as the product of the distances and the dotproduct
                    of the vectors of nearest-neighbor pairs.
                    If `Callable` given, it passes distance and dot products as
                    first and second argument respectively.
    smat_kwargs :   dict, optional
                    Dictionary with additional parameters passed to scoring
                    functions. Defaults to an empty dict.
    limit_dist :    float | "auto" | None
                    Sets the max distance for the nearest neighbor search
                    (`distance_upper_bound`). Typically this should be the
                    highest distance considered by the scoring function. If
                    "auto", will extract that value from the scoring matrix.
                    While this can give a ~2X speed up, it will introduce slight
                    inaccuracies because we won't have a vector component for
                    points without a nearest neighbour within the distance
                    limits. The impact depends on the scoring function but with
                    the default FCWB `smat`, this is typically limited to the
                    third decimal (0.0086 +/- 0.0027 for an all-by-all of 1k
                    neurons).
    approx_nn :     bool
                    If True, will use approximate nearest neighbors. This gives
                    a >2X speed up but also produces only approximate scores.
                    Impact depends on the use case - testing highly recommended!
    precision :     int [16, 32, 64] | str [e.g. "float64"] | np.dtype
                    Precision for scores. Defaults to 64 bit (double) floats.
                    This is useful to reduce the memory footprint for very large
                    matrices. In real-world scenarios 32 bit (single)- and
                    depending on the purpose even 16 bit (half) - are typically
                    sufficient.
    n_cores :       int, optional
                    Max number of cores to use for nblasting. Default is
                    `os.cpu_count() // 2`. This should ideally be an even
                    number as that allows optimally splitting queries onto
                    individual processes.
    progress :      bool
                    Whether to show progress bars.

    Returns
    -------
    scores :        pandas.DataFrame
                    Matrix with NBLAST scores. Rows are query neurons, columns
                    are targets. The order is the same as in `query`/`target`
                    and the labels are based on the neurons' `.id` property.
    mask :          np.ndarray
                    Only if `return_mask=True`: a boolean mask with same shape
                    as `scores` that shows which scores are based on a full
                    NBLAST and which ones only on the pre-NBLAST.

    References
    ----------
    Costa M, Manton JD, Ostrovsky AD, Prohaska S, Jefferis GS. NBLAST: Rapid,
    Sensitive Comparison of Neuronal Structure and Construction of Neuron
    Family Databases. Neuron. 2016 Jul 20;91(2):293-311.
    doi: 10.1016/j.neuron.2016.06.012.

    Examples
    --------
    >>> import navis
    >>> nl = navis.example_neurons(n=5)
    >>> nl.units
    <Quantity([8 8 8 8 8], 'nanometer')>
    >>> # Convert to microns
    >>> nl_um = nl * (8 / 1000)
    >>> # Convert to dotprops
    >>> dps = navis.make_dotprops(nl_um)
    >>> # Run a NBLAST where only the top target from the pre-NBLAST is run
    >>> # through a full NBLAST
    >>> scores = navis.nblast_smart(dps[:3], dps[3:], t=1, criterion='N')

    See Also
    --------
    [`navis.nblast`][]
                The conventional full NBLAST.
    [`navis.nblast_allbyall`][]
                A more efficient way than `nblast(query=x, target=x)`.
    [`navis.synblast`][]
                A synapse-based variant of NBLAST.

    """
    utils.eval_param(criterion, name='criterion',
                     allowed_values=("percentile", "score", "N"))
    utils.eval_param(scores, name='scores', allowed_values=ALLOWED_SCORES)

    # Avoid a shared mutable default argument: materialize the kwargs dict
    # per call instead of at function definition time
    if smat_kwargs is None:
        smat_kwargs = dict()

    # We will make a couple tweaks for speed things up if this is
    # an all-by-all NBLAST
    aba = False
    pre_scores = scores
    if isinstance(target, type(None)):
        target = query
        aba = True
        # For all-by-all's we can compute only forward scores and
        # produce the mean later
        if scores == 'mean':
            pre_scores = 'forward'

    try:
        t = int(t)
    except (TypeError, ValueError) as e:
        # Only catch conversion failures - a bare BaseException would also
        # swallow KeyboardInterrupt/SystemExit
        raise TypeError(f'`t` must be (convertable to) integer - got "{type(t)}"') from e

    if criterion == 'percentile':
        if (t <= 0 or t >= 100):
            raise ValueError('Expected `t` to be integer between 0 and 100 for '
                             f'criterion "percentile", got {t}')
    elif criterion == 'N':
        if (t < 0 or t > len(target)):
            raise ValueError('`t` must be between 0 and the total number of '
                             f'targets ({len(target)}) for criterion "N", '
                             f'got {t}')

    # Make sure we're working on NeuronLists
    query_dps = NeuronList(query)
    target_dps = NeuronList(target)

    # Run NBLAST preflight checks
    nblast_preflight(query_dps, target_dps, n_cores,
                     req_unique_ids=True,
                     req_microns=isinstance(smat, str) and smat=='auto')

    # Make simplified dotprops
    query_dps_simp = query_dps.downsample(10, inplace=False)
    if not aba:
        target_dps_simp = target_dps.downsample(10, inplace=False)
    else:
        target_dps_simp = query_dps_simp

    # Find a partition that produces batches that each run in approximately
    # 10 seconds
    if n_cores and n_cores > 1:
        if progress:
            # If progress bar, we need to make smaller mini batches.
            # These mini jobs must not be too small - otherwise the overhead
            # from spawning and sending results between processes slows things
            # down dramatically. Hence we want to make sure that each job runs
            # for >10s. The run time depends on the system and how big the neurons
            # are. Here, we run a quick test and try to extrapolate from there
            n_rows, n_cols = find_batch_partition(query_dps_simp, target_dps_simp,
                                                  T=10 * JOB_SIZE_MULTIPLIER)
        else:
            # If no progress bar needed, we can just split neurons evenly across
            # all available cores
            n_rows, n_cols = find_optimal_partition(n_cores, query_dps_simp, target_dps_simp)
    else:
        n_rows = n_cols = 1

    # Calculate self-hits once for all neurons
    nb = NBlaster(use_alpha=use_alpha,
                  normalized=normalized,
                  smat=smat,
                  limit_dist=limit_dist,
                  dtype=precision,
                  approx_nn=approx_nn,
                  progress=progress,
                  smat_kwargs=smat_kwargs)
    query_self_hits = np.array([nb.calc_self_hit(n) for n in query_dps_simp])
    target_self_hits = np.array([nb.calc_self_hit(n) for n in target_dps_simp])

    # This makes sure we don't run into multiple layers of concurrency
    with set_omp_flag(limits=OMP_NUM_THREADS_LIMIT if n_cores and (n_cores > 1) else None):
        # Initialize a pool of workers
        # Note that we're forcing "spawn" instead of "fork" (default on linux)!
        # This is to reduce the memory footprint since "fork" appears to inherit all
        # variables (including all neurons) while "spawn" appears to get only
        # what's required to run the job?
        with ProcessPoolExecutor(max_workers=n_cores,
                                 mp_context=mp.get_context('spawn')) as pool:
            with config.tqdm(desc='Prep. pre-NBLAST',
                             total=n_rows * n_cols,
                             leave=False,
                             disable=not progress) as pbar:
                futures = {}
                nblasters = []
                for qix in np.array_split(np.arange(len(query_dps_simp)), n_rows):
                    for tix in np.array_split(np.arange(len(target_dps_simp)), n_cols):
                        # Initialize NBlaster
                        this = NBlaster(use_alpha=use_alpha,
                                        normalized=normalized,
                                        smat=smat,
                                        limit_dist=limit_dist,
                                        dtype=precision,
                                        approx_nn=approx_nn,
                                        progress=progress,
                                        smat_kwargs=smat_kwargs)

                        # Add queries and targets
                        for i, ix in enumerate(qix):
                            this.append(query_dps_simp[ix], query_self_hits[ix])
                        for i, ix in enumerate(tix):
                            this.append(target_dps_simp[ix], target_self_hits[ix])

                        # Keep track of indices of queries and targets
                        this.queries = np.arange(len(qix))
                        this.targets = np.arange(len(tix)) + len(qix)
                        this.queries_ix = qix  # this facilitates filling in the big matrix later
                        this.targets_ix = tix  # this facilitates filling in the big matrix later
                        this.pbar_position = len(nblasters) if not utils.is_jupyter() else None

                        nblasters.append(this)
                        pbar.update()

                        # If multiple cores requested, submit job to the pool right away
                        if n_cores and n_cores > 1 and (n_cols > 1 or n_rows > 1):
                            this.progress=False  # no progress bar for individual NBLASTERs
                            futures[pool.submit(this.multi_query_target,
                                                q_idx=this.queries,
                                                t_idx=this.targets,
                                                scores=pre_scores)] = this

            # Collect results
            if futures and len(futures) > 1:
                # Prepare empty score matrix
                scr = pd.DataFrame(np.empty((len(query_dps_simp),
                                             len(target_dps_simp)),
                                            dtype=this.dtype),
                                      index=query_dps_simp.id,
                                      columns=target_dps_simp.id)
                scr.index.name = 'query'
                scr.columns.name = 'target'

                # Collect results
                # We're dropping the "N / N_total" bit from the progress bar because
                # it's not helpful here
                fmt = ('{desc}: {percentage:3.0f}%|{bar}| [{elapsed}<{remaining}]')
                for f in config.tqdm(as_completed(futures),
                                     desc='Pre-NBLASTs',
                                     bar_format=fmt,
                                     total=len(futures),
                                     smoothing=0,
                                     disable=not progress,
                                     leave=False):
                    res = f.result()
                    this = futures[f]
                    # Fill-in big score matrix
                    scr.iloc[this.queries_ix, this.targets_ix] = res.values
            else:
                scr = this.multi_query_target(this.queries,
                                              this.targets,
                                              scores=scores)

    # If this is an all-by-all and we would have computed only forward scores
    # during pre-NBLAST
    if aba and scores == 'mean':
        scr = (scr + scr.T.values) / 2

    # Now select targets of interest for each query
    if criterion == 'percentile':
        # Generate a mask for the scores we want to recalculate from full dotprops
        sel = np.percentile(scr, q=t, axis=1)
        mask = scr >= sel.reshape(-1, 1)
    elif criterion == 'score':
        # Generate a mask for the scores we want to recalculate from full dotprops
        sel = np.full(scr.shape[0], fill_value=t)
        mask = scr >= sel.reshape(-1, 1)
    else:
        # Sort such that the top hit is to the left
        srt = np.argsort(scr.values, axis=1)[:, ::-1]
        # Generate the mask
        mask = pd.DataFrame(np.zeros(scr.shape, dtype=bool),
                            columns=scr.columns, index=scr.index)
        _ = np.arange(mask.shape[0])
        for N in range(t):
            mask.values[_, srt[:, N]] = True

    # Calculate self-hits for full neurons
    query_self_hits = np.array([nb.calc_self_hit(n) for n in query_dps])
    target_self_hits = np.array([nb.calc_self_hit(n) for n in target_dps])

    # This makes sure we don't run into multiple layers of concurrency
    with set_omp_flag(limits=OMP_NUM_THREADS_LIMIT if n_cores and (n_cores > 1) else None):
        # Initialize a pool of workers
        # Note that we're forcing "spawn" instead of "fork" (default on linux)!
        # This is to reduce the memory footprint since "fork" appears to inherit all
        # variables (including all neurons) while "spawn" appears to get only
        # what's required to run the job?
        with ProcessPoolExecutor(max_workers=n_cores,
                                 mp_context=mp.get_context('spawn')) as pool:
            with config.tqdm(desc='Prep. full NBLAST',
                             total=n_rows * n_cols,
                             leave=False,
                             disable=not progress) as pbar:
                futures = {}
                nblasters = []
                for qix in np.array_split(np.arange(len(query_dps)), n_rows):
                    for tix in np.array_split(np.arange(len(target_dps)), n_cols):
                        # Initialize NBlaster
                        this = NBlaster(use_alpha=use_alpha,
                                        normalized=normalized,
                                        smat=smat,
                                        limit_dist=limit_dist,
                                        dtype=precision,
                                        approx_nn=approx_nn,
                                        progress=progress,
                                        smat_kwargs=smat_kwargs)
                        # Add queries and targets
                        for i, ix in enumerate(qix):
                            this.append(query_dps[ix], query_self_hits[ix])
                        for i, ix in enumerate(tix):
                            this.append(target_dps[ix], target_self_hits[ix])

                        # Find the pairs to NBLAST in this part of the matrix
                        submask = mask.loc[query_dps[qix].id,
                                           target_dps[tix].id]
                        # `pairs` is an array of `[[query, target], [...]]` pairs
                        this.pairs = np.vstack(np.where(submask)).T

                        # Offset the target indices (column 1): targets were
                        # appended after the queries, so their positions in
                        # `this` start at len(qix)
                        this.pairs[:, 1] += len(qix)

                        # Track this NBLASTER's mask relative to the original big one
                        this.mask = np.zeros(mask.shape, dtype=bool)
                        this.mask[qix[0]:qix[-1]+1, tix[0]:tix[-1]+1] = submask

                        # Make sure position of progress bar checks out
                        this.pbar_position = len(nblasters) if not utils.is_jupyter() else None
                        this.desc = 'Full NBLAST'

                        nblasters.append(this)
                        pbar.update()

                        # If multiple cores requested, submit job to the pool right away
                        if n_cores and n_cores > 1 and (n_cols > 1 or n_rows > 1):
                            this.progress=False  # no progress bar for individual NBLASTERs
                            futures[pool.submit(this.pair_query_target,
                                                pairs=this.pairs,
                                                scores=scores)] = this

            # Collect results
            if futures and len(futures) > 1:
                # Collect results
                # We're dropping the "N / N_total" bit from the progress bar because
                # it's not helpful here
                fmt = ('{desc}: {percentage:3.0f}%|{bar}| [{elapsed}<{remaining}]')
                for f in config.tqdm(as_completed(futures),
                                     desc='NBLASTing',
                                     bar_format=fmt,
                                     total=len(futures),
                                     smoothing=0,
                                     disable=not progress,
                                     leave=False):
                    res = f.result()
                    this = futures[f]

                    # Fill-in big score matrix
                    scr[this.mask] = res
            else:
                scr[mask] = this.pair_query_target(this.pairs, scores=scores)

    if return_mask:
        return scr, mask

    return scr

Generate iGraph graph from edge list or adjacency.

Requires iGraph to be installed.

PARAMETER DESCRIPTION
x
            Connectivity information:

             1. List of edges (columns: 'source', 'target', 'weight')
             2. Adjacency matrix (pd.DataFrame, rows=sources,
                columns=targets)

TYPE: pandas.DataFrame | np.array

threshold
            Connections weaker than this will be excluded.

TYPE: float | int DEFAULT: None

RETURNS DESCRIPTION
igraph.Graph(directed=True)

iGraph representation of the network.

Source code in navis/graph/converters.py
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
def network2igraph(
    x: Union[pd.DataFrame, Iterable], threshold: Optional[float] = None
) -> "igraph.Graph":
    """Generate iGraph graph from edge list or adjacency.

    Requires iGraph to be installed.

    Parameters
    ----------
    x :                 pandas.DataFrame | np.array
                        Connectivity information:

                         1. List of edges (columns: 'source', 'target', 'weight')
                         2. Adjacency matrix (pd.DataFrame, rows=sources,
                            columns=targets)

    threshold :         float | int, optional
                        Connections weaker than this will be excluded.

    Returns
    -------
    igraph.Graph(directed=True)
                        iGraph representation of the network.

    """
    if igraph is None:
        raise ModuleNotFoundError("igraph must be installed to use this function.")

    if isinstance(x, pd.DataFrame):
        present = [c in x.columns for c in ["source", "target", "weight"]]
        if all(present):
            edges = x[["source", "target", "weight"]].values
        else:
            # Assume it's an adjacency matrix. Note that `melt` has no
            # `inplace` parameter (passing it raised a TypeError) and the
            # reset index column carries the index's name if it has one
            # (same handling as in `network2nx`).
            ix_name = x.index.name if x.index.name else "index"
            edges = (
                x.reset_index(inplace=False, drop=False)
                .melt(id_vars=ix_name)
                .values
            )
    elif isinstance(x, (list, np.ndarray)):
        edges = np.array(x)
    else:
        raise TypeError(f'Expected numpy array or pandas DataFrame, got "{type(x)}"')

    if edges.ndim != 2 or edges.shape[1] != 3:
        raise ValueError(
            "Edges must be (N, 3) array containing source, " "target, weight"
        )

    if not isinstance(threshold, (type(None), bool)):
        edges = edges[edges[:, 2] >= threshold]

    names = list(set(edges[:, 0]) | set(edges[:, 1]))

    # Map node name -> vertex index once instead of an O(V) list.index()
    # lookup per edge endpoint
    name_to_ix = {n: i for i, n in enumerate(names)}
    edges_by_index = [[name_to_ix[e[0]], name_to_ix[e[1]]] for e in edges]

    # Generate igraph and assign custom properties
    g = igraph.Graph(directed=True)
    g.add_vertices(len(names))
    g.add_edges(edges_by_index)

    g.vs["node_id"] = names
    # g.vs['neuron_name'] = g.vs['label'] = neuron_names
    g.es["weight"] = edges[:, 2]

    return g

Generate NetworkX graph from edge list or adjacency.

PARAMETER DESCRIPTION
x
            Connectivity information:

             1. List of edges (columns: 'source', 'target', 'weight')
             2. Adjacency matrix (pd.DataFrame, rows=sources,
                columns=targets)

TYPE: pandas.DataFrame

threshold
            Connections weaker than this will be excluded.

TYPE: float | int DEFAULT: None

group_by
            Provide a dictionary `{group_name: [skid1, skid2, ...]}`
            to collapse sets of nodes into groups.

TYPE: None | dict DEFAULT: None

RETURNS DESCRIPTION
networkx.DiGraph

NetworkX representation of the network.

Source code in navis/graph/converters.py
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
def network2nx(
    x: Union[pd.DataFrame, Iterable],
    threshold: Optional[float] = None,
    group_by: Union[dict, None] = None,
) -> nx.DiGraph:
    """Generate NetworkX graph from edge list or adjacency.

    Parameters
    ----------
    x :                 pandas.DataFrame
                        Connectivity information:

                         1. List of edges (columns: 'source', 'target', 'weight')
                         2. Adjacency matrix (pd.DataFrame, rows=sources,
                            columns=targets)

    threshold :         float | int, optional
                        Connections weaker than this will be excluded.
    group_by :          None | dict, optional
                        Provide a dictionary `{group_name: [skid1, skid2, ...]}`
                        to collapse sets of nodes into groups.

    Returns
    -------
    networkx.DiGraph
                        NetworkX representation of the network.

    """
    # Extract an (N, 3) [source, target, weight] edge array from the input
    if isinstance(x, pd.DataFrame):
        required = ["source", "target", "weight"]
        if all(col in x.columns for col in required):
            # Already an edge list
            edges = x[required].values
        else:
            # Treat as adjacency matrix: unpivot into long format. The reset
            # index column inherits the index's name (if any).
            ix_name = x.index.name or "index"
            long_form = x.reset_index(inplace=False, drop=False).melt(id_vars=ix_name)
            edges = long_form.values
    elif isinstance(x, (list, np.ndarray)):
        edges = np.array(x)
    else:
        raise TypeError(f'Expected numpy array or pandas DataFrame, got "{type(x)}"')

    if edges.ndim != 2 or edges.shape[1] != 3:
        raise ValueError(
            "Edges must be (N, 3) array containing source, " "target, weight"
        )

    # Apply weight cutoff (booleans are deliberately ignored, like None)
    skip_threshold = threshold is None or isinstance(threshold, bool)
    if not skip_threshold:
        edges = edges[edges[:, 2] >= threshold]

    # Build the directed graph from the weighted edge list
    g = nx.DiGraph()
    g.add_weighted_edges_from(edges)

    # Optionally collapse sets of nodes into named groups
    if group_by:
        for group_name, members in group_by.items():
            anchor = str(members[0])
            # Merge every other member into the first one ...
            for member in members[1:]:
                g = nx.contracted_nodes(g, anchor, str(member))
            # ... then rename the merged node to the group name
            g = nx.relabel_nodes(g, {anchor: str(group_name)})
            g.nodes[str(group_name)]["neuron_name"] = str(group_name)

    return g

Turn neuron into scipy KDTree.

PARAMETER DESCRIPTION
x
    A single neuron to turn into a KDTree.

TYPE: TreeNeuron | MeshNeuron | VoxelNeuron | Dotprops

tree_type
    Type of KDTree:
      1. `'c'` = `scipy.spatial.cKDTree` (faster)
      2. `'normal'` = `scipy.spatial.KDTree` (more functions)

TYPE: c | normal DEFAULT: 'c'

data
    Data used to generate tree. "auto" will pick the core data
    depending on neuron type: `nodes`, `vertices`, `voxels` and
    `points` for TreeNeuron, MeshNeuron, VoxelNeuron and Dotprops,
    respectively. Other values (e.g. "connectors" or "nodes") must
    map to a neuron property that is either (N, 3) array or
    DataFrame with x/y/z columns.

TYPE: 'auto' | str DEFAULT: 'auto'

**kwargs
    Keyword arguments passed at KDTree initialization.

DEFAULT: {}

RETURNS DESCRIPTION
`scipy.spatial.cKDTree` or `scipy.spatial.KDTree`
Source code in navis/graph/converters.py
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
def neuron2KDTree(
    x: "core.NeuronObject", tree_type: str = "c", data: str = "auto", **kwargs
) -> Union[scipy.spatial.cKDTree, scipy.spatial.KDTree]:
    """Turn neuron into scipy KDTree.

    Parameters
    ----------
    x :         TreeNeuron | MeshNeuron | VoxelNeuron | Dotprops
                A single neuron to turn into a KDTree.
    tree_type : 'c' | 'normal'
                Type of KDTree:
                  1. `'c'` = `scipy.spatial.cKDTree` (faster)
                  2. `'normal'` = `scipy.spatial.KDTree` (more functions)
    data :      'auto' | str
                Data used to generate tree. "auto" will pick the core data
                depending on neuron type: `nodes`, `vertices`, `voxels` and
                `points` for TreeNeuron, MeshNeuron, VoxelNeuron and Dotprops,
                respectively. Other values (e.g. "connectors" or "nodes") must
                map to a neuron property that is either (N, 3) array or
                DataFrame with x/y/z columns.
    **kwargs
                Keyword arguments passed at KDTree initialization.


    Returns
    -------
    `scipy.spatial.cKDTree` or `scipy.spatial.KDTree`

    """
    if tree_type not in ["c", "normal"]:
        raise ValueError('"tree_type" needs to be either "c" or "normal"')

    if isinstance(x, core.NeuronList):
        if len(x) == 1:
            x = x[0]
        else:
            raise ValueError("Need a single TreeNeuron")
    elif not isinstance(x, core.BaseNeuron):
        raise TypeError(f'Need Neuron, got "{type(x)}"')

    if data == "auto":
        if isinstance(x, core.TreeNeuron):
            data = "nodes"
        if isinstance(x, core.MeshNeuron):
            data = "vertices"
        if isinstance(x, core.VoxelNeuron):
            data = "voxels"
        if isinstance(x, core.Dotprops):
            data = "points"

    if not hasattr(x, data):
        raise ValueError(f"Neuron does not have a {data} property")

    data = getattr(x, data)

    if isinstance(data, pd.DataFrame):
        if not all(np.isin(["x", "y", "z"], data.columns)):
            raise ValueError(
                f'"{data}" DataFrame must contain "x", "y" and ' '"z" columns.'
            )
        data = data[["x", "y", "z"]].values

    if not isinstance(data, np.ndarray) or data.ndim != 2 or data.shape[1] != 3:
        raise ValueError(
            f'"{data}" must be DataFrame or (N, 3) array, got {type(data)}'
        )

    if tree_type == "c":
        return scipy.spatial.cKDTree(data=data, **kwargs)
    else:
        return scipy.spatial.KDTree(data=data, **kwargs)

Turn Tree-, Mesh- or VoxelNeuron(s) into an iGraph graph.

Requires iGraph to be installed.

PARAMETER DESCRIPTION
x
                Neuron(s) to convert.

TYPE: TreeNeuron | MeshNeuron | VoxelNeuron | NeuronList

simplify
                For TreeNeurons only: simplify the graph by keeping only roots,
                leaves and branching points. Preserves the original branch
                lengths (i.e. weights).

TYPE: bool DEFAULT: False

connectivity
                For VoxelNeurons only. Defines the connectedness:
                 - 6 = faces
                 - 18 = faces + edges
                 - 26 = faces + edges + vertices

TYPE: 6 | 18 | 26 DEFAULT: 18

raise_not_installed
                If False and igraph is not installed will silently
                return `None`.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
igraph.Graph

Representation of the neuron. Returns list of graphs if x is multiple neurons. Directed for TreeNeurons, undirected for MeshNeurons.

None

If igraph not installed.

Source code in navis/graph/converters.py
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
def neuron2igraph(
    x: "core.NeuronObject",
    simplify: bool = False,
    connectivity: int = 18,
    raise_not_installed: bool = True,
) -> "igraph.Graph":
    """Turn Tree-, Mesh- or VoxelNeuron(s) into an iGraph graph.

    Requires iGraph to be installed.

    Parameters
    ----------
    x :                     TreeNeuron | MeshNeuron | VoxelNeuron | NeuronList
                            Neuron(s) to convert.
    simplify :              bool
                            For TreeNeurons only: simplify the graph by keeping only roots,
                            leaves and branching points. Preserves the original branch
                            lengths (i.e. weights).
    connectivity :          6 | 18 | 26
                            For VoxelNeurons only. Defines the connectedness:
                             - 6 = faces
                             - 18 = faces + edges
                             - 26 = faces + edges + vertices
    raise_not_installed :   bool
                            If False and igraph is not installed will silently
                            return `None`.

    Returns
    -------
    igraph.Graph
                Representation of the neuron. Returns list of graphs
                if x is multiple neurons. Directed for TreeNeurons, undirected
                for MeshNeurons.
    None
                If igraph not installed.

    """
    # If iGraph is not installed return nothing
    if igraph is None:
        if not raise_not_installed:
            return None
        else:
            raise ModuleNotFoundError(
                "iGraph appears to not be installed (properly). "
                'Make sure "import igraph" works.'
            )

    if isinstance(x, core.NeuronList):
        return [
            neuron2igraph(x.loc[i], connectivity=connectivity)
            for i in range(x.shape[0])
        ]

    if isinstance(x, core.TreeNeuron):
        # Make sure we have correctly numbered indices
        nodes = x.nodes.reset_index(inplace=False, drop=True)

        # Generate list of vertices -> this order is retained
        vlist = nodes.node_id.values

        # Get list of edges as indices (needs to exclude root node)
        tn_index_with_parent = nodes.index.values[nodes.parent_id >= 0]
        parent_ids = nodes.parent_id.values[nodes.parent_id >= 0]
        nodes["temp_index"] = nodes.index  # add temporary index column
        try:
            parent_index = (
                nodes.set_index("node_id", inplace=False)
                .loc[parent_ids, "temp_index"]
                .values
            )
        except KeyError:
            miss = nodes[~nodes.parent_id.isin(nodes.node_id)].node_id.unique()
            raise KeyError(
                f"{len(miss)} nodes (e.g. {miss[0]}) in TreeNeuron "
                f"{x.id} connect to non-existent parent nodes."
            )
        except BaseException:
            raise

        # Generate list of edges based on index of vertices
        elist = np.vstack((tn_index_with_parent, parent_index)).T

        # iGraph < 0.8.0 does not like arrays as edge list
        if getattr(igraph, "__version_info__", (0, 0, 0))[1] < 8:
            elist = elist.tolist()

        # Generate graph and assign custom properties
        G = igraph.Graph(elist, n=len(vlist), directed=True)

        G.vs["node_id"] = G.vs["name"] = nodes.node_id.values
        G.vs["parent_id"] = nodes.parent_id.values

        # Generate weights by calculating edge lengths = distance between nodes
        tn_coords = nodes[["x", "y", "z"]].values[tn_index_with_parent, :]
        parent_coords = nodes[["x", "y", "z"]].values[parent_index.astype(int), :]

        w = np.sqrt(np.sum((tn_coords - parent_coords) ** 2, axis=1))
        G.es["weight"] = w

        if simplify:
            simplify_graph(G, inplace=True)
    elif isinstance(x, core.MeshNeuron):
        elist = x.trimesh.edges_unique
        G = igraph.Graph(elist, n=x.n_vertices, directed=False)
        G.es["weight"] = x.trimesh.edges_unique_length
    elif isinstance(x, core.VoxelNeuron):
        edges = _voxels2edges(x, connectivity=connectivity)
        G = igraph.Graph(edges, n=len(x.voxels), directed=False)
    else:
        raise ValueError(f'Unable to convert data of type "{type(x)}" to igraph.')

    return G

Turn Tree-, Mesh- or VoxelNeuron into an NetworkX graph.

PARAMETER DESCRIPTION
x
    Neuron(s) to convert. Uses simple 6-connectedness for voxels.

TYPE: TreeNeuron | MeshNeuron | VoxelNeuron | NeuronList

simplify
    For TreeNeurons only: simplify the graph by keeping only roots,
    leaves and branching points. Preserves the original
    branch lengths (i.e. weights).

TYPE: bool DEFAULT: False

epsilon
    For Dotprops only: maximum distance between two points to
    connect them. If `None`, will use 5x the average distance
    between points (i.e. `5 * x.sampling_resolution`).

TYPE: float DEFAULT: None

RETURNS DESCRIPTION
graph

NetworkX representation of the neuron. Returns list of graphs if x is multiple neurons. Graph is directed for TreeNeurons and undirected for Mesh- and VoxelNeurons. Graph is weighted for Tree- and MeshNeurons.

TYPE: networkx.Graph | networkx.DiGraph

Source code in navis/graph/converters.py
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
def neuron2nx(x: "core.NeuronObject", simplify=False, epsilon=None) -> nx.DiGraph:
    """Turn Tree-, Mesh- or VoxelNeuron into an NetworkX graph.

    Parameters
    ----------
    x :         TreeNeuron | MeshNeuron | VoxelNeuron | NeuronList
                Uses simple 6-connectedness for voxels.
    simplify :  bool
                For TreeNeurons only: simplify the graph by keeping only roots,
                leaves and branching points. Preserves the original
                branch lengths (i.e. weights).
    epsilon :   float, optional
                For Dotprops only: maximum distance between two points to
                connect them. If `None`, will use 5x the average distance
                between points (i.e. `5 * x.sampling_resolution`).

    Returns
    -------
    graph:      networkx.Graph | networkx.DiGraph
                NetworkX representation of the neuron. Returns list of graphs
                if x is multiple neurons. Graph is directed for TreeNeurons
                and undirected for Mesh- and VoxelNeurons. Graph is weighted
                for Tree- and MeshNeurons.

    """
    if isinstance(x, core.NeuronList):
        return [neuron2nx(x.loc[i]) for i in range(x.shape[0])]

    if isinstance(x, core.TreeNeuron):
        # Collect nodes
        nodes = x.nodes.set_index("node_id", inplace=False)
        # Collect edges
        edges = x.nodes[x.nodes.parent_id >= 0][["node_id", "parent_id"]].values
        # Collect weight
        weights = np.sqrt(
            np.sum(
                (
                    nodes.loc[edges[:, 0], ["x", "y", "z"]].values.astype(float)
                    - nodes.loc[edges[:, 1], ["x", "y", "z"]].values.astype(float)
                )
                ** 2,
                axis=1,
            )
        )
        # It's fastest to generate a list of (source, target, weight) tuples to pass to networkX
        elist = [(e[0], e[1], l) for e, l in zip(edges, weights)]
        # Create empty directed Graph
        G = nx.DiGraph()
        # Add nodes (in case we have disconnected nodes)
        G.add_nodes_from(x.nodes.node_id.values)
        # Add edges
        G.add_weighted_edges_from(elist)

        if simplify:
            simplify_graph(G, inplace=True)
    elif isinstance(x, core.MeshNeuron):
        G = nx.Graph()
        G.add_nodes_from(np.arange(x.n_vertices))
        edges = [
            (e[0], e[1], l)
            for e, l in zip(x.trimesh.edges_unique, x.trimesh.edges_unique_length)
        ]
        G.add_weighted_edges_from(edges)
    elif isinstance(x, core.Dotprops):
        if epsilon is None:
            epsilon = 5 * x.sampling_resolution

        # Generate KDTree
        tree = neuron2KDTree(x)

        # Generate graph and assign custom properties
        G = nx.Graph()
        G.add_nodes_from(np.arange(x.n_points))
        G.add_edges_from(tree.query_pairs(epsilon))
    elif isinstance(x, core.VoxelNeuron):
        # First we need to determine the 6-connecivity between voxels
        edges = []
        # Go over each axis
        for i in range(3):
            # Generate an offset of 1 voxel along given axis
            offset = np.zeros(3, dtype=int)
            offset[i] = 1
            # Combine real and offset voxels
            vox_off = x.voxels + offset
            # Find out which voxels overlap (i.e. count == 2 after offset)
            unique, cnt = np.unique(
                np.append(x.voxels, vox_off, axis=0), axis=0, return_counts=True
            )

            connected = unique[cnt > 1]
            for vox in connected:
                edges.append([tuple(vox), tuple(vox - offset)])
        G = nx.Graph()
        G.add_nodes_from([tuple(v) for v in x.voxels])
        G.add_edges_from(edges)
    else:
        raise ValueError(
            f'Unable to convert data of type "{type(x)}" to networkx graph.'
        )

    return G

Turn skeleton(s) into points + tangent vectors.

This will drop zero-length vectors (i.e when node and parent occupy the exact same position).

PARAMETER DESCRIPTION
x

TYPE: TreeNeuron | NeuronList

RETURNS DESCRIPTION
points

Midpoints for each child->parent node pair.

TYPE: (N, 3) array

vect

Normalized child->parent vectors.

TYPE: (N, 3) array

length

Distance between parent and child

TYPE: (N, ) array

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> t = navis.neuron2tangents(n)
Source code in navis/graph/converters.py
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
def neuron2tangents(x: "core.NeuronObject") -> "core.Dotprops":
    """Turn skeleton(s) into points + tangent vectors.

    This will drop zero-length vectors (i.e when node and parent occupy the
    exact same position).

    Parameters
    ----------
    x :         TreeNeuron | NeuronList

    Returns
    -------
    points :    (N, 3) array
                Midpoints for each child->parent node pair.
    vect :      (N, 3) array
                Normalized child-> parent vectors.
    length :    (N, ) array
                Distance between parent and child

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> t = navis.neuron2tangents(n)

    """
    if isinstance(x, core.NeuronList):
        return [neuron2tangents(n) for n in x]
    elif not isinstance(x, core.TreeNeuron):
        raise TypeError(f'Expected TreeNeuron/List, got "{type(x)}"')

    # Collect nodes
    nodes = x.nodes[x.nodes.parent_id >= 0]

    # Get child->parent vectors
    parent_locs = (
        x.nodes.set_index("node_id").loc[nodes.parent_id, ["x", "y", "z"]].values
    )
    child_locs = nodes[["x", "y", "z"]].values
    vect = child_locs - parent_locs

    # Get mid point
    points = child_locs + (parent_locs - child_locs) / 2

    # Get length
    length = np.sqrt(np.sum(vect**2, axis=1))

    # Drop zero length points
    points = points[length != 0]
    vect = vect[length != 0]
    length = length[length != 0]

    # Normalize vector
    vect = vect / np.linalg.norm(vect, axis=1).reshape(-1, 1)

    return points, vect, length

Create TreeNeuron from NetworkX Graph.

This function will try to generate a neuron-like tree structure from the Graph. Therefore the graph must not contain cycles!

All node attributes (e.g. x, y, z, radius) will be added to the neuron's .nodes table.

PARAMETER DESCRIPTION
G
        Graph to convert to neuron.

TYPE: networkx.Graph

root
        Node in graph to use as root for neuron. If not provided,
        will use first node in `g.nodes`. Ignored if graph
        consists of several disconnected components.

TYPE: str | int | list DEFAULT: None

break_cycles
        The input graph must not contain cycles. We can break them
        up at risk of disconnecting parts of the graph.

TYPE: bool DEFAULT: False

**kwargs
        Keyword arguments are passed to the construction of
        [`navis.TreeNeuron`][].

DEFAULT: {}

RETURNS DESCRIPTION
TreeNeuron

Examples:

>>> import navis
>>> import networkx as nx
>>> G = nx.balanced_tree(2, 3)
>>> tn = navis.nx2neuron(G)
>>> tn
type            navis.TreeNeuron
name                        None
n_nodes                       15
n_connectors                None
n_branches                     6
n_leafs                        8
cable_length                 0.0
soma                        None
units            1 dimensionless
dtype: object
Source code in navis/graph/converters.py
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
def nx2neuron(
    G: nx.Graph,
    root: Optional[Union[int, str]] = None,
    break_cycles: bool = False,
    **kwargs,
) -> pd.DataFrame:
    """Create TreeNeuron from NetworkX Graph.

    This function will try to generate a neuron-like tree structure from
    the Graph. Therefore the graph must not contain loops!

    All node attributes (e.g. `x`, `y`, `z`, `radius`) will be added to
    the neuron's `.nodes` table.

    Parameters
    ----------
    G :             networkx.Graph
                    Graph to convert to neuron.
    root :          str | int | list, optional
                    Node in graph to use as root for neuron. If not provided,
                    will use first node in `g.nodes`. Ignored if graph
                    consists of several disconnected components.
    break_cycles :  bool
                    The input graph must not contain cycles. We can break them
                    up at risk of disconnecting parts of the graph.
    **kwargs
                    Keyword arguments are passed to the construction of
                    [`navis.TreeNeuron`][].

    Returns
    -------
    TreeNeuron

    Examples
    --------
    >>> import navis
    >>> import networkx as nx
    >>> G = nx.balanced_tree(2, 3)
    >>> tn = navis.nx2neuron(G)
    >>> tn
    type            navis.TreeNeuron
    name                        None
    n_nodes                       15
    n_connectors                None
    n_branches                     6
    n_leafs                        8
    cable_length                 0.0
    soma                        None
    units            1 dimensionless
    dtype: object

    """
    # First some sanity checks
    if not isinstance(G, nx.Graph):
        raise TypeError(f'`G` must be NetworkX Graph, got "{type(G)}"')

    # We need an undirected Graph
    if isinstance(G, nx.DiGraph):
        G = G.to_undirected(as_view=True)

    if not nx.is_forest(G):
        if not break_cycles:
            raise TypeError(
                "Graph must be tree-like. You can try setting "
                "the `cut_cycles` parameter to True."
            )
        else:
            if break_cycles:
                while True:
                    try:
                        # Find cycle
                        cycle = nx.find_cycle(G)
                    except nx.exception.NetworkXNoCycle:
                        break
                    except BaseException:
                        raise

                    # Sort by degree
                    cycle = sorted(cycle, key=lambda x: G.degree[x[0]])

                    # Remove the edge with the lowest degree
                    G.remove_edge(cycle[0][0], cycle[0][1])

    # Ignore root if this is a forest
    if not nx.is_tree(G):
        root = None

    # This effectively makes sure that all edges point in the same direction
    lop = {}
    for c in nx.connected_components(G):
        sg = nx.subgraph(G, c)
        # Pick a random root if not explicitly provided
        if not root:
            r = list(sg.nodes)[0]
        elif root not in sg.nodes:
            raise ValueError(f'Node "{root}" not in graph.')
        else:
            r = root

        # Generate parent->child dictionary
        this_lop = nx.predecessor(sg, r)

        # Make sure no node has more than one parent
        if any((len(v) > 1 for v in this_lop.values())):
            raise ValueError(
                "Nodes with multiple parents found. Make sure graph is tree-like."
            )

        # Note that we assign -1 as root's parent
        lop.update({k: v[0] if v else -1 for k, v in this_lop.items()})

    # Generate node table
    tn_table = pd.DataFrame(index=list(G.nodes))
    tn_table.index = tn_table.index.set_names("node_id", inplace=False)

    # Add parents - use -1 for root's parent
    tn_table["parent_id"] = tn_table.index.map(lop)

    try:
        tn_table.index = tn_table.index.astype(int)
        tn_table["parent_id"] = tn_table.parent_id.astype(int)
    except (ValueError, TypeError):
        raise ValueError("Node IDs must be convertible to integers.")
    except BaseException:
        raise

    # Add additional generic attribute -> will skip node_id and parent_id
    # if they exist
    all_attr = set([k for n in G.nodes for k in G.nodes[n].keys()])

    # Remove some that we don't need
    all_attr -= set(["parent_id", "node_id"])
    # Add some that we want as columns even if they don't exist
    all_attr |= set(["x", "y", "z", "radius"])

    # For some we want to have set default values
    defaults = {"x": 0, "y": 0, "z": 0, "radius": -1}

    # Now map the attributes onto node table
    for at in all_attr:
        vals = nx.get_node_attributes(G, at)
        tn_table[at] = tn_table.index.map(vals).fillna(defaults.get(at, None))

    return core.TreeNeuron(tn_table.reset_index(drop=False, inplace=False), **kwargs)

Monkey patch cloud-volume to return navis neurons.

This function must be run before initializing the CloudVolume! Adds new methods/parameters to CloudVolume.mesh.get and CloudVolume.skeleton.get. See examples for details.

Examples:

>>> import navis
>>> import cloudvolume as cv
>>> # Monkey patch cloudvolume
>>> navis.patch_cloudvolume()
>>> # Connect to the Google segmentation of FAFB
>>> vol = cv.CloudVolume('precomputed://gs://fafb-ffn1-20200412/segmentation',
...                       use_https=True,
...                       progress=False)
>>> ids = [2137190164, 2268989790]
>>> # Fetch as navis neuron using newly added method or ...
>>> nl = vol.mesh.get_navis(ids, lod=3)
>>> # ... alternatively use `as_navis` keyword argument in original method
>>> nl = vol.mesh.get(ids, lod=3, as_navis=True)
>>> type(nl)
<class 'navis.core.neuronlist.NeuronList'>
>>> # The same works for skeletons
>>> skels = vol.skeleton.get_navis(ids)
Source code in navis/utils/cv.py
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
def patch_cloudvolume():
    """Monkey patch cloud-volume to return navis neurons.

    This function must be run before initializing the `CloudVolume`! Adds new
    methods/parameters to `CloudVolume.mesh.get` and `CloudVolume.skeleton.get`.
    See examples for details.

    Examples
    --------
    >>> import navis
    >>> import cloudvolume as cv
    >>> # Monkey patch cloudvolume
    >>> navis.patch_cloudvolume()
    >>> # Connect to the Google segmentation of FAFB
    >>> vol = cv.CloudVolume('precomputed://gs://fafb-ffn1-20200412/segmentation',
    ...                       use_https=True,
    ...                       progress=False)
    >>> ids = [2137190164, 2268989790]
    >>> # Fetch as navis neuron using newly added method or ...
    >>> nl = vol.mesh.get_navis(ids, lod=3)
    >>> # ... alternatively use `as_navis` keyword argument in original method
    >>> nl = vol.mesh.get(ids, lod=3, as_navis=True)
    >>> type(nl)
    <class 'navis.core.neuronlist.NeuronList'>
    >>> # The same works for skeletons
    >>> skels = vol.skeleton.get_navis(ids)

    """
    global cv
    try:
        import cloudvolume as cv
    except ModuleNotFoundError:
        cv = None

    # If CV not installed do nothing
    if not cv:
        logger.info("cloud-volume appears to not be installed?")
        return

    for ds in [
        cv.datasource.graphene.mesh.sharded.GrapheneShardedMeshSource,
        cv.datasource.graphene.mesh.unsharded.GrapheneUnshardedMeshSource,
        cv.datasource.precomputed.mesh.unsharded.UnshardedLegacyPrecomputedMeshSource,
        cv.datasource.precomputed.mesh.multilod.UnshardedMultiLevelPrecomputedMeshSource,
        cv.datasource.precomputed.mesh.multilod.ShardedMultiLevelPrecomputedMeshSource,
        cv.datasource.precomputed.skeleton.sharded.ShardedPrecomputedSkeletonSource,
        cv.datasource.precomputed.skeleton.unsharded.UnshardedPrecomputedSkeletonSource,
    ]:
        ds.get_navis = return_navis(ds.get, only_on_kwarg=False)
        ds.get = return_navis(ds.get, only_on_kwarg=True)

    logger.info("cloud-volume successfully patched!")

Calculate morphological similarity using persistence diagrams.

This works by: 1. Generate persistence points for each neuron. 2. Create a weighted Gaussian from persistence points and sample 100 evenly spaced points to create a feature vector. 3. Calculate Euclidean distance.

PARAMETER DESCRIPTION
q
    Queries and targets, respectively. If `t=None` will run
    queries against queries. Neurons should have the same units,
    ideally nanometers.

TYPE: core.NeuronObject

normalize
    If True, will normalize the vector for each neuron to be within
    0-1. Set to False if the total number of linear segments matters.

TYPE: bool DEFAULT: True

bw
    Bandwidth for Gaussian kernel: larger = smoother, smaller =
    more detailed.

TYPE: float DEFAULT: 0.2

augment
    Whether to augment the persistence vectors with other neuron
    properties (number of branch points & leafs and cable length).

TYPE: bool DEFAULT: True

**persistence_kwargs
    Keyword arguments are passed to [`navis.persistence_points`][].

DEFAULT: {}

RETURNS DESCRIPTION
distances

TYPE: pandas.DataFrame

See Also

navis.persistence_points The function to calculate the persistence points. navis.persistence_vectors Use this to get and inspect the actual vectors used here.

Source code in navis/morpho/persistence.py
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
def persistence_distances(q: 'core.NeuronObject',
                          t: Optional['core.NeuronObject'] = None,
                          augment: bool = True,
                          normalize: bool = True,
                          bw: float = .2,
                          **persistence_kwargs):
    """Calculate morphological similarity using persistence diagrams.

    The procedure is:
      1. Generate persistence points for each neuron.
      2. Build a weighted Gaussian from those points and sample 100 evenly
         spaced points from it to obtain a feature vector.
      3. Compute the Euclidean distance between feature vectors.

    Parameters
    ----------
    q/t :       NeuronList
                Queries and targets, respectively. If `t=None` will run
                queries against queries. Neurons should have the same units,
                ideally nanometers.
    normalize : bool
                If True, will normalize the vector for each neuron to be within
                0-1. Set to False if the total number of linear segments matter.
    bw :        float
                Bandwidth for Gaussian kernel: larger = smoother, smaller =
                more detailed.
    augment :   bool
                Whether to augment the persistence vectors with other neuron
                properties (number of branch points & leafs and cable length).
    **persistence_kwargs
                Keyword arguments are passed to [`navis.persistence_points`][].

    Returns
    -------
    distances : pandas.DataFrame

    See Also
    --------
    [`navis.persistence_points`][]
                The function to calculate the persistence points.
    [`navis.persistence_vectors`][]
                Use this to get and inspect the actual vectors used here.

    """
    q = core.NeuronList(q)
    if t:
        t = core.NeuronList(t)
        neurons = q + t
    else:
        neurons = q

    # Distances only make sense between at least two neurons
    if len(neurons) <= 1:
        raise ValueError('Need more than one neuron.')

    # Emit (at most one of each) warning for common pitfalls:
    # somas that are not the root, and fragmented neurons
    soma_warn = any(n.has_soma and n.soma not in n.root for n in neurons)
    root_warn = any(len(n.root) > 1 for n in neurons)

    if soma_warn:
        logger.warning('At least some neurons are not rooted to their soma.')
    if root_warn:
        logger.warning('At least some neurons are fragmented.')

    # Persistence points for every skeleton
    pers = persistence_points(neurons, **persistence_kwargs)

    # Turn persistence points into fixed-length feature vectors
    vectors, samples = persistence_vectors(pers, samples=100, bw=bw)

    # Normalizing the vectors produces more useful distances
    if normalize:
        # Per-neuron scaling to [0, 1]
        vectors = vectors / vectors.max(axis=1).reshape(-1, 1)
    else:
        # Single global scaling factor
        vectors = vectors / vectors.max()

    if augment:
        # Append extra per-neuron properties. Note this adds only 3 more
        # observations on top of the existing 100.
        extras = np.vstack((neurons.cable_length,
                            neurons.n_leafs,
                            neurons.n_branches)).T

        # Normalize each metric independently
        extras = extras / extras.max(axis=0)

        # If we wanted to weigh those observations equal to the 100 topology
        # observations:
        # extras *= 100 / extras.shape[1]

        vectors = np.append(vectors, extras, axis=1)

    if t:
        # Queries were stacked first, targets second
        q_vec = vectors[:len(q)]
        t_vec = vectors[len(q):]
        return pd.DataFrame(cdist(q_vec, t_vec), index=q.id, columns=t.id)

    return pd.DataFrame(squareform(pdist(vectors)), index=q.id, columns=q.id)

Calculate points for a persistence diagram.

Based on Li et al., PLoS One (2017). Briefly, this cuts the neuron into linear segments, the start (birth) and end (death) of which are assigned a value (see descriptor parameter). In combination, these points represent a fingerprint for the topology of the neuron.

PARAMETER DESCRIPTION
x
    Neuron(s) to calculate persistence points for. For MeshNeurons,
    we will use the skeleton produced by/associated with its
    `.skeleton` property.

TYPE: TreeNeuron | MeshNeuron | NeuronList

descriptor
    Descriptor function used to calculate birth and death "time" of
    the segments:
      - `root_dist` distance from root

TYPE: root_dist DEFAULT: 'root_dist'

remove_cbf
    In unipolar neurons (e.g. in insects) the soma is separate and
    connects to the neuron's backbone via "cell body fiber" (CBF).
    The length of the CBF can vary quite a bit. Discounting the
    CBF can make the persistence points more stable.
    If `remove_cbf=True` and the neuron has a soma (!) we ignore
    the CBF for the birth & death times. Neurons will also
    automatically be rooted onto their soma!

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
pandas.DataFrame

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> n.reroot(n.soma, inplace=True)
>>> p = navis.persistence_points(n)
References

Li Y, Wang D, Ascoli GA, Mitra P, Wang Y (2017) Metrics for comparing neuronal tree shapes based on persistent homology. PLOS ONE 12(8): e0182184. https://doi.org/10.1371/journal.pone.0182184

Source code in navis/morpho/persistence.py
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
@utils.map_neuronlist(desc='Calc. persistence', allow_parallel=True)
def persistence_points(x: 'core.NeuronObject',
                       descriptor: Union[
                                         Literal['root_dist']
                                         ] = 'root_dist',
                       remove_cbf: bool = False
                       ) -> pd.DataFrame:
    """Calculate points for a persistence diagram.

    Based on Li et al., PLoS One (2017). Briefly: the neuron is cut into
    linear segments and the start (birth) and end (death) of each segment
    is assigned a value (see `descriptor` parameter). Together, these points
    form a fingerprint for the neuron's topology.

    Parameters
    ----------
    x :         TreeNeuron | MeshNeuron | NeuronList
                Neuron(s) to calculate persistence points for. For MeshNeurons,
                we will use the skeleton produced by/associated with its
                `.skeleton` property.
    descriptor : 'root_dist'
                Descriptor function used to calculate birth and death "time" of
                the segments:
                  - `root_dist` distance from root
    remove_cbf : bool
                In unipolar neurons (e.g. in insects) the soma is separate and
                connects to the neuron's backbone via "cell body fiber" (CBF).
                The length of the CBF can vary quite a bit. Discounting the
                CBF can make the persistence points more stable.
                If `remove_cbf=True` and the neuron has a soma (!) we ignore
                the CBF for the birth & death times. Neurons will also
                automatically be rooted onto their soma!

    Returns
    -------
    pandas.DataFrame

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> n.reroot(n.soma, inplace=True)
    >>> p = navis.persistence_points(n)

    References
    ----------
    Li Y, Wang D, Ascoli GA, Mitra P, Wang Y (2017) Metrics for comparing
    neuronal tree shapes based on persistent homology.
    PLOS ONE 12(8): e0182184. https://doi.org/10.1371/journal.pone.0182184

    """
    if descriptor != 'root_dist':
        raise ValueError(f'Unknown "descriptor" parameter: {descriptor}')

    # Meshes are processed via their skeleton
    if isinstance(x, core.MeshNeuron):
        x = x.skeleton
    elif not isinstance(x, core.TreeNeuron):
        raise ValueError(f'Expected TreeNeuron(s), got "{type(x)}"')

    # Note: this reroots the neuron in place (documented behavior)
    discount_cbf = remove_cbf and x.has_soma
    if discount_cbf:
        x.reroot(x.soma, inplace=True)
        # The main branch point marks where the CBF ends
        mbp = graph.find_main_branchpoint(x)

    # Break the neuron into linear segments
    segments = graph._generate_segments(x, weight='weight')

    # Segments run child -> parent, i.e. distal ("end") node first and
    # proximal ("start") node last
    seg_ends = np.array([seg[0] for seg in segments])
    seg_starts = np.array([seg[-1] for seg in segments])

    if descriptor == 'root_dist':
        # Geodesic distance from every node to the root
        dist = graph.dist_to_root(x, weight='weight')
        death = np.array([dist[node] for node in seg_ends])
        birth = np.array([dist[node] for node in seg_starts])

        if discount_cbf:
            # Discount the cell body fiber's length from all times
            cbf_length = graph.dist_between(x, mbp, x.soma)
            birth = birth - cbf_length
            death = death - cbf_length

            # Drop segments lying entirely on the CBF
            keep = death >= 0
            seg_starts = seg_starts[keep]
            seg_ends = seg_ends[keep]
            birth = birth[keep]
            death = death[keep]

            # Segments straddling the main branch point get clipped to 0
            birth = np.clip(birth, 0, None)

    # Compile into a DataFrame
    return pd.DataFrame({'start_node': seg_starts,
                         'end_node': seg_ends,
                         'birth': birth,
                         'death': death})

Produce vectors from persistence points.

Works by creating a Gaussian and sampling samples evenly spaced points across it.

PARAMETER DESCRIPTION
x
    The persistence points (see [`navis.persistence_points`][]).
    For vectors for multiple neurons, provide either a list of
    persistence points DataFrames or a single DataFrame with a
    "neuron_id" column.

TYPE: navis.NeuronList | pd.DataFrame | list thereof

threshold
    If provided, segments shorter (death - birth) than this will not
    be used to create the Gaussian.

TYPE: float DEFAULT: None

samples
    Number of points sampled across the Gaussian.

TYPE: int DEFAULT: 100

bw
    Bandwidth for Gaussian kernel: larger = smoother, smaller =
    more detailed.

TYPE: float DEFAULT: 0.2

center
    Whether to center the individual curves on their highest value.
    This is done by "rolling" the axis (using `np.roll`) which
    means that elements that roll beyond the last position are
    re-introduced at the first.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
vectors

TYPE: np.ndarray

samples

Sampled distances. If center=True the absolute values don't make much sense anymore.

TYPE: np.ndarray

References

Li Y, Wang D, Ascoli GA, Mitra P, Wang Y (2017) Metrics for comparing neuronal tree shapes based on persistent homology. PLOS ONE 12(8): e0182184. https://doi.org/10.1371/journal.pone.0182184

See Also

navis.persistence_points The function to calculate the persistence points. navis.persistence_distances Get distances based on (augmented) persistence vectors.

Source code in navis/morpho/persistence.py
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
def persistence_vectors(x,
                        threshold: Optional[float] = None,
                        samples: int = 100,
                        bw: float = .2,
                        center: bool = False,
                        **kwargs):
    """Produce vectors from persistence points.

    Works by creating a Gaussian and sampling `samples` evenly spaced
    points across it.

    Parameters
    ----------
    x :         navis.NeuronList | pd.DataFrame | list thereof
                The persistence points (see [`navis.persistence_points`][]).
                For vectors for multiple neurons, provide a list of
                persistence points DataFrames.
    threshold : float, optional
                If provided, segments shorter (death - birth) than this will not
                be used to create the Gaussian.
    samples :   int
                Number of points sampled across the Gaussian.
    bw :        float
                Bandwidth for Gaussian kernel: larger = smoother, smaller =
                more detailed.
    center :    bool
                Whether to center the individual curves on their highest value.
                This is done by "rolling" the axis (using `np.roll`) which
                means that elements that roll beyond the last position are
                re-introduced at the first.

    Returns
    -------
    vectors :   np.ndarray
    samples :   np.ndarray
                Sampled distances. If `center=True` the absolute values don't
                make much sense anymore.

    References
    ----------
    Li Y, Wang D, Ascoli GA, Mitra P, Wang Y (2017) Metrics for comparing
    neuronal tree shapes based on persistent homology.
    PLOS ONE 12(8): e0182184. https://doi.org/10.1371/journal.pone.0182184

    See Also
    --------
    [`navis.persistence_points`][]
                The function to calculate the persistence points.
    [`navis.persistence_distances`][]
                Get distances based on (augmented) persistence vectors.

    """
    # A single neuron is wrapped so the NeuronList branch handles it
    if isinstance(x, core.BaseNeuron):
        x = core.NeuronList(x)

    if isinstance(x, pd.DataFrame):
        # NOTE(review): a single DataFrame is treated as ONE neuron here;
        # there is no grouping by a "neuron_id" column - confirm intended.
        pers = [x]
    elif isinstance(x, core.NeuronList):
        # Compute persistence points on the fly
        pers = [persistence_points(n, **kwargs) for n in x]
    elif isinstance(x, list):
        if not all(isinstance(df, pd.DataFrame) for df in x):
            raise ValueError('Expected lists to contain only DataFrames')
        pers = x
    else:
        # Fixed: report the *type* of the offending input (was interpolating
        # the value itself) and repair the garbled message
        raise TypeError('Unable to extract persistence vectors from data '
                        f'of type "{type(x)}"')

    # All neurons are sampled over the same axis: [0, 1.05 * max birth]
    max_pdist = max(p.birth.max() for p in pers)
    samples = np.linspace(0, max_pdist * 1.05, samples)

    # Build one persistence vector per neuron
    vectors = []
    for p in pers:
        # Weight each point by its segment's lifetime (death - birth)
        weights = p.death.values - p.birth.values
        if threshold:
            # Discard short-lived segments
            p = p.loc[weights >= threshold]
            weights = weights[weights >= threshold]

        # For each persistence diagram generate a weighted Gaussian kernel
        kernel = gaussian_kde(p.birth.values,
                              weights=weights,
                              bw_method=bw)

        # And sample probabilities at the sample points
        vectors.append(kernel(samples))
    vectors = np.array(vectors)

    if center:
        # Shift each vector such that the highest value lies in the center.
        # Note that we are "rolling" the array which means that elements that
        # drop off to the right are reintroduced on the left
        for i in range(len(vectors)):
            vectors[i] = np.roll(vectors[i],
                                 -np.argmax(vectors[i]) + len(samples) // 2)

    return vectors, samples

Plot neuron topology in 1D according to Cuntz et al. (2010).

This function breaks a neuron into segments between branch points. See Cuntz et al., PLoS Computational Biology (2010) for detailed explanation. For very complex neurons, this neuron "barcode" can get fairly complicated - make sure to zoom in.

PARAMETER DESCRIPTION
x
    Neuron(s) to plot.

TYPE: TreeNeuron | NeuronList

ax

TYPE: matplotlib.ax DEFAULT: None

color
    Color. If dict must map neuron UUID to color.

TYPE: tuple | dict DEFAULT: None

palette
    Name of a matplotlib or seaborn palette. If `color` is
    not specified will pick colors from this palette.

TYPE: str | array | list of arrays DEFAULT: None

color_by
    Can be the name of a column in the node table of
    `TreeNeurons` or an array of (numerical or categorical)
    values for each node. Numerical values will be normalized.
    You can control the normalization by passing a `vmin`
    and/or `vmax` parameter.

TYPE: str | array | list of arrays DEFAULT: = None

**kwargs
    Will be passed to `matplotlib.patches.Rectangle`.

DEFAULT: {}

RETURNS DESCRIPTION
matplotlib.ax

Examples:

>>> import navis
>>> import matplotlib.pyplot as plt
>>> n = navis.example_neurons(2)
>>> ax = navis.plot1d(n)
>>> plt.show()

Close figures (only relevant for doctests)

>>> plt.close('all')

See the flat plotting tutorial for more examples.

Source code in navis/plotting/d.py
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
def plot1d(x: 'core.NeuronObject',
           ax: Optional[mpl.axes.Axes] = None,
           color: Optional[Union['str',
                                 colortype,
                                 Dict[Any, colortype],
                                 ]
                           ] = None,
           color_by: Optional[Union[str, np.ndarray]] = None,
           palette: Optional[str] = None,
           **kwargs) -> mpl.axes.Axes:
    """Plot neuron topology in 1D according to Cuntz et al. (2010).

    This function breaks a neuron into segments between branch points.
    See Cuntz et al., PLoS Computational Biology (2010) for detailed
    explanation. For very complex neurons, this neuron "barcode" can get
    fairly complicated - make sure to zoom in.

    Parameters
    ----------
    x :         TreeNeuron | NeuronList
                Neuron(s) to plot.
    ax :        matplotlib.ax, optional
    color :     tuple | dict
                Color. If dict must map neuron UUID to color.
    palette :   str | array | list of arrays, default=None
                Name of a matplotlib or seaborn palette. If `color` is
                not specified will pick colors from this palette.
    color_by :  str | array | list of arrays, default = None
                Can be the name of a column in the node table of
                `TreeNeurons` or an array of (numerical or categorical)
                values for each node. Numerical values will be normalized.
                You can control the normalization by passing a `vmin`
                and/or `vmax` parameter.
    **kwargs
                Will be passed to `matplotlib.patches.Rectangle`.

    Returns
    -------
    matplotlib.ax

    Examples
    --------

    >>> import navis
    >>> import matplotlib.pyplot as plt
    >>> n = navis.example_neurons(2)
    >>> ax = navis.plot1d(n)
    >>> plt.show() # doctest: +SKIP

    Close figures (only relevant for doctests)

    >>> plt.close('all')

    See the [`flat plotting tutorial`](generated/gallery/1_plotting/tutorial_plotting_02_1d/)
    for more examples.

    """
    if isinstance(x, core.NeuronList):
        if x.is_mixed:
            raise TypeError('NeuronList contains MeshNeuron(s). Unable to plot1d.')
    elif isinstance(x, core.TreeNeuron):
        x = core.NeuronList(x)
    else:
        # Fixed grammar of the error message ("Unable plot1d data ...")
        raise TypeError(f'Unable to plot1d data of type "{type(x)}"')

    # Fall back to a default green if neither colors nor a palette given
    if isinstance(color, type(None)) and isinstance(palette, type(None)):
        color = (0.56, 0.86, 0.34)

    color, _ = prepare_colormap(color,
                                neurons=x,
                                palette=palette,
                                color_range=1)

    if not isinstance(color_by, type(None)):
        if not palette:
            raise ValueError('Must provide `palette` (e.g. "viridis") argument '
                             'if using `color_by`')

        # Per-node colors used to paint individual segments
        vertex_map = vertex_colors(x,
                                   by=color_by,
                                   use_alpha=False,
                                   palette=palette,
                                   vmin=kwargs.pop('vmin', None),
                                   vmax=kwargs.pop('vmax', None),
                                   na=kwargs.pop('na', 'raise'),
                                   color_range=1)
    else:
        vertex_map = None

    if not ax:
        # One row of height per neuron
        fig, ax = plt.subplots(figsize=(8, len(x)))
        # Make background transparent (nicer for dark themes)
        fig.patch.set_alpha(0)
        ax.patch.set_alpha(0)

    # Add some default parameters for the plotting to kwargs
    DEFAULTS = {'lw': kwargs.pop('lw', kwargs.pop('linewidth', .2)),
                'ec': kwargs.pop('ec', kwargs.pop('edgecolor', (1, 1, 1))),
    }
    kwargs.update(DEFAULTS)

    max_x = []
    for ix, n in enumerate(config.tqdm(x, desc='Processing',
                                       disable=config.pbar_hide,
                                       leave=config.pbar_leave)):
        if isinstance(color, dict):
            this_c = color[n.id]
        else:
            this_c = color[ix]

        # Get topological sort (root -> terminals)
        topology = graph.node_label_sorting(n, weighted=True)

        # Get terminals and branch points
        roots = n.nodes[n.nodes.type == 'root'].node_id.values
        bp = n.nodes[n.nodes.type == 'branch'].node_id.values
        term = n.nodes[n.nodes.type == 'end'].node_id.values
        breaks = np.concatenate((bp, term, roots))

        # Order this neuron's segments by topology (remember that segments are
        # sorted child -> parent, i.e. distal to proximal)
        topo_ix = dict(zip(topology, range(len(topology))))
        segs = sorted(n.small_segments, key=lambda s: topo_ix[s[0]])

        # Keep only the first and the last node in each segment.
        # Fixed: was `[s[0], s[1]]` which kept the first and SECOND node and
        # hence measured only a single edge for segments longer than 2 nodes.
        segs = [[s[0], s[-1]] for s in segs]

        # Now get distances for each segment
        if 'nodes_geodesic_distance_matrix' in n.__dict__:
            # If available, use existing geodesic distance matrix
            dist_mat = n.nodes_geodesic_distance_matrix
        else:
            # If not, compute matrix for subset of nodes
            dist_mat = graph.geodesic_matrix(n, from_=breaks, directed=False)

        # Get length of each segment
        lengths = np.array([dist_mat.loc[s[0], s[1]] for s in segs])
        max_x.append(sum(lengths))

        # Plot each segment as a rectangle, laid out end-to-end
        curr_dist = 0
        id2ix = dict(zip(n.nodes.node_id.values, range(n.n_nodes)))
        for k, le in enumerate(lengths):
            if isinstance(vertex_map, type(None)):
                c = this_c

                # Darken terminal segments for contrast
                if segs[k][0] in term:
                    c = tuple(np.array(c) / 2)
            else:
                # Get this segment's vertex colors
                node_ix = [id2ix[i] for i in segs[k]]
                vc = vertex_map[ix][node_ix]
                # Color by the proximal node
                c = vc[-1]

            p = mpatches.Rectangle((curr_dist, ix), le, 1, fc=c, **kwargs)
            ax.add_patch(p)
            curr_dist += le
    ax.set_xlim(0, max(max_x))
    ax.set_ylim(0, len(x))

    # One y tick (with the neuron's name) per row
    ax.set_yticks(np.array(range(0, len(x))) + .5)
    ax.set_yticklabels(x.name)

    ax.set_xlabel('distance')

    ax.set_frame_on(False)

    return ax

Generate 2D plots of neurons and neuropils.

The main advantage of this is that you can save plot as vector graphics.

Note

This function uses matplotlib which "fakes" 3D as it has only very limited control over layering objects in 3D. Therefore neurites are not necessarily plotted in the right Z order. This becomes especially troublesome when plotting a complex scene with lots of neurons criss-crossing. See the method parameter for details.

PARAMETER DESCRIPTION
x
            Objects to plot:
             - multiple objects can be passed as list (see examples)
             - numpy array of shape (N, 3) is intepreted as points for
               scatter plots

TYPE: TreeNeuron | MeshNeuron | NeuronList | Volume | Dotprops | np.ndarray

Object parameters

soma : bool | dict, default=True

                Plot soma if one exists. Size of the soma is determined
                by the neuron's `.soma_radius` property which defaults
                to the "radius" column for `TreeNeurons`. You can also
                pass `soma` as a dictionary to customize the appearance
                of the soma - for example `soma={"color": "red", "lw": 2, "ec": 1}`.

radius : bool | "auto", default=False

                If "auto" will plot neurites of `TreeNeurons` with radius
                if they have radii. If True, will try plotting neurites of
                `TreeNeurons` with radius regardless. The radius can be
                scaled by `linewidth`. Note that this will increase rendering
                time.

linewidth : int | float, default=.5

                Width of neurites. Also accepts alias `lw`.

linestyle : str, default='-'

                Line style of neurites. Also accepts alias `ls`.

color : None | str | tuple | list | dict, default=None

                Use single str (e.g. `'red'`) or `(r, g, b)` tuple
                to give all neurons the same color. Use `list` of
                colors to assign colors: `['red', (1, 0, 1), ...].
                Use `dict` to map colors to neuron IDs:
                `{id: (r, g, b), ...}`.

palette : str | array | list of arrays, default=None

                Name of a matplotlib or seaborn palette. If `color` is
                not specified will pick colors from this palette.

color_by : str | array | list of arrays, default = None

                Color neurons by a property. Can be:
                  - a list/array of labels, one per each neuron
                  - a neuron property (str)
                  - a column name in the node table of `TreeNeurons`
                  - a list/array of values for each node
                Numerical values will be normalized. You can control
                the normalization by passing a `vmin` and/or `vmax` parameter.

shade_by : str | array | list of arrays, default=None

                Similar to `color_by` but will affect only the alpha
                channel of the color. If `shade_by='strahler'` will
                compute Strahler order if not already part of the node
                table (TreeNeurons only). Numerical values will be
                normalized. You can control the normalization by passing
                a `smin` and/or `smax` parameter.

alpha : float [0-1], default=1

                Alpha value for neurons. Overriden if alpha is provided
                as fourth value in `color` (rgb*a*). You can override
                alpha value for connectors by using `cn_alpha`.

mesh_shade : bool, default=False

                Only relevant for meshes (e.g. `MeshNeurons`) and
                `TreeNeurons` with radius, and when method is 3d or
                3d complex. Whether to shade the object which will give it
                a 3D look.

depth_coloring : bool, default=False

                If True, will use neuron color to encode depth (Z).
                Overrides `color` argument. Does not work with
                `method = '3d_complex'`.

depth_scale : bool, default=True

                If True and `depth_coloring=True` will plot a scale.

connectors : bool | "presynapses" | "postsynapses" | str | list, default=True

                Plot connectors. This can either be `True` (plot all
                connectors), `"presynapses"` (only presynaptic connectors)
                or `"postsynapses"` (only postsynaptic connectors). If
                a string or a list is provided, it will be used to filter the
                `type` column in the connectors table.

connectors_only : boolean, default=False

                Plot only connectors, not the neuron.

cn_size : int | float, default = 1

                Size of connectors.

cn_layout : dict, default={}

                Defines default settings (color, style) for connectors.
                See `navis.config.default_connector_colors` for the
                default layout.

cn_colors : str | tuple | dict | "neuron"

                Overrides the default connector (e.g. synpase) colors:
                    - single color as str (e.g. `'red'`) or rgb tuple
                    (e.g. `(1, 0, 0)`)
                    - dict mapping the connectors tables `type` column to
                    a color (e.g. `{"pre": (1, 0, 0)}`)
                    - with "neuron", connectors will receive the same color
                    as their neuron

cn_mesh_colors : bool, default=False

                If True, will use the neuron's color for its connectors.

scatter_kws : dict, default={}

                Parameters to be used when plotting points. Accepted
                keywords are: `size` and `color`.

volume_outlines : bool | "both", default=False

                If True will plot volume outline with no fill. Only
                works with `method="2d"`. Requires the `shapely` package.

dps_scale_vec : float

                Scale vector for dotprops.
Figure parameters

method : '2d' | '3d' (default) | '3d_complex'

                Method used to generate plot. Comes in three flavours:
                 1. `2d` uses normal matplotlib. Neurons are plotted on
                    top of one another in the order their are passed to
                    the function. Use the `view` parameter (below) to
                    set the view (default = xy).
                 2. `3d` uses matplotlib's 3D axis. Here, matplotlib
                    decides the depth order (zorder) of plotting. Can
                    change perspective either interactively or by code
                    (see examples).
                 3. `3d_complex` same as 3d but each neuron segment is
                    added individually. This allows for more complex
                    zorders to be rendered correctly. Slows down
                    rendering!

view : tuple, default = ("x", "y")

                Sets view for `method='2d'`. Can be any combination of
                "x", "y", "z" and their negations. For example, to plot
                from the top, use `view=('x', '-y')`. For 3D `methods`,
                this will set the initial view which can be changed by
                adjusting `ax.azim`, `ax.elev` and `ax.roll` (see examples).

non_view_axes3d : "show" | "hide" (default) | "fade"

                Only relevant for methods '3d' and '3d_complex': what to
                do with the axis that are not in the view. If 'hide', will
                hide them. If 'show', will show them. If 'fade', will
                make them semi-transparent. This is relevant if you
                intend to customize the view after plotting.

autoscale : bool, default=True

                If True, will scale the axes to fit the data.

scalebar : int | float | str | pint.Quantity | dict, default=False

                Adds a scale bar. Provide integer, float or str to set
                size of scalebar. Int|float are assumed to be in same
                units as data. You can specify units in as string:
                e.g. "1 um". For methods '3d' and '3d_complex', this
                will create an axis object.

                You can customize the scalebar by passing a dictionary.
                For example:

                `{size: "1 micron", color: 'k', lw: 3, alpha: 0.9}`

ax : matplotlib.Axes, default=None

                Pass an axis object if you want to plot on an existing
                canvas. Must match `method` - i.e. 2D or 3D axis.

figsize : tuple, default=None

                Size of figure. Ignored if `ax` is provided.

rasterize : bool, default=False

                Neurons produce rather complex vector graphics which can
                lead to large files when saving to SVG, PDF or PS. Use
                this parameter to rasterize neurons and meshes/volumes
                (but not axes or labels) to reduce file size.

orthogonal : bool, default=True

                Whether to use orthogonal or perspective view for
                methods '3d' and '3d_complex'.

group_neurons : bool, default=False

                If True, neurons will be grouped. Works with SVG export
                but not PDF. Does NOT work with `method='3d_complex'`.
RETURNS DESCRIPTION
fig

TYPE: matplotlib.Figure

ax

TYPE: matplotlib.Axes

Examples:

>>> import navis
>>> import matplotlib.pyplot as plt

Plot list of neurons as simple 2d:

>>> nl = navis.example_neurons()
>>> fig, ax = navis.plot2d(nl, method='2d', view=('x', '-z'))
>>> plt.show()

Add a volume:

>>> vol = navis.example_volume('LH')
>>> fig, ax = navis.plot2d([nl, vol], method='2d', view=('x', '-z'))
>>> plt.show()

Change neuron colors:

>>> fig, ax = navis.plot2d(
...              nl,
...              method='2d',
...              view=('x', '-z'),
...              color=['r', 'g', 'b', 'm', 'c', 'y']
...          )
>>> plt.show()

Plot in "fake" 3D:

>>> fig, ax = navis.plot2d(nl, method='3d', view=('x', '-z'))
>>> plt.show()
>>> # In an interactive window you can drag the plot to rotate

Plot in "fake" 3D and change perspective:

>>> fig, ax = navis.plot2d(nl, method='3d', view=('x', '-z'))
>>> # Change view
>>> ax.elev = -20
>>> ax.azim = 45
>>> ax.roll = 180
>>> plt.show()

Plot using depth-coloring:

>>> fig, ax = navis.plot2d(nl, method='3d', depth_coloring=True, view=('x', '-z'))
>>> plt.show()

See the plotting intro for more examples.

See Also

navis.plot3d : Use this if you want interactive, perspectively correct renders and if you don't need vector graphics as outputs.
navis.plot1d : A nifty way to visualise neurons in a single dimension.
navis.plot_flat : Plot neurons as flat structures (e.g. dendrograms).

Source code in navis/plotting/dd.py
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
def plot2d(
    x: Union[
        core.NeuronObject,
        core.Volume,
        np.ndarray,
        List[Union[core.NeuronObject, np.ndarray, core.Volume]],
    ],
    **kwargs,
) -> Tuple[mpl.figure.Figure, mpl.axes.Axes]:
    """Generate 2D plots of neurons and neuropils.

    The main advantage of this is that you can save plot as vector graphics.

    Note
    ----
    This function uses `matplotlib` which "fakes" 3D as it has only very limited
    control over layering objects in 3D. Therefore neurites are not necessarily
    plotted in the right Z order. This becomes especially troublesome when
    plotting a complex scene with lots of neurons criss-crossing. See the
    `method` parameter for details.

    Parameters
    ----------
    x :                 TreeNeuron | MeshNeuron | NeuronList | Volume | Dotprops | np.ndarray
                        Objects to plot:
                         - multiple objects can be passed as list (see examples)
                         - numpy array of shape (N, 3) is interpreted as points for
                           scatter plots

    Object parameters
    -----------------
    soma :              bool | dict, default=True

                        Plot soma if one exists. Size of the soma is determined
                        by the neuron's `.soma_radius` property which defaults
                        to the "radius" column for `TreeNeurons`. You can also
                        pass `soma` as a dictionary to customize the appearance
                        of the soma - for example `soma={"color": "red", "lw": 2, "ec": 1}`.

    radius :            bool | "auto", default=False

                        If "auto" will plot neurites of `TreeNeurons` with radius
                        if they have radii. If True, will try plotting neurites of
                        `TreeNeurons` with radius regardless. The radius can be
                        scaled by `linewidth`. Note that this will increase rendering
                        time.

    linewidth :         int | float, default=.5

                        Width of neurites. Also accepts alias `lw`.

    linestyle :         str, default='-'

                        Line style of neurites. Also accepts alias `ls`.

    color :             None | str | tuple | list | dict, default=None

                        Use single str (e.g. `'red'`) or `(r, g, b)` tuple
                        to give all neurons the same color. Use `list` of
                        colors to assign colors: `['red', (1, 0, 1), ...]`.
                        Use `dict` to map colors to neuron IDs:
                        `{id: (r, g, b), ...}`.

    palette :           str | array | list of arrays, default=None

                        Name of a matplotlib or seaborn palette. If `color` is
                        not specified will pick colors from this palette.

    color_by :          str | array | list of arrays, default = None

                        Color neurons by a property. Can be:
                          - a list/array of labels, one per each neuron
                          - a neuron property (str)
                          - a column name in the node table of `TreeNeurons`
                          - a list/array of values for each node
                        Numerical values will be normalized. You can control
                        the normalization by passing a `vmin` and/or `vmax` parameter.

    shade_by :          str | array | list of arrays, default=None

                        Similar to `color_by` but will affect only the alpha
                        channel of the color. If `shade_by='strahler'` will
                        compute Strahler order if not already part of the node
                        table (TreeNeurons only). Numerical values will be
                        normalized. You can control the normalization by passing
                        a `smin` and/or `smax` parameter.

    alpha :             float [0-1], default=1

                        Alpha value for neurons. Overridden if alpha is provided
                        as fourth value in `color` (rgb*a*). You can override
                        alpha value for connectors by using `cn_alpha`.

    mesh_shade :        bool, default=False

                        Only relevant for meshes (e.g. `MeshNeurons`) and
                        `TreeNeurons` with radius, and when method is 3d or
                        3d complex. Whether to shade the object which will give it
                        a 3D look.

    depth_coloring :    bool, default=False

                        If True, will use neuron color to encode depth (Z).
                        Overrides `color` argument. Does not work with
                        `method = '3d_complex'`.

    depth_scale :       bool, default=True

                        If True and `depth_coloring=True` will plot a scale.

    connectors :        bool | "presynapses" | "postsynapses" | str | list, default=True

                        Plot connectors. This can either be `True` (plot all
                        connectors), `"presynapses"` (only presynaptic connectors)
                        or `"postsynapses"` (only postsynaptic connectors). If
                        a string or a list is provided, it will be used to filter the
                        `type` column in the connectors table.

    connectors_only :   boolean, default=False

                        Plot only connectors, not the neuron.

    cn_size :           int | float, default = 1

                        Size of connectors.

    cn_layout :         dict, default={}

                        Defines default settings (color, style) for connectors.
                        See `navis.config.default_connector_colors` for the
                        default layout.

    cn_colors :         str | tuple | dict | "neuron"

                        Overrides the default connector (e.g. synapse) colors:
                            - single color as str (e.g. `'red'`) or rgb tuple
                            (e.g. `(1, 0, 0)`)
                            - dict mapping the connectors tables `type` column to
                            a color (e.g. `{"pre": (1, 0, 0)}`)
                            - with "neuron", connectors will receive the same color
                            as their neuron

    cn_mesh_colors :    bool, default=False

                        If True, will use the neuron's color for its connectors.

    scatter_kws :       dict, default={}

                        Parameters to be used when plotting points. Accepted
                        keywords are: `size` and `color`.

    volume_outlines :   bool | "both", default=False

                        If True will plot volume outline with no fill. Only
                        works with `method="2d"`. Requires the `shapely` package.

    dps_scale_vec :     float

                        Scale vector for dotprops.

    Figure parameters
    -----------------
    method :            '2d' | '3d' (default) | '3d_complex'

                        Method used to generate plot. Comes in three flavours:
                         1. `2d` uses normal matplotlib. Neurons are plotted on
                            top of one another in the order they are passed to
                            the function. Use the `view` parameter (below) to
                            set the view (default = xy).
                         2. `3d` uses matplotlib's 3D axis. Here, matplotlib
                            decides the depth order (zorder) of plotting. Can
                            change perspective either interactively or by code
                            (see examples).
                         3. `3d_complex` same as 3d but each neuron segment is
                            added individually. This allows for more complex
                            zorders to be rendered correctly. Slows down
                            rendering!

    view :              tuple, default = ("x", "y")

                        Sets view for `method='2d'`. Can be any combination of
                        "x", "y", "z" and their negations. For example, to plot
                        from the top, use `view=('x', '-y')`. For 3D `methods`,
                        this will set the initial view which can be changed by
                        adjusting `ax.azim`, `ax.elev` and `ax.roll` (see examples).

    non_view_axes3d :   "show" | "hide" (default) | "fade"

                        Only relevant for methods '3d' and '3d_complex': what to
                        do with the axes that are not in the view. If 'hide', will
                        hide them. If 'show', will show them. If 'fade', will
                        make them semi-transparent. This is relevant if you
                        intend to customize the view after plotting.

    autoscale :         bool, default=True

                        If True, will scale the axes to fit the data.

    scalebar :          int | float | str | pint.Quantity | dict, default=False

                        Adds a scale bar. Provide integer, float or str to set
                        size of scalebar. Int|float are assumed to be in same
                        units as data. You can specify units as a string:
                        e.g. "1 um". For methods '3d' and '3d_complex', this
                        will create an axis object.

                        You can customize the scalebar by passing a dictionary.
                        For example:

                        `{size: "1 micron", color: 'k', lw: 3, alpha: 0.9}`


    ax :                matplotlib.Axes, default=None

                        Pass an axis object if you want to plot on an existing
                        canvas. Must match `method` - i.e. 2D or 3D axis.

    figsize :           tuple, default=None

                        Size of figure. Ignored if `ax` is provided.

    rasterize :         bool, default=False

                        Neurons produce rather complex vector graphics which can
                        lead to large files when saving to SVG, PDF or PS. Use
                        this parameter to rasterize neurons and meshes/volumes
                        (but not axes or labels) to reduce file size.

    orthogonal :        bool, default=True

                        Whether to use orthogonal or perspective view for
                        methods '3d' and '3d_complex'.

    group_neurons :     bool, default=False

                        If True, neurons will be grouped. Works with SVG export
                        but not PDF. Does NOT work with `method='3d_complex'`.

    Returns
    -------
    fig :               matplotlib.Figure
    ax :                matplotlib.Axes

    Examples
    --------

    >>> import navis
    >>> import matplotlib.pyplot as plt

    Plot list of neurons as simple 2d:

    >>> nl = navis.example_neurons()
    >>> fig, ax = navis.plot2d(nl, method='2d', view=('x', '-z'))
    >>> plt.show() # doctest: +SKIP

    Add a volume:

    >>> vol = navis.example_volume('LH')
    >>> fig, ax = navis.plot2d([nl, vol], method='2d', view=('x', '-z'))
    >>> plt.show() # doctest: +SKIP

    Change neuron colors:

    >>> fig, ax = navis.plot2d(
    ...              nl,
    ...              method='2d',
    ...              view=('x', '-z'),
    ...              color=['r', 'g', 'b', 'm', 'c', 'y']
    ...          )
    >>> plt.show() # doctest: +SKIP

    Plot in "fake" 3D:

    >>> fig, ax = navis.plot2d(nl, method='3d', view=('x', '-z'))
    >>> plt.show() # doctest: +SKIP
    >>> # In an interactive window you can drag the plot to rotate

    Plot in "fake" 3D and change perspective:

    >>> fig, ax = navis.plot2d(nl, method='3d', view=('x', '-z'))
    >>> # Change view
    >>> ax.elev = -20
    >>> ax.azim = 45
    >>> ax.roll = 180
    >>> plt.show() # doctest: +SKIP

    Plot using depth-coloring:

    >>> fig, ax = navis.plot2d(nl, method='3d', depth_coloring=True, view=('x', '-z'))
    >>> plt.show() # doctest: +SKIP

    See the [plotting intro](../../generated/gallery/1_plotting/tutorial_plotting_00_intro)
    for more examples.

    See Also
    --------
    [`navis.plot3d`][]
            Use this if you want interactive, perspectively correct renders
            and if you don't need vector graphics as outputs.
    [`navis.plot1d`][]
            A nifty way to visualise neurons in a single dimension.
    [`navis.plot_flat`][]
            Plot neurons as flat structures (e.g. dendrograms).

    """
    # This handles (1) checking for invalid arguments, (2) setting defaults and
    # (3) synonyms
    settings = Matplotlib2dSettings().update_settings(**kwargs)

    _METHOD_OPTIONS = ["2d", "3d", "3d_complex"]
    if settings.method not in _METHOD_OPTIONS:
        raise ValueError(
            f'Unknown method "{settings.method}". Please use either: '
            f'{",".join(_METHOD_OPTIONS)}'
        )

    # Parse objects
    (neurons, volumes, points, _) = utils.parse_objects(x)

    # Here we check whether `color_by` is a neuron property which we
    # want to translate into a single color per neuron, or a
    # per node/vertex property which we will parse late
    color_neurons_by = None
    if settings.color_by is not None and neurons:
        if not settings.palette:
            raise ValueError(
                'Must provide palette (via e.g. `palette="viridis"`) '
                "when using `color_by` argument."
            )

        # Check if this may be a neuron property
        if isinstance(settings.color_by, str):
            # Check if this could be a neuron property
            has_prop = hasattr(neurons[0], settings.color_by)

            # For TreeNeurons, we also check if it is a node property
            # If so, prioritize this.
            if isinstance(neurons[0], core.TreeNeuron):
                if settings.color_by in neurons[0].nodes.columns:
                    has_prop = False

            if has_prop:
                # If it is, use it to color neurons
                color_neurons_by = [
                    getattr(neuron, settings.color_by) for neuron in neurons
                ]
                settings.color_by = None
        elif isinstance(settings.color_by, (list, np.ndarray)):
            if len(settings.color_by) == len(neurons):
                color_neurons_by = settings.color_by
                settings.color_by = None

    # Generate the per-neuron colors
    (neuron_cmap, volumes_cmap) = prepare_colormap(
        settings.color,
        neurons=neurons,
        volumes=volumes,
        palette=settings.palette,
        color_by=color_neurons_by,
        alpha=settings.alpha,
        color_range=1,
    )

    if not isinstance(settings.color_by, type(None)):
        neuron_cmap = vertex_colors(
            neurons,
            by=settings.color_by,
            use_alpha=False,
            palette=settings.palette,
            norm_global=settings.norm_global,
            vmin=settings.vmin,
            vmax=settings.vmax,
            na="raise",
            color_range=1,
        )

    if not isinstance(settings.shade_by, type(None)):
        alphamap = vertex_colors(
            neurons,
            by=settings.shade_by,
            use_alpha=True,
            palette="viridis",  # palette is irrelevant here
            norm_global=settings.norm_global,
            vmin=settings.smin,
            vmax=settings.smax,
            na="raise",
            color_range=1,
        )

        # Merge the alpha channel from `shade_by` into the per-vertex colors
        new_colormap = []
        for c, a in zip(neuron_cmap, alphamap):
            # Broadcast a single color to one color per vertex
            if not (isinstance(c, np.ndarray) and c.ndim == 2):
                c = np.tile(c, (a.shape[0], 1))

            if c.shape[1] == 4:
                c[:, 3] = a[:, 3]
            else:
                c = np.insert(c, 3, a[:, 3], axis=1)

            new_colormap.append(c)
        neuron_cmap = new_colormap

    # Generate axes
    if not settings.ax:
        if settings.method == "2d":
            fig, ax = plt.subplots(figsize=settings.figsize)
        elif settings.method in ("3d", "3d_complex"):
            fig = plt.figure(
                figsize=settings.figsize if settings.figsize else plt.figaspect(1) * 1.5
            )
            ax = fig.add_subplot(111, projection="3d")
        # Hide axes
        # ax.set_axis_off()
    else:
        # Check if correct axis were provided
        if not isinstance(settings.ax, mpl.axes.Axes):
            raise TypeError(
                'Ax must be of type "mpl.axes.Axes", '
                f'not "{type(settings.ax)}"'
            )
        ax = settings.ax
        fig = ax.get_figure()
        if settings.method in ("3d", "3d_complex") and ax.name != "3d":
            raise TypeError("Axis must be 3d.")
        elif settings.method == "2d" and ax.name == "3d":
            raise TypeError("Axis must be 2d.")

    # Set axis projection
    if settings.method in ("3d", "3d_complex"):
        # This sets the view
        _set_view3d(ax, settings)

        # Some styling:
        # Make background transparent (nicer for dark themes)
        fig.patch.set_alpha(0)
        ax.patch.set_alpha(0)

        # For 3d axes, we also need to set the pane color to transparent
        if hasattr(ax, "zaxis"):
            ax.xaxis.pane.fill = False
            ax.xaxis.pane.set_edgecolor((1, 1, 1, 0))

            ax.yaxis.pane.fill = False
            ax.yaxis.pane.set_edgecolor((1, 1, 1, 0))

            ax.zaxis.pane.set_edgecolor((1, 1, 1, 0))
            ax.zaxis.pane.fill = False

        if settings.orthogonal:
            ax.set_proj_type("ortho")
        else:
            ax.set_proj_type("persp", focal_length=1)  # smaller = more perspective
    else:
        ax.set_aspect("equal")
        _set_view2d(ax, settings)

    # Prepare some stuff for depth coloring
    if settings.depth_coloring and not neurons.empty:
        if settings.method == "3d_complex":
            raise Exception(
                f'Depth coloring unavailable for method "{settings.method}"'
            )
        elif settings.method == "2d":
            bbox = neurons.bbox
            # Add to kwargs
            xy = [v.replace("-", "").replace("+", "") for v in settings.view]
            # The axis not shown in the 2d view provides the depth values
            depth_ix = [v[1] for v in [("x", 0), ("y", 1), ("z", 2)] if v[0] not in xy]

            # We use this to track the normaliser
            settings.norm = plt.Normalize(
                vmin=bbox[depth_ix, 0], vmax=bbox[depth_ix, 1]
            )

    # Plot volumes first
    if volumes:
        for i, v in enumerate(volumes):
            _ = _plot_volume(v, volumes_cmap[i], ax, settings)

    # Create lines from segments
    visuals = {}
    for i, neuron in enumerate(
        config.tqdm(
            neurons,
            desc="Plot neurons",
            leave=False,
            # `or` (not `|`): with `|`, precedence makes this
            # `(pbar_hide | len(neurons)) <= 10` which is wrong
            disable=config.pbar_hide or len(neurons) <= 10,
        )
    ):
        if not settings.connectors_only:
            if isinstance(neuron, core.TreeNeuron) and neuron.nodes.empty:
                logger.warning(f"Skipping TreeNeuron w/o nodes: {neuron.label}")
                continue
            if isinstance(neuron, core.TreeNeuron) and neuron.nodes.shape[0] == 1:
                logger.warning(f"Skipping single-node TreeNeuron: {neuron.label}")
                continue
            elif isinstance(neuron, core.MeshNeuron) and neuron.faces.size == 0:
                logger.warning(f"Skipping MeshNeuron w/o faces: {neuron.label}")
                continue
            elif isinstance(neuron, core.Dotprops) and neuron.points.size == 0:
                logger.warning(f"Skipping Dotprops w/o points: {neuron.label}")
                continue

            if isinstance(neuron, core.TreeNeuron) and settings.radius == "auto":
                # Number of nodes with radii
                n_radii = (
                    neuron.nodes.get("radius", pd.Series([], dtype="float64")).fillna(0)
                    > 0
                ).sum()
                # If less than 30% of nodes have a radius, we will fall back to lines
                # NOTE(review): this mutates `settings.radius` and therefore also
                # disables radii for all subsequent neurons in this loop — confirm
                # this "first neuron decides" behavior is intended.
                if n_radii / neuron.nodes.shape[0] < 0.3:
                    settings.radius = False

            if isinstance(neuron, core.TreeNeuron) and settings.radius:
                # Warn once if more than 5% of nodes have missing radii
                if not getattr(fig, "_radius_warned", False):
                    if (
                        (neuron.nodes.radius.fillna(0).values <= 0).sum()
                        / neuron.n_nodes
                    ) > 0.05:
                        logger.warning(
                            "Some skeleton nodes have radius <= 0. This may lead to "
                            "rendering artifacts. Set `radius=False` to plot skeletons "
                            "as single-width lines instead."
                        )
                        fig._radius_warned = True

                _neuron = conversion.tree2meshneuron(
                    neuron,
                    warn_missing_radii=False,
                    radius_scale_factor=settings.get("linewidth", 1),
                )
                _neuron.connectors = neuron.connectors
                neuron = _neuron

                # See if we need to map colors to vertices
                if isinstance(neuron_cmap[i], np.ndarray) and neuron_cmap[i].ndim == 2:
                    neuron_cmap[i] = neuron_cmap[i][neuron.vertex_map]

            if isinstance(neuron, core.TreeNeuron):
                lc, sc = _plot_skeleton(neuron, neuron_cmap[i], ax, settings)
                # Keep track of visuals related to this neuron
                visuals[neuron] = {"skeleton": lc, "somata": sc}
            elif isinstance(neuron, core.MeshNeuron):
                m = _plot_mesh(neuron, neuron_cmap[i], ax, settings)
                visuals[neuron] = {"mesh": m}
            elif isinstance(neuron, core.Dotprops):
                dp = _plot_dotprops(neuron, neuron_cmap[i], ax, settings)
                visuals[neuron] = {"dotprop": dp}
            elif isinstance(neuron, core.VoxelNeuron):
                dp = _plot_voxels(
                    neuron,
                    neuron_cmap[i],
                    ax,
                    settings,
                    **settings.scatter_kws,
                )
                visuals[neuron] = {"dotprop": dp}
            else:
                raise TypeError(
                    f"Don't know how to plot neuron of type '{type(neuron)}' "
                )

        if (settings.connectors or settings.connectors_only) and neuron.has_connectors:
            _ = _plot_connectors(neuron, neuron_cmap[i], ax, settings)

    # Plot points
    for p in points:
        _ = _plot_scatter(p, ax, settings)

    # Note: autoscaling is a bitch for 3d. In particular when we use Collections, because
    # these are currently ignored by matplotlib's built-in autoscaling.
    if settings.autoscale:
        ax.autoscale(tight=False)  # tight=False avoids clipping the neurons

        if "3d" in settings.method:
            update_axes3d_bounds(ax)

        # This is apparently still required and has to happen AFTER updating axis bounds
        ax.set_aspect("equal", adjustable="box")

    # Add scalebar after the dust has settled
    if settings.scalebar not in (False, None):
        if not settings.orthogonal:
            raise ValueError("Scalebar only available if `orthogonal=True`.")

        _ = _add_scalebar(settings.scalebar, neurons, ax, settings)

    def set_depth():
        """Set depth information for neurons according to camera position."""
        # Get projected coordinates
        proj_co = proj_points(all_co, ax.get_proj())

        # Get min and max of z coordinates
        z_min, z_max = min(proj_co[:, 2]), max(proj_co[:, 2])

        # Generate a new normaliser
        norm = plt.Normalize(vmin=z_min, vmax=z_max)

        # Go over all neurons and update Z information
        for neuron in visuals:
            # Get this neurons colletion and coordinates
            if "skeleton" in visuals[neuron]:
                c = visuals[neuron]["skeleton"]
                this_co = c._segments3d[:, 0, :]
            elif "mesh" in visuals[neuron]:
                c = visuals[neuron]["mesh"]
                # Note that we only get every third position -> that's because
                # these vectors actually represent faces, i.e. each vertex
                this_co = c._vec.T[::3, [0, 1, 2]]
            else:
                raise ValueError(
                    f"Neither mesh nor skeleton found for neuron {neuron.id}"
                )

            # Get projected coordinates
            this_proj = proj_points(this_co, ax.get_proj())

            # Normalise z coordinates
            ns = norm(this_proj[:, 2]).data

            # Set array
            c.set_array(ns)

            # No need for normaliser - already happened
            c.set_norm(None)

            if isinstance(neuron, core.TreeNeuron) and not isinstance(
                getattr(neuron, "soma", None), type(None)
            ):
                # Get depth of soma(s)
                soma = utils.make_iterable(neuron.soma)
                soma_co = (
                    neuron.nodes.set_index("node_id").loc[soma][["x", "y", "z"]].values
                )
                soma_proj = proj_points(soma_co, ax.get_proj())
                soma_cs = norm(soma_proj[:, 2]).data

                # Set soma color
                for cs, s in zip(soma_cs, visuals[neuron]["somata"]):
                    s.set_color(cmap(cs))

    def Update(event):
        # Re-compute depth colors whenever the canvas is redrawn
        # (e.g. after the user rotates the 3d view)
        set_depth()

    if settings.depth_coloring:
        if settings.palette:
            cmap = plt.get_cmap(settings.palette)
        else:
            cmap = DEPTH_CMAP
        if settings.method == "2d" and settings.depth_scale:
            sm = ScalarMappable(norm=settings.norm, cmap=cmap)
            fig.colorbar(sm, ax=ax, fraction=0.075, shrink=0.5, label="Depth")
        elif settings.method == "3d":
            # Collect all coordinates
            all_co = []
            for n in visuals:
                if "skeleton" in visuals[n]:
                    all_co.append(visuals[n]["skeleton"]._segments3d[:, 0, :])
                if "mesh" in visuals[n]:
                    all_co.append(visuals[n]["mesh"]._vec.T[:, [0, 1, 2]])

            all_co = np.concatenate(all_co, axis=0)
            fig.canvas.mpl_connect("draw_event", Update)
            set_depth()

    return fig, ax

Generate interactive 3D plot.

Uses either octarine, vispy, k3d or plotly as backend. By default, the choice is automatic depending on what backends are installed and the context:

  • Terminal: octarine > vispy > plotly
  • Jupyter: plotly > octarine > k3d

See the backend parameter on how to change this behavior.

PARAMETER DESCRIPTION
x
          The object(s) to plot. Can be:
            - navis neurons, neuronlists or volumes
            - numpy.array (N,3) is plotted as scatter plot
            - multiple objects can be passed as list (see examples)
          See parameters below for ways to customize the plot.

TYPE: Neuron/List | Volume | numpy.array | list thereof

Object parameters

color : None | str | tuple | list | dict, default=None

              Use single str (e.g. `'red'`) or `(r, g, b)` tuple
              to give all neurons the same color. Use `list` of
              colors to assign colors: `['red', (1, 0, 1), ...]`.
              Use `dict` to map colors to neurons:
              `{neuron.id: (r, g, b), ...}`.

palette : str | array | list of arrays, default=None

              Name of a matplotlib or seaborn palette. If `color` is
              not specified will pick colors from this palette.

alpha : float [0-1], optional

              Alpha value for neurons. Overridden if the colors
              provided via `color` have an alpha channel.

connectors : bool | "presynapses" | "postsynapses" | str | list, default=True

              Plot connectors. This can either be `True` (plot all
              connectors), `"presynapses"` (only presynaptic connectors)
              or `"postsynapses"` (only postsynaptic connectors). If
              a string or a list is provided, it will be used to filter the
              `type` column in the connectors table.

              Use these parameters to adjust the way connectors are plotted:

                - `cn_colors` (str | tuple | dict | "neuron" ) overrides
                  the default connector (e.g. synapse) colors:
                    - single color as str (e.g. `'red'`) or rgb tuple
                      (e.g. `(1, 0, 0)`)
                    - dict mapping the connectors tables `type` column to
                      a color (e.g. `{"pre": (1, 0, 0)}`)
                    - with "neuron", connectors will receive the same color
                      as their neuron
                - `cn_layout` (dict): Layout of the connectors. See
                  `navis.config.default_connector_colors` for options.
                - `cn_size` (float): Size of the connectors.
                - `cn_alpha` (float): Transparency of the connectors.
                - `cn_mesh_colors` (bool): Whether to color the connectors
                  by the neuron's color.

connectors_only : bool, default=False

              Plot only connectors (e.g. synapses) if available and
              ignore the neurons.

color_by : str | array | list of arrays, default = None

              Color neurons by a property. Can be:

                - a list/array of labels, one per each neuron
                - a neuron property (str)
                - a column name in the node table of `TreeNeurons`
                - a list/array of values for each node

              Numerical values will be normalized. You can control
              the normalization by passing a `vmin` and/or `vmax`
              parameter. Must specify a colormap via `palette`.

shade_by : str | array | list of arrays, default=None

              Similar to `color_by` but will affect only the alpha
              channel of the color. If `shade_by='strahler'` will
              compute Strahler order if not already part of the node
              table (TreeNeurons only). Numerical values will be
              normalized. You can control the normalization by passing
              a `smin` and/or `smax` parameter. Does not work with
              `k3d` backend.

radius : bool | "auto", default=False

              If "auto" will plot neurites of `TreeNeurons` with radius
              if they have radii. If True, will try plotting neurites of
              `TreeNeurons` with radius regardless. The radius can be
              scaled by `linewidth`. Note that this will increase rendering
              time.

soma : bool, default=True

              TreeNeurons only: Whether to plot soma if it exists. Size
              of the soma is determined by the neuron's `.soma_radius`
              property which defaults to the "radius" column for
              `TreeNeurons`.

linewidth : float, default=3 for plotly and 1 for all others

              TreeNeurons only.

linestyle : str, default='-'

              TreeNeurons only. Follows the same rules as in matplotlib.

scatter_kws : dict, optional

              Use to modify scatter plots. Accepted parameters are:
                - `size` to adjust size of dots
                - `color` to adjust color
Figure parameters

backend : 'auto' (default) | 'octarine' | 'vispy' | 'plotly' | 'k3d'

              Which backend to use for plotting. Note that there will
              be minor differences in what feature/parameters are
              supported depending on the backend:

                - `auto` selects backend based on availability and
                  context (see above). You can override this by setting an
                  environment variable e.g. `NAVIS_PLOT3D_BACKEND="vispy"`
                  or `NAVIS_PLOT3D_JUPYTER_BACKEND="k3d"`.
                - `octarine` uses WGPU to generate high-performance
                  interactive 3D plots. Works both terminal and Jupyter.
                - `vispy` similar to octarine but uses OpenGL: slower
                  but runs on older systems. Works only from terminals.
                - `plotly` generates 3D plots using WebGL. Works
                  "inline" in Jupyter notebooks but can also produce a
                  HTML file that can be opened in any browser.
                - `k3d` generates 3D plots using k3d. Works only in
                  Jupyter notebooks!

Below parameters are for plotly backend only:

fig : plotly.graph_objs.Figure

              Pass to add graph objects to existing plotly figure. Will
              not change layout.

title : str, default=None

              For plotly only! Change plot title.

width/height : int, optional

              Use to adjust figure size.

fig_autosize : bool, default=False

              For plotly only! Autoscale figure size.
              Attention: autoscale overrides width and height

hover_name : bool, default=False

              If True, hovering over neurons will show their label.

hover_id : bool, default=False

              If True, hovering over skeleton nodes will show their ID.

legend : bool, default=True

              Whether or not to show the legend.

legend_orientation : "v" (default) | "h"

              Orientation of the legend. Can be 'h' (horizontal) or 'v'
              (vertical).

legend_group : dict, default=None

              A dictionary mapping neuron IDs to labels (strings).
              Use this to group neurons under a common label in the
              legend.

inline : bool, default=True

              If True and you are in a Jupyter environment, will
              render plotly/k3d plots inline. If False, will generate
              and return either a plotly Figure or a k3d Plot object
              without immediately showing it.

Below parameters are for the Octarine/vispy backends only:

clear : bool, default = False

              If True, will clear the viewer before adding the new
              objects.

center : bool, default = True

              If True, will center camera on the newly added objects.

combine : bool, default = False

              If True, will combine objects of the same type into a
              single visual. This can greatly improve performance but
              also means objects can't be selected individually
              anymore. This is Vispy only.

size : (width, height) tuple, optional

              Use to adjust figure/window size.

show : bool, default=True

              Whether to immediately show the viewer.
RETURNS DESCRIPTION
If `backend='octarine'`

From terminal: opens a 3D window and returns :class:octarine.Viewer. From Jupyter: :class:octarine.Viewer displayed in an ipywidget.

If `backend='vispy'`

Opens a 3D window and returns navis.Viewer.

If `backend='plotly'`

Returns either None if you are in a Jupyter notebook (see also inline parameter) or a plotly.graph_objects.Figure (see examples).

If `backend='k3d'`

Returns either None and immediately displays the plot or a k3d.plot object that you can manipulate further (see inline parameter).

See Also

octarine.Viewer Interactive 3D viewer.

navis.Viewer Interactive vispy 3D viewer.

Examples:

>>> import navis

In a Jupyter notebook using plotly as backend:

>>> nl = navis.example_neurons()
>>> # Backend is automatically chosen but we can set it explicitly
>>> # Plot inline
>>> nl.plot3d(backend='plotly')
>>> # Plot as separate html in a new window
>>> fig = nl.plot3d(backend='plotly', inline=False)
>>> import plotly.offline
>>> _ = plotly.offline.plot(fig)

In a Jupyter notebook using k3d as backend:

>>> nl = navis.example_neurons()
>>> # Plot inline
>>> nl.plot3d(backend='k3d')

In a terminal using octarine as backend:

>>> # Plot list of neurons
>>> nl = navis.example_neurons()
>>> v = navis.plot3d(nl, backend='octarine')
>>> # Clear canvas
>>> navis.clear3d()

Some more advanced examples:

>>> # plot3d() can deal with combinations of objects
>>> nl = navis.example_neurons()
>>> vol = navis.example_volume('LH')
>>> vol.color = (255, 0, 0, .5)
>>> # This plots a neuronlists, a single neuron and a volume
>>> v = navis.plot3d([nl[0:2], nl[3], vol])
>>> # Clear viewer (works only with octarine and vispy)
>>> v = navis.plot3d(nl, clear=True)

See the plotting intro for even more examples.

Source code in navis/plotting/ddd.py
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
def plot3d(
    x: Union[
        core.NeuronObject,
        core.Volume,
        np.ndarray,
        List[Union[core.NeuronObject, np.ndarray, core.Volume]],
    ],
    **kwargs,
):
    """Generate interactive 3D plot.

    Uses either [octarine], [vispy], [k3d] or [plotly] as backend.
    By default, the choice is automatic depending on what backends
    are installed and the context:

      - Terminal: octarine > vispy > plotly
      - Jupyter: plotly > octarine > k3d

    See the `backend` parameter on how to change this behavior.

    [octarine]: https://schlegelp.github.io/octarine/
    [vispy]: http://vispy.org
    [k3d]: https://k3d-jupyter.org/
    [plotly]: http://plot.ly

    Parameters
    ----------
    x :               Neuron/List | Volume | numpy.array | list thereof
                      The object(s) to plot. Can be:
                        - navis neurons, neuronlists or volumes
                        - numpy.array (N,3) is plotted as scatter plot
                        - multiple objects can be passed as list (see examples)
                      See parameters below for ways to customize the plot.

    Object parameters
    -----------------
    color :           None | str | tuple | list | dict, default=None

                      Use single str (e.g. `'red'`) or `(r, g, b)` tuple
                      to give all neurons the same color. Use `list` of
                      colors to assign colors: `['red', (1, 0, 1), ...]`.
                      Use `dict` to map colors to neurons:
                      `{neuron.id: (r, g, b), ...}`.

    palette :         str | array | list of arrays, default=None

                      Name of a matplotlib or seaborn palette. If `color` is
                      not specified will pick colors from this palette.

    alpha :           float [0-1], optional

                      Alpha value for neurons. Overridden if the colors
                      provided via `color` have an alpha channel.

    connectors :      bool | "presynapses" | "postsynapses" | str | list, default=True

                      Plot connectors. This can either be `True` (plot all
                      connectors), `"presynapses"` (only presynaptic connectors)
                      or `"postsynapses"` (only postsynaptic connectors). If
                      a string or a list is provided, it will be used to filter the
                      `type` column in the connectors table.

                      Use these parameters to adjust the way connectors are plotted:

                        - `cn_colors` (str | tuple | dict | "neuron" ) overrides
                          the default connector (e.g. synapse) colors:
                            - single color as str (e.g. `'red'`) or rgb tuple
                              (e.g. `(1, 0, 0)`)
                            - dict mapping the connectors tables `type` column to
                              a color (e.g. `{"pre": (1, 0, 0)}`)
                            - with "neuron", connectors will receive the same color
                              as their neuron
                        - `cn_layout` (dict): Layout of the connectors. See
                          `navis.config.default_connector_colors` for options.
                        - `cn_size` (float): Size of the connectors.
                        - `cn_alpha` (float): Transparency of the connectors.
                        - `cn_mesh_colors` (bool): Whether to color the connectors
                          by the neuron's color.

    connectors_only : bool, default=False

                      Plot only connectors (e.g. synapses) if available and
                      ignore the neurons.

    color_by :        str | array | list of arrays, default = None

                      Color neurons by a property. Can be:

                        - a list/array of labels, one per each neuron
                        - a neuron property (str)
                        - a column name in the node table of `TreeNeurons`
                        - a list/array of values for each node

                      Numerical values will be normalized. You can control
                      the normalization by passing a `vmin` and/or `vmax`
                      parameter. Must specify a colormap via `palette`.

    shade_by :        str | array | list of arrays, default=None

                      Similar to `color_by` but will affect only the alpha
                      channel of the color. If `shade_by='strahler'` will
                      compute Strahler order if not already part of the node
                      table (TreeNeurons only). Numerical values will be
                      normalized. You can control the normalization by passing
                      a `smin` and/or `smax` parameter. Does not work with
                      `k3d` backend.

    radius :          bool | "auto", default=False

                      If "auto" will plot neurites of `TreeNeurons` with radius
                      if they have radii. If True, will try plotting neurites of
                      `TreeNeurons` with radius regardless. The radius can be
                      scaled by `linewidth`. Note that this will increase rendering
                      time.

    soma :            bool, default=True

                      TreeNeurons only: Whether to plot soma if it exists. Size
                      of the soma is determined by the neuron's `.soma_radius`
                      property which defaults to the "radius" column for
                      `TreeNeurons`.

    linewidth :       float, default=3 for plotly and 1 for all others

                      TreeNeurons only.

    linestyle :       str, default='-'

                      TreeNeurons only. Follows the same rules as in matplotlib.

    scatter_kws :     dict, optional

                      Use to modify scatter plots. Accepted parameters are:
                        - `size` to adjust size of dots
                        - `color` to adjust color

    Figure parameters
    -----------------
    backend :         'auto' (default) | 'octarine' | 'vispy' | 'plotly' | 'k3d'

                      Which backend to use for plotting. Note that there will
                      be minor differences in what feature/parameters are
                      supported depending on the backend:

                        - `auto` selects backend based on availability and
                          context (see above). You can override this by setting an
                          environment variable e.g. `NAVIS_PLOT3D_BACKEND="vispy"`
                          or `NAVIS_PLOT3D_JUPYTER_BACKEND="k3d"`.
                        - `octarine` uses WGPU to generate high-performance
                          interactive 3D plots. Works both terminal and Jupyter.
                        - `vispy` similar to octarine but uses OpenGL: slower
                          but runs on older systems. Works only from terminals.
                        - `plotly` generates 3D plots using WebGL. Works
                          "inline" in Jupyter notebooks but can also produce a
                          HTML file that can be opened in any browser.
                        - `k3d` generates 3D plots using k3d. Works only in
                          Jupyter notebooks!

    **Below parameters are for plotly backend only:**

    fig :             plotly.graph_objs.Figure

                      Pass to add graph objects to existing plotly figure. Will
                      not change layout.

    title :           str, default=None

                      For plotly only! Change plot title.

    width/height :    int, optional

                      Use to adjust figure size.

    fig_autosize :    bool, default=False

                      For plotly only! Autoscale figure size.
                      Attention: autoscale overrides width and height

    hover_name :      bool, default=False

                      If True, hovering over neurons will show their label.

    hover_id :        bool, default=False

                      If True, hovering over skeleton nodes will show their ID.

    legend :          bool, default=True

                      Whether or not to show the legend.

    legend_orientation : "v" (default) | "h"

                      Orientation of the legend. Can be 'h' (horizontal) or 'v'
                      (vertical).

    legend_group :    dict, default=None

                      A dictionary mapping neuron IDs to labels (strings).
                      Use this to group neurons under a common label in the
                      legend.

    inline :          bool, default=True

                      If True and you are in a Jupyter environment, will
                      render plotly/k3d plots inline. If False, will generate
                      and return either a plotly Figure or a k3d Plot object
                      without immediately showing it.

    **Below parameters are for the Octarine/vispy backends only:**

    clear :           bool, default = False

                      If True, will clear the viewer before adding the new
                      objects.

    center :          bool, default = True

                      If True, will center camera on the newly added objects.

    combine :         bool, default = False

                      If True, will combine objects of the same type into a
                      single visual. This can greatly improve performance but
                      also means objects can't be selected individually
                      anymore. This is Vispy only.

    size :            (width, height) tuple, optional

                      Use to adjust figure/window size.

    show :            bool, default=True

                      Whether to immediately show the viewer.

    Returns
    -------
    If `backend='octarine'`

        From terminal: opens a 3D window and returns :class:`octarine.Viewer`.
        From Jupyter: :class:`octarine.Viewer` displayed in an ipywidget.

    If `backend='vispy'`

        Opens a 3D window and returns [`navis.Viewer`][].

    If `backend='plotly'`

        Returns either `None` if you are in a Jupyter notebook (see also
        `inline` parameter) or a `plotly.graph_objects.Figure`
        (see examples).

    If `backend='k3d'`

        Returns either `None` and immediately displays the plot or a
        `k3d.plot` object that you can manipulate further (see `inline`
        parameter).

    See Also
    --------
    [`octarine.Viewer`](https://schlegelp.github.io/octarine/)
        Interactive 3D viewer.

    [`navis.Viewer`][]
        Interactive vispy 3D viewer.

    Examples
    --------
    >>> import navis

    In a Jupyter notebook using plotly as backend:

    >>> nl = navis.example_neurons()
    >>> # Backend is automatically chosen but we can set it explicitly
    >>> # Plot inline
    >>> nl.plot3d(backend='plotly')                             # doctest: +SKIP
    >>> # Plot as separate html in a new window
    >>> fig = nl.plot3d(backend='plotly', inline=False)
    >>> import plotly.offline
    >>> _ = plotly.offline.plot(fig)                            # doctest: +SKIP

    In a Jupyter notebook using k3d as backend:

    >>> nl = navis.example_neurons()
    >>> # Plot inline
    >>> nl.plot3d(backend='k3d')                                # doctest: +SKIP

    In a terminal using octarine as backend:

    >>> # Plot list of neurons
    >>> nl = navis.example_neurons()
    >>> v = navis.plot3d(nl, backend='octarine')                # doctest: +SKIP
    >>> # Clear canvas
    >>> navis.clear3d()

    Some more advanced examples:

    >>> # plot3d() can deal with combinations of objects
    >>> nl = navis.example_neurons()
    >>> vol = navis.example_volume('LH')
    >>> vol.color = (255, 0, 0, .5)
    >>> # This plots a neuronlists, a single neuron and a volume
    >>> v = navis.plot3d([nl[0:2], nl[3], vol])
    >>> # Clear viewer (works only with octarine and vispy)
    >>> v = navis.plot3d(nl, clear=True)

    See the [plotting intro](../../generated/gallery/1_plotting/tutorial_plotting_00_intro)
    for even more examples.

    """
    # Select backend. Normalize case once so that validation and dispatch
    # below agree: previously e.g. backend="Plotly" passed the (lowercased)
    # validation but fell through the case-sensitive dispatch and raised.
    backend = kwargs.pop("backend", "auto").lower()
    allowed_backends = ("auto", "octarine", "vispy", "plotly", "k3d")
    if backend == "auto":
        global AUTO_BACKEND
        if AUTO_BACKEND is not None:
            # Re-use the backend we picked on a previous call
            backend = AUTO_BACKEND
        else:
            if utils.is_jupyter():
                if not len(JUPYTER_BACKENDS):
                    raise ModuleNotFoundError(
                        "No 3D plotting backends available for Jupyter "
                        "environment. Please install one of the following: "
                        "plotly, octarine, k3d."
                    )
                # Environment variable may override the default priority;
                # lowercase it so the dispatch below works regardless of case
                backend = os.environ.get(
                    "NAVIS_PLOT3D_JUPYTER_BACKEND", JUPYTER_BACKENDS[0]
                ).lower()
            else:
                if not len(NON_JUPYTER_BACKENDS):
                    raise ModuleNotFoundError(
                        "No 3D plotting backends available for REPL/script. Please "
                        "install one of the following: octarine, vispy, plotly."
                    )
                backend = os.environ.get(
                    "NAVIS_PLOT3D_BACKEND", NON_JUPYTER_BACKENDS[0]
                ).lower()

            # Set the backend for the next time
            AUTO_BACKEND = backend

            logger.info(f'Using "{backend}" backend for 3D plotting.')
    elif backend not in allowed_backends:
        raise ValueError(
            f'Unknown backend "{backend}". Permitted: {", ".join(allowed_backends)}.'
        )
    elif backend not in BACKENDS:
        raise ModuleNotFoundError(
            f'Backend "{backend}" not installed. Please install it via pip '
            "(see https://navis.readthedocs.io/en/latest/source/install.html#optional-dependencies "
            "for more information)."
        )

    if backend == "vispy":
        return plot3d_vispy(x, **kwargs)
    elif backend == "k3d":
        if not utils.is_jupyter():
            logger.warning("k3d backend only works in Jupyter environments")
        return plot3d_k3d(x, **kwargs)
    elif backend == "plotly":
        return plot3d_plotly(x, **kwargs)
    elif backend == "octarine":
        return plot3d_octarine(x, **kwargs)
    else:
        # Unreachable after validation above; kept as a defensive guard
        raise ValueError(
            f'Unknown backend "{backend}". Permitted: {", ".join(allowed_backends)}.'
        )

Plot neuron as flat diagrams.

PARAMETER DESCRIPTION
x
                A single neuron to plot.

TYPE: TreeNeuron

layout
                Layout to use. All but 'subway' require graphviz to
                be installed. For the 'fdp' and 'neato' it is highly
                recommended to downsample the neuron first.

TYPE: 'subway' | 'dot' | 'neato' | 'fdp' | 'sfdp' | 'twopi' | 'circo' DEFAULT: 'subway'

connectors
                If True (and neuron actually has connectors), will plot
                connectors.

TYPE: bool DEFAULT: False

highlight_connectors
                Will highlight these connector IDs.

TYPE: list of connector IDs DEFAULT: None

ax
                Ax to plot on. Will create new one if not provided.

TYPE: matplotlib.ax DEFAULT: None

shade_by_length
                Change shade of branch with length. For layout
                "subway" only.

TYPE: bool DEFAULT: False

normalize_distance
                If True, will normalise all distances to the longest
                neurite. For layout "subway" only.

TYPE: bool DEFAULT: False

**kwargs
                Keyword argument passed on to the respective
                plotting functions.

DEFAULT: {}

RETURNS DESCRIPTION
ax

TYPE: matplotlib.ax

pos

(X, Y) positions for each node: {node_id: (x, y)}.

TYPE: dict

Examples:

Plot neuron in "subway" layout:

>>> import navis
>>> n = navis.example_neurons(1).convert_units('nm')
>>> ax, pos = navis.plot_flat(n, layout='subway',
...                           figsize=(12, 2),
...                           connectors=True)
>>> _ = ax.set_xlabel('distance [nm]')
>>> plt.show()

Plot neuron in "dot" layout (requires pygraphviz and graphviz):

>>> # First downsample to speed up processing
>>> ds = navis.downsample_neuron(n, 10, preserve_nodes='connectors')
>>> ax, pos = navis.plot_flat(ds, layout='dot', connectors=True)
>>> plt.show()

To close all figures (only for doctests)

>>> plt.close('all')

See the plotting intro and the neuron topology tutorial for more examples.

Source code in navis/plotting/flat.py
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
def plot_flat(
    x,
    layout: Union[
        Literal["subway"],
        Literal["dot"],
        Literal["neato"],
        # Fixed typos: the graphviz engines are "fdp" and "sfdp" (matching
        # the allowed_values check below), not "fpd"/"sfpd".
        Literal["fdp"],
        Literal["sfdp"],
        Literal["twopi"],
        Literal["circo"],
    ] = "subway",
    connectors: bool = False,
    highlight_connectors: Optional[List[int]] = None,
    shade_by_length: bool = False,
    normalize_distance: bool = False,
    reroot_soma: bool = False,
    ax: Optional[Any] = None,
    **kwargs,
):
    """Plot neuron as flat diagrams.

    Parameters
    ----------
    x :                     TreeNeuron
                            A single neuron to plot.
    layout :                'subway' | 'dot' | 'neato' | 'fdp' | 'sfdp' | 'twopi' | 'circo'
                            Layout to use. All but 'subway' require graphviz to
                            be installed. For the 'fdp' and 'neato' it is highly
                            recommended to downsample the neuron first.
    connectors :            bool
                            If True (and neuron actually has connectors), will plot
                            connectors.
    highlight_connectors :  list of connector IDs, optional
                            Will highlight these connector IDs.
    ax :                    matplotlib.ax, optional
                            Ax to plot on. Will create new one if not provided.
    shade_by_length :       bool, optional
                            Change shade of branch with length. For layout
                            "subway" only.
    normalize_distance :    bool, optional
                            If True, will normalise all distances to the longest
                            neurite. For layout "subway" only.
    reroot_soma :           bool, optional
                            If True and the neuron has a soma, reroot the
                            (copied) neuron to its soma before plotting.
    **kwargs
                            Keyword argument passed on to the respective
                            plotting functions.

    Returns
    -------
    ax :                    matplotlib.ax
    pos :                   dict
                            (X, Y) positions for each node: `{node_id: (x, y)}`.


    Examples
    --------
    Plot neuron in "subway" layout:

    >>> import navis
    >>> n = navis.example_neurons(1).convert_units('nm')
    >>> ax, pos = navis.plot_flat(n, layout='subway',
    ...                           figsize=(12, 2),
    ...                           connectors=True)
    >>> _ = ax.set_xlabel('distance [nm]')
    >>> plt.show() # doctest: +SKIP

    Plot neuron in "dot" layout (requires pygraphviz and graphviz):

    >>> # First downsample to speed up processing
    >>> ds = navis.downsample_neuron(n, 10, preserve_nodes='connectors')
    >>> ax, pos = navis.plot_flat(ds, layout='dot', connectors=True) # doctest: +SKIP
    >>> plt.show()                                                   # doctest: +SKIP

    To close all figures (only for doctests)

    >>> plt.close('all')

    See the [plotting intro](../../generated/gallery/1_plotting/tutorial_plotting_00_intro)
    and the [neuron topology tutorial](../../generated/gallery/1_plotting/tutorial_plotting_03_dend)
    for more examples.

    """
    # Unwrap a single-neuron NeuronList
    if isinstance(x, core.NeuronList) and len(x) == 1:
        x = x[0]

    utils.eval_param(x, name="x", allowed_types=(core.TreeNeuron,))
    utils.eval_param(
        layout,
        name="layout",
        allowed_values=("subway", "dot", "neato", "fdp", "sfdp", "twopi", "circo"),
    )

    # Work on the copy of the neuron
    x = x.copy()

    # Reroot to soma (if applicable)
    if reroot_soma and x.soma:
        x.reroot(x.soma, inplace=True)

    if layout == "subway":
        return _plot_subway(
            x,
            connectors=connectors,
            highlight_connectors=highlight_connectors,
            shade_by_length=shade_by_length,
            normalize_distance=normalize_distance,
            ax=ax,
            **kwargs,
        )
    else:
        return _plot_force(
            x,
            prog=layout,
            connectors=connectors,
            highlight_connectors=highlight_connectors,
            ax=ax,
            **kwargs,
        )

Remove the last item added to the 3D canvas.

Source code in navis/plotting/vispy/vputils.py
64
65
66
67
def pop3d():
    """Remove the last item added to the 3D canvas."""
    # Delegate straight to the active viewer's pop() method.
    get_viewer().pop()

Prune all neurites past a given distance from a source.

PARAMETER DESCRIPTION
x

TYPE: TreeNeuron | MeshNeuron | NeuronList

depth
        Distance from source at which to start pruning. If neuron
        has its `.units` set, you can also pass this as a string such
        as "50 microns".

TYPE: int | float | str

source
        Source node for depth calculation. If `None`, will use
        root (first root if multiple). If `x` is a
        list of neurons then must provide a source for each neuron.

TYPE: int DEFAULT: None

inplace
        If False, pruning is performed on copy of original neuron
        which is then returned.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
TreeNeuron / List

Pruned neuron(s).

Examples:

>>> import navis
>>> n = navis.example_neurons(2)
>>> # Reroot to soma
>>> n.reroot(n.soma, inplace=True)
>>> # Prune all twigs farther from the root than 100 microns
>>> # (example neuron are in 8x8x8nm units)
>>> n_pr = navis.prune_at_depth(n,
...                             depth=100e3 / 8,
...                             inplace=False)
>>> all(n.n_nodes > n_pr.n_nodes)
True
Source code in navis/morpho/manipulation.py
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
@utils.map_neuronlist(desc="Pruning", must_zip=["source"], allow_parallel=True)
@utils.meshneuron_skeleton(method="subset")
def prune_at_depth(
    x: NeuronObject,
    depth: Union[float, int],
    *,
    source: Optional[int] = None,
    inplace: bool = False,
) -> Optional[NeuronObject]:
    """Prune all neurites past a given distance from a source.

    Parameters
    ----------
    x :             TreeNeuron | MeshNeuron | NeuronList
    depth :         int | float | str
                    Distance from source at which to start pruning. If neuron
                    has its `.units` set, you can also pass this as a string such
                    as "50 microns".
    source :        int, optional
                    Source node for depth calculation. If `None`, will use
                    root (first root if multiple). If `x` is a
                    list of neurons then must provide a source for each neuron.
    inplace :       bool, optional
                    If False, pruning is performed on copy of original neuron
                    which is then returned.

    Returns
    -------
    TreeNeuron/List
                    Pruned neuron(s).

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(2)
    >>> # Reroot to soma
    >>> n.reroot(n.soma, inplace=True)
    >>> # Prune all twigs farther from the root than 100 microns
    >>> # (example neuron are in 8x8x8nm units)
    >>> n_pr = navis.prune_at_depth(n,
    ...                             depth=100e3 / 8,
    ...                             inplace=False)
    >>> all(n.n_nodes > n_pr.n_nodes)
    True

    """
    # The decorator makes sure that at this point we only have single neurons
    if not isinstance(x, core.TreeNeuron):
        raise TypeError(f"Expected TreeNeuron, got {type(x)}")

    # Convert unit strings (e.g. "50 microns") to neuron units; plain
    # numbers pass through unchanged
    depth = x.map_units(depth, on_error="raise")
    if depth < 0:
        # Message now matches the check: zero is allowed (keeps only
        # nodes at distance 0 from the source), negative values are not
        raise ValueError(f'`depth` must be >= 0, got "{depth}"')

    if source is None:
        source = x.root[0]
    elif source not in x.nodes.node_id.values:
        raise ValueError(f'Source "{source}" not among nodes')

    # Geodesic distance from source to all nodes; `limit` caps the search
    # so anything farther than `depth` comes back as infinity
    dist = graph.geodesic_matrix(x, from_=source, directed=False, limit=depth)
    keep = dist.columns[dist.values[0] < np.inf]

    if not inplace:
        x = x.copy()

    # Subset in place - the copy (if requested) was already made above
    _ = subset.subset_neuron(x, subset=keep, inplace=True)

    return x

Prune neuron based on Strahler order.

PARAMETER DESCRIPTION
x
        Neuron(s) to prune.

TYPE: TreeNeuron | MeshNeuron | NeuronList

to_prune
        Strahler indices (SI) to prune. For example:
          1. `to_prune=1` removes all leaf branches
          2. `to_prune=[1, 2]` removes SI 1 and 2
          3. `to_prune=range(1, 4)` removes SI 1, 2 and 3
          4. `to_prune=slice(0, -1)` removes everything but the
             highest SI
          5. `to_prune=slice(-1, None)` removes only the highest
             SI

TYPE: int | list | range | slice

reroot_soma
        If True, neuron will be rerooted to its soma.

TYPE: bool DEFAULT: True

inplace
        If False, pruning is performed on copy of original neuron
        which is then returned.

TYPE: bool DEFAULT: False

force_strahler_update
                If True, will force update of Strahler order even
                if already exists in node table.

TYPE: bool DEFAULT: False

relocate_connectors
              If True, connectors on removed nodes will be
              reconnected to the closest still existing node.
              Works only in child->parent direction.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
TreeNeuron / List

Pruned neuron(s).

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> n_pr = navis.prune_by_strahler(n, to_prune=1, inplace=False)
>>> n.n_nodes > n_pr.n_nodes
True
Source code in navis/morpho/manipulation.py
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
@utils.map_neuronlist(desc="Pruning", allow_parallel=True)
@utils.meshneuron_skeleton(method="subset")
def prune_by_strahler(
    x: NeuronObject,
    to_prune: Union[int, List[int], range, slice],
    inplace: bool = False,
    reroot_soma: bool = True,
    force_strahler_update: bool = False,
    relocate_connectors: bool = False,
) -> NeuronObject:
    """Prune neuron based on [Strahler order](https://en.wikipedia.org/wiki/Strahler_number).

    Parameters
    ----------
    x :             TreeNeuron | MeshNeuron | NeuronList
                    Neuron(s) to prune.
    to_prune :      int | list | range | slice
                    Strahler indices (SI) to prune. For example:
                      1. `to_prune=1` removes all leaf branches
                      2. `to_prune=[1, 2]` removes SI 1 and 2
                      3. `to_prune=range(1, 4)` removes SI 1, 2 and 3
                      4. `to_prune=slice(0, -1)` removes everything but the
                         highest SI
                      5. `to_prune=slice(-1, None)` removes only the highest
                         SI
    reroot_soma :   bool, optional
                    If True, neuron will be rerooted to its soma.
    inplace :       bool, optional
                    If False, pruning is performed on copy of original neuron
                    which is then returned.
    force_strahler_update : bool, optional
                            If True, will force update of Strahler order even
                            if already exists in node table.
    relocate_connectors : bool, optional
                          If True, connectors on removed nodes will be
                          reconnected to the closest still existing node.
                          Works only in child->parent direction.

    Returns
    -------
    TreeNeuron/List
                    Pruned neuron(s).

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> n_pr = navis.prune_by_strahler(n, to_prune=1, inplace=False)
    >>> n.n_nodes > n_pr.n_nodes
    True

    """
    # The decorator makes sure that at this point we have single neurons
    if not isinstance(x, core.TreeNeuron):
        raise TypeError(f"Expected TreeNeuron(s), got {type(x)}")

    # Make a copy if necessary before making any changes
    neuron = x
    if not inplace:
        neuron = neuron.copy()

    # Reroot to soma first so Strahler indices are computed from the soma
    if reroot_soma and not isinstance(neuron.soma, type(None)):
        neuron.reroot(neuron.soma, inplace=True)

    # Compute Strahler indices unless they are already present (or a
    # recompute was explicitly requested)
    if "strahler_index" not in neuron.nodes or force_strahler_update:
        mmetrics.strahler_index(neuron)

    # Prepare indices: a negative int means "prune all but the |to_prune|
    # highest orders", e.g. -1 -> prune everything below the max SI
    if isinstance(to_prune, int) and to_prune < 0:
        to_prune = range(1, int(neuron.nodes.strahler_index.max() + (to_prune + 1)))

    # Normalize `to_prune` to a list of Strahler indices
    if isinstance(to_prune, int):
        if to_prune < 1:
            raise ValueError(
                "SI to prune must be positive. Please see docs"
                "for additional options."
            )
        to_prune = [to_prune]
    elif isinstance(to_prune, range):
        to_prune = list(to_prune)
    elif isinstance(to_prune, slice):
        # Slice into the full range of existing SIs (1..max)
        SI_range = range(1, int(neuron.nodes.strahler_index.max() + 1))
        to_prune = list(SI_range)[to_prune]

    # Prepare parent dict if needed later (maps node ID -> parent ID,
    # captured *before* any nodes are dropped)
    if relocate_connectors:
        parent_dict = {tn.node_id: tn.parent_id for tn in neuron.nodes.itertuples()}

    # Avoid setting the nodes as this potentially triggers a regeneration
    # of the graph which in turn will raise an error because some nodes might
    # still have parents that don't exist anymore
    neuron._nodes = neuron._nodes[
        ~neuron._nodes.strahler_index.isin(to_prune)
    ].reset_index(drop=True, inplace=False)

    if neuron.has_connectors:
        if not relocate_connectors:
            # Simply drop connectors whose node was removed
            neuron._connectors = neuron._connectors[
                neuron._connectors.node_id.isin(neuron._nodes.node_id.values)
            ].reset_index(drop=True, inplace=False)
        else:
            # Walk each orphaned connector up the (original) parent chain
            # until we hit a node that survived the pruning.
            # NOTE(review): if an entire ancestor chain was removed (e.g.
            # the root itself was pruned), `parent_dict[this_tn]` would
            # raise KeyError at parent_id -1 — presumably this cannot
            # happen with valid Strahler pruning; confirm.
            remaining_tns = set(neuron._nodes.node_id.values)
            for cn in neuron._connectors[
                ~neuron.connectors.node_id.isin(neuron._nodes.node_id.values)
            ].itertuples():
                this_tn = parent_dict[cn.node_id]
                while True:
                    if this_tn in remaining_tns:
                        break
                    this_tn = parent_dict[this_tn]
                neuron._connectors.loc[cn.Index, "node_id"] = this_tn

    # Reset indices of node and connector tables (important for igraph!)
    neuron._nodes.reset_index(inplace=True, drop=True)

    if neuron.has_connectors:
        neuron._connectors.reset_index(inplace=True, drop=True)

    # Theoretically we can end up with disconnected pieces, i.e. with more
    # than 1 root node -> we have to fix the nodes that lost their parents
    neuron._nodes.loc[
        ~neuron._nodes.parent_id.isin(neuron._nodes.node_id.values), "parent_id"
    ] = -1

    # Remove temporary attributes
    neuron._clear_temp_attr()

    return neuron

Prune terminal twigs under a given size.

By default this function will simply drop all terminal twigs shorter than size. This is very fast but rather stupid: for example, if a twig is just 1 nanometer longer than size it will not be touched at all. If you require precision, set exact=True which will prune exactly size off the terminals but is about an order of magnitude slower.

PARAMETER DESCRIPTION
x

TYPE: TreeNeuron | MeshNeuron | NeuronList

size
        Twigs shorter than this will be pruned. If the neuron has
        its `.units` set, you can also pass a string including the
        units, e.g. '5 microns'.

TYPE: int | float | str

exact
        See notes above.

TYPE: bool DEFAULT: False

mask
        Either a boolean mask, a list of node IDs or a callable taking
        a neuron as input and returning one of the former. If provided,
        only nodes that are in the mask will be considered for pruning.

TYPE: iterable | callable DEFAULT: None

inplace
        If False, pruning is performed on copy of original neuron
        which is then returned.

TYPE: bool DEFAULT: False

recursive
        If `int` will undergo that many rounds of recursive
        pruning. If True will prune iteratively until no more
        terminal twigs under the given size are left. Only
        relevant if `exact=False`.

TYPE: int | bool DEFAULT: False

RETURNS DESCRIPTION
TreeNeuron / List

Pruned neuron(s).

Examples:

Simple pruning

>>> import navis
>>> n = navis.example_neurons(2)
>>> # Prune twigs smaller than 5 microns
>>> # (example neuron are in 8x8x8nm units)
>>> n_pr = navis.prune_twigs(n,
...                          size=5000 / 8,
...                          recursive=float('inf'),
...                          inplace=False)
>>> all(n.n_nodes > n_pr.n_nodes)
True

Exact pruning

>>> n = navis.example_neurons(1)
>>> # Prune twigs by exactly 5 microns
>>> # (example neuron are in 8x8x8nm units)
>>> n_pr = navis.prune_twigs(n,
...                          size=5000 / 8,
...                          exact=True,
...                          inplace=False)
>>> n.n_nodes > n_pr.n_nodes
True

Prune using units

>>> import navis
>>> n = navis.example_neurons(1)
>>> # Example neurons are in 8x8x8nm units...
>>> n.units
<Quantity(8, 'nanometer')>
>>> # ... therefore we can use units for `size`
>>> n_pr = navis.prune_twigs(n,
...                          size='5 microns',
...                          inplace=False)
>>> n.n_nodes > n_pr.n_nodes
True
Source code in navis/morpho/manipulation.py
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
@utils.map_neuronlist(desc="Pruning", allow_parallel=True)
@utils.meshneuron_skeleton(method="subset")
def prune_twigs(
    x: NeuronObject,
    size: Union[float, str],
    exact: bool = False,
    mask: Optional[Union[Sequence[int], Callable]] = None,
    inplace: bool = False,
    recursive: Union[int, bool, float] = False,
) -> NeuronObject:
    """Prune terminal twigs under a given size.

    By default this function will simply drop all terminal twigs shorter than
    `size`. This is very fast but rather stupid: for example, if a twig is
    just 1 nanometer longer than `size` it will not be touched at all. If you
    require precision, set `exact=True` which will prune *exactly* `size`
    off the terminals but is about an order of magnitude slower.

    Parameters
    ----------
    x :             TreeNeuron | MeshNeuron | NeuronList
    size :          int | float | str
                    Twigs shorter than this will be pruned. If the neuron has
                    its `.units` set, you can also pass a string including the
                    units, e.g. '5 microns'.
    exact:          bool
                    See notes above.
    mask :          iterable | callable, optional
                    Either a boolean mask, a list of node IDs or a callable taking
                    a neuron as input and returning one of the former. If provided,
                    only nodes that are in the mask will be considered for pruning.
    inplace :       bool, optional
                    If False, pruning is performed on copy of original neuron
                    which is then returned.
    recursive :     int | bool, optional
                    If `int` will undergo that many rounds of recursive
                    pruning. If True will prune iteratively until no more
                    terminal twigs under the given size are left. Only
                    relevant if `exact=False`.

    Returns
    -------
    TreeNeuron/List
                    Pruned neuron(s).

    Examples
    --------
    Simple pruning

    >>> import navis
    >>> n = navis.example_neurons(2)
    >>> # Prune twigs smaller than 5 microns
    >>> # (example neuron are in 8x8x8nm units)
    >>> n_pr = navis.prune_twigs(n,
    ...                          size=5000 / 8,
    ...                          recursive=float('inf'),
    ...                          inplace=False)
    >>> all(n.n_nodes > n_pr.n_nodes)
    True

    Exact pruning

    >>> n = navis.example_neurons(1)
    >>> # Prune twigs by exactly 5 microns
    >>> # (example neuron are in 8x8x8nm units)
    >>> n_pr = navis.prune_twigs(n,
    ...                          size=5000 / 8,
    ...                          exact=True,
    ...                          inplace=False)
    >>> n.n_nodes > n_pr.n_nodes
    True

    Prune using units

    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> # Example neurons are in 8x8x8nm units...
    >>> n.units
    <Quantity(8, 'nanometer')>
    >>> # ... therefore we can use units for `size`
    >>> n_pr = navis.prune_twigs(n,
    ...                          size='5 microns',
    ...                          inplace=False)
    >>> n.n_nodes > n_pr.n_nodes
    True

    """
    # The decorators upstream guarantee a single TreeNeuron at this point
    if not isinstance(x, core.TreeNeuron):
        raise TypeError(f"Expected TreeNeuron(s), got {type(x)}")

    # Translate unit strings (e.g. "5 microns") into neuron units;
    # plain numbers are passed through untouched
    size = x.map_units(size, on_error="raise")

    # Dispatch: precise (slow) vs simple (fast) implementation
    if exact:
        return _prune_twigs_precise(x, size=size, mask=mask, inplace=inplace)

    return _prune_twigs_simple(
        x, size=size, inplace=inplace, recursive=recursive, mask=mask
    )

Read Neuron/List from Hdf5 file.

This import is following the schema specified here

PARAMETER DESCRIPTION
filepath
            Path to HDF5 file.

TYPE: filepath

read
            The HDF5 file might contain skeleton, dotprops and/or
            mesh representations for any given neuron. This
            parameter determines which one are returned. Some
            illustrative examples:

              - 'mesh', 'skeleton' or 'dotprops' will return only
                the given representation
              - 'mesh->skeleton->dotprops' will return a mesh if the
                neuron has one, a skeleton if it does not and
                dotprops if it has neither mesh nor skeleton
              - 'mesh,skeleton,dotprops' will return all available
                representations
              - 'mesh,dotprops' will only return meshes and dotprops
              - 'mesh,skeleton->dotprops' will return the mesh
                and a skeleton or alternatively the dotprops

            Note that neurons which have none of the requested
            representations are silently skipped!

TYPE: str DEFAULT: 'mesh->skeleton->dotprops'

subset
            If provided, will read only a subset of neurons from the
            file. IDs that don't exist are silently ignored. Also
            note that due to HDF5 restrictions numeric IDs will be
            converted to strings.

TYPE: list of IDs | slice DEFAULT: None

prefer_raw
            If True and a neuron is saved as both serialized and
            raw data, will load the neuron from the raw data.

TYPE: bool DEFAULT: False

parallel
            Defaults to `auto` which means only use parallel
            processing if more than 200 neurons are imported.
            Spawning and joining processes causes overhead and is
            considerably slower for imports of small numbers of
            neurons. Integer will be interpreted as the
            number of cores (otherwise defaults to
            `os.cpu_count() - 2`).

TYPE: "auto" | bool | int DEFAULT: 'auto'

on_error
            What to do if a neuron can not be parsed: "stop" and
            raise an exception, "warn" and keep going or silently
            "ignore" and skip.

TYPE: "stop" | "warn" | "ignore" DEFAULT: 'stop'

ret_errors
            If True, will also return a list of errors encountered
            while parsing the neurons.

TYPE: bool DEFAULT: False

Only relevant for raw data:

annotations
            Whether to load annotations associated with the
            neuron(s):

             - `True` reads all annotations
             - `False` reads no annotations
             - e.g. `["connectors"]` reads only "connectors"

            Non-existing annotations are silently ignored!

TYPE: bool | str | list of str DEFAULT: True

strict
            If True, will read only the attributes/columns which
            are absolutely required to construct the respective
            neuron representation. This is useful if you either want
            to keep memory usage low or if any additional attributes
            are causing troubles. If False (default), will read
            every attribute and dataframe column and attach it to
            the neuron.

TYPE: bool DEFAULT: False

reader
            Which reader to use to parse the given format. By
            default ("auto") will try to pick the correct parser
            for you depending on the `format_spec` attribute in
            the HDF5 file. Alternatively, you can also provided either
            a format version (e.g. "v1") or a subclass of BaseH5Reader
            that is capable of reading neurons from the file.

TYPE: "auto" | str | subclass of BaseH5Reader DEFAULT: 'auto'

RETURNS DESCRIPTION
neurons

TYPE: navis.NeuronList

errors

If ret_errors=True return dictionary with errors: {id: "error"}.

TYPE: dict

Examples:

See navis.write_h5 for examples.

See Also

navis.write_h5: Write neurons to HDF5 file. navis.io.inspect_h5: Extract meta data (format, number of neurons, available annotations and representations) from an HDF5 file — useful if you don't know what's actually contained within the HDF5 file.

Source code in navis/io/hdf_io.py
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
def read_h5(filepath: str,
            read='mesh->skeleton->dotprops',
            subset=None,
            prefer_raw=False,
            annotations=True,
            strict=False,
            reader='auto',
            on_error='stop',
            ret_errors=False,
            parallel='auto') -> 'core.NeuronObject':
    """Read Neuron/List from Hdf5 file.

    This import is following the schema specified
    [here](https://github.com/flyconnectome/hnf)

    Parameters
    ----------
    filepath :          filepath
                        Path to HDF5 file.
    read :              str
                        The HDF5 file might contain skeleton, dotprops and/or
                        mesh representations for any given neuron. This
                        parameter determines which one are returned. Some
                        illustrative examples:

                          - 'mesh', 'skeleton' or 'dotprops' will return only
                            the given representation
                          - 'mesh->skeleton->dotprops' will return a mesh if the
                            neuron has one, a skeleton if it does not and
                            dotprops if it has neither mesh nor skeleton
                          - 'mesh,skeleton,dotprops' will return all available
                            representations
                          - 'mesh,dotprops' will only return meshes and dotprops
                          - 'mesh,skeleton->dotprops' will return the mesh
                            and a skeleton or alternatively the dotprops

                        Note that neurons which have none of the requested
                        representations are silently skipped!
    subset :            list of IDs | slice
                        If provided, will read only a subset of neurons from the
                        file. IDs that don't exist are silently ignored. Also
                        note that due to HDF5 restrictions numeric IDs will be
                        converted to strings.
    prefer_raw :        bool
                        If True and a neuron is saved as both serialized and
                        raw data, will load the neuron from the raw data.
    parallel :          "auto" | bool | int
                        Defaults to `auto` which means only use parallel
                        processing if more than 200 neurons are imported.
                        Spawning and joining processes causes overhead and is
                        considerably slower for imports of small numbers of
                        neurons. Integer will be interpreted as the
                        number of cores (otherwise defaults to
                        `os.cpu_count() - 2`).
    on_error :          "stop" | "warn" | "ignore"
                        What to do if a neuron can not be parsed: "stop" and
                        raise an exception, "warn" and keep going or silently
                        "ignore" and skip.
    ret_errors :        bool
                        If True, will also return a list of errors encountered
                        while parsing the neurons.

    Only relevant for raw data:

    annotations :       bool | str | list of str
                        Whether to load annotations associated with the
                        neuron(s):

                         - `True` reads all annotations
                         - `False` reads no annotations
                         - e.g. `["connectors"]` reads only "connectors"

                        Non-existing annotations are silently ignored!
    strict :            bool
                        If True, will read only the attributes/columns which
                        are absolutely required to construct the respective
                        neuron representation. This is useful if you either want
                        to keep memory usage low or if any additional attributes
                        are causing troubles. If False (default), will read
                        every attribute and dataframe column and attach it to
                        the neuron.
    reader :            "auto" | str | subclass of BaseH5Reader
                        Which reader to use to parse the given format. By
                        default ("auto") will try to pick the correct parser
                        for you depending on the `format_spec` attribute in
                        the HDF5 file. Alternatively, you can also provide either
                        a format version (e.g. "v1") or a subclass of BaseH5Reader
                        that is capable of reading neurons from the file.

    Returns
    -------
    neurons :           navis.NeuronList

    errors :            dict
                        If `ret_errors=True` return dictionary with errors:
                        `{id: "error"}`.

    Examples
    --------
    See [`navis.write_h5`][] for examples.


    See Also
    --------
    [`navis.write_h5`][]
                        Write neurons to HDF5 file.
    [`navis.io.inspect_h5`][]
                        Extract meta data (format, number of neurons,
                        available annotations and representations) from
                        HDF5 file. This is useful if you don't know what's
                        actually contained within the HDF5 file.

    """
    utils.eval_param(read, name='read', allowed_types=(str, ))
    utils.eval_param(on_error, name='on_error',
                     allowed_values=('stop', 'warn', 'ignore'))

    # Make sure the read string is "correct"
    for rep in read.split(','):
        rep = rep.strip()
        for prio in rep.split('->'):
            prio = prio.strip()
            if prio not in ('mesh', 'skeleton', 'dotprops'):
                raise ValueError(f'Unexpected representation in `read` parameter: {prio}')

    # Get info for this file
    filepath = os.path.expanduser(filepath)
    info = inspect_h5(filepath, inspect_neurons=True, inspect_annotations=False)

    # Resolve `reader` to an actual BaseH5Reader subclass
    if reader == 'auto':
        if info['format_spec'] is None:
            config.logger.warning(
                'No format specifier found in file, suggesting this file may not have '
                'been created using NAVis. We will try to read using the latest '
                'version of the schema. If this fails you may have to specify a reader '
                'or version manually (see the `reader` parameter).')
            reader = READERS['latest']
        elif info['format_spec'] not in READERS:
            raise TypeError(f'No reader for HDF5 format {info["format_spec"]}')
        else:
            # Fix: this lookup must only run when a format spec exists.
            # Previously it executed unconditionally, clobbering the
            # 'latest' fallback above and raising KeyError (READERS[None])
            # for files without a format specifier.
            reader = READERS[info['format_spec']]
    elif isinstance(reader, str):
        if reader not in READERS:
            raise TypeError(f'No reader for HDF5 format "{reader}"')
        reader = READERS[reader]
    elif not (isinstance(reader, type) and issubclass(reader, BaseH5Reader)):
        # Fix: `reader` is expected to be a *class* (it is instantiated as
        # `reader(filepath)` below), so we must check with issubclass -
        # the previous isinstance() check rejected valid reader classes.
        raise TypeError('If provided, the reader must be a subclass of '
                        f'BaseH5Reader - got "{type(reader)}"')

    # By default only use parallel if there are more than 200 neurons
    if parallel == 'auto':
        if len(info['neurons']) > 200:
            parallel = True
        else:
            parallel = False

    if not parallel:
        # This opens the file
        with reader(filepath) as r:
            # Fix: forward `prefer_raw` in the serial path too - the
            # parallel worker below already passes it along.
            nl, errors = r.read_neurons(subset=subset,
                                        read=read,
                                        strict=strict,
                                        prefer_raw=prefer_raw,
                                        on_error=on_error,
                                        annotations=annotations)
    else:
        # Do not swap this as `isinstance(True, int)` returns `True`
        if isinstance(parallel, (bool, str)):
            n_cores = os.cpu_count() - 2
        else:
            n_cores = int(parallel)

        # If subset not specified, fetch all neurons
        if isinstance(subset, type(None)):
            subset = list(info['neurons'])
        elif isinstance(subset, slice):
            subset = list(info['neurons'])[subset]
        else:
            # Make sure it's an iterable and strings
            subset = utils.make_iterable(subset).astype(str)

        # Just to leave note that I tried splitting the array into
        # `n_cores` chunks but that caused massive memory usage in the
        # spawned processes without being any faster - reading and returning
        # one neuron at a time seems to be the most efficient way
        # Fix: use the reader resolved above instead of re-resolving from
        # `info['format_spec']`, which discarded a user-supplied reader
        # and raised KeyError for files without a format spec.
        with mp.Pool(processes=n_cores) as pool:
            futures = pool.imap(_h5_reader_worker, [dict(reader=reader,
                                                         filepath=filepath,
                                                         read=read,
                                                         strict=strict,
                                                         prefer_raw=prefer_raw,
                                                         on_error=on_error,
                                                         annotations=annotations,
                                                         subset=[x]) for x in subset],
                                chunksize=1)

            # Wait for results and show progress bar althewhile
            # Do not close the pool before doing this
            res = list(config.tqdm(futures,
                                   desc='Reading',
                                   total=len(subset)))

        # Unpack results
        nl = []
        errors = {}
        for n, e in res:
            nl += n
            errors.update(e)

        # Warnings will not have propagated
        if on_error == 'warn':
            for e in errors:
                warnings.warn(f"Error reading neuron {e}: {errors[e]}")

    if ret_errors:
        return core.NeuronList(nl), errors
    else:
        return core.NeuronList(nl)

Load neuron from JSON (file or string).

PARAMETER DESCRIPTION
s
    Either filepath or JSON-formatted string.

TYPE: str

**kwargs
    Parameters passed to `json.loads()` and
    `pandas.DataFrame.read_json()`.

DEFAULT: {}

RETURNS DESCRIPTION
[`navis.NeuronList`][]
See Also

[navis.neuron2json][] Turn neuron into json.

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> js = navis.write_json(n, filepath=None)
>>> n2 = navis.read_json(js)
Source code in navis/io/json_io.py
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
def read_json(s: str, **kwargs) -> 'core.NeuronList':
    """Load neuron from JSON (file or string).

    Parameters
    ----------
    s :         str
                Either filepath or JSON-formatted string.
    **kwargs
                Parameters passed to `json.load()` (if `s` is a file) or
                `json.loads()` (if `s` is a JSON string).

    Returns
    -------
    [`navis.NeuronList`][]

    See Also
    --------
    [`navis.neuron2json`][]
                Turn neuron into json.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> js = navis.write_json(n, filepath=None)
    >>> n2 = navis.read_json(js)

    """
    if not isinstance(s, (str, Path)):
        raise TypeError(f'Expected str, got "{type(s)}"')

    # Try/except is necessary because Path() can throw an OSError if it is
    # given a very long JSON string as filepath; in that case we simply
    # treat the input as a JSON string rather than a file.
    try:
        is_file = Path(s).is_file()
    except OSError:
        is_file = False

    if is_file:
        with open(Path(s), 'r') as f:
            data = json.load(f, **kwargs)
    else:
        data = json.loads(s, **kwargs)

    nl = core.NeuronList([])

    for n in data:
        cn = core.TreeNeuron(None)

        # Node and connector tables are serialized as JSON strings;
        # a ValueError means the table was empty/invalid -> leave as None
        if '_nodes' in n:
            try:
                cn._nodes = pd.read_json(io.StringIO(n['_nodes']))
            except ValueError:
                cn._nodes = None

        if '_connectors' in n:
            try:
                cn._connectors = pd.read_json(io.StringIO(n['_connectors']))
            except ValueError:
                cn._connectors = None

        # All remaining entries are generic neuron attributes
        for key in n:
            if key in ['_nodes', '_connectors']:
                continue
            setattr(cn, key, n[key])

        nl += cn

    return nl

Load mesh file into Neuron/List.

This is a thin wrapper around trimesh.load_mesh which supports most commonly used formats (obj, ply, stl, etc.).

PARAMETER DESCRIPTION
f
            Filename(s) or folder. If folder should include file
            extension (e.g. `my/dir/*.ply`) otherwise all
            mesh files in the folder will be read.

TYPE: str | iterable

include_subdirs
            If True and `f` is a folder, will also search
            subdirectories for meshes.

TYPE: bool DEFAULT: False

parallel
            Defaults to `auto` which means only use parallel
            processing if more than 100 mesh files are imported.
            Spawning and joining processes causes overhead and is
            considerably slower for imports of small numbers of
            neurons. Integer will be interpreted as the number of
            cores (otherwise defaults to `os.cpu_count() - 2`).

TYPE: "auto" | bool | int, DEFAULT: 'auto'

output
            Determines function's output - see `Returns`.

TYPE: "neuron" | "volume" | "trimesh" DEFAULT: 'neuron'

errors
            If "log" or "ignore", errors will not be raised and the
            mesh will be skipped. Can result in empty output.

TYPE: "raise" | "log" | "ignore" DEFAULT: 'raise'

limit
            When reading from a folder or archive you can use this parameter to
            restrict which files are read:
             - if an integer, will read only the first `limit` mesh files
               (useful to get a sample from a large library of meshes)
             - if a string, will interpret it as filename (regex) pattern
               and only read files that match the pattern; e.g. `limit='.*_R.*'`
               will only read files that contain `_R` in their filename
             - if a slice (e.g. `slice(10, 20)`) will read only the files in
               that range
             - a list is expected to be a list of filenames to read from
               the folder/archive

TYPE: int | str | slice | list DEFAULT: None

**kwargs
            Keyword arguments passed to [`navis.MeshNeuron`][]
            or [`navis.Volume`][]. You can use this to e.g.
            set the units on the neurons.

DEFAULT: {}

RETURNS DESCRIPTION
MeshNeuron

If output="neuron" (default).

Volume

If output="volume".

Trimesh

If output="trimesh".

NeuronList

If output="neuron" and import has multiple meshes will return NeuronList of MeshNeurons.

list

If output!="neuron" and import has multiple meshes will return list of Volumes or Trimesh.

See Also

navis.read_precomputed Read meshes and skeletons from Neuroglancer's precomputed format.

Examples:

Read a single file into navis.MeshNeuron:

>>> m = navis.read_mesh('mesh.obj')

Read all e.g. .obj files in a directory:

>>> nl = navis.read_mesh('/some/directory/*.obj')

Sample first 50 files in folder:

>>> nl = navis.read_mesh('/some/directory/*.obj', limit=50)

Read single file into navis.Volume:

>>> nl = navis.read_mesh('mesh.obj', output='volume')
Source code in navis/io/mesh_io.py
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
def read_mesh(
    f: Union[str, Iterable],
    include_subdirs: bool = False,
    parallel: Union[bool, int] = "auto",
    output: Union[Literal["neuron"], Literal["volume"], Literal["trimesh"]] = "neuron",
    errors: Literal["raise", "log", "ignore"] = "raise",
    limit: Optional[int] = None,
    fmt: str = "{name}.",
    **kwargs,
) -> "core.NeuronObject":
    """Load mesh file into Neuron/List.

    This is a thin wrapper around `trimesh.load_mesh` which supports most
    commonly used formats (obj, ply, stl, etc.).

    Parameters
    ----------
    f :                 str | iterable
                        Filename(s) or folder. If folder should include file
                        extension (e.g. `my/dir/*.ply`) otherwise all
                        mesh files in the folder will be read.
    include_subdirs :   bool, optional
                        If True and `f` is a folder, will also search
                        subdirectories for meshes.
    parallel :          "auto" | bool | int,
                        Defaults to `auto` which means only use parallel
                        processing if more than 100 mesh files are imported.
                        Spawning and joining processes causes overhead and is
                        considerably slower for imports of small numbers of
                        neurons. Integer will be interpreted as the number of
                        cores (otherwise defaults to `os.cpu_count() - 2`).
    output :            "neuron" | "volume" | "trimesh"
                        Determines function's output - see `Returns`.
    errors :            "raise" | "log" | "ignore"
                        If "log" or "ignore", errors will not be raised and the
                        mesh will be skipped. Can result in empty output.
    limit :             int | str | slice | list, optional
                        When reading from a folder or archive you can use this parameter to
                        restrict which files are read:
                         - if an integer, will read only the first `limit` mesh files
                           (useful to get a sample from a large library of meshes)
                         - if a string, will interpret it as filename (regex) pattern
                           and only read files that match the pattern; e.g. `limit='.*_R.*'`
                           will only read files that contain `_R` in their filename
                         - if a slice (e.g. `slice(10, 20)`) will read only the files in
                           that range
                         - a list is expected to be a list of filenames to read from
                           the folder/archive
    fmt :               str
                        Formatter to specify how filenames are parsed into
                        neuron attributes; e.g. the default `"{name}."` uses
                        the filename (minus the suffix) as the neuron's name
                        property. See [`navis.read_nrrd`][] for illustrative
                        examples of the formatter syntax.
    **kwargs
                        Keyword arguments passed to [`navis.MeshNeuron`][]
                        or [`navis.Volume`][]. You can use this to e.g.
                        set the units on the neurons.

    Returns
    -------
    MeshNeuron
                        If `output="neuron"` (default).
    Volume
                        If `output="volume"`.
    Trimesh
                        If `output="trimesh"`.
    NeuronList
                        If `output="neuron"` and import has multiple meshes
                        will return NeuronList of MeshNeurons.
    list
                        If `output!="neuron"` and import has multiple meshes
                        will return list of Volumes or Trimesh.

    See Also
    --------
    [`navis.read_precomputed`][]
                        Read meshes and skeletons from Neuroglancer's precomputed format.

    Examples
    --------

    Read a single file into [`navis.MeshNeuron`][]:

    >>> m = navis.read_mesh('mesh.obj')                         # doctest: +SKIP

    Read all e.g. .obj files in a directory:

    >>> nl = navis.read_mesh('/some/directory/*.obj')           # doctest: +SKIP

    Sample first 50 files in folder:

    >>> nl = navis.read_mesh('/some/directory/*.obj', limit=50) # doctest: +SKIP

    Read single file into [`navis.Volume`][]:

    >>> nl = navis.read_mesh('mesh.obj', output='volume')       # doctest: +SKIP

    """
    # Validate `output` early so the user gets a clear error before any I/O
    utils.eval_param(
        output, name="output", allowed_values=("neuron", "volume", "trimesh")
    )

    reader = MeshReader(fmt=fmt, output=output, errors=errors, attrs=kwargs)
    return reader.read_any(f, include_subdirs, parallel, limit=limit)

Read xml-based NML files into Neuron/Lists.

PARAMETER DESCRIPTION
f
            Filename or folder. If folder, will import all `.nml`
            files.

TYPE: str

include_subdirs
            If True and `f` is a folder, will also search
            subdirectories for `.nml` files.

TYPE: bool DEFAULT: False

parallel
            Defaults to `auto` which means only use parallel
            processing if more than 200 files are imported. Spawning
            and joining processes causes overhead and is
            considerably slower for imports of small numbers of
            neurons. Integer will be interpreted as the
            number of cores (otherwise defaults to
            `os.cpu_count() // 2`).

TYPE: "auto" | bool | int DEFAULT: 'auto'

precision
            Precision for data. Defaults to 32 bit integers/floats.
            If `None` will let pandas infer data types - this
            typically leads to higher than necessary precision.

TYPE: int [8, 16, 32, 64] | None DEFAULT: 32

limit
            When reading from a folder or archive you can use this
            parameter to restrict which files are read:
             - if an integer, will read only the first `limit` NML files
               (useful to get a sample from a large library of skeletons)
             - if a string, will interpret it as filename (regex) pattern
               and only read files that match the pattern; e.g. `limit='.*_R.*'`
               will only read files that contain `_R` in their filename
             - if a slice (e.g. `slice(10, 20)`) will read only the files in
               that range
             - a list is expected to be a list of filenames to read from
               the folder/archive

TYPE: int | str | slice | list DEFAULT: None

**kwargs
            Keyword arguments passed to the construction of
            `navis.TreeNeuron`. You can use this to e.g. set
            meta data.

DEFAULT: {}

RETURNS DESCRIPTION
navis.NeuronList
See Also

navis.read_nmx Read NMX files (collections of NML files).

Source code in navis/io/nmx_io.py
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
def read_nml(
    f: Union[str, pd.DataFrame, Iterable],
    include_subdirs: bool = False,
    parallel: Union[bool, int] = "auto",
    precision: int = 32,
    limit: Optional[int] = None,
    **kwargs,
) -> "core.NeuronObject":
    """Read xml-based NML files into Neuron/Lists.

    Parameters
    ----------
    f :                 str
                        Filename or folder. If folder, will import all `.nml`
                        files.
    include_subdirs :   bool, optional
                        If True and `f` is a folder, will also search
                        subdirectories for `.nml` files.
    parallel :          "auto" | bool | int
                        Defaults to `auto` which means only use parallel
                        processing if more than 200 files are imported. Spawning
                        and joining processes causes overhead and is
                        considerably slower for imports of small numbers of
                        neurons. Integer will be interpreted as the
                        number of cores (otherwise defaults to
                        `os.cpu_count() // 2`).
    precision :         int [8, 16, 32, 64] | None
                        Precision for data. Defaults to 32 bit integers/floats.
                        If `None` will let pandas infer data types - this
                        typically leads to higher than necessary precision.
    limit :             int | str | slice | list, optional
                        When reading from a folder or archive you can use this
                        parameter to restrict which files are read:
                         - if an integer, will read only the first `limit` NML files
                           (useful to get a sample from a large library of skeletons)
                         - if a string, will interpret it as filename (regex) pattern
                           and only read files that match the pattern; e.g. `limit='.*_R.*'`
                           will only read files that contain `_R` in their filename
                         - if a slice (e.g. `slice(10, 20)`) will read only the files in
                           that range
                         - a list is expected to be a list of filenames to read from
                           the folder/archive
    **kwargs
                        Keyword arguments passed to the construction of
                        `navis.TreeNeuron`. You can use this to e.g. set
                        meta data.

    Returns
    -------
    navis.NeuronList

    See Also
    --------
    [`navis.read_nmx`][]
                        Read NMX files (collections of NML files).

    """
    reader = NMLReader(precision=precision, attrs=kwargs)
    # Read neurons (handles single files, folders, archives and URLs alike)
    neurons = reader.read_any(
        f, parallel=parallel, limit=limit, include_subdirs=include_subdirs
    )

    return neurons

Read NMX files into Neuron/Lists.

NMX is an xml-based format used by pyKNOSSOS. See e.g. here for a data dump of neurons from Wanner et al. (2016).

PARAMETER DESCRIPTION
f
            Filename or folder. If folder, will import all `.nmx`
            files.

TYPE: str

include_subdirs
            If True and `f` is a folder, will also search
            subdirectories for `.nmx` files.

TYPE: bool DEFAULT: False

parallel
            Defaults to `auto` which means only use parallel
            processing if more than 200 files are imported. Spawning
            and joining processes causes overhead and is
            considerably slower for imports of small numbers of
            neurons. Integer will be interpreted as the
            number of cores (otherwise defaults to
            `os.cpu_count() // 2`).

TYPE: "auto" | bool | int DEFAULT: 'auto'

precision
            Precision for data. Defaults to 32 bit integers/floats.
            If `None` will let pandas infer data types - this
            typically leads to higher than necessary precision.

TYPE: int [8, 16, 32, 64] | None DEFAULT: 32

limit
            When reading from a folder or archive you can use this parameter to
            restrict which files are read:
             - if an integer, will read only the first `limit` NMX files
               (useful to get a sample from a large library of skeletons)
             - if a string, will interpret it as filename (regex) pattern
               and only read files that match the pattern; e.g. `limit='.*_R.*'`
               will only read files that contain `_R` in their filename
             - if a slice (e.g. `slice(10, 20)`) will read only the files in
               that range
             - a list is expected to be a list of filenames to read from
               the folder/archive

TYPE: int | str | slice | list DEFAULT: None

errors
            If "log" or "ignore", errors will not be raised and the
            mesh will be skipped. Can result in empty output.

TYPE: "raise" | "log" | "ignore" DEFAULT: 'raise'

**kwargs
            Keyword arguments passed to the construction of
            `navis.TreeNeuron`. You can use this to e.g. set
            meta data.

DEFAULT: {}

RETURNS DESCRIPTION
navis.NeuronList
See Also

navis.read_nml Read NML file(s).

Source code in navis/io/nmx_io.py
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
def read_nmx(
    f: Union[str, pd.DataFrame, Iterable],
    include_subdirs: bool = False,
    parallel: Union[bool, int] = "auto",
    precision: int = 32,
    limit: Optional[int] = None,
    errors: str = "raise",
    **kwargs,
) -> "core.NeuronObject":
    """Read NMX files into Neuron/Lists.

    NMX is an xml-based format used by pyKNOSSOS.
    See e.g. [here](https://doi.org/10.5281/zenodo.58985) for a data dump
    of neurons from Wanner et al. (2016).

    Parameters
    ----------
    f :                 str
                        Filename or folder. If folder, will import all `.nmx`
                        files.
    include_subdirs :   bool, optional
                        If True and `f` is a folder, will also search
                        subdirectories for `.nmx` files.
    parallel :          "auto" | bool | int
                        Defaults to `auto` which means only use parallel
                        processing if more than 200 files are imported. Spawning
                        and joining processes causes overhead and is
                        considerably slower for imports of small numbers of
                        neurons. Integer will be interpreted as the
                        number of cores (otherwise defaults to
                        `os.cpu_count() // 2`).
    precision :         int [8, 16, 32, 64] | None
                        Precision for data. Defaults to 32 bit integers/floats.
                        If `None` will let pandas infer data types - this
                        typically leads to higher than necessary precision.
    limit :             int | str | slice | list, optional
                        When reading from a folder or archive you can use this parameter to
                        restrict which files are read:
                         - if an integer, will read only the first `limit` NMX files
                           (useful to get a sample from a large library of skeletons)
                         - if a string, will interpret it as filename (regex) pattern
                           and only read files that match the pattern; e.g. `limit='.*_R.*'`
                           will only read files that contain `_R` in their filename
                         - if a slice (e.g. `slice(10, 20)`) will read only the files in
                           that range
                         - a list is expected to be a list of filenames to read from
                           the folder/archive
    errors :            "raise" | "log" | "ignore"
                        If "log" or "ignore", errors will not be raised and the
                        file will be skipped. Can result in empty output.
    **kwargs
                        Keyword arguments passed to the construction of
                        `navis.TreeNeuron`. You can use this to e.g. set
                        meta data.

    Returns
    -------
    navis.NeuronList

    See Also
    --------
    [`navis.read_nml`][]
                        Read NML file(s).

    """
    reader = NMXReader(precision=precision, errors=errors, attrs=kwargs)
    # Read neurons
    neurons = reader.read_any(
        f, parallel=parallel, limit=limit, include_subdirs=include_subdirs
    )

    # Failed reads will produce empty neurons which we need to remove
    if isinstance(neurons, core.NeuronList):
        neurons = neurons[neurons.has_nodes]

    return neurons

Create Neuron/List from NRRD file.

See here for specs of NRRD file format including description of the headers.

PARAMETER DESCRIPTION
f
            Filename, folder or URL:
             - if folder, will import all `.nrrd` files
             - if a `.zip`, `.tar` or `.tar.gz` archive will read all
               NRRD files from the file
             - if a URL (http:// or https://), will download the
               file and import it
             - FTP address (ftp://) can point to a folder or a single
               file
            See also `limit` parameter to read only a subset of files.

TYPE: str | list thereof

output
            Determines function's output. See Returns for details.

TYPE: "voxels" | "dotprops" | "raw" DEFAULT: 'voxels'

threshold
            For `output='dotprops'` only: a threshold to filter
            low intensity voxels.
              - if `None`, all values > 0 are converted to points
              - if >=1, all values >= threshold are converted to points
              - if <1, all values >= threshold * max(data) are converted

TYPE: int | float | None DEFAULT: None

thin
            For `output='dotprops'` only: if True, will thin the
            point cloud using `skimage.morphology.skeletonize`
            after thresholding. Requires `scikit-image`.

TYPE: bool DEFAULT: False

include_subdirs
            If True and `f` is a folder, will also search
            subdirectories for `.nrrd` files.

TYPE: bool DEFAULT: False

parallel
            Defaults to `auto` which means only use parallel
            processing if more than 10 NRRD files are imported.
            Spawning and joining processes causes overhead and is
            considerably slower for imports of small numbers of
            neurons. Integer will be interpreted as the number of
            cores (otherwise defaults to `os.cpu_count() - 2`).

TYPE: "auto" | bool | int, DEFAULT: 'auto'

fmt
            Formatter to specify how filenames are parsed into neuron
            attributes. Some illustrative examples:
              - `{name}` (default) uses the filename
                (minus the suffix) as the neuron's name property
              - `{id}` (default) uses the filename as the neuron's ID
                property
              - `{name,id}` uses the filename as the neuron's
                name and ID properties
              - `{name}.{id}` splits the filename at a "."
                and uses the first part as name and the second as ID
              - `{name,id:int}` same as above but converts
                into integer for the ID
              - `{name}_{myproperty}` splits the filename at
                "_" and uses the first part as name and as a
                generic "myproperty" property
              - `{name}_{}_{id}` splits the filename at
                "_" and uses the first part as name and the last as
                ID. The middle part is ignored.

            Throws a ValueError if pattern can't be found in
            filename.

TYPE: str DEFAULT: '{name}.nrrd'

limit
            When reading from a folder or archive you can use this parameter to
            restrict which files are read:
             - if an integer, will read only the first `limit` NMX files
               (useful to get a sample from a large library of skeletons)
             - if a string, will interpret it as filename (regex) pattern
               and only read files that match the pattern; e.g. `limit='.*_R.*'`
               will only read files that contain `_R` in their filename
             - if a slice (e.g. `slice(10, 20)`) will read only the files in
               that range
             - a list is expected to be a list of filenames to read from
               the folder/archive

TYPE: int | str | slice | list DEFAULT: None

errors
            If "log" or "ignore", errors will not be raised and the
            mesh will be skipped. Can result in empty output.

TYPE: "raise" | "log" | "ignore" DEFAULT: 'raise'

**dotprops_kwargs
            Keyword arguments passed to [`navis.make_dotprops`][]
            if `output='dotprops'`. Use this to adjust e.g. the
            number of nearest neighbors used for calculating the
            tangent vector by passing e.g. `k=5`.

DEFAULT: {}

RETURNS DESCRIPTION
navis.VoxelNeuron

If output="voxels" (default): requires NRRD data to be 3-dimensional voxels. VoxelNeuron will have NRRD file header as .nrrd_header attribute.

navis.Dotprops

If output="dotprops": requires NRRD data to be either: - (N, M, K) (i.e. 3D) in which case we will turn voxels into a point cloud (see also threshold parameter) - (N, 3) = x/y/z points - (N, 6) = x/y/z points + x/y/z vectors - (N, 7) = x/y/z points + x/y/z vectors + alpha

Dotprops will contain NRRD header as .nrrd_header attribute.

navis.NeuronList

If import of multiple NRRD will return NeuronList of Dotprops/VoxelNeurons.

(image, header)(np.ndarray, OrderedDict)

If output='raw' return raw data contained in NRRD file.

Source code in navis/io/nrrd_io.py
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
def read_nrrd(
    f: Union[str, Iterable],
    output: Union[Literal["voxels"], Literal["dotprops"], Literal["raw"]] = "voxels",
    threshold: Optional[Union[int, float]] = None,
    thin: bool = False,
    include_subdirs: bool = False,
    parallel: Union[bool, int] = "auto",
    fmt: str = "{name}.nrrd",
    limit: Optional[int] = None,
    errors: str = "raise",
    **dotprops_kwargs,
) -> "core.NeuronObject":
    """Create Neuron/List from NRRD file.

    See [here](http://teem.sourceforge.net/nrrd/format.html) for specs of
    NRRD file format including description of the headers.

    Parameters
    ----------
    f :                 str | list thereof
                        Filename, folder or URL:
                         - if folder, will import all `.nrrd` files
                         - if a `.zip`, `.tar` or `.tar.gz` archive will read all
                           NRRD files from the file
                         - if a URL (http:// or https://), will download the
                           file and import it
                         - FTP address (ftp://) can point to a folder or a single
                           file
                        See also `limit` parameter to read only a subset of files.
    output :            "voxels" | "dotprops" | "raw"
                        Determines function's output. See Returns for details.
    threshold :         int | float | None
                        For `output='dotprops'` only: a threshold to filter
                        low intensity voxels.
                          - if `None`, all values > 0 are converted to points
                          - if >=1, all values >= threshold are converted to points
                          - if <1, all values >= threshold * max(data) are converted
    thin :              bool
                        For `output='dotprops'` only: if True, will thin the
                        point cloud using `skimage.morphology.skeletonize`
                        after thresholding. Requires `scikit-image`.
    include_subdirs :   bool, optional
                        If True and `f` is a folder, will also search
                        subdirectories for `.nrrd` files.
    parallel :          "auto" | bool | int,
                        Defaults to `auto` which means only use parallel
                        processing if more than 10 NRRD files are imported.
                        Spawning and joining processes causes overhead and is
                        considerably slower for imports of small numbers of
                        neurons. Integer will be interpreted as the number of
                        cores (otherwise defaults to `os.cpu_count() - 2`).
    fmt :               str
                        Formatter to specify how filenames are parsed into neuron
                        attributes. Some illustrative examples:
                          - `{name}` (default) uses the filename
                            (minus the suffix) as the neuron's name property
                          - `{id}` (default) uses the filename as the neuron's ID
                            property
                          - `{name,id}` uses the filename as the neuron's
                            name and ID properties
                          - `{name}.{id}` splits the filename at a "."
                            and uses the first part as name and the second as ID
                          - `{name,id:int}` same as above but converts
                            into integer for the ID
                          - `{name}_{myproperty}` splits the filename at
                            "_" and uses the first part as name and as a
                            generic "myproperty" property
                          - `{name}_{}_{id}` splits the filename at
                            "_" and uses the first part as name and the last as
                            ID. The middle part is ignored.

                        Throws a ValueError if pattern can't be found in
                        filename.
    limit :             int | str | slice | list, optional
                        When reading from a folder or archive you can use this parameter to
                        restrict which files are read:
                         - if an integer, will read only the first `limit` NRRD files
                           (useful to get a sample from a large library of skeletons)
                         - if a string, will interpret it as filename (regex) pattern
                           and only read files that match the pattern; e.g. `limit='.*_R.*'`
                           will only read files that contain `_R` in their filename
                         - if a slice (e.g. `slice(10, 20)`) will read only the files in
                           that range
                         - a list is expected to be a list of filenames to read from
                           the folder/archive
    errors :            "raise" | "log" | "ignore"
                        If "log" or "ignore", errors will not be raised and the
                        mesh will be skipped. Can result in empty output.
    **dotprops_kwargs
                        Keyword arguments passed to [`navis.make_dotprops`][]
                        if `output='dotprops'`. Use this to adjust e.g. the
                        number of nearest neighbors used for calculating the
                        tangent vector by passing e.g. `k=5`.

    Returns
    -------
    navis.VoxelNeuron
                        If `output="voxels"` (default): requires NRRD data to
                        be 3-dimensional voxels. VoxelNeuron will have NRRD file
                        header as `.nrrd_header` attribute.
    navis.Dotprops
                        If `output="dotprops"`: requires NRRD data to be
                        either:
                          - `(N, M, K)` (i.e. 3D) in which case we will turn
                            voxels into a point cloud (see also `threshold`
                            parameter)
                          - `(N, 3)` = x/y/z points
                          - `(N, 6)` = x/y/z points + x/y/z vectors
                          - `(N, 7)` = x/y/z points + x/y/z vectors + alpha

                        Dotprops will contain NRRD header as `.nrrd_header`
                        attribute.
    navis.NeuronList
                        If import of multiple NRRD will return NeuronList of
                        Dotprops/VoxelNeurons.
    (image, header)     (np.ndarray, OrderedDict)
                        If `output='raw'` return raw data contained in NRRD
                        file.

    """
    # Fail early (before reading any data) if `thin=True` but the optional
    # scikit-image dependency is missing. The imported name is deliberately
    # unused here - the actual thinning happens downstream in the reader.
    if thin:
        try:
            from skimage.morphology import skeletonize
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "The 'thin' option requires 'scikit-image' to be installed:\n"
                "    pip install scikit-image -U"
            )

    utils.eval_param(
        output, name="output", allowed_values=("raw", "dotprops", "voxels")
    )

    if parallel == "auto":
        # Set a lower threshold of 10 on parallel processing for NRRDs (default is 200)
        parallel = ("auto", 10)

    # NOTE: the reader's keyword is `dotprop_kwargs` (singular) even though
    # this function collects them as `dotprops_kwargs`.
    reader = NrrdReader(
        output=output, threshold=threshold, thin=thin, fmt=fmt, errors=errors, dotprop_kwargs=dotprops_kwargs
    )
    return reader.read_any(f, include_subdirs, parallel, limit=limit)

Read parquet file into Neuron/List.

See here for format specifications.

PARAMETER DESCRIPTION
f
            File to be read.

TYPE: str

read_meta
            Whether to read neuron meta data stored in the parquet
            file (e.g. name or units). Defaults to True but can be
            switched off in case there are any issues.

TYPE: bool DEFAULT: True

limit
            If reading from a file containing multiple neurons you
            can use this parameter to read only the first `limit`
            neurons. Useful if wanting to get a sample from a large
            library of neurons.

TYPE: int DEFAULT: None

subset
            If the parquet file contains multiple neurons you can
            use this to select the IDs of the neurons to load. Only
            works if the parquet file actually contains multiple
            neurons.

TYPE: str | int | list thereof DEFAULT: None

RETURNS DESCRIPTION
navis.TreeNeuron / Dotprops

If parquet file contains a single neuron.

navis.NeuronList

If parquet file contains multiple neurons.

See Also

navis.write_parquet Export neurons as parquet files. navis.scan_parquet Scan parquet file for its contents.

Examples:

See navis.write_parquet for examples.

Source code in navis/io/pq_io.py
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
def read_parquet(f: Union[str, Path],
                 read_meta: bool = True,
                 limit: Optional[int] = None,
                 subset: Optional[List[Union[str, int]]] = None,
                 progress=True
                 ) -> 'core.NeuronObject':
    """Read parquet file into Neuron/List.

    See [here](https://github.com/navis-org/navis/blob/master/navis/io/pq_io.md)
    for format specifications.

    Parameters
    ----------
    f :                 str
                        File to be read.
    read_meta :         bool
                        Whether to read neuron meta data stored in the parquet
                        file (e.g. name or units). Defaults to True but can be
                        switched off in case there are any issues.
    limit :             int, optional
                        If reading from a file containing multiple neurons you
                        can use this parameter to read only the first `limit`
                        neurons. Useful if wanting to get a sample from a large
                        library of neurons.
    subset :            str | int | list thereof
                        If the parquet file contains multiple neurons you can
                        use this to select the IDs of the neurons to load. Only
                        works if the parquet file actually contains multiple
                        neurons.
    progress :          bool
                        Whether to show a progress bar while extracting neurons
                        from a multi-neuron file.

    Returns
    -------
    navis.TreeNeuron/Dotprops
                        If parquet file contains a single neuron.
    navis.NeuronList
                        If parquet file contains multiple neurons.

    See Also
    --------
    [`navis.write_parquet`][]
                        Export neurons as parquet files.
    [`navis.scan_parquet`][]
                        Scan parquet file for its contents.

    Examples
    --------
    See [`navis.write_parquet`][] for examples.

    """
    f = Path(f).expanduser()
    if not f.is_file():
        raise FileNotFoundError(f'File "{f}" does not exist.')

    try:
        import pyarrow.parquet as pq
    except ModuleNotFoundError:
        raise ModuleNotFoundError(
            'Reading parquet files requires the pyarrow library:\n'
            ' pip3 install pyarrow')

    if limit is not None:
        # Use identity checks here: `subset` may be an array/Series for which
        # `subset not in (None, False)` would trigger an ambiguous elementwise
        # comparison.
        if subset is not None and subset is not False:
            raise ValueError('You can provide either a `subset` or a `limit` but '
                             'not both.')
        scan = scan_parquet(f)
        subset = scan.id.values[:limit]

    if isinstance(subset, pd.Series):
        subset = subset.values

    # Read the table
    if subset is None or subset is False:
        table = pq.read_table(f)
    elif isinstance(subset, (str, int)):
        table = pq.read_table(f, filters=[("neuron", "=", subset)])
    elif isinstance(subset, (list, np.ndarray)):
        table = pq.read_table(f, filters=[("neuron", "in", subset)])
    else:
        raise TypeError(f'`subset` must be int, str or iterable, got "{type(subset)}"')

    # Extract meta data (will be byte encoded)
    if read_meta:
        metadata = {k.decode(): v.decode() for k, v in table.schema.metadata.items()}
    else:
        metadata = {}

    # Extract neuron meta data once here instead of for every neuron individually
    # Meta data is encoded as {"{ID}:{PROPERTY}": VALUE}
    # Here we pre-emptively turn this into {(ID, PROPERTY): VALUE}
    # Note that we're dropping "private" properties where the key starts with "_"
    neuron_meta = {tuple(k.split(':')): v for k, v in metadata.items() if not k.startswith('_')}

    # Convert to pandas
    table = table.to_pandas()

    # Check if we're doing skeletons or dotprops
    if 'node_id' in table.columns:
        _extract_neuron = _extract_skeleton
    elif 'vect_x' in table.columns:
        _extract_neuron = _extract_dotprops
    else:
        raise TypeError('Unable to extract neuron from parquet file with '
                        f'columns {table.columns}')

    # If this is a single neuron
    if 'neuron' not in table.columns:
        # `neuron_meta` keys are (ID, PROPERTY) tuples - recover the neuron's
        # ID from its "id" property. Note: must search `neuron_meta` (tuple
        # keys), not `metadata` (string keys), where `k[1]` would be a single
        # character and the lookup could never match.
        ids = [v for k, v in neuron_meta.items() if len(k) == 2 and k[1] == 'id']
        id = ids[0] if ids else '0'  # <-- generic ID as fallback if we don't have metadata
        return _extract_neuron(table, id, neuron_meta)
    else:
        neurons = []
        # Note: this could be done in threads
        for i, (id, this_table) in enumerate(config.tqdm(table.groupby('neuron'),
                                             disable=not progress,
                                             leave=False,
                                             desc='Making nrn')):
            this_table = this_table.drop("neuron", axis=1)
            neurons.append(_extract_neuron(this_table, id, neuron_meta))
        return core.NeuronList(neurons)

Read skeletons and meshes from neuroglancer's precomputed format.

Follows the formats specified here.

PARAMETER DESCRIPTION
f
            Filename, folder or bytes. If folder, will import all
            files. If a `.zip`, `.tar` or `.tar.gz` file will
            read all files in the archive. See also `limit` parameter.

TYPE: filepath | folder | zip file | bytes

datatype
            Which data type we expect to read from the files. If
            "auto", we require a "info" file in the same directory
            as `f`.

TYPE: "auto" | "skeleton" | "mesh" DEFAULT: 'auto'

include_subdirs
            If True and `f` is a folder, will also search
            subdirectories for binary files.

TYPE: bool DEFAULT: False

fmt
            Formatter to specify how filenames are parsed into neuron
            attributes. Some illustrative examples:
              - `{name}` (default) uses the filename
                (minus the suffix) as the neuron's name property
              - `{id}` (default) uses the filename as the neuron's ID
                property
              - `{name,id}` uses the filename as the neuron's
                name and ID properties
              - `{name}.{id}` splits the filename at a "."
                and uses the first part as name and the second as ID
              - `{name,id:int}` same as above but converts
                into integer for the ID
              - `{name}_{myproperty}` splits the filename at
                "_" and uses the first part as name and as a
                generic "myproperty" property
              - `{name}_{}_{id}` splits the filename at
                "_" and uses the first part as name and the last as
                ID. The middle part is ignored.

            Throws a ValueError if pattern can't be found in
            filename.

TYPE: str DEFAULT: '{id}'

info
            An info file describing the data:
              - `True` = will look for `info` file in base folder
              - `False` = do not use/look for `info` file
              - `str` = filepath to `info` file
              - `dict` = already parsed info file

TYPE: bool | str | dict DEFAULT: True

limit
            When reading from a folder or archive you can use this parameter to
             restrict which files are read:
             - if an integer, will read only the first `limit` files
               (useful to get a sample from a large library of neurons)
             - if a string, will interpret it as filename (regex) pattern
               and only read files that match the pattern; e.g. `limit='.*_R.*'`
               will only read files that contain `_R` in their filename
             - if a slice (e.g. `slice(10, 20)`) will read only the files in
               that range
             - a list is expected to be a list of filenames to read from
               the folder/archive

TYPE: int | str | slice | list DEFAULT: None

parallel
            Defaults to `auto` which means only use parallel
            processing if more than 200 files are imported. Spawning
            and joining processes causes overhead and is
            considerably slower for imports of small numbers of
            neurons. Integer will be interpreted as the
            number of cores (otherwise defaults to
            `os.cpu_count() // 2`).

TYPE: "auto" | bool | int DEFAULT: 'auto'

errors
            If "log" or "ignore", errors will not be raised and the
            mesh will be skipped. Can result in empty output.

TYPE: "raise" | "log" | "ignore" DEFAULT: 'raise'

**kwargs
            Keyword arguments passed to the construction of the
            neurons. You can use this to e.g. set meta data such
            as `units`.

DEFAULT: {}

RETURNS DESCRIPTION
navis.MeshNeuron
navis.NeuronList
See Also

navis.write_precomputed Export neurons/volumes to precomputed format. navis.read_mesh Read common mesh formats (obj, stl, etc).

Source code in navis/io/precomputed_io.py
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
def read_precomputed(
    f: Union[str, io.BytesIO],
    datatype: Union[Literal["auto"], Literal["mesh"], Literal["skeleton"]] = "auto",
    include_subdirs: bool = False,
    fmt: str = "{id}",
    info: Union[bool, str, dict] = True,
    limit: Optional[int] = None,
    parallel: Union[bool, int] = "auto",
    errors: Literal["raise", "log", "ignore"] = "raise",
    **kwargs,
) -> "core.NeuronObject":
    """Read skeletons and meshes from neuroglancer's precomputed format.

    Follows the formats specified
    [here](https://github.com/google/neuroglancer/tree/master/src/neuroglancer/datasource/precomputed).

    Parameters
    ----------
    f :                 filepath | folder | zip file | bytes
                        Filename, folder or bytes. If folder, will import all
                        files. If a `.zip`, `.tar` or `.tar.gz` file will
                        read all files in the archive. See also `limit` parameter.
    datatype :          "auto" | "skeleton" | "mesh"
                        Which data type we expect to read from the files. If
                        "auto", we require a "info" file in the same directory
                        as `f`.
    include_subdirs :   bool, optional
                        If True and `f` is a folder, will also search
                        subdirectories for binary files.
    fmt :               str
                        Formatter to specify how filenames are parsed into neuron
                        attributes. Some illustrative examples:
                          - `{name}` uses the filename
                            (minus the suffix) as the neuron's name property
                          - `{id}` (default) uses the filename as the neuron's ID
                            property
                          - `{name,id}` uses the filename as the neuron's
                            name and ID properties
                          - `{name}.{id}` splits the filename at a "."
                            and uses the first part as name and the second as ID
                          - `{name,id:int}` same as above but converts
                            into integer for the ID
                          - `{name}_{myproperty}` splits the filename at
                            "_" and uses the first part as name and as a
                            generic "myproperty" property
                          - `{name}_{}_{id}` splits the filename at
                            "_" and uses the first part as name and the last as
                            ID. The middle part is ignored.

                        Throws a ValueError if pattern can't be found in
                        filename.
    info :              bool | str | dict
                        An info file describing the data:
                          - `True` = will look for `info` file in base folder
                          - `False` = do not use/look for `info` file
                          - `str` = filepath to `info` file
                          - `dict` = already parsed info file
    limit :             int | str | slice | list, optional
                        When reading from a folder or archive you can use this parameter to
                        restrict which files are read:
                         - if an integer, will read only the first `limit` files
                           (useful to get a sample from a large library of neurons)
                         - if a string, will interpret it as filename (regex) pattern
                           and only read files that match the pattern; e.g. `limit='.*_R.*'`
                           will only read files that contain `_R` in their filename
                         - if a slice (e.g. `slice(10, 20)`) will read only the files in
                           that range
                         - a list is expected to be a list of filenames to read from
                           the folder/archive
    parallel :          "auto" | bool | int
                        Defaults to `auto` which means only use parallel
                        processing if more than 200 files are imported. Spawning
                        and joining processes causes overhead and is
                        considerably slower for imports of small numbers of
                        neurons. Integer will be interpreted as the
                        number of cores (otherwise defaults to
                        `os.cpu_count() // 2`).
    errors :            "raise" | "log" | "ignore"
                        If "log" or "ignore", errors will not be raised and the
                        mesh will be skipped. Can result in empty output.
    **kwargs
                        Keyword arguments passed to the construction of the
                        neurons. You can use this to e.g. set meta data such
                        as `units`.

    Returns
    -------
    navis.MeshNeuron
    navis.NeuronList

    See Also
    --------
    [`navis.write_precomputed`][]
                        Export neurons/volumes to precomputed format.
    [`navis.read_mesh`][]
                        Read common mesh formats (obj, stl, etc).

    """
    utils.eval_param(
        datatype, name="datatype", allowed_values=("skeleton", "mesh", "auto")
    )

    # See if we can get the info file from somewhere
    if info is True and not isinstance(f, bytes):
        # Find info in zip archive
        if str(f).endswith(".zip"):
            # Renamed locals: avoid shadowing the builtin `zip` and the
            # parameter `f` inside the comprehension.
            with ZipFile(Path(f).expanduser(), "r") as archive:
                if "info" in [entry.filename for entry in archive.filelist]:
                    info = json.loads(archive.read("info").decode())
                elif datatype == "auto":
                    raise ValueError(
                        "No `info` file found in zip file. Please "
                        "specify data type using the `datatype` "
                        "parameter."
                    )
        # Try loading info from URL
        elif utils.is_url(str(f)):
            base_url = "/".join(str(f).split("/")[:-1])
            info = _fetch_info_file(base_url, raise_missing=False)
        # Try loading info from parent path
        else:
            fp = Path(str(f))
            # Find first existing root
            while not fp.is_dir():
                fp = fp.parent
            fp = fp / "info"
            if fp.is_file():
                with open(fp, "r") as info_file:
                    info = json.load(info_file)

    # At this point we should have a dictionary - even if it's empty
    if not isinstance(info, dict):
        info = {}

    # Parse data type from info file (if required)
    if datatype == "auto":
        if "@type" not in info:
            raise ValueError(
                "Either no `info` file found or it does not specify "
                "a data type. Please provide data type using the "
                "`datatype` parameter."
            )

        if info.get("@type", None) == "neuroglancer_legacy_mesh":
            datatype = "mesh"
        elif info.get("@type", None) == "neuroglancer_skeletons":
            datatype = "skeleton"
        else:
            raise ValueError(
                'Data type specified in `info` file unknown: '
                f'{info.get("@type", None)}. Please provide data '
                'type using the `datatype` parameter.'
            )

    if isinstance(f, bytes):
        f = io.BytesIO(f)

    if datatype == "skeleton":
        # `info` is guaranteed to be a dict at this point (coerced above), so
        # no need to re-check here.
        reader = PrecomputedSkeletonReader(
            fmt=fmt, errors=errors, attrs=kwargs, info=info
        )
    else:
        reader = PrecomputedMeshReader(fmt=fmt, errors=errors, attrs=kwargs)

    return reader.read_any(f, include_subdirs, parallel, limit=limit)

Read objects from nat R data (.rda) file.

Currently supports parsing neurons, dotprops and mesh3d. Note that this is rather slow and I do not recommend doing this for large collections of neurons. For large scale conversion I recommend using the R interface (navis.interfaces.r, see online tutorials) via rpy2.

PARAMETER DESCRIPTION
f
            Filepath.

TYPE: str

combine
            What to do if there are multiple neuronlists contained
            in the RDA files. By default, we will combine them into
            a single NeuronList but you can also choose to keep them
            as separate neuronlists.

TYPE: bool

neurons_only
            Whether to only parse and return neurons and dotprops
            found in the RDA file.

TYPE: bool DEFAULT: True

**kwargs
            Keyword arguments passed to the construction of
            `Tree/MeshNeuron/Dotprops`. You can use this to e.g. set
            meta data.

DEFAULT: {}

RETURNS DESCRIPTION
navis.NeuronList

If combine=True and neurons_only=True returns a single NeuronList with the parsed neurons.

dict

If combine=False or neurons_only=False returns a dictionary with the original R object name as key and the parsed object as value.

Source code in navis/io/rda_io.py
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
def read_rda(f: str,
             combine: bool = True,
             neurons_only: bool = True,
             **kwargs) -> 'core.NeuronList':
    """Read objects from nat R data (.rda) file.

    Currently supports parsing neurons, dotprops and mesh3d. Note that this is
    rather slow and I do not recommend doing this for large collections of
    neurons. For large scale conversion I recommend using the R interface
    (`navis.interfaces.r`, see online tutorials) via `rpy2`.

    Parameters
    ----------
    f :                 str
                        Filepath.
    combine :           bool
                        What to do if there are multiple neuronlists contained
                        in the RDA files. By default, we will combine them into
                        a single NeuronList but you can also choose to keep them
                        as separate neuronlists.
    neurons_only :      bool
                        Whether to only parse and return neurons and dotprops
                        found in the RDA file.
    **kwargs
                        Keyword arguments passed to the construction of
                        `Tree/MeshNeuron/Dotprops`. You can use this to e.g. set
                        meta data.

    Returns
    -------
    navis.NeuronList
                        If `combine=True` and `neurons_only=True` returns
                        a single NeuronList with the parsed neurons.
    dict
                        If `combine=False` or `neurons_only=False` returns
                        a dictionary with the original R object name as key and
                        the parsed object as value.

    """
    # NOTE(review): `**kwargs` is documented but currently not forwarded to
    # the neuron constructors anywhere in this function - TODO confirm intent.

    # Parse the raw RDA file into rdata's intermediate representation
    parsed = rdata.parser.parse_file(f)

    # Convert to Python objects; CLASS_MAP_EXT tells `rdata` how to translate
    # the relevant R classes (e.g. neurons, dotprops, mesh3d)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        converted = rdata.conversion.convert(parsed, CLASS_MAP_EXT)

    # Some clean-up
    for k, v in converted.items():
        # Convert single neurons to neuronlist
        if isinstance(v, core.BaseNeuron):
            converted[k] = core.NeuronList(v)
        # Give volumes a name
        elif isinstance(v, core.Volume):
            converted[k].name = k

    if combine:
        # Pool all NeuronLists into a single one under the 'neurons' key
        nl = core.NeuronList([n for n in converted.values() if isinstance(n, core.NeuronList)])
        if nl:
            converted = {k: v for k, v in converted.items() if not isinstance(v, core.NeuronList)}
            converted['neurons'] = nl

    if neurons_only:
        if combine:
            # Use .get() to avoid a KeyError if the file contained no neurons
            # at all - in that case return an empty NeuronList instead
            converted = converted.get('neurons', core.NeuronList([]))
        else:
            converted = {k: v for k, v in converted.items() if isinstance(v, core.NeuronList)}

    return converted

Create Neuron/List from SWC file.

This import is following format specified here.

PARAMETER DESCRIPTION
f
            Filename, folder, SWC string, URL or DataFrame:
             - if folder, will import all `.swc` files
             - if a `.zip`, `.tar` or `.tar.gz` archive will read all
               SWC files from the file
             - if a URL (http:// or https://), will download the
               file and import it
             - FTP address (ftp://) can point to a folder or a single
               file
             - DataFrames are interpreted as a SWC tables
            See also `limit` parameter to read only a subset of files.

TYPE: str | pandas.DataFrame | list thereof

connector_labels
            If provided will extract connectors from SWC.
            Dictionary must map types to labels:
            `{'presynapse': 7, 'postsynapse': 8}`

TYPE: dict DEFAULT: {}

include_subdirs
            If True and `f` is a folder, will also search
            subdirectories for `.swc` files.

TYPE: bool DEFAULT: False

delimiter
            Delimiter to use. Passed to `pandas.read_csv`.

TYPE: str DEFAULT: ' '

parallel
            Whether to use parallel processes for reading:
             - "auto" (default): will use parallel processing if
                more than 200 SWCs are imported.
              - Integers will be interpreted as the number of
                processes to use. Defaults to `os.cpu_count() // 2`.
              - False will use a single process.
            Ignored for tar archives. Please note that spawning
            processes incurs an overhead and might not be faster
            for small numbers of files.

TYPE: "auto" | bool | int DEFAULT: 'auto'

precision
            Precision for data. Defaults to 32 bit integers/floats.
            If `None` will let pandas infer data types - this
            typically leads to higher than necessary precision.

TYPE: int [8, 16, 32, 64] | None DEFAULT: 32

fmt
            Formatter to specify how filenames are parsed into
            neuron attributes. Some illustrative examples:

              - `{name}.swc` (default) uses the filename
                (minus the suffix) as the neuron's name property
              - `{id}.swc` uses the filename as the neuron's ID
                property
              - `{name,id}.swc` uses the filename as the neuron's
                name and ID properties
              - `{name}.{id}.swc` splits the filename at a "."
                and uses the first part as name and the second as ID
              - `{name,id:int}.swc` same as above but converts
                into integer for the ID
              - `{name}_{myproperty}.swc` splits the filename at
                "_" and uses the first part as name and as a
                generic "myproperty" property
              - `{name}_{}_{id}.swc` splits the filename at
                "_" and uses the first part as name and the last as
                ID. The middle part is ignored.

            Throws a ValueError if pattern can't be found in
            filename. Ignored for DataFrames.

TYPE: str DEFAULT: '{name}.swc'

read_meta
            If True and SWC header contains a line with JSON-encoded
            meta data e.g. (`# Meta: {'id': 123}`), these data
            will be read as neuron properties. `fmt` still takes
            precedence. Will try to assign meta data directly as
            neuron attribute (e.g. `neuron.id`). Failing that
            (can happen for properties intrinsic to `TreeNeurons`),
            will add a `.meta` dictionary to the neuron.

TYPE: bool DEFAULT: True

limit
            When reading from a folder or archive you can use this parameter to
            restrict which files are read:
             - if an integer, will read only the first `limit` SWC files
              (useful to get a sample from a large library of skeletons)
             - if a string, will interpret it as filename (regex) pattern
               and only read files that match the pattern; e.g. `limit='.*_R.*'`
               will only read files that contain `_R` in their filename
             - if a slice (e.g. `slice(10, 20)`) will read only the files in
               that range
             - a list is expected to be a list of filenames to read from
               the folder/archive

TYPE: int | str | slice | list DEFAULT: None

errors
            If "log" or "ignore", errors will not be raised and the
            mesh will be skipped. Can result in empty output.

TYPE: "raise" | "log" | "ignore" DEFAULT: 'raise'

**kwargs
            Keyword arguments passed to the construction of
            `navis.TreeNeuron`. You can use this to e.g. set
            meta data.

DEFAULT: {}

RETURNS DESCRIPTION
navis.TreeNeuron

Contains SWC file header as .swc_header attribute.

navis.NeuronList

If import of multiple SWCs will return NeuronList of TreeNeurons.

See Also

navis.write_swc Export neurons as SWC files.

Examples:

Read a single file:

>>> s = navis.read_swc('skeleton.swc')

Read all .swc files in a directory:

>>> s = navis.read_swc('/some/directory/')

Read all .swc files in a zip archive:

>>> s = navis.read_swc('skeletons.zip')

Sample the first 100 SWC files in a zip archive:

>>> s = navis.read_swc('skeletons.zip', limit=100)

Read all SWC files in an FTP folder:

>>> s = navis.read_swc('ftp://server:port/path/to/swc/')
Source code in navis/io/swc_io.py
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
def read_swc(
    f: Union[str, pd.DataFrame, Iterable],
    connector_labels: Optional[Dict[str, Union[str, int]]] = None,
    soma_label: Union[str, int] = 1,
    include_subdirs: bool = False,
    delimiter: str = " ",
    parallel: Union[bool, int] = "auto",
    precision: int = 32,
    fmt: str = "{name}.swc",
    read_meta: bool = True,
    limit: Optional[Union[int, str, slice, list]] = None,
    errors: str = "raise",
    **kwargs,
) -> "core.NeuronObject":
    """Create Neuron/List from SWC file.

    This import is following format specified
    [here](http://www.neuronland.org/NLMorphologyConverter/MorphologyFormats/SWC/Spec.html).

    Parameters
    ----------
    f :                 str | pandas.DataFrame | list thereof
                        Filename, folder, SWC string, URL or DataFrame:
                         - if folder, will import all `.swc` files
                         - if a `.zip`, `.tar` or `.tar.gz` archive will read all
                           SWC files from the file
                         - if a URL (http:// or https://), will download the
                           file and import it
                         - FTP address (ftp://) can point to a folder or a single
                           file
                         - DataFrames are interpreted as a SWC tables
                        See also `limit` parameter to read only a subset of files.
    connector_labels :  dict, optional
                        If provided will extract connectors from SWC.
                        Dictionary must map types to labels:
                        `{'presynapse': 7, 'postsynapse': 8}`
    soma_label :        str | int
                        Label (i.e. SWC node type) used to identify the soma.
    include_subdirs :   bool, optional
                        If True and `f` is a folder, will also search
                        subdirectories for `.swc` files.
    delimiter :         str
                        Delimiter to use. Passed to `pandas.read_csv`.
    parallel :          "auto" | bool | int
                        Whether to use parallel processes for reading:
                         - "auto" (default): will use parallel processing if
                            more than 200 SWCs are imported.
                          - Integers will be interpreted as the number of
                            processes to use. Defaults to `os.cpu_count() // 2`.
                          - False will use a single process.
                        Ignored for tar archives. Please note that spawning
                        processes incurs an overhead and might not be faster
                        for small numbers of files.
    precision :         int [8, 16, 32, 64] | None
                        Precision for data. Defaults to 32 bit integers/floats.
                        If `None` will let pandas infer data types - this
                        typically leads to higher than necessary precision.
    fmt :               str
                        Formatter to specify how filenames are parsed into
                        neuron attributes. Some illustrative examples:

                          - `{name}.swc` (default) uses the filename
                            (minus the suffix) as the neuron's name property
                          - `{id}.swc` uses the filename as the neuron's ID
                            property
                          - `{name,id}.swc` uses the filename as the neuron's
                            name and ID properties
                          - `{name}.{id}.swc` splits the filename at a "."
                            and uses the first part as name and the second as ID
                          - `{name,id:int}.swc` same as above but converts
                            into integer for the ID
                          - `{name}_{myproperty}.swc` splits the filename at
                            "_" and uses the first part as name and as a
                            generic "myproperty" property
                          - `{name}_{}_{id}.swc` splits the filename at
                            "_" and uses the first part as name and the last as
                            ID. The middle part is ignored.

                        Throws a ValueError if pattern can't be found in
                        filename. Ignored for DataFrames.
    read_meta :         bool
                        If True and SWC header contains a line with JSON-encoded
                        meta data e.g. (`# Meta: {'id': 123}`), these data
                        will be read as neuron properties. `fmt` still takes
                        precedence. Will try to assign meta data directly as
                        neuron attribute (e.g. `neuron.id`). Failing that
                        (can happen for properties intrinsic to `TreeNeurons`),
                        will add a `.meta` dictionary to the neuron.
    limit :             int | str | slice | list, optional
                        When reading from a folder or archive you can use this
                        parameter to restrict which files are read:
                         - if an integer, will read only the first `limit` SWC files
                          (useful to get a sample from a large library of skeletons)
                         - if a string, will interpret it as filename (regex) pattern
                           and only read files that match the pattern; e.g. `limit='.*_R.*'`
                           will only read files that contain `_R` in their filename
                         - if a slice (e.g. `slice(10, 20)`) will read only the files in
                           that range
                         - a list is expected to be a list of filenames to read from
                           the folder/archive
    errors :            "raise" | "log" | "ignore"
                        If "log" or "ignore", errors will not be raised and the
                        mesh will be skipped. Can result in empty output.
    **kwargs
                        Keyword arguments passed to the construction of
                        `navis.TreeNeuron`. You can use this to e.g. set
                        meta data.

    Returns
    -------
    navis.TreeNeuron
                        Contains SWC file header as `.swc_header` attribute.
    navis.NeuronList
                        If import of multiple SWCs will return NeuronList of
                        TreeNeurons.

    See Also
    --------
    [`navis.write_swc`][]
                        Export neurons as SWC files.

    Examples
    --------

    Read a single file:

    >>> s = navis.read_swc('skeleton.swc')                      # doctest: +SKIP

    Read all .swc files in a directory:

    >>> s = navis.read_swc('/some/directory/')                  # doctest: +SKIP

    Read all .swc files in a zip archive:

    >>> s = navis.read_swc('skeletons.zip')                     # doctest: +SKIP

    Sample the first 100 SWC files in a zip archive:

    >>> s = navis.read_swc('skeletons.zip', limit=100)          # doctest: +SKIP

    Read all SWC files in an FTP folder:

    >>> s = navis.read_swc('ftp://server:port/path/to/swc/')    # doctest: +SKIP

    """
    # Avoid the mutable-default-argument pitfall: `{}` as default would be
    # shared across calls. `None` is normalized to an empty dict here.
    if connector_labels is None:
        connector_labels = {}

    # SwcReader will try its best to read whatever you throw at it - with limited
    # sanity checks. For example: if you misspell a filepath, it will assume
    # that it's a SWC string (because anything that's a string but doesn't
    # point to an existing file or a folder MUST be a SWC) which will lead to
    # strange error messages.
    # The easiest fix is to implement a small sanity check here:
    if isinstance(f, str) and "\n" not in f and not utils.is_url(f):
        # If this looks like a path
        p = Path(f).expanduser()
        if not p.is_dir() and not p.is_file():
            raise FileNotFoundError(
                f'"{f}" looks like a directory or filepath '
                "but does not appear to exist."
            )

    reader = SwcReader(
        connector_labels=connector_labels,
        soma_label=soma_label,
        delimiter=delimiter,
        precision=precision,
        read_meta=read_meta,
        fmt=fmt,
        errors=errors,
        attrs=kwargs,
    )
    res = reader.read_any(f, include_subdirs, parallel, limit=limit)

    # Collect meta data keys that could not be attached directly as neuron
    # attributes (the reader stashes those in a `.meta` dict instead)
    failed = []
    for n in core.NeuronList(res):
        if not hasattr(n, "meta"):
            continue
        failed += list(n.meta.keys())

    if failed:
        logger.warning(
            "Some meta data could not be directly attached to the "
            "neuron(s) - probably some clash with intrinsic "
            "properties. You can find these data attached as "
            "`.meta` dictionary."
        )

    return res

Create Neuron/List from TIFF file.

Requires tifffile library which is not automatically installed!

PARAMETER DESCRIPTION
f
            Filename(s) or folder. If folder, will import all
            `.tif` files.

TYPE: str | iterable

output
            Determines function's output. See Returns for details.

TYPE: "voxels" | "dotprops" | "raw" DEFAULT: 'voxels'

channel
            Which channel to import. Ignored if file has only one
            channel or when `output="raw"`. Can use e.g. -1 to
            get the last channel.

TYPE: int DEFAULT: 0

threshold
            For `output='dotprops'` only: a threshold to filter
            low intensity voxels. If `None`, no threshold is
            applied and all values > 0 are converted to points.

TYPE: int | float | None DEFAULT: None

thin
            For `output='dotprops'` only: if True, will thin the
            point cloud using `skimage.morphology.skeletonize`
            after thresholding. Requires `scikit-image`.

TYPE: bool DEFAULT: False

include_subdirs
            If True and `f` is a folder, will also search
            subdirectories for `.tif` files.

TYPE: bool DEFAULT: False

parallel
            Defaults to `auto` which means only use parallel
            processing if more than 10 TIFF files are imported.
            Spawning and joining processes causes overhead and is
            considerably slower for imports of small numbers of
            neurons. Integer will be interpreted as the number of
            cores (otherwise defaults to `os.cpu_count() - 2`).

TYPE: "auto" | bool | int, DEFAULT: 'auto'

fmt
            Formatter to specify how filenames are parsed into neuron
            attributes. Some illustrative examples:
              - `{name}` (default) uses the filename
                (minus the suffix) as the neuron's name property
              - `{id}` (default) uses the filename as the neuron's ID
                property
              - `{name,id}` uses the filename as the neuron's
                name and ID properties
              - `{name}.{id}` splits the filename at a "."
                and uses the first part as name and the second as ID
              - `{name,id:int}` same as above but converts
                into integer for the ID
              - `{name}_{myproperty}` splits the filename at
                "_" and uses the first part as name and as a
                generic "myproperty" property
              - `{name}_{}_{id}` splits the filename at
                "_" and uses the first part as name and the last as
                ID. The middle part is ignored.

            Throws a ValueError if pattern can't be found in
            filename.

TYPE: str DEFAULT: '{name}.tif'

limit
            When reading from a folder or archive you can use this parameter to
            restrict which files are read:
             - if an integer, will read only the first `limit` TIFF files
               (useful to get a sample from a large library of skeletons)
             - if a string, will interpret it as filename (regex) pattern
               and only read files that match the pattern; e.g. `limit='.*_R.*'`
               will only read files that contain `_R` in their filename
             - if a slice (e.g. `slice(10, 20)`) will read only the files in
               that range
             - a list is expected to be a list of filenames to read from
               the folder/archive

TYPE: int | str | slice | list DEFAULT: None

errors
            If "log" or "ignore", errors will not be raised and the
            mesh will be skipped. Can result in empty output.

TYPE: "raise" | "log" | "ignore" DEFAULT: 'raise'

**dotprops_kwargs
            Keyword arguments passed to [`navis.make_dotprops`][]
            if `output='dotprops'`. Use this to adjust e.g. the
            number of nearest neighbors used for calculating the
            tangent vector by passing e.g. `k=5`.

DEFAULT: {}

RETURNS DESCRIPTION
navis.VoxelNeuron

If output="voxels" (default): requires TIFF data to be 3-dimensional voxels. VoxelNeuron will have TIFF file info as .tiff_header attribute.

navis.Dotprops

If output="dotprops". Dotprops will contain TIFF header as .tiff_header attribute.

navis.NeuronList

If import of multiple TIFF will return NeuronList of Dotprops/VoxelNeurons.

(image, header)(np.ndarray, OrderedDict)

If output='raw' return raw data contained in TIFF file.

Source code in navis/io/tiff_io.py
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
def read_tiff(
    f: Union[str, Iterable],
    output: Union[Literal["voxels"], Literal["dotprops"], Literal["raw"]] = "voxels",
    channel: int = 0,
    threshold: Optional[Union[int, float]] = None,
    thin: bool = False,
    include_subdirs: bool = False,
    parallel: Union[str, bool, int] = "auto",
    fmt: str = "{name}.tif",
    limit: Optional[Union[int, str, slice, list]] = None,
    errors: str = "raise",
    **dotprops_kwargs,
) -> "core.NeuronObject":
    """Create Neuron/List from TIFF file.

    Requires `tifffile` library which is not automatically installed!

    Parameters
    ----------
    f :                 str | iterable
                        Filename(s) or folder. If folder, will import all
                        `.tif` files.
    output :            "voxels" | "dotprops" | "raw"
                        Determines function's output. See Returns for details.
    channel :           int
                        Which channel to import. Ignored if file has only one
                        channel or when `output="raw"`. Can use e.g. -1 to
                        get the last channel.
    threshold :         int | float | None
                        For `output='dotprops'` only: a threshold to filter
                        low intensity voxels. If `None`, no threshold is
                        applied and all values > 0 are converted to points.
    thin :              bool
                        For `output='dotprops'` only: if True, will thin the
                        point cloud using `skimage.morphology.skeletonize`
                        after thresholding. Requires `scikit-image`.
    include_subdirs :   bool, optional
                        If True and `f` is a folder, will also search
                        subdirectories for `.tif` files.
    parallel :          "auto" | bool | int,
                        Defaults to `auto` which means only use parallel
                        processing if more than 10 TIFF files are imported.
                        Spawning and joining processes causes overhead and is
                        considerably slower for imports of small numbers of
                        neurons. Integer will be interpreted as the number of
                        cores (otherwise defaults to `os.cpu_count() - 2`).
    fmt :               str
                        Formatter to specify how filenames are parsed into neuron
                        attributes. Some illustrative examples:
                          - `{name}` (default) uses the filename
                            (minus the suffix) as the neuron's name property
                          - `{id}` (default) uses the filename as the neuron's ID
                            property
                          - `{name,id}` uses the filename as the neuron's
                            name and ID properties
                          - `{name}.{id}` splits the filename at a "."
                            and uses the first part as name and the second as ID
                          - `{name,id:int}` same as above but converts
                            into integer for the ID
                          - `{name}_{myproperty}` splits the filename at
                            "_" and uses the first part as name and as a
                            generic "myproperty" property
                          - `{name}_{}_{id}` splits the filename at
                            "_" and uses the first part as name and the last as
                            ID. The middle part is ignored.

                        Throws a ValueError if pattern can't be found in
                        filename.
    limit :             int | str | slice | list, optional
                        When reading from a folder or archive you can use this
                        parameter to restrict which files are read:
                         - if an integer, will read only the first `limit` TIFF files
                           (useful to get a sample from a large library of skeletons)
                         - if a string, will interpret it as filename (regex) pattern
                           and only read files that match the pattern; e.g. `limit='.*_R.*'`
                           will only read files that contain `_R` in their filename
                         - if a slice (e.g. `slice(10, 20)`) will read only the files in
                           that range
                         - a list is expected to be a list of filenames to read from
                           the folder/archive
    errors :            "raise" | "log" | "ignore"
                        If "log" or "ignore", errors will not be raised and the
                        mesh will be skipped. Can result in empty output.

    **dotprops_kwargs
                        Keyword arguments passed to [`navis.make_dotprops`][]
                        if `output='dotprops'`. Use this to adjust e.g. the
                        number of nearest neighbors used for calculating the
                        tangent vector by passing e.g. `k=5`.

    Returns
    -------
    navis.VoxelNeuron
                        If `output="voxels"` (default): requires TIFF data to
                        be 3-dimensional voxels. VoxelNeuron will have TIFF file
                        info as `.tiff_header` attribute.
    navis.Dotprops
                        If `output="dotprops"`. Dotprops will contain TIFF
                        header as `.tiff_header` attribute.
    navis.NeuronList
                        If import of multiple TIFF will return NeuronList of
                        Dotprops/VoxelNeurons.
    (image, header)     (np.ndarray, OrderedDict)
                        If `output='raw'` return raw data contained in TIFF
                        file.

    """
    # `tifffile` is an optional dependency - fail early with install hint
    try:
        import tifffile
    except ModuleNotFoundError:
        raise ModuleNotFoundError(
            "`navis.read_tiff` requires the `tifffile` library:\n"
            "  pip3 install tifffile -U"
        )

    utils.eval_param(
        output, name="output", allowed_values=("raw", "dotprops", "voxels")
    )

    if parallel == "auto":
        # Set a lower threshold of 10 on parallel processing for TIFFs (default is 200)
        parallel = ("auto", 10)

    # NOTE(review): kwargs are forwarded under the singular name
    # `dotprop_kwargs` - presumably matching TiffReader's parameter; verify.
    reader = TiffReader(
        channel=channel,
        output=output,
        threshold=threshold,
        thin=thin,
        fmt=fmt,
        dotprop_kwargs=dotprops_kwargs,
        errors=errors,
    )
    return reader.read_any(f, include_subdirs, parallel, limit=limit)

Drop nodes from neuron without disconnecting it.

Dropping node 2 from 1->2->3 will lead to connectivity 1->3.

PARAMETER DESCRIPTION
x
    Neuron to remove nodes from.

TYPE: TreeNeuron

which
    IDs of nodes to remove.

TYPE: list of node IDs

inplace
    If True, will rewire the neuron inplace. If False, will return
    a rewired copy of the neuron.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
TreeNeuron

Examples:

Drop points from a neuron

>>> import navis
>>> n = navis.example_neurons(1)
>>> n.n_nodes
4465
>>> # Drop a hundred nodes
>>> n2 = navis.remove_nodes(n, n.nodes.node_id.values[100:200])
>>> n2.n_nodes
4365
Source code in navis/graph/graph_utils.py
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
def remove_nodes(
    x: "core.TreeNeuron", which: List[int], inplace: bool = False
) -> Optional["core.TreeNeuron"]:
    """Drop nodes from neuron without disconnecting it.

    Dropping node 2 from 1->2->3 will lead to connectivity 1->3.

    Parameters
    ----------
    x :         TreeNeuron
                Neuron to remove nodes from.
    which :     list of node IDs
                IDs of nodes to remove.
    inplace :   bool
                If True, will rewire the neuron inplace. If False, will return
                a rewired copy of the neuron.

    Returns
    -------
    TreeNeuron

    Raises
    ------
    ValueError
                If any of the given node IDs is not present in the neuron.

    Examples
    --------
    Drop points from a neuron

    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> n.n_nodes
    4465
    >>> # Drop a hundred nodes
    >>> n2 = navis.remove_nodes(n, n.nodes.node_id.values[100:200])
    >>> n2.n_nodes
    4365

    """
    utils.eval_param(x, name="x", allowed_types=(core.TreeNeuron,))

    if not utils.is_iterable(which):
        which = [which]
    which = np.asarray(which)

    # `miss` is a boolean mask over `which`; report the number of IDs that
    # are actually missing (mask sum), not the total number queried
    miss = ~np.isin(which, x.nodes.node_id.values)
    if np.any(miss):
        raise ValueError(f"{miss.sum()} node IDs not found in neuron")

    if not inplace:
        x = x.copy()

    # Generate new list of parents (node ID -> parent ID)
    lop = dict(zip(x.nodes.node_id.values, x.nodes.parent_id.values))

    # Rewire to skip the to-be-removed nodes: every child of a removed node
    # inherits that node's parent (works for chains of removed nodes because
    # the mapping is updated as we go)
    for n in which:
        lop.update({c: lop[n] for c, p in lop.items() if p == n})

    # Rewire neuron
    x.nodes["parent_id"] = x.nodes.node_id.map(lop)

    # Drop nodes
    x.nodes = x.nodes[~x.nodes.node_id.isin(which)].copy()

    # Clear temporary attributes (graph representations, etc.)
    x._clear_temp_attr()

    return x

Reroot neuron to new root.

PARAMETER DESCRIPTION
x
   List must contain only a SINGLE neuron.

TYPE: TreeNeuron | NeuronList

new_root
   Node ID(s) of node(s) to reroot to. If multiple new roots are
   provided, they will be rerooted in sequence.

TYPE: int | iterable

inplace
   If True the input neuron will be rerooted in place. If False will
   reroot and return a copy of the original.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
TreeNeuron

Rerooted neuron.

See Also

navis.TreeNeuron.reroot Quick access to reroot directly from TreeNeuron/List objects.

Examples:

>>> import navis
>>> n = navis.example_neurons(1, kind='skeleton')
>>> # Reroot neuron to its soma
>>> n2 = navis.reroot_skeleton(n, n.soma)
Source code in navis/graph/graph_utils.py
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
@utils.lock_neuron
def reroot_skeleton(
    x: "core.NeuronObject", new_root: Union[int, str], inplace: bool = False
) -> "core.TreeNeuron":
    """Reroot neuron to new root.

    Parameters
    ----------
    x :        TreeNeuron | NeuronList
               List must contain only a SINGLE neuron.
    new_root : int | iterable
               Node ID(s) of node(s) to reroot to. If multiple new roots are
               provided, they will be rerooted in sequence.
    inplace :  bool, optional
               If True the input neuron will be rerooted in place. If False will
               reroot and return a copy of the original.

    Returns
    -------
    TreeNeuron
               Rerooted neuron.

    See Also
    --------
    [`navis.TreeNeuron.reroot`][]
                Quick access to reroot directly from TreeNeuron/List
                objects.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1, kind='skeleton')
    >>> # Reroot neuron to its soma
    >>> n2 = navis.reroot_skeleton(n, n.soma)

    """
    if isinstance(x, core.NeuronList):
        if len(x) == 1:
            x = x[0]
        else:
            raise ValueError(f"Expected a single neuron, got {len(x)}")

    if not isinstance(x, core.TreeNeuron):
        raise ValueError(f'Unable to reroot object of type "{type(x)}"')

    # Make new root an iterable
    new_roots = utils.make_iterable(new_root)

    # Parse new roots
    for i, root in enumerate(new_roots):
        if root is None:
            raise ValueError("New root can not be <None>")

        # If new root is a tag, rather than a ID, try finding that node
        if isinstance(root, str):
            if x.tags is None:
                raise ValueError("Neuron does not have tags")

            if root not in x.tags:
                raise ValueError(
                    f"#{x.id}: Found no nodes with tag {root}" " - please double check!"
                )

            elif len(x.tags[root]) > 1:
                raise ValueError(
                    f"#{x.id}: Found multiple node with tag "
                    f"{root} - please double check!"
                )
            else:
                new_roots[i] = x.tags[root][0]

    # At this point x is TreeNeuron
    x: core.TreeNeuron
    # At this point new_roots is list of int
    new_roots: Iterable[int]

    if not inplace:
        # Make a copy
        x = x.copy()
        # Run this in a separate function so that the lock is applied to copy
        _ = reroot_skeleton(x, new_root=new_roots, inplace=True)
        return x

    # Keep track of node ID dtype
    nodeid_dtype = x.nodes.node_id.dtype

    # Go over each new root
    for new_root in new_roots:
        # Skip if new root is old root
        if any(x.root == new_root):
            continue

        if x.igraph and config.use_igraph:
            # Grab graph once to avoid overhead from stale checks
            g = x.igraph

            # Prevent warnings in the following code - querying paths between
            # unreachable nodes will otherwise generate a runtime warning
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")

                # Find paths to all roots
                path = g.get_shortest_paths(
                    g.vs.find(node_id=new_root), [g.vs.find(node_id=r) for r in x.root]
                )
                epath = g.get_shortest_paths(
                    g.vs.find(node_id=new_root),
                    [g.vs.find(node_id=r) for r in x.root],
                    output="epath",
                )

            # Extract paths that actually worked (i.e. within a continuous fragment)
            path = [p for p in path if p][0]
            epath = [p for p in epath if p][0]

            edges = [(s, t) for s, t in zip(path[:-1], path[1:])]

            weights = [g.es[e]["weight"] for e in epath]

            # Get all weights and append inversed new weights
            all_weights = g.es["weight"] + weights

            # Add inverse edges: old_root->new_root
            g.add_edges([(e[1], e[0]) for e in edges])

            # Re-set weights
            g.es["weight"] = all_weights

            # Remove new_root->old_root
            g.delete_edges(edges)

            # Get degree of old root for later categorisation
            old_root_deg = len(g.es.select(_target=path[-1]))

            # Translate path indices to node IDs
            ix2id = {
                ix: n
                for ix, n in zip(g.vs.indices, g.vs.get_attribute_values("node_id"))
            }
            path = [ix2id[i] for i in path]
        else:
            # Grab graph once to avoid overhead from stale checks
            g = x.graph
            # If this NetworkX graph is just an (immutable) view, turn it into a
            # full, independent graph
            nx_main_version = ".".join(nx.__version__.split(".")[:2])
            if float(nx_main_version) < 2.2:
                if isinstance(g, nx.classes.graphviews.ReadOnlyGraph):
                    x._graph_nx = g = nx.DiGraph(g)
            elif hasattr(g, "_NODE_OK"):
                x._graph_nx = g = nx.DiGraph(g)
            elif nx.is_frozen(g):
                x._graph_nx = g = nx.DiGraph(g)

            # Walk from new root to old root and remove edges along the way.
            # Note: `next(..., None)` uses None as the "no parent" sentinel, so
            # we must compare against None explicitly - `if not parent` would
            # wrongly treat a valid node ID of 0 as "no parent"
            parent = next(g.successors(new_root), None)
            if parent is None:
                # new_root is already the root
                continue

            path = [new_root]
            weights = []
            while parent is not None:
                weights.append(g[path[-1]][parent]["weight"])
                g.remove_edge(path[-1], parent)
                path.append(parent)
                parent = next(g.successors(parent), None)

            # Invert path and add weights
            new_edges = [
                (path[i + 1], path[i], {"weight": weights[i]})
                for i in range(len(path) - 1)
            ]

            # Add inverted path between old and new root
            g.add_edges_from(new_edges)

            # Get degree of old root for later categorisation
            old_root_deg = g.in_degree(path[-1])

        # Set index to node ID for later
        x.nodes.set_index("node_id", inplace=True)

        # Propagate changes in graph back to node table
        # Assign new node type to old root
        x.nodes.loc[path[1:], "parent_id"] = path[:-1]
        if old_root_deg == 1:
            x.nodes.loc[path[-1], "type"] = "slab"
        elif old_root_deg > 1:
            x.nodes.loc[path[-1], "type"] = "branch"
        else:
            x.nodes.loc[path[-1], "type"] = "end"
        # Make new root node type "root"
        x.nodes.loc[path[0], "type"] = "root"

        # Set new root's parent to None
        x.nodes.loc[new_root, "parent_id"] = -1

        # Reset index
        x.nodes.reset_index(drop=False, inplace=True)

    # Make sure node ID has the same datatype as before
    if x.nodes.node_id.dtype != nodeid_dtype:
        x.nodes["node_id"] = x.nodes.node_id.astype(nodeid_dtype, copy=False)

    # Finally: only reset non-graph related attributes
    if x.igraph and config.use_igraph:
        x._clear_temp_attr(exclude=["igraph", "classify_nodes"])
    else:
        x._clear_temp_attr(exclude=["graph", "classify_nodes"])

    return x

Resample neuron such that nodes lie exactly on given 1d grid.

This function does not simply snap nodes to the closest grid line but instead adds new nodes where edges between existing nodes intersect with the planes defined by the grid.

PARAMETER DESCRIPTION
x
        Neuron(s) to resample.

TYPE: TreeNeuron | NeuronList

interval
        Intervals defining a 1-dimensional grid along given axes
        (see examples). If neuron(s) have `.units` set, you can also
        pass a string such as "50 nm".

TYPE: float | int | str

axis
        Along which axes (x/y/z) to resample.

TYPE: 0 | 1 | 2 DEFAULT: 2

old_nodes
        Existing nodes are unlikely to intersect with the planes as
        defined by the grid interval. There are three possible ways
        to deal with them:
         - "remove" (default) will simply drop old nodes: this
           guarantees all remaining nodes will lie on a plane
         - "keep" will keep old nodes without changing them
         - "snap" will snap those nodes to the closest coordinate
           on the grid without interpolation

TYPE: "remove" | "keep" | "snap" DEFAULT: 'remove'

inplace
        If False, will resample and return a copy of the original. If
        True, will resample input neuron in place.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
TreeNeuron / List

The resampled neuron(s).

See Also

navis.resample_skeleton Resample neuron such that edges between nodes have a given length. navis.downsample_neuron This function reduces the number of nodes instead of resample to certain resolution. Useful if you are just after some simplification e.g. for speeding up your calculations or you want to preserve node IDs.

Examples:

Resample neuron such that we have one node in every 40nm slice along z axis

>>> import navis
>>> n = navis.example_neurons(1)
>>> n.n_nodes
4465
>>> res = navis.resample_along_axis(n, interval='40 nm',
...                                 axis=2, old_nodes='remove')
>>> res.n_nodes < n.n_nodes
True
Source code in navis/sampling/resampling.py
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
@utils.map_neuronlist(desc='Binning', allow_parallel=True)
def resample_along_axis(x: 'core.TreeNeuron',
                        interval: Union[int, float, str],
                        axis: int = 2,
                        old_nodes: Union[Literal['remove'],
                                         Literal['keep'],
                                         Literal['snap']] = 'remove',
                        inplace: bool = False
                        ) -> Optional['core.TreeNeuron']:
    """Resample neuron such that nodes lie exactly on given 1d grid.

    This function does not simply snap nodes to the closest grid line but
    instead adds new nodes where edges between existing nodes intersect
    with the planes defined by the grid.

    Parameters
    ----------
    x :             TreeNeuron | NeuronList
                    Neuron(s) to resample.
    interval :      float | int | str
                    Intervals defining a 1-dimensional grid along given axes
                    (see examples). If neuron(s) have `.units` set, you can also
                    pass a string such as "50 nm".
    axis :          0 | 1 | 2
                    Along which axes (x/y/z) to resample.
    old_nodes :     "remove" | "keep" | "snap"
                    Existing nodes are unlikely to intersect with the planes as
                    defined by the grid interval. There are three possible ways
                    to deal with them:
                     - "remove" (default) will simply drop old nodes: this
                       guarantees all remaining nodes will lie on a plane
                     - "keep" will keep old nodes without changing them
                     - "snap" will snap those nodes to the closest coordinate
                       on the grid without interpolation

    inplace :       bool
                    If False, will resample and return a copy of the original. If
                    True, will resample input neuron in place.

    Returns
    -------
    TreeNeuron/List
                    The resampled neuron(s).

    See Also
    --------
    [`navis.resample_skeleton`][]
                        Resample neuron such that edges between nodes have a
                        given length.
    [`navis.downsample_neuron`][]
                        This function reduces the number of nodes instead of
                        resample to certain resolution. Useful if you are
                        just after some simplification e.g. for speeding up
                        your calculations or you want to preserve node IDs.

    Examples
    --------
    Resample neuron such that we have one node in every 40nm slice along z axis

    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> n.n_nodes
    4465
    >>> res = navis.resample_along_axis(n, interval='40 nm',
    ...                                 axis=2, old_nodes='remove')
    >>> res.n_nodes < n.n_nodes
    True

    """
    utils.eval_param(axis, name='axis', allowed_values=(0, 1, 2))
    utils.eval_param(old_nodes, name='old_nodes',
                     allowed_values=("remove", "keep", "snap"))
    utils.eval_param(x, name='x', allowed_types=(core.TreeNeuron, ))

    interval = x.map_units(interval, on_error='raise')

    if not inplace:
        x = x.copy()

    # Collect coordinates of nodes and their parents (root nodes have no
    # parent edge and are excluded)
    nodes = x.nodes
    not_root = nodes.loc[nodes.parent_id >= 0]
    node_locs = not_root[['x', 'y', 'z']].values
    parent_locs = nodes.set_index('node_id').loc[not_root.parent_id.values,
                                                 ['x', 'y', 'z']].values

    # Get all vectors
    vecs = parent_locs - node_locs

    # Get coordinates along this axis
    loc1 = node_locs[:, axis]
    loc2 = parent_locs[:, axis]

    # This prevents runtime warnings e.g. from division by zero
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Find out which grid interval these are on
        int1 = (loc1 / interval).astype(int)
        int2 = (loc2 / interval).astype(int)

        # Difference in bin between both locs; `sign` is NaN where diff == 0
        # but those rows are filtered out below
        diff = int2 - int1
        sign = diff / np.abs(diff)

        # Figure out by how far we are from the gridline
        dist = np.zeros(diff.shape[0])
        dist[diff < 0] = loc1[diff < 0] % interval
        dist[diff > 0] = -loc1[diff > 0] % interval

        # Now we need to calculate the new position
        # Get other axes
        other_axes = list({0, 1, 2} - {axis})
        # Normalize other vectors by this vector
        other_vecs_norm = vecs[:, other_axes] / vecs[:, [axis]]

        # Get offset for other axis
        other_offset = other_vecs_norm * dist.reshape(dist.shape[0], 1)

        # Offset for this axis
        this_offset = dist * sign

    # Apply offsets
    new_coords = node_locs.copy()
    new_coords[:, other_axes] += other_offset * sign.reshape(sign.shape[0], 1)
    new_coords[:, [axis]] += this_offset.reshape(this_offset.shape[0], 1)

    # Now extract nodes that need to be inserted (only edges that cross a
    # grid plane, i.e. where child and parent fall into different bins)
    insert_between = not_root.loc[diff != 0, ['node_id', 'parent_id']].values
    new_coords = new_coords[diff != 0]

    # Insert nodes
    graph.insert_nodes(x, where=insert_between, coords=new_coords, inplace=True)

    # Figure out what to do with nodes that are not on the grid
    if old_nodes == 'remove':
        mod = x.nodes[['x', 'y', 'z'][axis]].values % interval
        not_lined_up = mod != 0
        to_remove = x.nodes.loc[not_lined_up, 'node_id'].values
    elif old_nodes == 'keep':
        # Per the documented contract, "keep" leaves old nodes untouched -
        # so there is nothing to remove
        to_remove = []
    elif old_nodes == 'snap':
        not_lined_up = x.nodes[['x', 'y', 'z']].values[:, axis] % interval != 0
        to_snap = x.nodes.loc[not_lined_up, ['x', 'y', 'z'][axis]].values
        snapped = (to_snap / interval).round() * interval
        x.nodes.loc[not_lined_up, ['x', 'y', 'z'][axis]] = snapped
        to_remove = []

    if np.any(to_remove):
        graph.remove_nodes(x, which=to_remove, inplace=True)

    return x

Resample skeleton(s) to given resolution.

Preserves root, leafs and branchpoints. Soma, connectors and node tags (if present) are mapped onto the closest node in the resampled neuron.

Important

A few things to keep in mind: - This generates an entirely new set of node IDs! They will be unique within a neuron, but you may encounter duplicates across neurons. - Any non-standard node table columns (e.g. "labels") will be lost. - Soma(s) will be pinned to the closest node in the resampled neuron.

Also: be aware that high-resolution neurons will use A LOT of memory.

PARAMETER DESCRIPTION
x
            Neuron(s) to resample.

TYPE: TreeNeuron | NeuronList

resample_to
            Target sampling resolution, i.e. one node every
            N units of cable. Note that hitting the exact
            sampling resolution might not be possible e.g. if
            a branch is shorter than the target resolution. If
            neuron(s) have their `.units` parameter, you can also
            pass a string such as "1 micron".

TYPE: int | float | str

method
            See `scipy.interpolate.interp1d` for possible
            options. By default, we're using linear interpolation.

TYPE: str DEFAULT: 'linear'

map_columns
            Names of additional columns to carry over to the resampled
            neuron. Numerical columns will be interpolated according to
            `method`. Non-numerical columns will be interpolated
            using nearest neighbour interpolation.

TYPE: list of str DEFAULT: None

inplace
            If True, will modify original neuron. If False, a
            resampled copy is returned.

TYPE: bool DEFAULT: False

skip_errors
            If True, will skip errors during interpolation and
            only print summary.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
TreeNeuron / List

Downsampled neuron(s).

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> # Check sampling resolution (nodes/cable)
>>> round(n.sampling_resolution)
60
>>> # Resample to 1 micron (example neurons are in 8x8x8nm)
>>> n_rs = navis.resample_skeleton(n,
...                                resample_to=1000 / 8,
...                                inplace=False)
>>> round(n_rs.sampling_resolution)
134
See Also

navis.downsample_neuron This function reduces the number of nodes instead of resample to certain resolution. Useful if you are just after some simplification - e.g. for speeding up your calculations or you want to preserve node IDs. navis.resample_along_axis Resample neuron along a single axis such that nodes align with given 1-dimensional grid.

Source code in navis/sampling/resampling.py
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
@utils.map_neuronlist(desc='Resampling', allow_parallel=True)
def resample_skeleton(x: 'core.NeuronObject',
                      resample_to: Union[int, str],
                      inplace: bool = False,
                      method: str = 'linear',
                      map_columns: Optional[list] = None,
                      skip_errors: bool = True
                      ) -> Optional['core.NeuronObject']:
    """Resample skeleton(s) to given resolution.

    Preserves root, leafs and branchpoints. Soma, connectors and node tags
    (if present) are mapped onto the closest node in the resampled neuron.

    Important
    ---------
    A few things to keep in mind:
      - This generates an entirely new set of node IDs! They will be unique
        within a neuron, but you may encounter duplicates across neurons.
      - Any non-standard node table columns (e.g. "labels") will be lost.
      - Soma(s) will be pinned to the closest node in the resampled neuron.


    Also: be aware that high-resolution neurons will use A LOT of memory.

    Parameters
    ----------
    x :                 TreeNeuron | NeuronList
                        Neuron(s) to resample.
    resample_to :       int | float | str
                        Target sampling resolution, i.e. one node every
                        N units of cable. Note that hitting the exact
                        sampling resolution might not be possible e.g. if
                        a branch is shorter than the target resolution. If
                        neuron(s) have their `.units` parameter, you can also
                        pass a string such as "1 micron".
    method :            str, optional
                        See `scipy.interpolate.interp1d` for possible
                        options. By default, we're using linear interpolation.
    map_columns :       list of str, optional
                        Names of additional columns to carry over to the resampled
                        neuron. Numerical columns will be interpolated according to
                        `method`. Non-numerical columns will be interpolated
                        using nearest neighbour interpolation.
    inplace :           bool, optional
                        If True, will modify original neuron. If False, a
                        resampled copy is returned.
    skip_errors :       bool, optional
                        If True, will skip errors during interpolation and
                        only print summary.

    Returns
    -------
    TreeNeuron/List
                        Downsampled neuron(s).

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> # Check sampling resolution (nodes/cable)
    >>> round(n.sampling_resolution)
    60
    >>> # Resample to 1 micron (example neurons are in 8x8x8nm)
    >>> n_rs = navis.resample_skeleton(n,
    ...                                resample_to=1000 / 8,
    ...                                inplace=False)
    >>> round(n_rs.sampling_resolution)
    134

    See Also
    --------
    [`navis.downsample_neuron`][]
                        This function reduces the number of nodes instead of
                        resample to certain resolution. Useful if you are
                        just after some simplification - e.g. for speeding up
                        your calculations or you want to preserve node IDs.
    [`navis.resample_along_axis`][]
                        Resample neuron along a single axis such that nodes
                        align with given 1-dimensional grid.

    """
    if not isinstance(x, core.TreeNeuron):
        raise TypeError(f'Unable to resample data of type "{type(x)}"')

    # Map units (non-str are just passed through)
    resample_to = x.map_units(resample_to, on_error="raise")

    if not inplace:
        x = x.copy()

    num_cols = ["x", "y", "z", "radius"]
    non_num_cols = []

    if map_columns:
        if isinstance(map_columns, str):
            map_columns = [map_columns]

        for col in map_columns:
            if col in num_cols or col in non_num_cols:
                continue
            if col not in x.nodes.columns:
                raise ValueError(f'Column "{col}" not found in node table')
            if pd.api.types.is_numeric_dtype(x.nodes[col].dtype):
                num_cols.append(col)
            else:
                non_num_cols.append(col)

    # Collect coordinates
    locs = dict(zip(x.nodes.node_id.values, x.nodes[["x", "y", "z"]].values))

    # Collect values for all columns
    values = {
        col: dict(zip(x.nodes.node_id.values, x.nodes[col].values))
        for col in num_cols + non_num_cols
    }

    # For categorical columns, we need to translate them to numerical values
    cat2num = {}
    num2cat = {}
    for col in non_num_cols:
        cat2num[col] = {c: i for i, c in enumerate(x.nodes[col].unique())}
        num2cat[col] = {i: c for c, i in cat2num[col].items()}

    new_nodes: List = []
    max_tn_id = x.nodes.node_id.max() + 1

    errors = 0

    # Iterate over segments
    for i, seg in enumerate(x.small_segments):
        # Get coordinates
        coords = np.vstack([locs[n] for n in seg])
        # Get radii
        # rad = [radii[tn] for tn in seg]

        # Vecs between subsequently measured points
        vecs = np.diff(coords.T)

        # path: cum distance along points (norm from first to Nth point)
        dist = np.cumsum(np.linalg.norm(vecs, axis=0))
        dist = np.insert(dist, 0, 0)

        # If path is too short, just keep the first and last node
        if dist[-1] < resample_to or (method == "cubic" and len(seg) <= 3):
            new_nodes += [
                [seg[0], seg[-1]] + [values[c][seg[0]] for c in num_cols + non_num_cols]
            ]
            continue

        # Distances (i.e. resolution) of interpolation
        n_nodes = np.round(dist[-1] / resample_to)
        new_dist = np.linspace(dist[0], dist[-1], int(n_nodes))

        samples = {}
        # Interpolate numerical columns
        for col in num_cols:
            try:
                samples[col] = scipy.interpolate.interp1d(
                    dist, [values[col][n] for n in seg], kind=method
                )
            except ValueError as e:
                if skip_errors:
                    errors += 1
                    new_nodes += x.nodes.loc[
                        x.nodes.node_id.isin(seg[:-1]),
                        ["node_id", "parent_id"] + num_cols + non_num_cols,
                    ].values.tolist()
                    continue
                else:
                    raise e
        # Interpolate non-numerical columns
        for col in non_num_cols:
            try:
                samples[col] = scipy.interpolate.interp1d(
                    dist, [cat2num[col][values[col][n]] for n in seg], kind="nearest"
                )
            except ValueError as e:
                if skip_errors:
                    errors += 1
                    new_nodes += x.nodes.loc[
                        x.nodes.node_id.isin(seg[:-1]),
                        ["node_id", "parent_id"] + num_cols + non_num_cols,
                    ].values.tolist()
                    continue
                else:
                    raise e

        # Sample each column
        new_values = {}
        for col in num_cols:
            new_values[col] = samples[col](new_dist)
        for col in non_num_cols:
            new_values[col] = [num2cat[col][int(samples[col](d))] for d in new_dist]

        # Generate new ids (start and end node IDs of this segment are kept)
        new_ids = np.concatenate(
            (seg[:1], [max_tn_id + i for i in range(len(new_dist) - 2)], seg[-1:])
        )

        # Increase max index
        max_tn_id += len(new_ids)

        # Keep track of new nodes
        new_nodes += [
            [tn, pn] + [new_values[c][i] for c in num_cols + non_num_cols]
            for i, (tn, pn) in enumerate(zip(new_ids[:-1], new_ids[1:]))
        ]

    if errors:
        logger.warning(f"{errors} ({errors/i:.0%}) segments skipped due to " "errors")

    # Add root node(s)
    root = x.nodes.loc[
        x.nodes.node_id.isin(utils.make_iterable(x.root)),
        ["node_id", "parent_id"] + num_cols + non_num_cols,
    ]
    new_nodes += [list(r) for r in root.values]

    # Generate new nodes dataframe
    new_nodes = pd.DataFrame(
        data=new_nodes, columns=["node_id", "parent_id"] + num_cols + non_num_cols
    )

    # Convert columns to appropriate dtypes
    dtypes = {
        k: x.nodes[k].dtype for k in ["node_id", "parent_id"] + num_cols + non_num_cols
    }

    for cols in new_nodes.columns:
        new_nodes = new_nodes.astype(dtypes, errors="ignore")

    # Remove duplicate nodes (branch points)
    new_nodes = new_nodes[~new_nodes.node_id.duplicated()]

    # Generate KDTree
    tree = scipy.spatial.cKDTree(new_nodes[["x", "y", "z"]].values)
    # Map soma onto new nodes if required
    # Note that if `._soma` is a soma detection function we can't tell
    # how to deal with it. Ideally the new soma node will
    # be automatically detected but it is possible, for example, that
    # the radii of nodes have changed due to interpolation such that more
    # than one soma is detected now. Also a "label" column in the node
    # table would be lost at this point.
    # We will go for the easy option which is to pin the soma at this point.
    nodes = x.nodes.set_index("node_id", inplace=False)
    if np.any(getattr(x, "soma")):
        soma_nodes = utils.make_iterable(x.soma)
        old_pos = nodes.loc[soma_nodes, ["x", "y", "z"]].values

        # Get nearest neighbours
        dist, ix = tree.query(old_pos)
        node_map = dict(zip(soma_nodes, new_nodes.node_id.values[ix]))

        # Map back onto neuron
        if utils.is_iterable(x.soma):
            # Use _soma to avoid checks - the new nodes have not yet been
            # assigned to the neuron!
            x._soma = [node_map[n] for n in x.soma]
        else:
            x._soma = node_map[x.soma]
    else:
        # If `._soma` was (read: is) a function but it didn't detect anything in
        # the original neurons, this makes sure that the resampled neuron
        # doesn't have a soma either:
        x.soma = None

    # Map connectors back if necessary
    if x.has_connectors:
        # Get position of old synapse-bearing nodes
        old_tn_position = nodes.loc[x.connectors.node_id, ["x", "y", "z"]].values

        # Get nearest neighbours
        dist, ix = tree.query(old_tn_position)

        # Map back onto neuron
        x.connectors["node_id"] = new_nodes.node_id.values[ix]

    # Map tags back if necessary
    # Expects `tags` to be a dictionary {'tag': [node_id1, node_id2, ...]}
    if x.has_tags and isinstance(x.tags, dict):
        # Get nodes that need remapping
        nodes_to_remap = list({n for l in x.tags.values() for n in l})

        # Get position of old tag-bearing nodes
        old_tn_position = nodes.loc[nodes_to_remap, ["x", "y", "z"]].values

        # Get nearest neighbours
        dist, ix = tree.query(old_tn_position)

        # Map back onto tags
        node_map = dict(zip(nodes_to_remap, new_nodes.node_id.values[ix]))
        x.tags = {k: [node_map[n] for n in v] for k, v in x.tags.items()}

    # Set nodes (avoid setting on copy warning)
    x.nodes = new_nodes.copy()

    # Clear and regenerate temporary attributes
    x._clear_temp_attr()

    return x

Rewire neuron from graph.

This function takes a graph representation of a neuron and rewires its node table accordingly. This is useful if we made changes to the graph (i.e. adding or removing edges) and want those to propagate to the node table.

PARAMETER DESCRIPTION
x
    Neuron to be rewired.

TYPE: TreeNeuron

g
    Graph to use for rewiring. Please note that directionality (if
    present) is not taken into account. Nodes not included in the
    graph will be disconnected (i.e. won't have a parent). Nodes
    in the graph but not in the table are ignored!

TYPE: networkx.Graph

root
    Node ID for the new root. If not given, will try to use the
    current root.

TYPE: int DEFAULT: None

inplace
    If True, will rewire the neuron inplace. If False, will return
    a rewired copy of the neuron.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
TreeNeuron

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> n.n_trees
1
>>> # Drop one edge from graph
>>> g = n.graph.copy()
>>> g.remove_edge(310, 309)
>>> # Rewire neuron
>>> n2 = navis.rewire_skeleton(n, g, inplace=False)
>>> n2.n_trees
2
Source code in navis/graph/graph_utils.py
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
def rewire_skeleton(
    x: "core.TreeNeuron", g: nx.Graph, root: Optional[int] = None, inplace: bool = False
) -> Optional["core.TreeNeuron"]:
    """Rewire neuron from graph.

    This function takes a graph representation of a neuron and rewires its
    node table accordingly. This is useful if we made changes to the graph
    (i.e. adding or removing edges) and want those to propagate to the node
    table.

    Parameters
    ----------
    x :         TreeNeuron
                Neuron to be rewired.
    g :         networkx.Graph
                Graph to use for rewiring. Please note that directionality (if
                present) is not taken into account. Nodes not included in the
                graph will be disconnected (i.e. won't have a parent). Nodes
                in the graph but not in the table are ignored!
    root :      int
                Node ID for the new root. If not given, will try to use the
                current root.
    inplace :   bool
                If True, will rewire the neuron inplace. If False, will return
                a rewired copy of the neuron.

    Returns
    -------
    TreeNeuron

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> n.n_trees
    1
    >>> # Drop one edge from graph
    >>> g = n.graph.copy()
    >>> g.remove_edge(310, 309)
    >>> # Rewire neuron
    >>> n2 = navis.rewire_skeleton(n, g, inplace=False)
    >>> n2.n_trees
    2

    """
    assert isinstance(x, core.TreeNeuron), f"Expected TreeNeuron, got {type(x)}"
    assert isinstance(g, nx.Graph), f"Expected networkx graph, got {type(g)}"

    if not inplace:
        x = x.copy()

    # Directionality is ignored; the DFS below defines new parent/child order
    if g.is_directed():
        g = g.to_undirected()

    # Reduce to a spanning tree (per component) so every node ends up with
    # at most one parent
    g = nx.minimum_spanning_tree(g, weight="weight")

    # Use an explicit `is None` check: a node ID of 0 is a perfectly valid
    # root but would be falsy (the old truthiness check dropped it)
    if root is None:
        root = x.root[0] if x.root[0] in g.nodes else next(iter(g.nodes))

    # Generate tree for the main component
    tree = nx.dfs_tree(g, source=root)

    # Generate list of parents: DFS edges run parent -> child
    lop = {e[1]: e[0] for e in tree.edges}

    # If the graph has more than one connected component,
    # the remaining components have arbitrary roots
    if len(tree.edges) != len(g.edges):
        for cc in nx.connected_components(g):
            if root not in cc:
                comp_tree = nx.dfs_tree(g, source=next(iter(cc)))
                lop.update({e[1]: e[0] for e in comp_tree.edges})

    # Update parent IDs; nodes without a parent in the graph become roots (-1)
    x.nodes["parent_id"] = x.nodes.node_id.map(lambda n: lop.get(n, -1))

    x._clear_temp_attr()

    return x

Scan parquet file.

PARAMETER DESCRIPTION
file
            File to be scanned.

TYPE: str

RETURNS DESCRIPTION
pd.DataFrame

Summary of file's content.

See Also

navis.write_parquet Export neurons as parquet files. navis.read_parquet Read parquet file into neurons.

Examples:

See navis.write_parquet for examples.

Source code in navis/io/pq_io.py
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
def scan_parquet(file: Union[str, Path]):
    """Scan parquet file.

    Parameters
    ----------
    file :              str
                        File to be scanned.

    Returns
    -------
    pd.DataFrame
                        Summary of file's content.

    See Also
    --------
    [`navis.write_parquet`][]
                        Export neurons as parquet files.
    [`navis.read_parquet`][]
                        Read parquet file into neurons.

    Examples
    --------
    See [`navis.write_parquet`][] for examples.

    """
    try:
        import pyarrow.parquet as pq
    except ModuleNotFoundError:
        raise ModuleNotFoundError(
            'Reading parquet files requires the pyarrow library:\n'
            ' pip3 install pyarrow')

    f = Path(file).expanduser()
    if not f.is_file():
        raise FileNotFoundError(f'File "{f}" does not exist.')

    metadata = pq.read_metadata(f)

    # Default to empty so a failed decode below does not leave `meta`
    # unbound (previously this caused a NameError right after the warning)
    meta = {}
    try:
        meta = {k.decode(): v.decode() for k, v in metadata.metadata.items()}
    except Exception:
        # `Exception` (not `BaseException`) so e.g. KeyboardInterrupt still works
        logger.warning(f'Unable to decode meta data for parquet file {f}')

    # Parse meta data: neuron entries are keyed "{id}:{property}"
    ids = [v for k, v in meta.items() if k.endswith(':id') and not k.startswith('_')]
    records = {i: {} for i in ids}
    for k, v in meta.items():
        if k.startswith('_'):
            continue
        if ':' not in k:
            continue

        # maxsplit=1 so property names containing ":" don't raise ValueError
        nid, prop = k.split(':', 1)

        if nid not in records:  # there might be an "ARROW:schema" entry
            continue

        records[nid][prop] = v

    # Turn into DataFrame
    df = pd.DataFrame.from_records(list(records.values()))

    # Move ID column to front; the column may be absent entirely if the
    # file carried no (decodable) neuron meta data
    if 'id' in df.columns:
        ids = df['id']
        df.drop(labels=['id'], axis=1, inplace=True)
        df.insert(0, 'id', ids)
    else:
        df.insert(0, 'id', pd.Series(dtype=object))

    return df

Calculate morphometric properties of a neuron's segments.

This currently includes Strahler index, length, distance to root and tortuosity. If neuron has a radius will also calculate radius-based metrics such as volume.

PARAMETER DESCRIPTION
x
            Neuron(s) to produce segment analysis for.

TYPE: TreeNeuron | MeshNeuron

RETURNS DESCRIPTION
pandas.DataFrame

Each row represents one linear segment between leafs/branch nodes (corresponds to x.small_segments): - strahler_index is the Strahler Index of this segment - length is the geodesic length of the segment - tortuosity is the arc-chord ratio, i.e. the ratio of length to the distance between its ends - root_dist is the geodesic distance from the base of the segment to the root If neuron node table has a radius column will also compute the following properties: - radius_mean - radius_max - radius_min - volume

See Also

navis.strahler_index This function calculates the Strahler index for every nodes/vertex in the neuron. navis.tortuosity This function calculates a tortuosity for the entire neuron.

Examples:

Run analysis on a single neuron:

>>> import navis
>>> n = navis.example_neurons(1, kind='skeleton')
>>> n.reroot(n.soma, inplace=True)
>>> sa = navis.segment_analysis(n)
>>> sa.head()
        length  tortuosity     root_dist  strahler_index  ...        volume
0  1073.535053    1.151022    229.448586               1  ...  4.159788e+07
1   112.682839    1.092659  10279.037511               1  ...  1.153095e+05
2   214.124934    1.013030   9557.521377               1  ...  8.618440e+05
3   159.585328    1.074575   9747.866968               1  ...  9.088157e+05
4   229.448586    1.000000      0.000000               6  ...  3.206231e+07
>>> # Get per Strahler index means
>>> sa.groupby('strahler_index').mean()
                    length  tortuosity     root_dist  ...        volume
strahler_index
1               200.957415    1.111979  13889.593659  ...  8.363172e+05
2               171.283617    1.047736  14167.056400  ...  1.061405e+06
3               134.788019    1.023672  13409.920288  ...  9.212662e+05
4               711.063734    1.016606  15768.886051  ...  7.304981e+06
5               146.350195    1.000996   8443.345668  ...  2.262917e+06
6               685.852990    1.056258   1881.594266  ...  1.067976e+07

Compare across neurons:

>>> import navis
>>> nl = navis.example_neurons(5, kind='skeleton')
>>> sa = navis.segment_analysis(nl)
>>> # Note the `neuron` column when running the analysis on NeuronLists
>>> sa.head()
       neuron       length  tortuosity     root_dist  ...        volume
0  1734350788   112.682839    1.092659  11123.123978  ...  1.153095e+05
1  1734350788   214.124934    1.013030  10401.607843  ...  8.618440e+05
2  1734350788   159.585328    1.074575  10591.953435  ...  9.088157e+05
3  1734350788  1073.535053    1.151022      0.000000  ...  4.159788e+07
4  1734350788   260.538727    1.000000   1073.535053  ...  3.593405e+07
>>> # Get Strahler index counts for each neuron
>>> si_counts = sa.groupby(['neuron', 'strahler_index']).size().unstack()
>>> si_counts
strahler_index      1      2      3      4     5     6     7
neuron
722817260       656.0  336.0  167.0   74.0  32.0  24.0   NaN
754534424       726.0  345.0  176.0  111.0  37.0   9.0  18.0
754538881       642.0  344.0  149.0   88.0  21.0  24.0   NaN
1734350788      618.0  338.0  138.0   74.0  38.0  11.0   NaN
1734350908      761.0  363.0  203.0  116.0  20.0  33.0   NaN
Source code in navis/morpho/mmetrics.py
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
@utils.map_neuronlist_df(desc="Analyzing", allow_parallel=True, reset_index=True)
@utils.meshneuron_skeleton(method="pass_through", reroot_soma=True)
def segment_analysis(x: "core.NeuronObject") -> "core.NeuronObject":
    """Calculate morphometric properties of a neuron's segments.

    This currently includes Strahler index, length, distance to root and
    tortuosity. If neuron has a radius will also calculate radius-based metrics
    such as volume.

    Parameters
    ----------
    x :                 TreeNeuron | MeshNeuron
                        Neuron(s) to produce segment analysis for.

    Returns
    -------
    pandas.DataFrame
                        Each row represents one linear segment between
                        leafs/branch nodes (corresponds to `x.small_segments`):
                          - `strahler_index` is the Strahler Index of this segment
                          - `length` is the geodesic length of the segment
                          - `tortuosity` is the arc-chord ratio, i.e. the
                            ratio of `length` to the distance between its ends
                          - `root_dist` is the geodesic distance from the base
                            of the segment to the root
                        If neuron node table has a `radius` column will also
                        compute the following properties:
                          - `radius_mean`
                          - `radius_max`
                          - `radius_min`
                          - `volume`

    See Also
    --------
    [`navis.strahler_index`][]
                        This function calculates the Strahler index for every
                        nodes/vertex in the neuron.
    [`navis.tortuosity`][]
                        This function calculates a tortuosity for the entire
                        neuron.

    Examples
    --------

    Run analysis on a single neuron:

    >>> import navis
    >>> n = navis.example_neurons(1, kind='skeleton')
    >>> n.reroot(n.soma, inplace=True)
    >>> sa = navis.segment_analysis(n)
    >>> sa.head()                                               # doctest: +SKIP
            length  tortuosity     root_dist  strahler_index  ...        volume
    0  1073.535053    1.151022    229.448586               1  ...  4.159788e+07
    1   112.682839    1.092659  10279.037511               1  ...  1.153095e+05
    2   214.124934    1.013030   9557.521377               1  ...  8.618440e+05
    3   159.585328    1.074575   9747.866968               1  ...  9.088157e+05
    4   229.448586    1.000000      0.000000               6  ...  3.206231e+07
    >>> # Get per Strahler index means
    >>> sa.groupby('strahler_index').mean()                     # doctest: +SKIP
                        length  tortuosity     root_dist  ...        volume
    strahler_index
    1               200.957415    1.111979  13889.593659  ...  8.363172e+05
    2               171.283617    1.047736  14167.056400  ...  1.061405e+06
    3               134.788019    1.023672  13409.920288  ...  9.212662e+05
    4               711.063734    1.016606  15768.886051  ...  7.304981e+06
    5               146.350195    1.000996   8443.345668  ...  2.262917e+06
    6               685.852990    1.056258   1881.594266  ...  1.067976e+07

    Compare across neurons:

    >>> import navis
    >>> nl = navis.example_neurons(5, kind='skeleton')
    >>> sa = navis.segment_analysis(nl)
    >>> # Note the `neuron` column when running the analysis on NeuronLists
    >>> sa.head()                                               # doctest: +SKIP
           neuron       length  tortuosity     root_dist  ...        volume
    0  1734350788   112.682839    1.092659  11123.123978  ...  1.153095e+05
    1  1734350788   214.124934    1.013030  10401.607843  ...  8.618440e+05
    2  1734350788   159.585328    1.074575  10591.953435  ...  9.088157e+05
    3  1734350788  1073.535053    1.151022      0.000000  ...  4.159788e+07
    4  1734350788   260.538727    1.000000   1073.535053  ...  3.593405e+07
    >>> # Get Strahler index counts for each neuron
    >>> si_counts = sa.groupby(['neuron', 'strahler_index']).size().unstack()
    >>> si_counts                                               # doctest: +SKIP
    strahler_index      1      2      3      4     5     6     7
    neuron
    722817260       656.0  336.0  167.0   74.0  32.0  24.0   NaN
    754534424       726.0  345.0  176.0  111.0  37.0   9.0  18.0
    754538881       642.0  344.0  149.0   88.0  21.0  24.0   NaN
    1734350788      618.0  338.0  138.0   74.0  38.0  11.0   NaN
    1734350908      761.0  363.0  203.0  116.0  20.0  33.0   NaN

    """
    utils.eval_param(x, name="x", allowed_types=(core.TreeNeuron,))

    # Make sure Strahler indices are available (computed onto the node table)
    if "strahler_index" not in x.nodes:
        strahler_index(x)

    # Get small segments for this neuron
    segs = graph._break_segments(x)

    # For each segment get the SI
    # s[0] is the segment's first node; the whole segment shares its index
    nodes = x.nodes.set_index("node_id")
    SI = nodes.loc[[s[0] for s in segs], "strahler_index"].values

    # Get segment lengths
    seg_lengths = np.array([graph.segment_length(x, s) for s in segs])

    # Get tortuosity
    # L is the straight-line (Euclidean) distance between the segment's ends.
    # NOTE(review): coinciding end points would give L = 0 and an infinite
    # tortuosity -- presumably impossible for tree segments; confirm.
    start = nodes.loc[[s[0] for s in segs], ["x", "y", "z"]].values
    end = nodes.loc[[s[-1] for s in segs], ["x", "y", "z"]].values
    L = np.sqrt(((start - end) ** 2).sum(axis=1))
    tort = seg_lengths / L

    # Get distance from root (s[-1] is the proximal end, i.e. the base)
    root_dists_dict = graph.dist_to_root(x, weight="weight")
    root_dists = np.array([root_dists_dict[s[-1]] for s in segs])

    # Compile results
    res = pd.DataFrame()
    res["length"] = seg_lengths
    res["tortuosity"] = tort
    res["root_dist"] = root_dists
    res["strahler_index"] = SI

    if "radius" in nodes:
        # Generate radius dict
        radii = nodes.radius.to_dict()

        # nan-aware aggregates ignore nodes with missing radii
        seg_radii = [[radii.get(n, 0) for n in s] for s in segs]
        res["radius_mean"] = [np.nanmean(s) for s in seg_radii]
        res["radius_min"] = [np.nanmin(s) for s in seg_radii]
        res["radius_max"] = [np.nanmax(s) for s in seg_radii]

        # Get radii for each cylinder
        # r1 = radius at the node, r2 = radius at its parent; root nodes have
        # no parent -> NaN -> treated as 0
        r1 = nodes.index.map(radii).values
        r2 = nodes.parent_id.map(radii).values
        r2[np.isnan(r2)] = 0

        # Get the height for each node -> parent cylinder
        # NOTE(review): `parent_dist` presumably returns, per node, the
        # distance to its parent (0 for roots given root_dist=0) -- confirm.
        h = parent_dist(x, root_dist=0)

        # Radii for top and bottom of tapered cylinder
        # Volume of a conical frustum: V = 1/3 * pi * h * (r1^2 + r1*r2 + r2^2)
        vols = 1 / 3 * np.pi * (r1**2 + r1 * r2 + r2**2) * h
        vols_dict = dict(zip(nodes.index.values, vols))

        # For each segment get the volume
        # s[:-1] excludes the base node so branch points shared between
        # segments are not counted twice
        res["volume"] = [np.nansum([vols_dict.get(n, 0) for n in s[:-1]]) for s in segs]

    return res

Get length of a linear segment.

This function is superfast but has no checks - you must provide a valid segment.

PARAMETER DESCRIPTION
x
    Neuron to which this segment belongs.

TYPE: TreeNeuron

segment
    Linear segment as list of node IDs ordered child->parent.

TYPE: list of ints

RETURNS DESCRIPTION
length

TYPE: float

See Also

navis.dist_between If you only know start and end points of the segment.

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> l = navis.segment_length(n, n.segments[0])
>>> round(l)
56356
Source code in navis/graph/graph_utils.py
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
@utils.lock_neuron
def segment_length(x: "core.TreeNeuron", segment: List[int]) -> float:
    """Get length of a linear segment.

    Very fast, but performs no validation whatsoever - the `segment`
    you pass in must be valid.

    Parameters
    ----------
    x :         TreeNeuron
                Neuron to which this segment belongs.
    segment :   list of ints
                Linear segment as list of node IDs ordered child->parent.

    Returns
    -------
    length :    float

    See Also
    --------
    [`navis.dist_between`][]
        If you only know start and end points of the segment.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> l = navis.segment_length(n, n.segments[0])
    >>> round(l)
    56356

    """
    if not isinstance(x, core.TreeNeuron):
        raise ValueError(f'Unable to process data of type "{type(x)}"')

    # Fetch the graph a single time up front - this sidesteps the validation
    # overhead of repeated property access; do NOT change this
    G = x.graph

    # Walk consecutive (child, parent) pairs and collect their edge weights
    weights = [
        G.edges[(child, parent)]["weight"]
        for child, parent in zip(segment[:-1], segment[1:])
    ]
    return np.sum(weights)

Calculate segregation index (SI).

The segregation index as established by Schneider-Mizell et al. (eLife, 2016) is a measure for how polarized a neuron is. SI of 1 indicates total segregation of inputs and outputs into dendrites and axon, respectively. SI of 0 indicates homogeneous distribution.

PARAMETER DESCRIPTION
x
            Neuron to calculate segregation index (SI) for. If a
            NeuronList, will assume that it contains
            fragments (e.g. from axon/ dendrite splits) of a
            single neuron. If list, must be records containing
            number of pre- and postsynapses for each fragment::

                [{'presynapses': 10, 'postsynapses': 320},
                 {'presynapses': 103, 'postsynapses': 21}]

TYPE: NeuronList | list

Notes

From Schneider-Mizell et al. (2016): "Note that even a modest amount of mixture (e.g. axo-axonic inputs) corresponds to values near H = 0.5–0.6 (Figure 7—figure supplement 1). We consider an unsegregated neuron (H < 0.05) to be purely dendritic due to their anatomical similarity with the dendritic domains of those segregated neurons that have dendritic outputs."

RETURNS DESCRIPTION
H

Segregation Index (SI).

TYPE: float

Source code in navis/morpho/mmetrics.py
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
def segregation_index(x: Union["core.NeuronObject", dict]) -> float:
    """Calculate segregation index (SI).

    The segregation index as established by Schneider-Mizell et al. (eLife,
    2016) is a measure for how polarized a neuron is. SI of 1 indicates total
    segregation of inputs and outputs into dendrites and axon, respectively.
    SI of 0 indicates homogeneous distribution.

    Parameters
    ----------
    x :                 NeuronList | list
                        Neuron to calculate segregation index (SI) for. If a
                        NeuronList, will assume that it contains
                        fragments (e.g. from axon/ dendrite splits) of a
                        single neuron. If list, must be records containing
                        number of pre- and postsynapses for each fragment::

                            [{'presynapses': 10, 'postsynapses': 320},
                             {'presynapses': 103, 'postsynapses': 21}]

    Notes
    -----
    From Schneider-Mizell et al. (2016): "Note that even a modest amount of
    mixture (e.g. axo-axonic inputs) corresponds to values near H = 0.5–0.6
    (Figure 7—figure supplement 1). We consider an unsegregated neuron
    (H < 0.05) to be purely dendritic due to their anatomical similarity with
    the dendritic domains of those segregated neurons that have dendritic
    outputs."

    Returns
    -------
    H :                 float
                        Segregation Index (SI).

    """
    if not isinstance(x, (core.NeuronList, list)):
        raise ValueError(f'Expected NeuronList or list got "{type(x)}"')

    if isinstance(x, core.NeuronList) and len(x) <= 1:
        raise ValueError(f"Expected multiple neurons, got {len(x)}")

    # Turn NeuronList into records
    if isinstance(x, core.NeuronList):
        x = [
            {"presynapses": n.n_presynapses, "postsynapses": n.n_postsynapses}
            for n in x
        ]

    # Extract the total number of pre- and postsynapses
    total_pre = sum(n["presynapses"] for n in x)
    total_post = sum(n["postsynapses"] for n in x)
    total_syn = total_pre + total_post

    # Calculate entropy and synapse count for each fragment.
    # Note: we keep the per-fragment totals in a separate list instead of
    # writing them back into the input records - the previous implementation
    # mutated the caller's dicts as a side effect.
    entropy = []
    frag_totals = []
    for n in x:
        frag_total = n["postsynapses"] + n["presynapses"]
        frag_totals.append(frag_total)

        # This is to avoid division warnings; p = inf falls through to S = 0
        if frag_total:
            p = n["postsynapses"] / frag_total
        else:
            p = float("inf")

        if 0 < p < 1:
            S = -(p * math.log(p) + (1 - p) * math.log(1 - p))
        else:
            S = 0

        entropy.append(S)

    # Calc entropy between fragments (weighted by fragment synapse count)
    S = 1 / total_syn * sum(e * t for e, t in zip(entropy, frag_totals))

    # Normalize to entropy in whole neuron
    p_norm = total_post / total_syn
    if 0 < p_norm < 1:
        S_norm = -(p_norm * math.log(p_norm) + (1 - p_norm) * math.log(1 - p_norm))
        H = 1 - S / S_norm
    else:
        S_norm = 0
        H = 0

    return H

Set/update default connector colors.

PARAMETER DESCRIPTION
x
    New default connector colors. Can be:

       {'cn_label': (r, g, b), ..}
       {'cn_label': {'color': (r, g, b)}, ..}

TYPE: dict

Source code in navis/utils/misc.py
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
def set_default_connector_colors(x: Union[List[tuple], Dict[str, tuple]]
                                 ) -> None:
    """Set/update default connector colors.

    Parameters
    ----------
    x :         dict
                New default connector colors. Accepted formats:

                   {'cn_label': (r, g, b), ..}
                   {'cn_label': {'color': (r, g, b)}, ..}

    """
    if not isinstance(x, dict):
        raise TypeError(f'Expect dict, got "{type(x)}"')

    for label, value in x.items():
        entry = config.default_connector_colors[label]
        # A dict merges into the existing settings; a plain value is
        # interpreted as the color itself
        if isinstance(value, dict):
            entry.update(value)
        else:
            entry['color'] = value

    return

Set levels for all associated module loggers.

Examples:

>>> from navis.utils import set_loggers
>>> from navis import config
>>> # Get current level
>>> lvl = config.logger.level
>>> # Set new level
>>> set_loggers('INFO')
>>> # Revert to old level
>>> set_loggers(lvl)
Source code in navis/utils/misc.py
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
def set_loggers(level: str = 'INFO'):
    """Set the level for all loggers associated with this module.

    Parameters
    ----------
    level :     str | int
                The new logging level (e.g. 'DEBUG', 'INFO' or a numeric
                level as returned by `logger.level`).

    Examples
    --------
    >>> from navis.utils import set_loggers
    >>> from navis import config
    >>> # Get current level
    >>> lvl = config.logger.level
    >>> # Set new level
    >>> set_loggers('INFO')
    >>> # Revert to old level
    >>> set_loggers(lvl)

    """
    config.logger.setLevel(level)

Set global progress bar behaviors.

PARAMETER DESCRIPTION
hide
    Set to True to hide all progress bars.

TYPE: bool DEFAULT: None

leave
    Set to False to clear progress bars after they have finished.

TYPE: bool DEFAULT: None

jupyter
    Set to False to force using of classic tqdm even if in
    Jupyter environment.

TYPE: bool DEFAULT: None

RETURNS DESCRIPTION
Nothing

Examples:

>>> from navis.utils import set_pbars
>>> # Hide progress bars after finishing
>>> set_pbars(leave=False)
>>> # Never show progress bars
>>> set_pbars(hide=True)
>>> # Never use Jupyter widget progress bars
>>> set_pbars(jupyter=False)
Source code in navis/utils/misc.py
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
def set_pbars(hide: Optional[bool] = None,
              leave: Optional[bool] = None,
              jupyter: Optional[bool] = None) -> None:
    """Set global progress bar behaviors.

    Parameters
    ----------
    hide :      bool, optional
                Set to True to hide all progress bars.
    leave :     bool, optional
                Set to False to clear progress bars after they have finished.
    jupyter :   bool, optional
                Set to False to force using of classic tqdm even if in
                Jupyter environment.

    Returns
    -------
    Nothing

    Examples
    --------
    >>> from navis.utils import set_pbars
    >>> # Hide progress bars after finishing
    >>> set_pbars(leave=False)
    >>> # Never show progress bars
    >>> set_pbars(hide=True)
    >>> # Never use Jupyter widget progress bars
    >>> set_pbars(jupyter=False)

    """
    # Only booleans count as explicit settings; `None` means "leave unchanged"
    if isinstance(hide, bool):
        config.pbar_hide = hide

    if isinstance(leave, bool):
        config.pbar_leave = leave

    if isinstance(jupyter, bool):
        if not jupyter:
            # Force the classic terminal tqdm
            config.tqdm = config.tqdm_classic
            config.trange = config.trange_classic
        elif is_jupyter():
            # Use the notebook widget flavor
            config.tqdm = config.tqdm_notebook
            config.trange = config.trange_notebook
        else:
            # Asked for Jupyter widgets but we are not inside Jupyter
            logger.error('No Jupyter environment detected.')

    return

Run Sholl analysis for given neuron(s).

PARAMETER DESCRIPTION
x
    Neuron to analyze. If MeshNeuron, will generate and
    use a skeleton representation.

TYPE: TreeNeuron | MeshNeuron | NeuronList

radii
    If integer, will produce N evenly spaced radii covering the
    distance between the center and the most distal node.
    Alternatively, you can also provide a list of radii to check.
    If `x` is multiple neurons, must provide a list of `radii`!

TYPE: int | list-like DEFAULT: 10

center
    The center to use for Sholl analysis:
        - "centermass" (default) uses the mean across nodes positions
        - "root" uses the current root of the skeleton
        - "soma" uses the neuron's soma (will raise error if no soma)
        - int is interpreted as a node ID
        - (3, ) list-like is interpreted as x/y/z coordinate

TYPE: "centermass" | "root" | "soma" | int | list-like DEFAULT: 'centermass'

geodesic
    If True, will use geodesic (along-the-arbor) instead of
    Euclidean distances. This does not work if center is an x/y/z
    coordinate.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
results

Results contain, for each spherical bin, the number of intersections, cable length and number of branch points.

TYPE: pd.DataFrame

References

See the Wikipedia article for a brief explanation.

Examples:

>>> import navis
>>> n = navis.example_neurons(1, kind='skeleton')
>>> # Sholl analysis
>>> sha = navis.sholl_analysis(n, radii=100, center='root')
>>> # Plot distributions
>>> ax = sha.plot()
>>> # Sholl analysis but using geodesic distance
>>> sha = navis.sholl_analysis(n, radii=100, center='root', geodesic=True)
Source code in navis/morpho/mmetrics.py
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
@utils.map_neuronlist(desc="Sholl analysis", allow_parallel=True)
def sholl_analysis(
    x: "core.NeuronObject",
    radii: Union[int, list] = 10,
    center: Union[Literal["root"], Literal["soma"], list, int] = "centermass",
    geodesic=False,
) -> Union[float, Sequence[float], pd.DataFrame]:
    """Run Sholl analysis for given neuron(s).

    Parameters
    ----------
    x :         TreeNeuron | MeshNeuron | NeuronList
                Neuron to analyze. If MeshNeuron, will generate and
                use a skeleton representation.
    radii :     int | list-like
                If integer, will produce N evenly spaced radii covering the
                distance between the center and the most distal node.
                Alternatively, you can also provide a list of radii to check.
                If `x` is multiple neurons, must provide a list of `radii`!
    center :    "centermass" | "root" | "soma" | int | list-like
                The center to use for Sholl analysis:
                    - "centermass" (default) uses the mean across nodes positions
                    - "root" uses the current root of the skeleton
                    - "soma" uses the neuron's soma (will raise error if no soma)
                    - int is interpreted as a node ID
                    - (3, ) list-like is interpreted as x/y/z coordinate
    geodesic :  bool
                If True, will use geodesic (along-the-arbor) instead of
                Euclidean distances. This does not work if center is an x/y/z
                coordinate.

    Returns
    -------
    results :   pd.DataFrame
                Results contain, for each spherical bin, the number of
                intersections, cable length and number of branch points.

    References
    ----------
    See the [Wikipedia article](https://en.wikipedia.org/wiki/Sholl_analysis)
    for a brief explanation.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1, kind='skeleton')
    >>> # Sholl analysis
    >>> sha = navis.sholl_analysis(n, radii=100, center='root')
    >>> # Plot distributions
    >>> ax = sha.plot()                                         # doctest: +SKIP
    >>> # Sholl analysis but using geodesic distance
    >>> sha = navis.sholl_analysis(n, radii=100, center='root', geodesic=True)

    """
    # Use MeshNeuron's skeleton
    if isinstance(x, core.MeshNeuron):
        x = x.skeleton

    if not isinstance(x, core.TreeNeuron):
        raise TypeError(f"Expected TreeNeuron or MeshNeuron(s), got {type(x)}")

    if geodesic and len(x.root) > 1:
        raise ValueError(
            "Unable to use `geodesic=True` with fragmented "
            "neurons. Use `navis.heal_fragmented_neuron` first."
        )

    # NOTE: all string comparisons on `center` below are guarded by an
    # isinstance check. Once `center` has been turned into a numpy array,
    # a bare `center == "soma"` becomes a NumPy (elementwise) comparison
    # rather than a plain string test and can misbehave silently.
    if isinstance(center, str) and center == "soma" and not x.has_soma:
        raise ValueError(f"Neuron {x.id} has no soma.")
    elif utils.is_iterable(center):
        center = np.asarray(center)
        if center.ndim != 1 or len(center) != 3:
            raise ValueError(
                "`center` must be (3, ) list-like when providing "
                f"a coordinate. Got {center.shape}"
            )
        if geodesic:
            raise ValueError(
                "Must not provide a `center` as coordinate when " "geodesic=True"
            )
    elif isinstance(center, str) and center == "root" and len(x.root) > 1:
        raise ValueError(
            f"Neuron {x.id} has multiple roots. Please specify "
            "which node/coordinate to use as center."
        )

    if isinstance(center, str) and center == "centermass":
        # Mean across all node positions
        center = x.nodes[["x", "y", "z"]].mean(axis=0).values

    # Calculate distances for each node
    nodes = x.nodes.set_index("node_id").copy()
    if not geodesic:
        # Translate node ID / "soma" / "root" into an x/y/z coordinate
        if isinstance(center, int):
            if center not in nodes.index.values:
                raise ValueError(f"{center} is not a valid node ID.")

            center = nodes.loc[center, ["x", "y", "z"]].values
        elif isinstance(center, str) and center == "soma":
            center = nodes.loc[utils.make_iterable(x.soma)[0], ["x", "y", "z"]].values
        elif isinstance(center, str) and center == "root":
            center = nodes.loc[utils.make_iterable(x.root)[0], ["x", "y", "z"]].values
        center = center.astype(float)

        nodes["dist"] = np.sqrt(
            ((x.nodes[["x", "y", "z"]].values - center) ** 2).sum(axis=1)
        )
    else:
        # Geodesic distances are measured from a node, never a coordinate
        # (coordinate + geodesic already raised above)
        if isinstance(center, str) and center == "soma":
            center = x.soma[0]
        elif isinstance(center, str) and center == "root":
            center = x.root[0]

        nodes["dist"] = graph.geodesic_matrix(x, from_=center)[
            x.nodes.node_id.values
        ].values[0]

    # For each non-root node: its own distance, its parent's distance and
    # the length of the connecting edge
    not_root = nodes.parent_id >= 0
    dists = nodes.loc[not_root, "dist"].values
    pdists = nodes.loc[nodes[not_root].parent_id.values, "dist"].values
    le = parent_dist(x)[not_root]
    ty = nodes.loc[not_root, "type"].values

    # Generate radii for the Sholl spheres
    if isinstance(radii, int):
        radii = np.linspace(0, dists.max(), radii + 1)
    else:
        if radii[0] != 0:
            radii = np.insert(radii, 0, 0)

    data = []
    for i in range(1, len(radii)):
        # A crossing is an edge whose child node lies inside the sphere
        # while its parent lies outside
        crossings = ((dists <= radii[i]) & (pdists > radii[i])).sum()

        # Get the (approximate) cable length in this sphere
        this_sphere = (dists > radii[i - 1]) & (dists < radii[i])
        cable = le[this_sphere].sum()

        # The number of branch points in this sphere
        n_branchpoints = (ty[this_sphere] == "branch").sum()

        data.append([radii[i], crossings, cable, n_branchpoints])

    return pd.DataFrame(
        data, columns=["radius", "intersections", "cable_length", "branch_points"]
    ).set_index("radius")

Simplify meshes (TriMesh, MeshNeuron, Volume).

PARAMETER DESCRIPTION
x
    Mesh(es) to simplify.

TYPE: navis.MeshNeuron/List | navis.Volume | trimesh.Trimesh

F
    Determines how much the mesh is simplified:
    Floats (0-1) are interpreted as ratio. For example, an F of
    0.5 will reduce the number of faces to 50%.
    Integers (>1) are interpreted as target face count. For example,
    an F of 5000 will attempt to reduce the number of faces to 5000.

TYPE: float | int

backend
    Which backend to use. Currently we support `pyfqmr`, `open3d`,
    Blender 3D and `pymeshlab`.

TYPE: "auto" | "pyfqmr" | "open3d" | "blender" | "pymeshlab" DEFAULT: 'auto'

inplace
    If True, will perform simplification on `x`. If False, will
    simplify and return a copy.

TYPE: bool DEFAULT: False

**kwargs
    Keyword arguments are passed through to the respective backend's
    functions (see below).

DEFAULT: {}

RETURNS DESCRIPTION
simplified

Simplified object.

See Also

navis.downsample_neuron Downsample all kinds of neurons. navis.meshes.simplify_mesh_fqmr pyfqmr implementation for mesh simplification. navis.meshes.simplify_mesh_open3d Open3D implementation for mesh simplification. navis.meshes.simplify_mesh_pyml PyMeshLab implementation for mesh simplification. navis.meshes.simplify_mesh_blender Blender 3D implementation for mesh simplification.

Source code in navis/meshes/operations.py
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
@utils.map_neuronlist(desc='Simplifying', allow_parallel=True)
def simplify_mesh(x, F, backend='auto', inplace=False, **kwargs):
    """Simplify meshes (TriMesh, MeshNeuron, Volume).

    Parameters
    ----------
    x :         navis.MeshNeuron/List | navis.Volume | trimesh.Trimesh
                Mesh(es) to simplify.
    F :         float | int
                Determines how much the mesh is simplified:
                Floats (0-1) are interpreted as ratio. For example, an F of
                0.5 will reduce the number of faces to 50%.
                Integers (>1) are interpreted as target face count. For example,
                an F of 5000 will attempt to reduce the number of faces to 5000.
    backend :   "auto" | "pyfqmr" | "open3d" | "blender" | "pymeshlab"
                Which backend to use. Currently we support `pyfqmr`, `open3d`,
                Blender 3D and `pymeshlab`.
    inplace :   bool
                If True, will perform simplification on `x`. If False, will
                simplify and return a copy.
    **kwargs
                Keyword arguments are passed through to the respective backend's
                functions (see below).

    Returns
    -------
    simplified
                Simplified object.

    See Also
    --------
    [`navis.downsample_neuron`][]
                Downsample all kinds of neurons.
    [`navis.meshes.simplify_mesh_fqmr`][]
                pyfqmr implementation for mesh simplification.
    [`navis.meshes.simplify_mesh_open3d`][]
                Open3D implementation for mesh simplification.
    [`navis.meshes.simplify_mesh_pyml`][]
                PyMeshLab implementation for mesh simplification.
    [`navis.meshes.simplify_mesh_blender`][]
                Blender 3D implementation for mesh simplification.

    """
    if not isinstance(backend, str):
        raise TypeError(f'`backend` must be string, got "{type(backend)}"')

    backend = backend.lower()
    backends = available_backends(only_first=(backend == 'auto'))

    if not backends:
        raise BaseException("None of the supported backends appear to be "
                            "available. Please install either `pyfqmr`, `open3d` "
                            "or `pymeshlab` via `pip`, or install Blender 3D.")

    if backend == 'auto':
        backend = backends[0]
    elif backend not in backends:
        raise ValueError(f'Backend "{backend}" appears to not be available. '
                         'Please choose one of the available backends: '
                         f'{", ".join(backends)}')

    if not inplace:
        x = x.copy()

    # pyfqmr/open3d expect an absolute target face count while
    # blender/pymeshlab expect a ratio -> normalize F accordingly
    n_faces = len(x.faces)
    if backend in ('pyfqmr', 'open3d'):
        target = F * n_faces if F < 1 else F
        if backend == 'pyfqmr':
            _ = simplify_mesh_fqmr(x, F=target, inplace=True, **kwargs)
        else:
            _ = simplify_mesh_open3d(x, F=target, inplace=True, **kwargs)
    elif backend in ('blender', 'pymeshlab'):
        ratio = F / n_faces if F > 1 else F
        if backend == 'blender':
            # NOTE(review): the blender backend is called without **kwargs
            # (same as the original implementation) - presumably it accepts
            # no extra keyword arguments; confirm before changing.
            _ = simplify_mesh_blender(x, F=ratio, inplace=True)
        else:
            _ = simplify_mesh_pyml(x, F=ratio, inplace=True, **kwargs)

    return x

Turn neuron into skeleton.

Currently, we can only skeletonize meshes, dotprops and point clouds but are looking into ways to also do it for VoxelNeurons.

For meshes, this function is a thin-wrapper for skeletor. It uses sensible defaults for neurons but if you want to fine-tune your skeletons you should look into using skeletor directly.

PARAMETER DESCRIPTION
x
    Mesh(es) to skeletonize. Note that the quality of the results
    very much depends on the mesh, so it might be worth doing some
    pre-processing (see below).

TYPE: MeshNeuron | trimesh.Trimesh | Dotprops

**kwargs
    Keyword arguments are passed through to the respective
    converters:
        - meshes: [`navis.conversion.mesh2skeleton`][]
        - dotprops and point clouds: [`navis.conversion.points2skeleton`][]

DEFAULT: {}

RETURNS DESCRIPTION
skeleton

For meshes, this has a .vertex_map attribute that maps each vertex in the input mesh to a skeleton node ID.

TYPE: navis.TreeNeuron

See Also

navis.drop_fluff Use this if your mesh has lots of tiny free floating bits to reduce noise and speed up skeletonization.

Examples:

>>> import navis
>>> # Get a mesh neuron
>>> n = navis.example_neurons(1, kind='mesh')
>>> # Convert to skeleton
>>> sk = navis.skeletonize(n)
>>> # Mesh vertex indices to node IDs map
>>> sk.vertex_map
array([938, 990, 990, ...,  39, 234, 234])
>>> import navis
>>> # Get a skeleton and turn into dotprops
>>> dp = navis.make_dotprops(navis.example_neurons(1))
>>> # Turn back into a skeleton
>>> sk = navis.skeletonize(dp)
Source code in navis/conversion/wrappers.py
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
@utils.map_neuronlist(desc='Skeletonizing', allow_parallel=True)
def skeletonize(x: Union['core.MeshNeuron', 'core.Dotprops', np.ndarray],
                **kwargs):
    """Turn neuron into skeleton.

    Currently, we can only skeletonize meshes, dotprops and point clouds but
    are looking into ways to also do it for `VoxelNeurons`.

    For meshes, this function is a thin-wrapper for `skeletor`. It uses sensible
    defaults for neurons but if you want to fine-tune your skeletons you should
    look into using `skeletor` directly.

    Parameters
    ----------
    x :         MeshNeuron | trimesh.Trimesh | Dotprops
                Mesh(es) to skeletonize. Note that the quality of the results
                very much depends on the mesh, so it might be worth doing some
                pre-processing (see below).
    **kwargs
                Keyword arguments are passed through to the respective
                converters:
                    - meshes: [`navis.conversion.mesh2skeleton`][]
                    - dotprops and point clouds: [`navis.conversion.points2skeleton`][]

    Returns
    -------
    skeleton :  navis.TreeNeuron
                For meshes, this has a `.vertex_map` attribute that maps each
                vertex in the input mesh to a skeleton node ID.

    See Also
    --------
    [`navis.drop_fluff`][]
                Use this if your mesh has lots of tiny free floating bits to
                reduce noise and speed up skeletonization.

    Examples
    --------
    # Skeletonize a mesh
    >>> import navis
    >>> # Get a mesh neuron
    >>> n = navis.example_neurons(1, kind='mesh')
    >>> # Convert to skeleton
    >>> sk = navis.skeletonize(n)
    >>> # Mesh vertex indices to node IDs map
    >>> sk.vertex_map                                           # doctest: +SKIP
    array([938, 990, 990, ...,  39, 234, 234])

    # Skeletonize dotprops (i.e. point-clouds)
    >>> import navis
    >>> # Get a skeleton and turn into dotprops
    >>> dp = navis.make_dotprops(navis.example_neurons(1))
    >>> # Turn back into a skeleton
    >>> sk = navis.skeletonize(dp)

    """
    if isinstance(x, (core.MeshNeuron, tm.Trimesh)):
        return mesh2skeleton(x, **kwargs)
    elif isinstance(x, (core.Dotprops, )):
        sk = points2skeleton(x.points, **kwargs)
        # Carry over basic meta data from the source neuron
        for attr in ('id', 'units', 'name'):
            if hasattr(x, attr):
                setattr(sk, attr, getattr(x, attr))
        return sk
    elif isinstance(x, np.ndarray):
        # BUGFIX: `x` already is the point cloud here - plain numpy arrays
        # have no `.points` attribute, so `x.points` raised AttributeError
        return points2skeleton(x, **kwargs)

    raise TypeError(f'Unable to skeletonize data of type {type(x)}')

Smooth meshes (TriMesh, MeshNeuron, Volume).

Uses Laplacian smoothing. Not necessarily because that is always the best approach but because there are three backends (see below) that offer similar interfaces.

PARAMETER DESCRIPTION
x
        Mesh(es) to smooth.

TYPE: navis.MeshNeuron/List | navis.Volume | trimesh.Trimesh

iterations
        Round of smoothing to apply.

TYPE: int DEFAULT: 5

L
        Diffusion speed constant lambda. Larger = more aggressive
        smoothing.

TYPE: float [0-1] DEFAULT: 0.5

backend
        Which backend to use. Currently we support `open3d`,
        Blender 3D or `trimesh`.

TYPE: "auto" | "open3d" | "blender" | "trimesh" DEFAULT: 'auto'

inplace
        If True, will perform smoothing on `x`. If False, will
        smooth and return a copy.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
smoothed

Smoothed object.

Source code in navis/meshes/operations.py
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
@utils.map_neuronlist(desc='Smoothing', allow_parallel=True)
def smooth_mesh(x, iterations=5, L=.5, backend='auto', inplace=False):
    """Smooth meshes (TriMesh, MeshNeuron, Volume).

    Uses Laplacian smoothing. Not necessarily because that is always the best
    approach but because there are three backends (see below) that offer similar
    interfaces.

    Parameters
    ----------
    x :             navis.MeshNeuron/List | navis.Volume | trimesh.Trimesh
                    Mesh(es) to smooth.
    iterations :    int
                    Round of smoothing to apply.
    L :             float [0-1]
                    Diffusion speed constant lambda. Larger = more aggressive
                    smoothing.
    backend :       "auto" | "open3d" | "blender" | "trimesh"
                    Which backend to use. Currently we support `open3d`,
                    Blender 3D or `trimesh`.
    inplace :       bool
                    If True, will perform smoothing on `x`. If False, will
                    smooth and return a copy.

    Returns
    -------
    smoothed
                    Smoothed object.

    """
    if not isinstance(backend, str):
        raise TypeError(f'`backend` must be string, got "{type(backend)}"')

    backend = backend.lower()
    backends = available_backends() + ['trimesh']

    # pymeshlab is not supported for smoothing -> remove it from the candidates
    if 'pymeshlab' in backends:
        backends.remove('pymeshlab')

    if backend == 'auto':
        backend = backends[0]
    elif backend not in backends:
        raise ValueError(f'Backend "{backend}" appears to not be available. '
                         'Please choose one of the available backends: '
                         f'{", ".join(backends)}')

    if not inplace:
        x = x.copy()

    # Dispatch to the chosen backend implementation
    smoothers = {'open3d': smooth_mesh_open3d,
                 'blender': smooth_mesh_blender,
                 'trimesh': smooth_mesh_trimesh}
    if backend in smoothers:
        _ = smoothers[backend](x, iterations=iterations, L=L, inplace=True)

    return x

Smooth skeleton(s) using rolling windows.

PARAMETER DESCRIPTION
x
        Neuron(s) to be processed.

TYPE: TreeNeuron | NeuronList

window
        Size (N observations) of the rolling window in number of
        nodes.

TYPE: int DEFAULT: 5

to_smooth
        Columns of the node table to smooth. Should work with any
        numeric column (e.g. 'radius').

TYPE: list DEFAULT: ['x', 'y', 'z']

inplace
        If False, will use and return copy of original neuron(s).

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
TreeNeuron / List

Smoothed neuron(s).

Examples:

Smooth x/y/z locations (default):

>>> import navis
>>> nl = navis.example_neurons(2)
>>> smoothed = navis.smooth_skeleton(nl, window=5)

Smooth only radii:

>>> rad_smoothed = navis.smooth_skeleton(nl, to_smooth='radius')
See Also

navis.smooth_mesh For smoothing MeshNeurons and other mesh-likes. navis.smooth_voxels For smoothing VoxelNeurons.

Source code in navis/morpho/manipulation.py
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
@utils.map_neuronlist(desc="Smoothing", allow_parallel=True)
def smooth_skeleton(
    x: NeuronObject,
    window: int = 5,
    to_smooth: list = ["x", "y", "z"],
    inplace: bool = False,
) -> NeuronObject:
    """Smooth skeleton(s) using rolling windows.

    Parameters
    ----------
    x :             TreeNeuron | NeuronList
                    Neuron(s) to be processed.
    window :        int, optional
                    Size (N observations) of the rolling window in number of
                    nodes.
    to_smooth :     list
                    Columns of the node table to smooth. Should work with any
                    numeric column (e.g. 'radius').
    inplace :       bool, optional
                    If False, will use and return copy of original neuron(s).

    Returns
    -------
    TreeNeuron/List
                    Smoothed neuron(s).

    Examples
    --------
    Smooth x/y/z locations (default):

    >>> import navis
    >>> nl = navis.example_neurons(2)
    >>> smoothed = navis.smooth_skeleton(nl, window=5)

    Smooth only radii:

    >>> rad_smoothed = navis.smooth_skeleton(nl, to_smooth='radius')

    See Also
    --------
    [`navis.smooth_mesh`][]
                    For smoothing MeshNeurons and other mesh-likes.
    [`navis.smooth_voxels`][]
                    For smoothing TreeNeurons.

    """
    # The neuronlist decorator guarantees we receive individual neurons here
    if not isinstance(x, core.TreeNeuron):
        raise TypeError(f"Can only process TreeNeurons, not {type(x)}")

    if not inplace:
        x = x.copy()

    # Work on a node table indexed by node ID
    nodes = x.nodes.set_index("node_id", inplace=False).copy()

    to_smooth = utils.make_iterable(to_smooth)

    # Complain about columns that do not exist in the node table
    miss = to_smooth[~np.isin(to_smooth, nodes.columns)]
    if len(miss):
        raise ValueError(f"Column(s) not found in node table: {miss}")

    # Smooth each linear segment independently
    for seg in config.tqdm(
        x.segments[::-1],
        desc="Smoothing",
        disable=config.pbar_hide,
        leave=config.pbar_leave,
    ):
        # Rolling-window mean over this segment's values
        rolled = nodes.loc[seg, to_smooth].rolling(window, min_periods=1).mean()

        # Write the smoothed values back, preserving the original dtypes
        for col, vals in zip(to_smooth, rolled.values.T):
            nodes.loc[seg, col] = vals.astype(nodes[col].dtype, copy=False)

    # Reassign nodes
    x.nodes = nodes.reset_index(drop=False, inplace=False)

    x._clear_temp_attr()

    return x

Smooth voxel(s) using a Gaussian filter.

PARAMETER DESCRIPTION
x
        Neuron(s) to be processed.

TYPE: TreeNeuron | NeuronList

sigma
        Standard deviation for Gaussian kernel. The standard
        deviations of the Gaussian filter are given for each axis
        as a sequence, or as a single number, in which case it is
        equal for all axes.

TYPE: int | (3, ) ints DEFAULT: 1

inplace
        If False, will use and return copy of original neuron(s).

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
VoxelNeuron / List

Smoothed neuron(s).

Examples:

>>> import navis
>>> n = navis.example_neurons(1, kind='mesh')
>>> vx = navis.voxelize(n, pitch='1 micron')
>>> smoothed = navis.smooth_voxels(vx, sigma=2)
See Also

navis.smooth_mesh For smoothing MeshNeurons and other mesh-likes. navis.smooth_skeleton For smoothing TreeNeurons.

Source code in navis/morpho/images.py
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@utils.map_neuronlist(desc="Smoothing", allow_parallel=True)
def smooth_voxels(
    x: NeuronObject, sigma: int = 1, inplace: bool = False
) -> NeuronObject:
    """Smooth voxel(s) using a Gaussian filter.

    Parameters
    ----------
    x :             TreeNeuron | NeuronList
                    Neuron(s) to be processed.
    sigma :         int | (3, ) ints, optional
                    Standard deviation for Gaussian kernel. The standard
                    deviations of the Gaussian filter are given for each axis
                    as a sequence, or as a single number, in which case it is
                    equal for all axes.
    inplace :       bool, optional
                    If False, will use and return copy of original neuron(s).

    Returns
    -------
    VoxelNeuron/List
                    Smoothed neuron(s).

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1, kind='mesh')
    >>> vx = navis.voxelize(n, pitch='1 micron')
    >>> smoothed = navis.smooth_voxels(vx, sigma=2)

    See Also
    --------
    [`navis.smooth_mesh`][]
                    For smoothing MeshNeurons and other mesh-likes.
    [`navis.smooth_skeleton`][]
                    For smoothing TreeNeurons.

    """
    # The neuronlist decorator guarantees we receive individual neurons here
    if not isinstance(x, core.VoxelNeuron):
        raise TypeError(f"Can only process VoxelNeurons, not {type(x)}")

    # Operate on the original or on a copy, depending on `inplace`
    target = x if inplace else x.copy()

    # Run the Gaussian filter over the (float32) voxel grid
    target._data = gaussian_filter(target.grid.astype(np.float32), sigma=sigma)
    target._clear_temp_attr()

    return target

Split a neuron into axon and dendrite.

The result is highly dependent on the method and on your neuron's morphology and works best for "typical" neurons.

PARAMETER DESCRIPTION
x
            Neuron(s) to split into axon, dendrite (and cell body
            fiber if possible).

TYPE: TreeNeuron | MeshNeuron | NeuronList

metric
            Defines which flow metric we will try to maximize when
            splitting the neuron(s). There are four flavors:

             - 'synapse_flow_centrality' via [`navis.synapse_flow_centrality`][]
               (note that this metric was previously called just "flow_centrality")
             - 'bending_flow' via [`navis.bending_flow`][]
             - 'segregation_index' via [`navis.arbor_segregation_index`][]
             - 'flow_centrality' via [`navis.flow_centrality`][]

            Will try using existing columns in the node table. If
            not present, will invoke the respective functions with
            default parameters. All but `flow_centrality` require
            the neuron to have connectors.

TYPE: 'synapse_flow_centrality' | 'bending_flow' | 'segregation_index' | "flow_centrality" DEFAULT: 'synapse_flow_centrality'

flow_thresh
            The "linker" between axon and dendrites will be the part
            of the neuron with the highest flow (see metric). We
            define it by `max(flow) * flow_thresh`. You might have
            to decrease this value for atypical or not well
            segregated neurons.

TYPE: float [0-1] DEFAULT: 0.9

split
            Method for determining which compartment is axon and
            which is the dendrites:

                - 'prepost' uses number of in- vs. outputs. By default,
                  a ratio of >1 (more out- than inputs) is considered
                  axon and vice versa. You can provide a custom threshold
                  by setting `split='prepost:0.5'` for example. Values
                  above 1.0 will bias towards dendrites and below 1.0
                  towards axon.
                - 'distance' assumes the compartment proximal to the
                  soma is the dendrites.

TYPE: 'prepost' | 'distance' DEFAULT: 'prepost'

cellbodyfiber
            Determines whether we will try to find a cell body
            fiber (CBF).

                - "soma" will try finding the CBF only if the neuron
                  has a soma
                - "root" will consider the root to be the source
                  of the CBF as fallback if there is no soma
                - `False` will not attempt to extract the CBF

            A CBF is something typically found in insect neurons
            which are not bipolar unlike most vertebrate neurons but
            rather have a passive soma some distance away from
            axon/dendrites.

TYPE: "soma" | "root" | False DEFAULT: False

reroot_soma
            If True and neuron has a soma, will make sure the neuron
            is rooted to its soma.

TYPE: bool DEFAULT: True

label_only
            If True, will not split the neuron but rather add a
            "compartment" column to the node and connector table of
            the input neuron.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
NeuronList

Axon, dendrite, linker and CBF (the latter two aren't guaranteed). Fragments will have a new property compartment (see example).

Examples:

>>> import navis
>>> x = navis.example_neurons(1)
>>> split = navis.split_axon_dendrite(x, metric='synapse_flow_centrality',
...                                   reroot_soma=True)
>>> split
<class 'navis.NeuronList'> of 3 neurons
                      neuron_name  id  n_nodes  n_connectors  compartment
0                  neuron 123457   16      148             0         axon
1                  neuron 123457   16     9682          1766       linker
2                  neuron 123457   16     2892           113     dendrite
>>> # For convenience, split_axon_dendrite assigns colors to the resulting
>>> # fragments: axon = red, dendrites = blue, CBF = green
>>> _ = split.plot3d(color=split.color)

Alternatively just label the compartments

>>> x = navis.split_axon_dendrite(x, label_only=True)
>>> x.nodes[~x.nodes.compartment.isnull()].head()
         node_id label        x        y        z     radius  parent_id  type compartment
110      111     0  17024.0  33790.0  26602.0  72.462097        110  slab      linker
111      112     0  17104.0  33670.0  26682.0  72.462097        111  slab      linker
112      113     0  17184.0  33450.0  26782.0  70.000000        112  slab      linker
113      114     0  17244.0  33270.0  26822.0  70.000000        113  slab      linker
114      115     0  17324.0  33150.0  26882.0  74.852798        114  slab      linker
See Also

navis.heal_skeleton Axon/dendrite split works only on neurons consisting of a single tree. Use this function to heal fragmented neurons before trying the axon/dendrite split.

Source code in navis/morpho/manipulation.py
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
@utils.map_neuronlist(desc="Splitting", allow_parallel=True)
@utils.meshneuron_skeleton(
    method="split",
    include_connectors=True,
    copy_properties=["color", "compartment"],
    disallowed_kwargs={"label_only": True},
    heal=True,
)
def split_axon_dendrite(
    x: NeuronObject,
    metric: Union[
        Literal["synapse_flow_centrality"],
        Literal["flow_centrality"],
        Literal["bending_flow"],
        Literal["segregation_index"],
    ] = "synapse_flow_centrality",
    flow_thresh: float = 0.9,
    split: Union[Literal["prepost"], Literal["distance"]] = "prepost",
    cellbodyfiber: Union[Literal["soma"], Literal["root"], bool] = False,
    reroot_soma: bool = True,
    label_only: bool = False,
) -> "core.NeuronList":
    """Split a neuron into axon and dendrite.

    The result is highly dependent on the method and on your neuron's
    morphology and works best for "typical" neurons.

    Parameters
    ----------
    x :                 TreeNeuron | MeshNeuron | NeuronList
                        Neuron(s) to split into axon, dendrite (and cell body
                        fiber if possible).
    metric :            'synapse_flow_centrality' | 'bending_flow' | 'segregation_index' | "flow_centrality", optional
                        Defines which flow metric we will try to maximize when
                        splitting the neuron(s). There are four flavors:

                         - 'synapse_flow_centrality' via [`navis.synapse_flow_centrality`][]
                           (note that this metric was previously called just "flow_centrality")
                         - 'bending_flow' via [`navis.bending_flow`][]
                         - 'segregation_index' via [`navis.arbor_segregation_index`][]
                         - 'flow_centrality' via [`navis.flow_centrality`][]

                        Will try using existing columns in the node table. If
                        not present, will invoke the respective functions with
                        default parameters. All but `flow_centrality` require
                        the neuron to have connectors.
    flow_thresh :       float [0-1]
                        The "linker" between axon and dendrites will be the part
                        of the neuron with the highest flow (see metric). We
                        define it by `max(flow) * flow_thresh`. You might have
                        to decrease this value for atypical or not well
                        segregated neurons.
    split :             'prepost' | 'distance'
                        Method for determining which compartment is axon and
                        which is the dendrites:

                            - 'prepost' uses number of in- vs. outputs. By default,
                              a ratio of >1 (more out- than inputs) is considered
                              axon and vice versa. You can provide a custom threshold
                              by setting `split='prepost:0.5'` for example. Values
                              above 1.0 will bias towards dendrites and below 1.0
                              towards axon.
                            - 'distance' assumes the compartment proximal to the
                              soma is the dendrites.

    cellbodyfiber :     "soma" | "root" | False
                        Determines whether we will try to find a cell body
                        fiber (CBF).

                            - "soma" will try finding the CBF only if the neuron
                              has a soma
                            - "root" will consider the root to be the source
                              of the CBF as fallback if there is no soma
                            - `False` will not attempt to extract the CBF

                        A CBF is something typically found in insect neurons
                        which are not bipolar unlike most vertebrate neurons but
                        rather have a passive soma some distance away from
                        axon/dendrites.
    reroot_soma :       bool, optional
                        If True and neuron has a soma, will make sure the neuron
                        is rooted to its soma.
    label_only :        bool, optional
                        If True, will not split the neuron but rather add a
                        "compartment" column to the node and connector table of
                        the input neuron.

    Returns
    -------
    NeuronList
                        Axon, dendrite, linker and CBF (the latter two aren't
                        guaranteed). Fragments will have a new property
                        `compartment` (see example).

    Examples
    --------
    >>> import navis
    >>> x = navis.example_neurons(1)
    >>> split = navis.split_axon_dendrite(x, metric='synapse_flow_centrality',
    ...                                   reroot_soma=True)
    >>> split                                                   # doctest: +SKIP
    <class 'navis.NeuronList'> of 3 neurons
                          neuron_name  id  n_nodes  n_connectors  compartment
    0                  neuron 123457   16      148             0         axon
    1                  neuron 123457   16     9682          1766       linker
    2                  neuron 123457   16     2892           113     dendrite
    >>> # For convenience, split_axon_dendrite assigns colors to the resulting
    >>> # fragments: axon = red, dendrites = blue, CBF = green
    >>> _ = split.plot3d(color=split.color)

    Alternatively just label the compartments

    >>> x = navis.split_axon_dendrite(x, label_only=True)
    >>> x.nodes[~x.nodes.compartment.isnull()].head()           # doctest: +SKIP
             node_id label        x        y        z     radius  parent_id  type compartment
    110      111     0  17024.0  33790.0  26602.0  72.462097        110  slab      linker
    111      112     0  17104.0  33670.0  26682.0  72.462097        111  slab      linker
    112      113     0  17184.0  33450.0  26782.0  70.000000        112  slab      linker
    113      114     0  17244.0  33270.0  26822.0  70.000000        113  slab      linker
    114      115     0  17324.0  33150.0  26882.0  74.852798        114  slab      linker

    See Also
    --------
    [`navis.heal_skeleton`][]
            Axon/dendrite split works only on neurons consisting of a single
            tree. Use this function to heal fragmented neurons before trying
            the axon/dendrite split.

    """
    # Colors assigned to the resulting fragments (RGB 0-255)
    COLORS = {
        "axon": (178, 34, 34),
        "dendrite": (0, 0, 255),
        "cellbodyfiber": (50, 50, 50),
        "linker": (150, 150, 150),
    }

    # The decorator makes sure that at this point we have single neurons
    if not isinstance(x, core.TreeNeuron):
        raise TypeError(f'Can only process TreeNeurons, got "{type(x)}"')

    if not x.has_connectors:
        if metric != "flow_centrality":
            raise ValueError("Neuron must have connectors.")
        elif split == "prepost":
            raise ValueError(
                'Set `split="distance"` when trying to split neurons '
                "without connectors."
            )

    # A custom prepost threshold can be passed as e.g. `split='prepost:0.5'`
    split_val = 1
    if isinstance(split, str) and ":" in split:
        split, split_val = split.split(":")
        split_val = float(split_val)

    _METRIC = (
        "synapse_flow_centrality",
        "bending_flow",
        "segregation_index",
        "flow_centrality",
    )
    utils.eval_param(metric, "metric", allowed_values=_METRIC)
    utils.eval_param(split, "split", allowed_values=("prepost", "distance"))
    utils.eval_param(
        cellbodyfiber, "cellbodyfiber", allowed_values=("soma", "root", False)
    )

    if metric == "flow_centrality":
        # NOTE: fixed missing space and missing closing backtick in this
        # user-facing deprecation message.
        msg = (
            "As of navis version 1.4.0, `method='flow_centrality'` "
            "uses synapse-independent, morphology-only flow to generate splits. "
            "Please use `method='synapse_flow_centrality'` for "
            "synapse-based axon-dendrite splits. "
            "This warning will be removed in a future version of navis."
        )
        warnings.warn(msg, DeprecationWarning)
        logger.warning(msg)

    if len(x.root) > 1:
        raise ValueError(
            f"Unable to split neuron {x.id}: multiple roots. "
            "Try `navis.heal_skeleton(x)` to merge "
            "disconnected fragments."
        )

    # Make copy, so that we don't screw things up
    original = x
    x = x.copy()

    if np.any(x.soma) and not np.all(np.isin(x.soma, x.root)) and reroot_soma:
        x.reroot(x.soma, inplace=True)

    FUNCS = {
        "bending_flow": mmetrics.bending_flow,
        "synapse_flow_centrality": mmetrics.synapse_flow_centrality,
        "flow_centrality": mmetrics.flow_centrality,
        "segregation_index": mmetrics.arbor_segregation_index,
    }

    if metric not in FUNCS:
        raise ValueError(f'Unknown `metric`: "{metric}"')

    # Add metric if not already present
    if metric not in x.nodes.columns:
        _ = FUNCS[metric](x)

    # We can lock this neuron indefinitely since we are not returning it
    x._lock = 1

    # Make sure we have a metric for every single node
    if np.any(np.isnan(x.nodes[metric].values)):
        raise ValueError(f'NaN values encountered in "{metric}"')

    # The first step is to remove the linker -> that's the bit that connects
    # the axon and dendrite
    is_linker = x.nodes[metric] >= x.nodes[metric].max() * flow_thresh
    linker = set(x.nodes.loc[is_linker, "node_id"].values)

    # We try to perform processing on the graph to avoid overhead from
    # (re-)generating neurons
    g = x.graph.to_undirected()

    # Drop linker nodes
    g.remove_nodes_from(linker)

    # Break into connected components
    cc = list(nx.connected_components(g))

    # Figure out which one is which
    axon = set()
    if split == "prepost":
        # Collect # of pre- and postsynapses on each of the connected components
        sm = pd.DataFrame()
        sm["n_nodes"] = [len(c) for c in cc]
        pre = x.presynapses
        post = x.postsynapses
        sm["n_pre"] = [pre[pre.node_id.isin(c)].shape[0] for c in cc]
        sm["n_post"] = [post[post.node_id.isin(c)].shape[0] for c in cc]
        sm["prepost_ratio"] = sm.n_pre / sm.n_post
        sm["frac_post"] = sm.n_post / sm.n_post.sum()
        sm["frac_pre"] = sm.n_pre / sm.n_pre.sum()

        # In theory, we can encounter neurons with either no pre- or no
        # postsynapses (e.g. sensory neurons).
        # For those n_pre/post.sum() would cause a division by 0 which in turn
        # causes frac_pre/post to be NaN. By filling, we make sure that the
        # split doesn't fail further down but they might end up missing either
        # an axon or a dendrite (which may actually be OK?).
        sm["frac_post"] = sm["frac_post"].fillna(0)
        sm["frac_pre"] = sm["frac_pre"].fillna(0)

        # Produce the ratio of pre- to postsynapses
        sm["frac_prepost"] = sm.frac_pre / sm.frac_post

        # Some small side branches might have either no pre- or no postsynapses.
        # Even if they have synapses: if the total count is low they might be
        # incorrectly assigned to a compartment. Here, we will make sure that
        # they are disregarded for now to avoid introducing noise. Instead we
        # will connect them onto their parent compartment later.
        sm.loc[
            sm[["frac_pre", "frac_post"]].max(axis=1) < 0.01,
            ["prepost_ratio", "frac_prepost"],
        ] = np.nan

        # Each fragment is considered separately as either giver or recipient
        # of flow:
        # - prepost < 1 = dendritic
        # - prepost > 1 = axonic
        dendrite = [cc[i] for i in sm[sm.frac_prepost < split_val].index.values]
        if len(dendrite):
            dendrite = set.union(*dendrite)
        axon = [cc[i] for i in sm[sm.frac_prepost >= split_val].index.values]
        if len(axon):
            axon = set.union(*axon)
    else:
        for c in cc:
            # If original root present assume it's the proximal dendrites
            if x.root[0] in c:
                dendrite = c
            else:
                axon = axon | c

    # Now that we have in principle figured out what's what we need to do some
    # clean-up
    # First: it is quite likely that the axon(s) and/or the dendrites fragmented
    # and we need to stitch them back together using linker but not dendrites!
    g = x.graph.subgraph(np.append(list(axon), list(linker)))
    axon = set(graph.connected_subgraph(g, axon)[0])

    # Remove nodes that were re-assigned to axon from linker
    linker = linker - axon

    g = x.graph.subgraph(np.append(list(dendrite), list(linker)))
    dendrite = set(graph.connected_subgraph(g, dendrite)[0])

    # Remove nodes that were re-assigned to dendrite from linker
    linker = linker - set(dendrite)

    # Next up: finding the CBF
    # The CBF is defined as the part of the neuron between the soma (or root)
    # and the first branch point with sizeable synapse flow
    cbf = set()
    if cellbodyfiber and (np.any(x.soma) or cellbodyfiber == "root"):
        # To excise the CBF, we subset the neuron to those parts with
        # no/hardly any flow and find the part that contains the soma
        no_flow = x.nodes[x.nodes[metric] <= x.nodes[metric].max() * 0.05]
        g = x.graph.subgraph(no_flow.node_id.values)

        # Find the connected component containing the soma
        for c in nx.connected_components(g.to_undirected()):
            if x.root[0] in c:
                cbf = set(c)
                dendrite = dendrite - cbf
                axon = axon - cbf
                linker = linker - cbf
                break

    # See if we lost any nodes on the way
    miss = set(original.nodes.node_id.values) - linker - axon - dendrite - cbf
    miss = np.array(list(miss))

    # From hereon we can use lists
    linker = list(linker)
    axon = list(axon)
    cbf = list(cbf)
    dendrite = list(dendrite)

    # If we have, assign these nodes to the closest node with a compartment
    if any(miss):
        # Find the closest nodes with a compartment
        m = graph.geodesic_matrix(original, directed=False, weight=None, from_=miss)

        # Subset geodesic matrix to nodes that have a compartment
        nodes_w_comp = original.nodes.node_id.values[
            ~np.isin(original.nodes.node_id.values, miss)
        ]
        closest = np.argmin(m.loc[:, nodes_w_comp].values, axis=1)
        closest_id = nodes_w_comp[closest]

        linker += m.index.values[np.isin(closest_id, linker)].tolist()
        axon += m.index.values[np.isin(closest_id, axon)].tolist()
        dendrite += m.index.values[np.isin(closest_id, dendrite)].tolist()
        cbf += m.index.values[np.isin(closest_id, cbf)].tolist()

    # Add labels
    if label_only:
        nodes = original.nodes
        nodes["compartment"] = None
        is_linker = nodes.node_id.isin(linker)
        is_axon = nodes.node_id.isin(axon)
        is_dend = nodes.node_id.isin(dendrite)
        is_cbf = nodes.node_id.isin(cbf)
        nodes.loc[is_linker, "compartment"] = "linker"
        nodes.loc[is_dend, "compartment"] = "dendrite"
        nodes.loc[is_axon, "compartment"] = "axon"
        nodes.loc[is_cbf, "compartment"] = "cellbodyfiber"

        # Set connector compartments
        cmp_map = original.nodes.set_index("node_id").compartment.to_dict()
        original.connectors["compartment"] = original.connectors.node_id.map(cmp_map)

        # Turn into categorical data
        original.nodes["compartment"] = original.nodes.compartment.astype("category")
        original.connectors["compartment"] = original.connectors.compartment.astype(
            "category"
        )

        return original

    # Generate the actual splits
    nl = []
    for label, nodes in zip(
        ["cellbodyfiber", "dendrite", "linker", "axon"], [cbf, dendrite, linker, axon]
    ):
        if not len(nodes):
            continue
        n = subset.subset_neuron(original, nodes)
        n.color = COLORS.get(label, (100, 100, 100))
        n._register_attr("compartment", label)
        nl.append(n)

    return core.NeuronList(nl)

Split neuron into fragments.

Cuts are based on longest neurites: the first cut is made where the second largest neurite merges onto the largest neurite, the second cut is made where the third largest neurite merges into either of the first fragments and so on.

PARAMETER DESCRIPTION
x
            Must be a single neuron.

TYPE: TreeNeuron | MeshNeuron | NeuronList

n
            Number of fragments to split into. Must be >1.

TYPE: int DEFAULT: 2

min_size
            Minimum size of fragment to be cut off. If too
            small, will stop cutting. This takes only the longest
            path in each fragment into account! If the neuron(s),
            has its `.units` set, you can also pass this as a string
            such as "10 microns".

TYPE: int | str DEFAULT: None

reroot_soma
            If True, neuron will be rerooted to soma.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
NeuronList

Examples:

>>> import navis
>>> x = navis.example_neurons(1)
>>> # Cut into two fragments
>>> cut1 = navis.split_into_fragments(x, n=2)
>>> # Cut into fragments of >10 um size
>>> cut2 = navis.split_into_fragments(x, n=float('inf'), min_size=10e3)
Source code in navis/graph/graph_utils.py
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
@utils.meshneuron_skeleton(method="split")
def split_into_fragments(
    x: "core.NeuronObject",
    n: int = 2,
    min_size: Optional[Union[float, str]] = None,
    reroot_soma: bool = False,
) -> "core.NeuronList":
    """Split neuron into fragments.

    Cuts are based on longest neurites: the first cut is made where the second
    largest neurite merges onto the largest neurite, the second cut is made
    where the third largest neurite merges into either of the first fragments
    and so on.

    Parameters
    ----------
    x :                 TreeNeuron | MeshNeuron | NeuronList
                        Must be a single neuron.
    n :                 int, optional
                        Number of fragments to split into. Must be >1.
    min_size :          int | str, optional
                        Minimum size of fragment to be cut off. If too
                        small, will stop cutting. This takes only the longest
                        path in each fragment into account! If the neuron(s)
                        have their `.units` set, you can also pass this as a
                        string such as "10 microns".
    reroot_soma :       bool, optional
                        If True, neuron will be rerooted to soma.

    Returns
    -------
    NeuronList

    Examples
    --------
    >>> import navis
    >>> x = navis.example_neurons(1)
    >>> # Cut into two fragments
    >>> cut1 = navis.split_into_fragments(x, n=2)
    >>> # Cut into fragments of >10 um size
    >>> cut2 = navis.split_into_fragments(x, n=float('inf'), min_size=10e3)

    """
    if isinstance(x, core.NeuronList):
        if len(x) == 1:
            x = x[0]
        else:
            raise Exception(
                f"{x.shape[0]} neurons provided. Please provide "
                "only a single neuron!"
            )

    if not isinstance(x, core.TreeNeuron):
        raise TypeError(f'Expected a single TreeNeuron, got "{type(x)}"')

    if n < 2:
        raise ValueError("Number of fragments must be at least 2.")

    # At this point x is TreeNeuron
    x: core.TreeNeuron

    # Translate e.g. "10 microns" into the neuron's own units (if set)
    min_size = x.map_units(min_size, on_error="raise")

    # NOTE(review): this reroots the input neuron in place - confirm whether
    # the @meshneuron_skeleton wrapper hands us a copy or the caller's neuron
    if reroot_soma and not isinstance(x.soma, type(None)):
        x.reroot(x.soma, inplace=True)

    # Collect nodes of the n longest neurites
    tn_to_preserve: List[int] = []
    fragments = []
    i = 0
    while i < n:
        if tn_to_preserve:
            # Generate fresh graph
            g = graph.neuron2nx(x)

            # Remove nodes that we have already preserved
            g.remove_nodes_from(tn_to_preserve)
        else:
            g = x.graph

        # Get path
        longest_path = nx.dag_longest_path(g)

        # Check if fragment is still long enough
        if min_size:
            # Sum the weights of edges whose target lies on the longest path
            this_length = sum(
                [
                    v
                    for k, v in nx.get_edge_attributes(g, "weight").items()
                    if k[1] in longest_path
                ]
            )
            if this_length <= min_size:
                break

        tn_to_preserve += longest_path
        fragments.append(longest_path)

        i += 1

    # Next, make some virtual cuts and get the complement of nodes for
    # each fragment
    graphs = [x.graph.copy()]
    # Grab the graph once to avoid overhead from repeated stale-checking
    g = x.graph
    for fr in fragments[1:]:
        # Walk upstream (towards root) from the last node of this fragment
        this_g = nx.bfs_tree(g, fr[-1], reverse=True)

        graphs.append(this_g)

    # Next, we need to remove nodes that are in subsequent graphs from
    # those graphs
    for i, g in enumerate(graphs):
        for g2 in graphs[i + 1 :]:
            g.remove_nodes_from(g2.nodes)

    # Now make neurons
    nl = core.NeuronList([morpho.subset_neuron(x, g) for g in graphs])

    return nl

Stitch multiple skeletons together.

Uses minimum spanning tree to determine a way to connect all fragments while minimizing length (Euclidean distance) of the new edges. Nodes that have been stitched will get a "stitched" tag.

Important

If duplicate node IDs are found across the fragments to stitch they will be remapped to new unique values!

PARAMETER DESCRIPTION
x
            Neurons to stitch (see examples).

TYPE: NeuronList | list of TreeNeuron/List DEFAULT: ()

method
            Set stitching method:
                (1) 'LEAFS': Only leaf (including root) nodes will
                    be allowed to make new edges.
                (2) 'ALL': All nodes are considered.
                (3) 'NONE': Node and connector tables will simply
                    be combined without generating any new edges.
                    The resulting neuron will have multiple roots.
                (4) List of node IDs that are allowed to be used.
                    Note that if these nodes are insufficient
                    the resulting neuron will not be fully
                    connected.

TYPE: 'LEAFS' | 'ALL' | 'NONE' | list of node IDs DEFAULT: 'ALL'

master
            Sets the master neuron:
                (1) 'SOMA': The largest fragment with a soma
                    becomes the master neuron. If no neuron with
                    soma, will pick the largest (option 2).
                (2) 'LARGEST': The largest (by number of nodes)
                    fragment becomes the master neuron.
                (3) 'FIRST': The first fragment provided becomes
                    the master neuron.

TYPE: 'SOMA' | 'LARGEST' | 'FIRST' DEFAULT: 'SOMA'

max_dist
            Max distance at which to stitch nodes. This can result
            in a neuron with multiple roots.

TYPE: float, optional DEFAULT: None

RETURNS DESCRIPTION
TreeNeuron

Stitched neuron.

See Also

navis.combine_neurons Combines multiple neurons of the same type into one without stitching. Works on TreeNeurons, MeshNeurons and Dotprops.

Examples:

Stitching neuronlist by simply combining data tables:

>>> import navis
>>> nl = navis.example_neurons(2)
>>> stitched = navis.stitch_skeletons(nl, method='NONE')

Stitching fragmented neurons:

>>> a = navis.example_neurons(1)
>>> fragments = navis.cut_skeleton(a, 100)
>>> stitched = navis.stitch_skeletons(fragments, method='LEAFS')
Source code in navis/morpho/manipulation.py
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
def stitch_skeletons(
    *x: Union[Sequence[NeuronObject], "core.NeuronList"],
    method: Union[
        Literal["LEAFS"], Literal["ALL"], Literal["NONE"], Sequence[int]
    ] = "ALL",
    master: Union[Literal["SOMA"], Literal["LARGEST"], Literal["FIRST"]] = "SOMA",
    max_dist: Optional[float] = None,
) -> "core.TreeNeuron":
    """Stitch multiple skeletons together.

    Uses minimum spanning tree to determine a way to connect all fragments
    while minimizing length (Euclidean distance) of the new edges. Nodes
    that have been stitched will get a "stitched" tag.

    Important
    ---------
    If duplicate node IDs are found across the fragments to stitch they will
    be remapped to new unique values!

    Parameters
    ----------
    x :                 NeuronList | list of TreeNeuron/List
                        Neurons to stitch (see examples).
    method :            'LEAFS' | 'ALL' | 'NONE' | list of node IDs
                        Set stitching method:
                            (1) 'LEAFS': Only leaf (including root) nodes will
                                be allowed to make new edges.
                            (2) 'ALL': All nodes are considered.
                            (3) 'NONE': Node and connector tables will simply
                                be combined without generating any new edges.
                                The resulting neuron will have multiple roots.
                            (4) List of node IDs that are allowed to be used.
                                Note that if these nodes are insufficient
                                the resulting neuron will not be fully
                                connected.

    master :            'SOMA' | 'LARGEST' | 'FIRST', optional
                        Sets the master neuron:
                            (1) 'SOMA': The largest fragment with a soma
                                becomes the master neuron. If no neuron with
                                soma, will pick the largest (option 2).
                            (2) 'LARGEST': The largest (by number of nodes)
                                fragment becomes the master neuron.
                            (3) 'FIRST': The first fragment provided becomes
                                the master neuron.
    max_dist :          float, optional
                        Max distance at which to stitch nodes. This can result
                        in a neuron with multiple roots.

    Returns
    -------
    TreeNeuron
                        Stitched neuron.

    See Also
    --------
    [`navis.combine_neurons`][]
                        Combines multiple neurons of the same type into one
                        without stitching. Works on TreeNeurons, MeshNeurons
                        and Dotprops.

    Examples
    --------
    Stitching neuronlist by simply combining data tables:

    >>> import navis
    >>> nl = navis.example_neurons(2)
    >>> stitched = navis.stitch_skeletons(nl, method='NONE')

    Stitching fragmented neurons:

    >>> a = navis.example_neurons(1)
    >>> fragments = navis.cut_skeleton(a, 100)
    >>> stitched = navis.stitch_skeletons(fragments, method='LEAFS')

    """
    # `master` is normalised to upper case; NOTE(review): `method` is NOT -
    # e.g. method='none' would fall through to the MST stitching below.
    # Confirm whether `method` should be normalised as well.
    master = str(master).upper()
    ALLOWED_MASTER = ("SOMA", "LARGEST", "FIRST")
    utils.eval_param(master, "master", allowed_values=ALLOWED_MASTER)

    # Compile list of individual neurons
    neurons = utils.unpack_neurons(x)

    # Use copies of the original neurons!
    nl = core.NeuronList(neurons).copy()

    if len(nl) < 2:
        logger.warning(f"Need at least 2 neurons to stitch, found {len(nl)}")
        return nl[0]

    # If no soma, switch to largest
    if master == "SOMA" and not any(nl.has_soma):
        master = "LARGEST"

    # First find master
    if master == "SOMA":
        # Pick the first neuron with a soma
        m_ix = [i for i, n in enumerate(nl) if n.has_soma][0]
    elif master == "LARGEST":
        # Pick the largest neuron
        m_ix = sorted(list(range(len(nl))), key=lambda x: nl[x].n_nodes, reverse=True)[
            0
        ]
    else:
        # Pick the first neuron
        m_ix = 0
    m = nl[m_ix]

    # Check if we need to make any node IDs unique
    if nl.nodes.duplicated(subset="node_id").sum() > 0:
        # Master neuron will not be changed
        seen_tn: Set[int] = set(m.nodes.node_id)
        for i, n in enumerate(nl):
            # Skip the master neuron
            # Note we're using the index in case we have two neurons that are
            # equal (by our definition) - happens e.g. if a neuron has been
            # mirrored
            if i == m_ix:
                continue

            # Grab nodes
            this_tn = set(n.nodes.node_id)

            # Get duplicate node IDs
            non_unique = seen_tn & this_tn

            # Add this neuron's existing nodes to seen
            seen_tn = seen_tn | this_tn
            if non_unique:
                # Generate new, unique node IDs
                new_tn = np.arange(0, len(non_unique)) + max(seen_tn) + 1

                # Generate new map
                new_map = dict(zip(non_unique, new_tn))

                # Remap node IDs - if no new value, keep the old
                n.nodes["node_id"] = n.nodes.node_id.map(lambda x: new_map.get(x, x))

                # Connector and tag tables reference node IDs too - keep them in sync
                if n.has_connectors:
                    n.connectors["node_id"] = n.connectors.node_id.map(
                        lambda x: new_map.get(x, x)
                    )

                if getattr(n, "tags", None) is not None:
                    n.tags = {new_map.get(k, k): v for k, v in n.tags.items()}  # type: ignore

                # Remap parent IDs
                new_map[None] = -1  # type: ignore
                n.nodes["parent_id"] = n.nodes.parent_id.map(
                    lambda x: new_map.get(x, x)
                ).astype(int)

                # Add new nodes to seen
                seen_tn = seen_tn | set(new_tn)

                # Make sure the graph is updated
                n._clear_temp_attr()

    # We will start by simply merging all neurons into one
    m._nodes = pd.concat(
        [n.nodes for n in nl],  # type: ignore  # no stubs for concat
        ignore_index=True,
    )

    if any(nl.has_connectors):
        m._connectors = pd.concat(
            [n.connectors for n in nl],  # type: ignore  # no stubs for concat
            ignore_index=True,
        )

    # Merge tag dicts from all fragments into the master neuron
    if not m.has_tags or not isinstance(m.tags, dict):
        m.tags = {}  # type: ignore  # TreeNeuron has no tags

    for n in nl:
        for k, v in (getattr(n, "tags", None) or {}).items():
            m.tags[k] = m.tags.get(k, []) + list(utils.make_iterable(v))

    # Reset temporary attributes of our final neuron
    m._clear_temp_attr()

    # If this is all we meant to do, return this neuron
    if not utils.is_iterable(method) and (method == "NONE" or method is None):
        return m

    # Otherwise connect the fragments via a minimum spanning tree
    return _stitch_mst(m, nodes=method, inplace=False, max_dist=max_dist)

Calculate Strahler Index (SI).

Starts with SI of 1 at each leaf and walks to root. At forks with different incoming SIs, the highest index is continued. At forks with the same incoming SI, highest index + 1 is continued.

PARAMETER DESCRIPTION
x

TYPE: TreeNeuron | MeshNeuron | NeuronList

method
            Method used to calculate Strahler indices: 'standard'
            will use the method described above; 'greedy' will
            always increase the index at converging branches
            whether these branches have the same index or not.

TYPE: 'standard' | 'greedy' DEFAULT: 'standard'

to_ignore
            List of node IDs to ignore. Must be the FIRST node
            of the branch. Excluded branches will not contribute
            to Strahler index calculations and instead be assigned
            the SI of their parent branch.

TYPE: iterable DEFAULT: []

min_twig_size
            If provided, will ignore twigs with fewer nodes than
            this. Instead, they will be assigned the SI of their
            parent branch.

TYPE: int DEFAULT: None

RETURNS DESCRIPTION
neuron

Adds "strahler_index" as a column in the node table (for TreeNeurons) or as a `.strahler_index` property (for MeshNeurons).

See Also

navis.segment_analysis This function provides by-segment morphometrics, including Strahler indices.

Examples:

>>> import navis
>>> n = navis.example_neurons(2, kind='skeleton')
>>> n.reroot(n.soma, inplace=True)
>>> _ = navis.strahler_index(n)
>>> n[0].nodes.strahler_index.max()
6
>>> m = navis.example_neurons(1, kind='mesh')
>>> _ = navis.strahler_index(m)
>>> m.strahler_index.max()
5
Source code in navis/morpho/mmetrics.py
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
@utils.map_neuronlist(desc="Calc. SI", allow_parallel=True)
@utils.meshneuron_skeleton(
    method="node_properties", reroot_soma=True, node_props=["strahler_index"]
)
def strahler_index(
    x: "core.NeuronObject",
    method: Union[Literal["standard"], Literal["greedy"]] = "standard",
    to_ignore: list = [],
    min_twig_size: Optional[int] = None,
) -> "core.NeuronObject":
    """Calculate Strahler Index (SI).

    Starts with SI of 1 at each leaf and walks to root. At forks with different
    incoming SIs, the highest index is continued. At forks with the same
    incoming SI, highest index + 1 is continued.

    Parameters
    ----------
    x :                 TreeNeuron | MeshNeuron | NeuronList
    method :            'standard' | 'greedy', optional
                        Method used to calculate Strahler indices: 'standard'
                        will use the method described above; 'greedy' will
                        always increase the index at converging branches
                        whether these branches have the same index or not.
    to_ignore :         iterable, optional
                        List of node IDs to ignore. Must be the FIRST node
                        of the branch. Excluded branches will not contribute
                        to Strahler index calculations and instead be assigned
                        the SI of their parent branch.
    min_twig_size :     int, optional
                        If provided, will ignore twigs with fewer nodes than
                        this. Instead, they will be assigned the SI of their
                        parent branch.

    Returns
    -------
    neuron
                Adds "strahler_index" as column in the node table (for
                TreeNeurons) or as `.strahler_index` property
                (for MeshNeurons).

    See Also
    --------
    [`navis.segment_analysis`][]
                This function provides by-segment morphometrics, including
                Strahler indices.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(2, kind='skeleton')
    >>> n.reroot(n.soma, inplace=True)
    >>> _ = navis.strahler_index(n)
    >>> n[0].nodes.strahler_index.max()
    6
    >>> m = navis.example_neurons(1, kind='mesh')
    >>> _ = navis.strahler_index(m)
    >>> m.strahler_index.max()
    5

    """
    utils.eval_param(x, name="x", allowed_types=(core.TreeNeuron,))

    if method not in ["standard", "greedy"]:
        raise ValueError(f'`method` must be "standard" or "greedy", got "{method}"')

    # Fast path: delegate to the compiled fastcore implementation if available
    if utils.fastcore:
        x.nodes["strahler_index"] = utils.fastcore.strahler_index(
            x.nodes.node_id.values,
            x.nodes.parent_id.values,
            method=method,
            to_ignore=to_ignore,
            min_twig_size=min_twig_size,
        ).astype(np.int16)
        x.nodes["strahler_index"] = x.nodes.strahler_index.fillna(1)
        return x

    # Find branch, root and end nodes
    if "type" not in x.nodes:
        graph.classify_nodes(x)

    end_nodes = x.nodes[x.nodes.type == "end"].node_id.values
    branch_nodes = x.nodes[x.nodes.type == "branch"].node_id.values
    root = x.nodes[x.nodes.type == "root"].node_id.values

    # Sets for O(1) membership tests in the walk below
    end_nodes = set(end_nodes)
    branch_nodes = set(branch_nodes)
    root = set(root)

    if min_twig_size:
        # Terminal segments shorter than `min_twig_size` are treated like
        # explicitly ignored branches
        to_ignore = np.append(
            to_ignore,
            [
                seg[0]
                for seg in x.small_segments
                if seg[0] in end_nodes and len(seg) < min_twig_size
            ],
        ).astype(int)

    # Generate dicts for childs and parents
    list_of_childs = graph.generate_list_of_childs(x)

    # Get a node ID -> parent ID dictionary for fast lookups
    parents = x.nodes.set_index("node_id").parent_id.to_dict()

    # Do NOT name any parameter `strahler_index` - this overwrites the function!
    SI: Dict[int, int] = {}

    # Walk leaf -> root, one linear segment at a time. A branch point is only
    # queued once all of its children have been processed. NOTE: this aliases
    # `end_nodes`, which is not used again below.
    starting_points = end_nodes
    seen = set()
    while starting_points:
        logger.debug(f"New starting point. Remaining: {len(starting_points)}")
        this_node = starting_points.pop()

        # Get upstream indices for this branch
        previous_indices = [SI[c] for c in list_of_childs[this_node]]

        # If this is a not-a-branch branch
        if this_node in to_ignore:
            this_branch_index = 0
        # If this is an end node: start at 1
        elif not len(previous_indices):
            this_branch_index = 1
        # If this is a slab: assign SI of predecessor
        elif len(previous_indices) == 1:
            this_branch_index = previous_indices[0]
        # If this is a branch point and we're using the greedy method
        elif method == "greedy":
            this_branch_index = sum(previous_indices)
        # If this is a branch point at which similar indices collide: +1
        elif previous_indices.count(max(previous_indices)) >= 2:
            this_branch_index = max(previous_indices) + 1
        # If just a branch point: continue max SI
        else:
            this_branch_index = max(previous_indices)

        # Keep track of that this node has been processed
        seen.add(this_node)

        # Now walk down this segment
        # Find parent
        segment = [this_node]
        parent_node = parents[this_node]
        while parent_node >= 0 and parent_node not in branch_nodes:
            this_node = parent_node
            parent_node = parents[this_node]
            segment.append(this_node)
            seen.add(this_node)

        # Update indices for the entire segment
        SI.update({n: this_branch_index for n in segment})

        # The last `this_node` is either a branch node or the root
        # If a branch point: check, if all its childs have already been
        # processed.
        # Note: must be `>= 0` (not `> 0`) to match the walk condition above -
        # otherwise a branch node with ID 0 would never be queued and all
        # nodes rootward of it would fall back to the default SI of 1.
        if parent_node >= 0:
            node_ready = True
            for child in list_of_childs[parent_node]:
                if child not in seen:
                    node_ready = False
                    break

            if node_ready is True:
                starting_points.add(parent_node)

    # Fix branches that were ignored
    if len(to_ignore):
        # Go over all terminal branches with the tag
        for tn in x.nodes[
            (x.nodes.type == "end") & x.nodes.node_id.isin(to_ignore)
        ].node_id.values:
            # Get this terminal's segment
            this_seg = [s for s in x.small_segments if s[0] == tn][0]
            # Get strahler index of parent branch
            this_SI = SI.get(this_seg[-1], 1)
            SI.update({n: this_SI for n in this_seg})

    # Disconnected single nodes (e.g. after pruning) will end up w/o an entry
    # --> we will give them an SI of 1
    x.nodes["strahler_index"] = x.nodes.node_id.map(lambda x: SI.get(x, 1))

    # Set correct data type
    x.nodes["strahler_index"] = x.nodes.strahler_index.astype(np.int16)

    return x

Subset a neuron to a given set of nodes/vertices.

Note that for MeshNeurons it is not guaranteed that all vertices in subset survive because we will also drop degenerate vertices that do not participate in any faces.

PARAMETER DESCRIPTION
x
              Neuron to subset. When passing a NeuronList, it's advised
              to use a function for `subset` (see below).

TYPE: TreeNeuron | MeshNeuron | Dotprops | NeuronList

subset
              Subset of the neuron to keep. Depending on the neuron:
                For TreeNeurons:
                 - node IDs
                 - a boolean mask matching the number of nodes
                 - DataFrame with `node_id` column
                For MeshNeurons:
                 - vertex indices
                 - a boolean mask matching either the number of
                   vertices or faces
                For Dotprops:
                 - point indices
                 - a boolean mask matching the number of points
              Alternatively, you can pass a function that accepts
              a neuron and returns a suitable `subset` as described
              above. This is useful e.g. when wanting to subset a
              list of neurons.

TYPE: list-like | set | NetworkX.Graph | pandas.DataFrame | Callable

keep_disc_cn
              If False, will remove disconnected connectors that
              have "lost" their parent node/vertex.

TYPE: bool DEFAULT: False

prevent_fragments
              If True, will add nodes/vertices to `subset`
              required to keep neuron from fragmenting. Ignored for
              `Dotprops`.

TYPE: bool DEFAULT: False

inplace
              If False, a copy of the neuron is returned.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
TreeNeuron | MeshNeuron | Dotprops | NeuronList

Examples:

Subset skeleton to all branches with less than 10 nodes

>>> import navis
>>> # Get neuron
>>> n = navis.example_neurons(1)
>>> # Get all linear segments
>>> segs = n.segments
>>> # Get short segments
>>> short_segs = [s for s in segs if len(s) <= 10]
>>> # Flatten segments into list of nodes
>>> nodes_to_keep = [n for s in short_segs for n in s]
>>> # Subset neuron
>>> n_short = navis.subset_neuron(n, subset=nodes_to_keep)

Subset multiple neurons using a callable

>>> import navis
>>> nl = navis.example_neurons(2)
>>> # Subset neurons to all leaf nodes
>>> nl_end = navis.subset_neuron(
...     nl,
...     subset=lambda x: x.leafs.node_id
... )
See Also

navis.cut_skeleton Cut neuron at specific points. navis.in_volume To intersect a neuron with a volume (mesh).

Source code in navis/morpho/subset.py
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
@utils.map_neuronlist(desc="Subsetting", allow_parallel=True)
@utils.lock_neuron
def subset_neuron(
    x: Union["core.TreeNeuron", "core.MeshNeuron"],
    subset: Union[Sequence[Union[int, str]], nx.DiGraph, pd.DataFrame, Callable],
    inplace: bool = False,
    keep_disc_cn: bool = False,
    prevent_fragments: bool = False,
) -> "core.NeuronObject":
    """Subset a neuron to a given set of nodes/vertices.

    Note that for `MeshNeurons` there is no guarantee that every vertex in
    `subset` survives: degenerate vertices that do not participate in any
    face are dropped as well.

    Parameters
    ----------
    x :                   TreeNeuron | MeshNeuron | Dotprops | NeuronList
                          Neuron to subset. When passing a NeuronList, it's advised
                          to use a function for `subset` (see below).
    subset :              list-like | set | NetworkX.Graph | pandas.DataFrame | Callable
                          Subset of the neuron to keep. Depending on the neuron:
                            For TreeNeurons:
                             - node IDs
                             - a boolean mask matching the number of nodes
                             - DataFrame with `node_id` column
                            For MeshNeurons:
                             - vertex indices
                             - a boolean mask matching either the number of
                               vertices or faces
                            For Dotprops:
                             - point indices
                             - a boolean mask matching the number of points
                          Alternatively, you can pass a function that accepts
                          a neuron and returns a suitable `subset` as described
                          above. This is useful e.g. when wanting to subset a
                          list of neurons.
    keep_disc_cn :        bool, optional
                          If False, will remove disconnected connectors that
                          have "lost" their parent node/vertex.
    prevent_fragments :   bool, optional
                          If True, will add nodes/vertices to `subset`
                          required to keep neuron from fragmenting. Ignored for
                          `Dotprops`.
    inplace :             bool, optional
                          If False, a copy of the neuron is returned.

    Returns
    -------
    TreeNeuron | MeshNeuron | Dotprops | NeuronList

    Examples
    --------
    Subset skeleton to all branches with less than 10 nodes

    >>> import navis
    >>> # Get neuron
    >>> n = navis.example_neurons(1)
    >>> # Get all linear segments
    >>> segs = n.segments
    >>> # Get short segments
    >>> short_segs = [s for s in segs if len(s) <= 10]
    >>> # Flatten segments into list of nodes
    >>> nodes_to_keep = [n for s in short_segs for n in s]
    >>> # Subset neuron
    >>> n_short = navis.subset_neuron(n, subset=nodes_to_keep)

    Subset multiple neurons using a callable

    >>> import navis
    >>> nl = navis.example_neurons(2)
    >>> # Subset neurons to all leaf nodes
    >>> nl_end = navis.subset_neuron(
    ...     nl,
    ...     subset=lambda x: x.leafs.node_id
    ... )

    See Also
    --------
    [`navis.cut_skeleton`][]
            Cut neuron at specific points.
    [`navis.in_volume`][]
            To intersect a neuron with a volume (mesh).

    """
    # Unwrap single-neuron lists
    if isinstance(x, core.NeuronList) and len(x) == 1:
        x = x[0]

    utils.eval_param(
        x, name="x", allowed_types=(core.TreeNeuron, core.MeshNeuron, core.Dotprops)
    )

    # A callable subset is evaluated against the neuron itself
    if callable(subset):
        subset = subset(x)

    if not inplace:
        # Work on a copy and re-enter through the public function so that
        # the neuron lock is applied to the copy, not the original
        copy = x.copy()
        subset_neuron(
            copy,
            subset=subset,
            inplace=True,
            keep_disc_cn=keep_disc_cn,
            prevent_fragments=prevent_fragments,
        )
        return copy

    # Dispatch to the type-specific implementation
    if isinstance(x, core.TreeNeuron):
        return _subset_treeneuron(
            x,
            subset=subset,
            keep_disc_cn=keep_disc_cn,
            prevent_fragments=prevent_fragments,
        )
    if isinstance(x, core.MeshNeuron):
        return _subset_meshneuron(
            x,
            subset=subset,
            keep_disc_cn=keep_disc_cn,
            prevent_fragments=prevent_fragments,
        )
    if isinstance(x, core.Dotprops):
        return _subset_dotprops(x, subset=subset, keep_disc_cn=keep_disc_cn)

    return x

Symmetrize 3D object (neuron, coordinates).

The way this works is by: 1. Finding the closest mirror transform (unless provided) 2. Mirroring data on the left-hand-side to the right-hand-side using the proper (warp) mirror transform to offset deformations 3. Simply flipping that data back to the left-hand-side

This works reasonably well but may produce odd results around the midline. For high quality symmetrization you are better off generating dedicated transform (see navis-flybrains for an example).

PARAMETER DESCRIPTION
x
        Data to transform. Dataframe must contain `['x', 'y', 'z']`
        columns. Numpy array must be shape `(N, 3)`.

TYPE: Neuron/List | Volume/trimesh | numpy.ndarray | pandas.DataFrame

template
        Source template brain space that the data is in. If string
        will be searched against registered template brains.

TYPE: str | TemplateBrain

via
        By default ("auto") it will find and apply the closest
        mirror transform. You can also specify a template that
        should be used. That template must have a mirror transform!

TYPE: "auto" | str DEFAULT: 'auto'

verbose
        If True, will print some useful info on the transform(s).

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
xs

Same object type as input (array, neurons, etc) but hopefully symmetrical.

Examples:

This example requires the flybrains library to be installed: pip3 install flybrains

>>> import navis
>>> import flybrains
>>> # Get the FAFB14 neuropil mesh
>>> m = flybrains.FAFB14.mesh
>>> # Symmetrize the mesh
>>> s = navis.symmetrize_brain(m, template='FAFB14')
>>> # Plot side-by-side for comparison
>>> m.plot3d()
>>> s.plot3d(color=(1, 0, 0))
Source code in navis/transforms/templates.py
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
def symmetrize_brain(x: Union['core.NeuronObject', 'pd.DataFrame', 'np.ndarray'],
                     template: Union[str, 'TemplateBrain'],
                     via: Optional[str] = 'auto',
                     verbose: bool = False) -> Union['core.NeuronObject',
                                                     'pd.DataFrame',
                                                     'np.ndarray']:
    """Symmetrize 3D object (neuron, coordinates).

    The way this works is by:
     1. Finding the closest mirror transform (unless provided)
     2. Mirror data on the left-hand-side to the right-hand-side using the
        proper (warp) mirror transform to offset deformations
     3. Simply flip that data back to the left-hand-side

    This works reasonably well but may produce odd results around the midline.
    For high quality symmetrization you are better off generating a dedicated
    transform (see `navis-flybrains` for an example).

    Parameters
    ----------
    x :             Neuron/List | Volume/trimesh | numpy.ndarray | pandas.DataFrame
                    Data to transform. Dataframe must contain `['x', 'y', 'z']`
                    columns. Numpy array must be shape `(N, 3)`.
    template :      str | TemplateBrain
                    Source template brain space that the data is in. If string
                    will be searched against registered template brains.
    via :           "auto" | str
                    By default ("auto") it will find and apply the closest
                    mirror transform. You can also specify a template that
                    should be used. That template must have a mirror transform!
    verbose :       bool
                    If True, will print some useful info on the transform(s).

    Returns
    -------
    xs
                    Same object type as input (array, neurons, etc) but
                    hopefully symmetrical.

    Raises
    ------
    TypeError
                    If `template` is neither a string nor a TemplateBrain, or
                    if `x` is of a type that cannot be transformed.
    ValueError
                    If a DataFrame lacks x/y/z columns, an array is not
                    (N, 3), or the template has no usable bounding box.

    Examples
    --------
    This example requires the
    [flybrains](https://github.com/navis-org/navis-flybrains)
    library to be installed: `pip3 install flybrains`

    >>> import navis
    >>> import flybrains
    >>> # Get the FAFB14 neuropil mesh
    >>> m = flybrains.FAFB14.mesh
    >>> # Symmetrize the mesh
    >>> s = navis.symmetrize_brain(m, template='FAFB14')
    >>> # Plot side-by-side for comparison
    >>> m.plot3d()                                              # doctest: +SKIP
    >>> s.plot3d(color=(1, 0, 0))                               # doctest: +SKIP

    """
    # Both template names (str) and actual TemplateBrain objects are supported
    # (see lookup further below); anything else is an error.
    # NOTE: previously the TypeError was constructed but never raised, which
    # silently accepted arbitrary types.
    if not isinstance(template, (str, TemplateBrain)):
        raise TypeError('Expected template of type str or TemplateBrain, '
                        f'got "{type(template)}"')

    if via == 'auto':
        # Find closest mirror transform
        via = registry.find_closest_mirror_reg(template)

    # Recurse over NeuronLists so that `via` is resolved only once
    if isinstance(x, core.NeuronList):
        if len(x) == 1:
            x = x[0]
        else:
            xf = []
            for n in config.tqdm(x, desc='Mirroring',
                                 disable=config.pbar_hide,
                                 leave=config.pbar_leave):
                xf.append(symmetrize_brain(n,
                                           template=template,
                                           via=via))
            return core.NeuronList(xf)

    if isinstance(x, core.BaseNeuron):
        # Work on a copy, then recurse for each coordinate-bearing attribute
        x = x.copy()
        if isinstance(x, core.TreeNeuron):
            x.nodes = symmetrize_brain(x.nodes,
                                       template=template,
                                       via=via)
        elif isinstance(x, core.Dotprops):
            x.points = symmetrize_brain(x.points,
                                        template=template,
                                        via=via)
            # Set tangent vectors and alpha to None so they will be regenerated
            x._vect = x._alpha = None
        elif isinstance(x, core.MeshNeuron):
            x.vertices = symmetrize_brain(x.vertices,
                                          template=template,
                                          via=via)
        else:
            raise TypeError(f"Don't know how to transform neuron of type '{type(x)}'")

        if x.has_connectors:
            x.connectors = symmetrize_brain(x.connectors,
                                            template=template,
                                            via=via)
        return x
    elif isinstance(x, tm.Trimesh):
        x = x.copy()
        x.vertices = symmetrize_brain(x.vertices,
                                      template=template,
                                      via=via)
        return x
    elif isinstance(x, pd.DataFrame):
        if any(c not in x.columns for c in ['x', 'y', 'z']):
            raise ValueError('DataFrame must have x, y and z columns.')
        x = x.copy()
        x[['x', 'y', 'z']] = symmetrize_brain(
            x[['x', 'y', 'z']].values.astype(float),
            template=template,
            via=via
        )
        return x
    else:
        try:
            # At this point we expect numpy arrays
            x = np.asarray(x)
        except BaseException:
            raise TypeError(f'Unable to transform data of type "{type(x)}"')

        if not x.ndim == 2 or x.shape[1] != 3:
            raise ValueError('Array must be of shape (N, 3).')

    # Now find the meta info for this template brain
    if isinstance(template, TemplateBrain):
        tb = template
    else:
        tb = registry.find_template(template, non_found='raise')

    # Get the bounding box
    if not hasattr(tb, 'boundingbox'):
        raise ValueError(f'Template "{tb.label}" has no bounding box info.')

    if not isinstance(tb.boundingbox, (list, tuple, np.ndarray)):
        raise TypeError("Expected the template brain's bounding box to be a "
                        f"list, tuple or array - got '{type(tb.boundingbox)}'")

    # Get bounding box of template brain
    bbox = np.asarray(tb.boundingbox)

    # Reshape if flat array
    if bbox.ndim == 1:
        bbox = bbox.reshape(3, 2)

    # Find points on the left of the midline (larger x than the bbox center)
    center = bbox[0][0] + (bbox[0][1] - bbox[0][0]) / 2
    is_left = x[:, 0] > center

    # Make a copy of the original data
    x = x.copy()

    # If nothing to symmetrize - return
    if is_left.sum() == 0:
        return x

    # Mirror with compensation for deformations
    xm = mirror_brain(x[is_left], template=template, via=via,
                      mirror_axis='x', verbose=verbose)

    # And now flip them back without compensation for deformations
    xmf = mirror_brain(xm, template=template, warp=False, mirror_axis='x')

    # Replace values
    x[is_left] = xmf

    return x

Calculate synapse flow centrality (SFC).

From Schneider-Mizell et al. (2016): "We use flow centrality for four purposes. First, to split an arbor into axon and dendrite at the maximum centrifugal SFC, which is a preliminary step for computing the segregation index, for expressing all kinds of connectivity edges (e.g. axo-axonic, dendro-dendritic) in the wiring diagram, or for rendering the arbor in 3d with differently colored regions. Second, to quantitatively estimate the cable distance between the axon terminals and dendritic arbor by measuring the amount of cable with the maximum centrifugal SFC value. Third, to measure the cable length of the main dendritic shafts using centripetal SFC, which applies only to insect neurons with at least one output synapse in their dendritic arbor. And fourth, to weigh the color of each skeleton node in a 3d view, providing a characteristic signature of the arbor that enables subjective evaluation of its identity."

Uses navis-fastcore if available.

PARAMETER DESCRIPTION
x
    Neuron(s) to calculate synapse flow centrality for. Must have
    connectors!

TYPE: TreeNeuron | MeshNeuron | NeuronList

mode
    Type of flow centrality to calculate. There are three flavors::
    (1) centrifugal counts paths from proximal inputs to distal outputs
    (2) centripetal counts paths from distal inputs to proximal outputs
    (3) the sum of both - this is the original implementation

TYPE: 'centrifugal' | 'centripetal' | 'sum' DEFAULT: 'sum'

RETURNS DESCRIPTION
neuron

Adds "synapse_flow_centrality" as column in the node table (for TreeNeurons) or as .synapse_flow_centrality property (for MeshNeurons).

Examples:

>>> import navis
>>> n = navis.example_neurons(2)
>>> n.reroot(n.soma, inplace=True)
>>> _ = navis.synapse_flow_centrality(n)
>>> n[0].nodes.synapse_flow_centrality.max()
786969
See Also

navis.bending_flow Variation of synapse flow centrality: calculates bending flow. navis.arbor_segregation_index By-arbor segregation index. navis.segregation_index Calculates segregation score (polarity) of a neuron. navis.split_axon_dendrite Tries splitting a neuron into axon and dendrite. navis.flow_centrality Leaf-based version of flow centrality.

Source code in navis/morpho/mmetrics.py
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
@utils.map_neuronlist(desc="Calc. flow", allow_parallel=True)
@utils.meshneuron_skeleton(
    method="node_properties",
    include_connectors=True,
    heal=True,
    node_props=["synapse_flow_centrality"],
)
def synapse_flow_centrality(
    x: "core.NeuronObject",
    mode: Union[Literal["centrifugal"], Literal["centripetal"], Literal["sum"]] = "sum",
) -> "core.NeuronObject":
    """Calculate synapse flow centrality (SFC).

    From Schneider-Mizell et al. (2016): "We use flow centrality for
    four purposes. First, to split an arbor into axon and dendrite at the
    maximum centrifugal SFC, which is a preliminary step for computing the
    segregation index, for expressing all kinds of connectivity edges (e.g.
    axo-axonic, dendro-dendritic) in the wiring diagram, or for rendering the
    arbor in 3d with differently colored regions. Second, to quantitatively
    estimate the cable distance between the axon terminals and dendritic arbor
    by measuring the amount of cable with the maximum centrifugal SFC value.
    Third, to measure the cable length of the main dendritic shafts using
    centripetal SFC, which applies only to insect neurons with at least one
    output synapse in their dendritic arbor. And fourth, to weigh the color
    of each skeleton node in a 3d view, providing a characteristic signature of
    the arbor that enables subjective evaluation of its identity."

    Uses navis-fastcore if available.

    Parameters
    ----------
    x :         TreeNeuron | MeshNeuron | NeuronList
                Neuron(s) to calculate synapse flow centrality for. Must have
                connectors!
    mode :      'centrifugal' | 'centripetal' | 'sum', optional
                Type of flow centrality to calculate. There are three flavors::
                (1) centrifugal counts paths from proximal inputs to distal outputs
                (2) centripetal counts paths from distal inputs to proximal outputs
                (3) the sum of both - this is the original implementation

    Returns
    -------
    neuron
                Adds "synapse_flow_centrality" as column in the node table (for
                TreeNeurons) or as `.synapse_flow_centrality` property
                (for MeshNeurons).

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(2)
    >>> n.reroot(n.soma, inplace=True)
    >>> _ = navis.synapse_flow_centrality(n)
    >>> n[0].nodes.synapse_flow_centrality.max()
    786969

    See Also
    --------
    [`navis.bending_flow`][]
            Variation of synapse flow centrality: calculates bending flow.
    [`navis.arbor_segregation_index`][]
            By-arbor segregation index.
    [`navis.segregation_index`][]
            Calculates segregation score (polarity) of a neuron.
    [`navis.split_axon_dendrite`][]
            Tries splitting a neuron into axon and dendrite.
    [`navis.flow_centrality`][]
            Leaf-based version of flow centrality.

    """
    # Quick disclaimer:
    # This function may look unnecessarily complicated. I did also try out an
    # implementation using igraph + shortest paths which works like a charm and
    # causes less headaches. It is, however, about >10X slower than this version!
    # Note to self: do not go down that rabbit hole again!

    # Validate inputs up front; the decorators above have already mapped
    # NeuronLists and skeletonized MeshNeurons, so `x` must be a TreeNeuron.
    if mode not in ["centrifugal", "centripetal", "sum"]:
        raise ValueError(f'Unknown "mode" parameter: {mode}')

    if not isinstance(x, core.TreeNeuron):
        raise ValueError(f'Expected TreeNeuron(s), got "{type(x)}"')

    if not x.has_connectors:
        raise ValueError("Neuron must have connectors.")

    # SFC assumes flow toward/away from the root - warn (but proceed) if the
    # neuron is not rooted to its soma since results will be hard to interpret.
    if np.any(x.soma) and not np.all(np.isin(x.soma, x.root)):
        logger.warning(f"Neuron {x.id} is not rooted to its soma!")

    # Figure out how connector types are labeled
    # (supports both "pre"/"post" string labels and 0/1 integer labels)
    cn_types = x.connectors.type.unique()
    if any(np.isin(["pre", "post"], cn_types)):
        pre, post = "pre", "post"
    elif any(np.isin([0, 1], cn_types)):
        pre, post = 0, 1
    else:
        raise ValueError(
            f'Unable to parse connector types "{cn_types}" for neuron {x.id}'
        )

    # Fast path: delegate the actual flow computation to navis-fastcore
    # (compiled) if it is installed.
    if utils.fastcore:
        # Per-node pre-/postsynapse counts are built by mapping each node ID
        # to its count in the connector table (0 where a node has none).
        x.nodes["synapse_flow_centrality"] = utils.fastcore.synapse_flow_centrality(
            node_ids=x.nodes.node_id.values,
            parent_ids=x.nodes.parent_id.values,
            presynapses=x.nodes.node_id.map(
                x.connectors[(x.connectors.type == pre)].node_id.value_counts()
            )
            .fillna(0)
            .astype(int)
            .values,
            postsynapses=x.nodes.node_id.map(
                x.connectors[(x.connectors.type == post)].node_id.value_counts()
            )
            .fillna(0)
            .astype(int)
            .values,
            mode=mode,
        )
        # Add info on method/mode used for flow centrality
        x.centrality_method = mode  # type: ignore

        # Need to add a restriction, that a branchpoint cannot have a lower
        # flow than its highest child -> this happens at the main branch point to
        # the cell body fiber because the flow doesn't go "through" it in
        # child -> parent direction but rather "across" it from one child to the
        # other
        is_bp = x.nodes["type"] == "branch"
        bp = x.nodes.loc[is_bp, "node_id"].values
        bp_childs = x.nodes[x.nodes.parent_id.isin(bp)]
        max_flow = bp_childs.groupby("parent_id").synapse_flow_centrality.max()
        x.nodes.loc[is_bp, "synapse_flow_centrality"] = max_flow.loc[bp].values
        x.nodes["synapse_flow_centrality"] = x.nodes.synapse_flow_centrality.astype(int)
        return x

    # --- Pure Python/pandas fallback below ---

    # Get list of nodes with pre/postsynapses
    # (node IDs appear once per synapse, i.e. with repeats)
    pre_node_ids = x.connectors[x.connectors.type == pre].node_id.values
    post_node_ids = x.connectors[x.connectors.type == post].node_id.values
    total_post = len(post_node_ids)
    total_pre = len(pre_node_ids)

    # Get list of points to calculate flow centrality for:
    # branches and and their children
    is_bp = x.nodes["type"] == "branch"
    is_cn = x.nodes.node_id.isin(x.connectors.node_id)
    calc_node_ids = x.nodes[is_bp | is_cn].node_id.values

    # We will be processing a super downsampled version of the neuron to
    # speed up calculations
    # (logger level and progress-bar state are saved and restored so the
    # downsampling step runs silently)
    current_level = logger.level
    current_state = config.pbar_hide
    logger.setLevel("ERROR")
    config.pbar_hide = True
    y = sampling.downsample_neuron(
        x=x,
        downsampling_factor=float("inf"),
        inplace=False,
        preserve_nodes=calc_node_ids,
    )
    logger.setLevel(current_level)
    config.pbar_hide = current_state

    # Get number of pre/postsynapses distal to each branch's childs
    # Note that we're using geodesic matrix here because it is much more
    # efficient than for `distal_to` for larger queries/neurons
    dists = graph.geodesic_matrix(
        y, from_=np.append(pre_node_ids, post_node_ids), directed=True, weight=None
    )
    # A finite directed distance from a synapse-bearing node to a calc node
    # marks that synapse as distal to the calc node.
    distal = dists[calc_node_ids] < np.inf

    # Since nodes can have multiple pre-/postsynapses but they show up only
    # once in distal, we have to reindex to reflect the correct number of synapes
    distal_pre = distal.loc[pre_node_ids]
    distal_post = distal.loc[post_node_ids]

    # Sum up axis - now each row represents the number of pre/postsynapses
    # that are distal to that node
    distal_pre = distal_pre.sum(axis=0)
    distal_post = distal_post.sum(axis=0)

    if mode != "centripetal":
        # Centrifugal is the flow from all proximal posts- to all distal presynapses
        centrifugal = {
            n: (total_post - distal_post[n]) * distal_pre[n] for n in calc_node_ids
        }

    if mode != "centrifugal":
        # Centripetal is the flow from all distal post- to all non-distal presynapses
        centripetal = {
            n: distal_post[n] * (total_pre - distal_pre[n]) for n in calc_node_ids
        }

    # Now map this onto our neuron
    if mode == "centrifugal":
        flow = centrifugal
    elif mode == "centripetal":
        flow = centripetal
    elif mode == "sum":
        flow = {n: centrifugal[n] + centripetal[n] for n in centrifugal}

    # At this point there is only flow for branch points and connectors nodes.
    # Let's complete that mapping by adding flow for the nodes between branch points.
    for s in x.small_segments:
        # Segments' orientation goes from distal -> proximal

        # If first node in the segment has no flow, set to 0
        flow[s[0]] = flow.get(s[0], 0)

        # For each node get the flow of its child
        # (i.e. flow is constant along a linear segment)
        for i in range(1, len(s)):
            if s[i] not in flow:
                flow[s[i]] = flow[s[i - 1]]

    x.nodes["synapse_flow_centrality"] = x.nodes.node_id.map(flow).fillna(0).astype(int)

    # Need to add a restriction, that a branchpoint cannot have a lower
    # flow than its highest child -> this happens at the main branch point to
    # the cell body fiber because the flow doesn't go "through" it in
    # child -> parent direction but rather "across" it from one child to the
    # other
    bp = x.nodes.loc[is_bp, "node_id"].values
    bp_childs = x.nodes[x.nodes.parent_id.isin(bp)]
    max_flow = bp_childs.groupby("parent_id").synapse_flow_centrality.max()
    x.nodes.loc[is_bp, "synapse_flow_centrality"] = max_flow.loc[bp].values
    x.nodes["synapse_flow_centrality"] = x.nodes.synapse_flow_centrality.astype(int)

    # Add info on method/mode used for flow centrality
    x.centrality_method = mode  # type: ignore

    return x

Cluster neurons based on their synapse placement.

Distances score is calculated by calculating for each synapse of neuron A: (1) the (Euclidean) distance to the closest synapse in neuron B and (2) comparing the synapse density around synapse A and B. This is type-sensitive: presynapses will only be matched with presynapses, post with post, etc. The formula is described in Schlegel et al., eLife (2017):

\[ f(i_{s},j_{k}) = \exp(\frac{-d^{2}_{sk}}{2\sigma^{2}}) \exp(\frac{|n(i_{s})-n(j_{k})|}{n(i_{s})+n(j_{k})}) \]

The synapse similarity score for neurons i and j being the average of \(f(i_{s},j_{k})\) over all synapses s of i. Synapse k is the closest synapse of the same sign (pre/post) in neuron j to synapse s. \(d^{2}_{sk}\) is the Euclidean distance between these distances. Variable \(\sigma\) (sigma) determines what distance between s and k is considered "close". \(n(i_{s})\) and \(n(j_{k})\) are defined as the number of synapses of neuron i/j that are within given radius \(\omega\) (omega) of synapse s and j, respectively (same sign only). This ensures that in cases of a strong disparity between \(n(i_{s})\) and \(n(j_{k})\), the synapse similarity will be close to zero even if the distance between s and k is very small.

PARAMETER DESCRIPTION
x
            Neurons to compare. Must have connectors.

TYPE: NeuronList

sigma
            Distance between synapses that is considered to be
            "close".

TYPE: int | float

omega
            Radius over which to calculate synapse density.

TYPE: int | float

mu_score
            If True, score is calculated as mean between A->B and
            B->A comparison.

TYPE: bool DEFAULT: True

restrict_cn
            Restrict to given connector types. Must map to
            a `type`, `relation` or `label` column in the
            connector tables.
            If None, will use all connector types. Use either
            single integer or list. E.g. `restrict_cn=[0, 1]`
            to use only pre- and postsynapses.

TYPE: int | list | None DEFAULT: None

n_cores
            Number of parallel processes to use. Defaults to half
            the available cores.

TYPE: int DEFAULT: max(1, os.cpu_count() // 2)

RETURNS DESCRIPTION
pandas.DataFrame
See Also

navis.synblast NBLAST variant using synapses.

Examples:

>>> import navis
>>> nl = navis.example_neurons(5)
>>> scores = navis.synapse_similarity(nl, omega=5000/8, sigma=2000/8)
Source code in navis/connectivity/similarity.py
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
def synapse_similarity(x: 'core.NeuronList',
                       sigma: Union[float, int],
                       omega: Union[float, int],
                       mu_score: bool = True,
                       restrict_cn: Optional[List[str]] = None,
                       n_cores: int = max(1, os.cpu_count() // 2)
                       ) -> pd.DataFrame:
    r"""Cluster neurons based on their synapse placement.

    Distances score is calculated by calculating for each synapse of
    neuron A: (1) the (Euclidean) distance to the closest synapse in neuron B
    and (2) comparing the synapse density around synapse A and B.
    This is type-sensitive: presynapses will only be matched with presynapses,
    post with post, etc. The formula is described in
    [Schlegel et al., eLife (2017)](https://elifesciences.org/articles/16799):

    $$
    f(i_{s},j_{k}) = \exp(\frac{-d^{2}_{sk}}{2\sigma^{2}}) \exp(\frac{|n(i_{s})-n(j_{k})|}{n(i_{s})+n(j_{k})})
    $$

    The synapse similarity score for neurons i and j being the average
    of $f(i_{s},j_{k})$ over all synapses s of i. Synapse k is the
    closest synapse of the same sign (pre/post) in neuron j to synapse s.
    $d^{2}_{sk}$ is the Euclidean distance between these distances.
    Variable $\sigma$ (`sigma`) determines what distance between
    s and k is considered "close". $n(i_{s})$ and $n(j_{k})$ are
    defined as the number of synapses of neuron i/j that are within given
    radius $\omega$ (`omega`) of synapse s and j, respectively (same
    sign only). This ensures that in cases of a strong disparity between
    $n(i_{s})$ and $n(j_{k})$, the synapse similarity will be
    close to zero even if the distance between s and k is very small.


    Parameters
    ----------
    x :                 NeuronList
                        Neurons to compare. Must have connectors.
    sigma :             int | float
                        Distance between synapses that is considered to be
                        "close".
    omega :             int | float
                        Radius over which to calculate synapse density.
    mu_score :          bool
                        If True, score is calculated as mean between A->B and
                        B->A comparison.
    restrict_cn :       int | list | None
                        Restrict to given connector types. Must map to
                        a `type`, `relation` or `label` column in the
                        connector tables.
                        If None, will use all connector types. Use either
                        single integer or list. E.g. `restrict_cn=[0, 1]`
                        to use only pre- and postsynapses.
    n_cores :           int
                        Number of parallel processes to use. Defaults to half
                        the available cores.

    Returns
    -------
    pandas.DataFrame

    See Also
    --------
    [`navis.synblast`][]
                        NBLAST variant using synapses.

    Examples
    --------
    >>> import navis
    >>> nl = navis.example_neurons(5)
    >>> scores = navis.synapse_similarity(nl, omega=5000/8, sigma=2000/8)

    """
    if not isinstance(x, core.NeuronList):
        raise TypeError(f'Expected NeuronList, got {type(x)}')

    if any(not n.has_connectors for n in x):
        raise ValueError('All neurons must have connector tables as .connectors property.')

    # If single value, turn into list
    if restrict_cn is not None:
        restrict_cn = utils.make_iterable(restrict_cn)

    # All-by-all combinations (including self-comparisons); order matters
    # because the score is asymmetric before averaging.
    combinations = [(nA.connectors, nB.connectors, sigma, omega, restrict_cn)
                    for nA in x for nB in x]

    with ProcessPoolExecutor(max_workers=n_cores) as e:
        futures = e.map(_unpack_synapse_helper, combinations, chunksize=1000)

        scores = [n for n in config.tqdm(futures, total=len(combinations),
                                         desc='Processing',
                                         disable=config.pbar_hide,
                                         leave=config.pbar_leave)]

    # Create empty score matrix
    sim_matrix = pd.DataFrame(np.zeros((len(x), len(x))),
                              index=x.id,
                              columns=x.id)
    # Populate matrix (same iteration order as `combinations` above)
    comb_names = [(nA.id, nB.id) for nA in x for nB in x]
    for c, v in zip(comb_names, scores):
        sim_matrix.loc[c[0], c[1]] = v

    # Average A->B and B->A scores to make the matrix symmetric
    if mu_score:
        sim_matrix = (sim_matrix + sim_matrix.T) / 2

    return sim_matrix

Synapsed-based variant of NBLAST.

The gist is this: for each synapse in the query neuron, we find the closest synapse in the target neuron (can be restricted by synapse types). Those distances are then scored similar to nearest-neighbor pairs in NBLAST but without the vector component.

PARAMETER DESCRIPTION
query
        Query neuron(s) to SynBLAST against the targets. Units should
        be in microns as NBLAST is optimized for that and have
        similar sampling resolutions. Neurons must have (non-empty)
        connector tables.

TYPE: Union[BaseNeuron, NeuronList]

by_type
        If True, will use the "type" column in the connector tables
        to only compare e.g. pre- with pre- and post- with
        postsynapses.

TYPE: bool DEFAULT: False

cn_types
        Use this to restrict synblast to specific types of
        connectors (e.g. "pre"synapses only).

TYPE: str | list DEFAULT: None

scores
        Determines the final scores:

            - 'forward' (default) returns query->target scores
            - 'mean' returns the mean of query->target and target->query scores
            - 'min' returns the minimum between query->target and target->query scores
            - 'max' returns the maximum between query->target and target->query scores

TYPE: 'forward' | 'mean' | 'min' | 'max' DEFAULT: 'forward'

n_cores
        Max number of cores to use for nblasting. Default is
        `os.cpu_count() // 2`. This should ideally be an even
        number as that allows optimally splitting queries onto
        individual processes.

TYPE: int DEFAULT: os.cpu_count() // 2

normalized
        Whether to return normalized SynBLAST scores.

TYPE: bool DEFAULT: True

smat
        Score matrix. If 'auto' (default), will use scoring matrices
        from FCWB. Same behaviour as in R's nat.nblast
        implementation. If `smat=None` the scores will be
        generated as the product of the distances and the dotproduct
        of the vectors of nearest-neighbor pairs.

TYPE: str | pd.DataFrame DEFAULT: 'auto'

progress
        Whether to show progress bars. This may cause some overhead,
        so switch off if you don't really need it.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
scores

Matrix with SynBLAST scores. Rows are query neurons, columns are targets.

TYPE: pandas.DataFrame

Examples:

>>> import navis
>>> nl = navis.example_neurons(n=5)
>>> nl.units
<Quantity([8 8 8 8 8], 'nanometer')>
>>> # Convert to microns
>>> nl_um = nl * (8 / 1000)
>>> # Run type-agnostic SyNBLAST
>>> scores = navis.synblast(nl_um[:3], nl_um[3:], progress=False)
>>> # Run type-sensitive (i.e. pre vs pre and post vs post) SyNBLAST
>>> scores = navis.synblast(nl_um[:3], nl_um[3:], by_type=True, progress=False)
See Also

navis.nblast The original morphology-based NBLAST.

Source code in navis/nbl/synblast_funcs.py
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
def synblast(query: Union['BaseNeuron', 'NeuronList'],
             target: Union['BaseNeuron', 'NeuronList'],
             by_type: bool = False,
             cn_types: Optional[list] = None,
             scores: Union[Literal['forward'],
                           Literal['mean'],
                           Literal['min'],
                           Literal['max']] = 'forward',
             normalized: bool = True,
             smat: Optional[Union[str, pd.DataFrame]] = 'auto',
             n_cores: int = os.cpu_count() // 2,
             progress: bool = True) -> pd.DataFrame:
    """Synapse-based variant of NBLAST.

    The gist is this: for each synapse in the query neuron, we find the closest
    synapse in the target neuron (can be restricted by synapse types). Those
    distances are then scored similar to nearest-neighbor pairs in NBLAST but
    without the vector component.

    Parameters
    ----------
    query,target :  Neuron/List
                    Query neuron(s) to SynBLAST against the targets. Units should
                    be in microns as NBLAST is optimized for that and have
                    similar sampling resolutions. Neurons must have (non-empty)
                    connector tables.
    by_type :       bool
                    If True, will use the "type" column in the connector tables
                    to only compare e.g. pre- with pre- and post- with
                    postsynapses.
    cn_types :      str | list, optional
                    Use this to restrict synblast to specific types of
                    connectors (e.g. "pre"synapses only).
    scores :        'forward' | 'mean' | 'min' | 'max'
                    Determines the final scores:

                        - 'forward' (default) returns query->target scores
                        - 'mean' returns the mean of query->target and target->query scores
                        - 'min' returns the minimum between query->target and target->query scores
                        - 'max' returns the maximum between query->target and target->query scores

    n_cores :       int, optional
                    Max number of cores to use for nblasting. Default is
                    `os.cpu_count() // 2`. This should ideally be an even
                    number as that allows optimally splitting queries onto
                    individual processes.
    normalized :    bool, optional
                    Whether to return normalized SynBLAST scores.
    smat :          str | pd.DataFrame, optional
                    Score matrix. If 'auto' (default), will use scoring matrices
                    from FCWB. Same behaviour as in R's nat.nblast
                    implementation. If `smat=None` the scores will be
                    generated as the product of the distances and the dotproduct
                    of the vectors of nearest-neighbor pairs.
    progress :      bool
                    Whether to show progress bars. This may cause some overhead,
                    so switch off if you don't really need it.

    Returns
    -------
    scores :        pandas.DataFrame
                    Matrix with SynBLAST scores. Rows are query neurons, columns
                    are targets.

    Examples
    --------
    >>> import navis
    >>> nl = navis.example_neurons(n=5)
    >>> nl.units
    <Quantity([8 8 8 8 8], 'nanometer')>
    >>> # Convert to microns
    >>> nl_um = nl * (8 / 1000)
    >>> # Run type-agnostic SyNBLAST
    >>> scores = navis.synblast(nl_um[:3], nl_um[3:], progress=False)
    >>> # Run type-sensitive (i.e. pre vs pre and post vs post) SyNBLAST
    >>> scores = navis.synblast(nl_um[:3], nl_um[3:], by_type=True, progress=False)

    See Also
    --------
    [`navis.nblast`][]
                The original morphology-based NBLAST.

    """
    # Make sure we're working on NeuronLists
    query = NeuronList(query)
    target = NeuronList(target)

    # Run pre-flight checks
    nblast_preflight(query, target, n_cores,
                     req_unique_ids=True, req_dotprops=False,
                     req_microns=isinstance(smat, str) and smat == 'auto')

    # Make sure all neurons have connectors
    if not all(query.has_connectors):
        raise ValueError('Some query neurons appear to not have a connector table.')
    if not all(target.has_connectors):
        raise ValueError('Some target neurons appear to not have a connector table.')

    if cn_types is not None:
        cn_types = utils.make_iterable(cn_types)

    # If we need to split by connector type, both queries AND targets must
    # carry a "type" column
    if cn_types is not None or by_type:
        if any('type' not in n.connectors.columns for n in query) \
           or any('type' not in n.connectors.columns for n in target):
            raise ValueError('Connector tables must have a "type" column if '
                             '`by_type=True` or `cn_types` is not `None`.')

    # Find a partition that produces batches that each run in approximately
    # 10 seconds
    if n_cores and n_cores > 1:
        if progress:
            # If progress bar, we need to make smaller mini batches.
            # These mini jobs must not be too small - otherwise the overhead
            # from spawning and sending results between processes slows things
            # down dramatically. Hence we want to make sure that each job runs
            # for >10s. The run time depends on the system and how big the neurons
            # are. Here, we run a quick test and try to extrapolate from there
            n_rows, n_cols = find_batch_partition(query, target,
                                                  T=10 * JOB_SIZE_MULTIPLIER)
        else:
            # If no progress bar needed, we can just split neurons evenly across
            # all available cores
            n_rows, n_cols = find_optimal_partition(n_cores, query, target)
    else:
        n_rows = n_cols = 1

    # Calculate self-hits once for all neurons
    nb = SynBlaster(normalized=normalized,
                    by_type=by_type,
                    smat=smat,
                    progress=progress)

    def get_connectors(n):
        """Get the required connectors from a neuron."""
        if cn_types is not None:
            return n.connectors[n.connectors['type'].isin(cn_types)]
        return n.connectors

    query_self_hits = np.array([nb.calc_self_hit(get_connectors(n)) for n in query])
    target_self_hits = np.array([nb.calc_self_hit(get_connectors(n)) for n in target])

    # Initialize a pool of workers
    # Note that we're forcing "spawn" instead of "fork" (default on linux)!
    # This is to reduce the memory footprint since "fork" appears to inherit all
    # variables (including all neurons) while "spawn" appears to get only
    # what's required to run the job?
    with ProcessPoolExecutor(max_workers=n_cores,
                             mp_context=mp.get_context('spawn')) as pool:
        with config.tqdm(desc='Preparing',
                         total=n_rows * n_cols,
                         leave=False,
                         disable=not progress) as pbar:
            futures = {}
            blasters = []
            for qix in np.array_split(np.arange(len(query)), n_rows):
                for tix in np.array_split(np.arange(len(target)), n_cols):
                    # Initialize NBlaster
                    this = SynBlaster(normalized=normalized,
                                      by_type=by_type,
                                      smat=smat,
                                      progress=progress)

                    # Add queries and targets
                    for i, ix in enumerate(qix):
                        n = query[ix]
                        this.append(get_connectors(n), id=n.id, self_hit=query_self_hits[ix])
                    for i, ix in enumerate(tix):
                        n = target[ix]
                        this.append(get_connectors(n), id=n.id, self_hit=target_self_hits[ix])

                    # Keep track of indices of queries and targets
                    this.queries = np.arange(len(qix))
                    this.targets = np.arange(len(tix)) + len(qix)
                    this.queries_ix = qix  # this facilitates filling in the big matrix later
                    this.targets_ix = tix  # this facilitates filling in the big matrix later
                    this.pbar_position = len(blasters) if not utils.is_jupyter() else None

                    blasters.append(this)
                    pbar.update()

                    # If multiple cores requested, submit job to the pool right away
                    if n_cores and n_cores > 1 and (n_cols > 1 or n_rows > 1):
                        this.progress = False  # no progress bar for individual NBLASTERs
                        futures[pool.submit(this.multi_query_target,
                                            q_idx=this.queries,
                                            t_idx=this.targets,
                                            scores=scores)] = this

        # Collect results
        if futures and len(futures) > 1:
            # Prepare empty score matrix
            scores = pd.DataFrame(np.empty((len(query), len(target)),
                                           dtype=this.dtype),
                                  index=query.id, columns=target.id)
            scores.index.name = 'query'
            scores.columns.name = 'target'

            # Collect results
            # We're dropping the "N / N_total" bit from the progress bar because
            # it's not helpful here
            fmt = ('{desc}: {percentage:3.0f}%|{bar}| [{elapsed}<{remaining}]')
            for f in config.tqdm(as_completed(futures),
                                 desc='NBLASTing',
                                 bar_format=fmt,
                                 total=len(futures),
                                 smoothing=0,
                                 disable=not progress,
                                 leave=False):
                res = f.result()
                this = futures[f]
                # Fill-in big score matrix
                scores.iloc[this.queries_ix, this.targets_ix] = res.values
        else:
            # Single job: run it in this process (`this` is the sole blaster)
            scores = this.multi_query_target(this.queries,
                                             this.targets,
                                             scores=scores)

    return scores

Skeletonize image data to single voxel width.

This is a simple thin wrapper around scikit-image's skeletonize.

PARAMETER DESCRIPTION
x
    The image to thin.

TYPE: VoxelNeuron | numpy array

inplace
    For VoxelNeurons only: Whether to manipulate the neuron
    in place.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
thin

Thinned VoxelNeuron or numpy array.

Examples:

>>> import navis
>>> n = navis.example_neurons(1, kind='mesh')
>>> vx = navis.voxelize(n, pitch='1 micron')
>>> thinned = navis.thin_voxels(vx)
Source code in navis/morpho/images.py
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
@utils.map_neuronlist(desc="Thinning", allow_parallel=True)
def thin_voxels(x, inplace=False):
    """Skeletonize image data to single voxel width.

    This is a simple thin wrapper around scikit-image's `skeletonize`.

    Parameters
    ----------
    x :         VoxelNeuron | numpy array
                The image to thin.
    inplace :   bool
                For VoxelNeurons only: Whether to manipulate the neuron
                in place.

    Returns
    -------
    thin
                Thinned VoxelNeuron or numpy array.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1, kind='mesh')
    >>> vx = navis.voxelize(n, pitch='1 micron')
    >>> thinned = navis.thin_voxels(vx)

    """
    try:
        from skimage.morphology import skeletonize
    except ModuleNotFoundError as err:
        # Chain the original error so the traceback shows what failed
        raise ModuleNotFoundError(
            "`thin_voxels` requires the scikit-image package:\n"
            "  pip install scikit-image"
        ) from err

    if isinstance(x, core.VoxelNeuron):
        if not inplace:
            x = x.copy()

        x.grid = skeletonize(x.grid)
    elif isinstance(x, np.ndarray):
        x = skeletonize(x)
    else:
        raise TypeError(f"Unable to thin data of type {type(x)}")

    return x

Calculate tortuosity of a neuron.

See Stepanyants et al., Neuron (2004) for detailed explanation. Briefly, tortuosity index T is defined as the ratio of the branch segment length L (seg_length) to the Euclidean distance R between its ends.

The way this is implemented in navis: For each linear stretch (i.e. segments between branch points, leafs or roots) we calculate its geodesic length L and the Euclidean distance R between its ends. The final tortuosity is the mean of L / R across all segments.

PARAMETER DESCRIPTION
x
            Neuron to analyze. If MeshNeuron, will generate and
            use a skeleton representation.

TYPE: TreeNeuron | MeshNeuron | NeuronList

seg_length
            Target segment length(s) `L`. If `seg_length` is
            provided, each linear segment is further divided into
            segments of exactly `seg_length` (geodesic) length
            and the tortuosity is calculated for each of these
            sub-segments. If `seg_length` is not provided, the
            tortuosity is calculated for each linear segment as is.

            If neuron(s) have their  `.units` set, you can also
            pass a string such as "1 micron". `seg_length` must
            be larger than the current sampling resolution of the
            neuron. If you want to make sure that segments are as
            close to length `L` as possible, consider resampling the
            neuron using [`navis.resample_skeleton`][].

TYPE: int | float | str | list thereof DEFAULT: None

RETURNS DESCRIPTION
tortuosity

If x is NeuronList, will return DataFrame. If x is single TreeNeuron, will return either a single float (if no or a single seg_length is queried) or a DataFrame (if multiple seg_lengths are queried).

TYPE: float | np.array | pandas.DataFrame

See Also

navis.segment_analysis This function provides by-segment morphometrics, including tortuosity.

Examples:

>>> import navis
>>> n = navis.example_neurons(1)
>>> # Calculate tortuosity as-is
>>> T = navis.tortuosity(n)
>>> round(T, 3)
1.074
>>> # Calculate tortuosity with 1 micron segment lengths
>>> T = navis.tortuosity(n, seg_length='1 micron')
>>> round(T, 3)
1.054
Source code in navis/morpho/mmetrics.py
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
def tortuosity(
    x: "core.NeuronObject",
    seg_length: Optional[Union[int, float, str, Sequence[Union[int, float, str]]]] = None,
) -> Union[float, Sequence[float], pd.DataFrame]:
    """Calculate tortuosity of a neuron.

    See Stepanyants et al., Neuron (2004) for detailed explanation. Briefly,
    tortuosity index `T` is defined as the ratio of the branch segment length
    `L` (`seg_length`) to the Euclidean distance `R` between its ends.

    The way this is implemented in `navis`:
    For each linear stretch (i.e. segments between branch points, leafs or roots)
    we calculate its geodesic length `L` and the Euclidean distance `R` between
    its ends. The final tortuosity is the mean of `L / R` across all segments.


    Parameters
    ----------
    x :                 TreeNeuron | MeshNeuron | NeuronList
                        Neuron to analyze. If MeshNeuron, will generate and
                        use a skeleton representation.
    seg_length :        int | float | str | list thereof, optional
                        Target segment length(s) `L`. If `seg_length` is
                        provided, each linear segment is further divided into
                        segments of exactly `seg_length` (geodesic) length
                        and the tortuosity is calculated for each of these
                        sub-segments. If `seg_length` is not provided, the
                        tortuosity is calculated for each linear segment as is.

                        If neuron(s) have their  `.units` set, you can also
                        pass a string such as "1 micron". `seg_length` must
                        be larger than the current sampling resolution of the
                        neuron. If you want to make sure that segments are as
                        close to length `L` as possible, consider resampling the
                        neuron using [`navis.resample_skeleton`][].

    Returns
    -------
    tortuosity :        float | np.array | pandas.DataFrame
                        If x is NeuronList, will return DataFrame.
                        If x is single TreeNeuron, will return either a
                        single float (if no or a single seg_length is queried)
                        or a DataFrame (if multiple seg_lengths are queried).

    See Also
    --------
    [`navis.segment_analysis`][]
                This function provides by-segment morphometrics, including
                tortuosity.

    Examples
    --------
    >>> import navis
    >>> n = navis.example_neurons(1)
    >>> # Calculate tortuosity as-is
    >>> T = navis.tortuosity(n)
    >>> round(T, 3)
    1.074
    >>> # Calculate tortuosity with 1 micron segment lengths
    >>> T = navis.tortuosity(n, seg_length='1 micron')
    >>> round(T, 3)
    1.054

    """
    if isinstance(x, core.NeuronList):
        if not isinstance(seg_length, (list, np.ndarray, tuple)):
            seg_length = [seg_length]  # type: ignore
        df = pd.DataFrame(
            [
                tortuosity(n, seg_length=seg_length)
                for n in config.tqdm(
                    x,
                    desc="Tortuosity",
                    disable=config.pbar_hide,
                    leave=config.pbar_leave,
                )
            ],
            index=x.id,
            columns=seg_length,
        ).T
        df.index.name = "seg_length"
        return df

    if isinstance(x, core.MeshNeuron):
        x = x.skeleton
    elif not isinstance(x, core.TreeNeuron):
        raise TypeError(f"Expected TreeNeuron(s), got {type(x)}")

    # Must accept tuples here too: the NeuronList branch above passes
    # sequences (incl. tuples) straight through to this recursion
    if isinstance(seg_length, (list, np.ndarray, tuple)):
        return [tortuosity(x, sl) for sl in seg_length]

    if seg_length is None:
        return _tortuosity_simple(x)
    else:
        return _tortuosity_segmented(x, seg_length)

Add small variance to color.

Source code in navis/plotting/colors.py
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
def vary_colors(color: AnyColor,
                by_max: float = .1) -> np.ndarray:
    """Add small variance to color."""
    # Translate color names/hex strings into RGB tuples first
    if isinstance(color, str):
        color = mcl.to_rgb(color)

    # Always work on a float copy so the caller's data is never mutated
    rgba = np.array(color, dtype=float)

    # Promote a single color to a (1, channels) array
    if rgba.ndim == 1:
        rgba = rgba.reshape(1, -1)

    # Random offsets in [-by_max/2, +by_max/2), drawn in steps of by_max/100
    jitter = np.random.randint(0, 100, rgba.shape) / 100 * by_max - by_max / 2

    # Only jitter the RGB channels; leave alpha (if present) untouched
    rgba[:, :3] += jitter[:, :3]

    return np.clip(rgba, 0, 1)

Turn neuron into voxels.

PARAMETER DESCRIPTION
x
        Neuron(s) to voxelize. Uses the neurons' nodes, vertices and
        points, respectively.

TYPE: TreeNeuron | MeshNeuron | Dotprops

pitch
        Side length(s) voxels. Can be isometric (float) or an
        iterable of dimensions in (x, y, z).

TYPE: float | iterable thereof

bounds
        Boundaries [in units of `x`] for the voxel grid. If not
        provided, will use `x.bbox`.

TYPE: (3, 2) or (2, 3) array DEFAULT: None

counts
        If True, voxel grid will have point counts for values
        instead of just True/False.

TYPE: bool DEFAULT: False

vectors
        If True, will also attach a vector field as `.vectors`
        property.

TYPE: bool DEFAULT: False

alphas
        If True, will also return a grid with alpha values as
        `.alpha` property.

TYPE: bool DEFAULT: False

smooth
        If non-zero, will apply a Gaussian filter with `smooth`
        as `sigma`.

TYPE: int DEFAULT: 0

RETURNS DESCRIPTION
VoxelNeuron

Has the voxel grid as .grid and (optionally) .vectors and .alphas properties. .grid data type depends on settings: - default = bool (i.e. True/False) - if counts=True = integer - if smooth=True = float Empty voxels will have vector (0, 0, 0) and alpha 0. Also note that data tables (e.g. connectors) are not carried over from the input neuron.

Examples:

>>> import navis
>>> # Get a skeleton
>>> n = navis.example_neurons(1)
>>> # Convert to voxel neuron
>>> vx = navis.voxelize(n, pitch='5 microns')
Source code in navis/conversion/wrappers.py
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
@utils.map_neuronlist(desc='Voxelizing', allow_parallel=True)
def voxelize(x: 'core.BaseNeuron',
             pitch: Union[list, tuple, float],
             bounds: Optional[list] = None,
             counts: bool = False,
             vectors: bool = False,
             alphas: bool = False,
             smooth: int = 0) -> 'core.VoxelNeuron':
    """Turn neuron into voxels.

    Parameters
    ----------
    x :             TreeNeuron | MeshNeuron | Dotprops
                    Neuron(s) to voxelize. Uses the neurons' nodes, vertices and
                    points, respectively.
    pitch :         float | iterable thereof
                    Side length(s) voxels. Can be isometric (float) or an
                    iterable of dimensions in (x, y, z).
    bounds :        (3, 2)  or (2, 3) array, optional
                    Boundaries [in units of `x`] for the voxel grid. If not
                    provided, will use `x.bbox`.
    counts :        bool
                    If True, voxel grid will have point counts for values
                    instead of just True/False.
    vectors :       bool
                    If True, will also attach a vector field as `.vectors`
                    property.
    alphas :        bool
                    If True, will also return a grid with alpha values as
                    `.alpha` property.
    smooth :        int
                    If non-zero, will apply a Gaussian filter with `smooth`
                    as `sigma`.

    Returns
    -------
    VoxelNeuron
                    Has the voxel grid as `.grid` and (optionally) `.vectors`
                    and `.alphas` properties. `.grid` data type depends
                    on settings:
                     - default = bool (i.e. True/False)
                     - if `counts=True` = integer
                     - if `smooth=True` = float
                    Empty voxels will have vector (0, 0, 0) and alpha 0. Also
                    note that data tables (e.g. `connectors`) are not carried
                    over from the input neuron.

    Examples
    --------
    >>> import navis
    >>> # Get a skeleton
    >>> n = navis.example_neurons(1)
    >>> # Convert to voxel neuron
    >>> vx = navis.voxelize(n, pitch='5 microns')

    """
    # Reject anything that isn't one of the supported neuron types up front
    supported = (core.TreeNeuron, core.MeshNeuron, core.Dotprops)
    if not isinstance(x, supported):
        raise TypeError(f'Unable to voxelize data of type {type(x)}')

    # Delegate the actual work to the conversion function
    return neuron2voxels(x,
                         pitch=pitch,
                         bounds=bounds,
                         counts=counts,
                         vectors=vectors,
                         alphas=alphas,
                         smooth=smooth)

Write Neuron/List to Hdf5 file.

PARAMETER DESCRIPTION
n
            Neuron(s) to write to file.

TYPE: Neuron | NeuronList

filepath
            Path to HDF5 file. Will be created if it does not
            exist. If it does exist, we will add data to it.

TYPE: str

serialized
            Whether to write a serialized (pickled) version of the
            neuron to file.

TYPE: bool DEFAULT: True

raw
            Whether to write the neurons' raw data to file. This
            is required to re-generate neurons from tools other
            than `navis` (e.g. R's `nat`). This follows the schema
            specified [here](https://github.com/flyconnectome/hnf).

TYPE: bool DEFAULT: False

append
            If file already exists, whether to append data or to
            overwrite the entire file.

TYPE: bool DEFAULT: True

overwrite_neurons
            If a given neuron already exists in the h5 file whether
            to overwrite it or throw an exception.

TYPE: bool DEFAULT: False

Only

annotations
            Whether to write annotations (e.g. "connectors")
            associated with the neuron(s) to file. Annotations
            must be pandas DataFrames. If a neuron does not contain
            a given annotation, it is silently skipped.

TYPE: str | list thereof DEFAULT: None

format
            Which version of the format specs to use. By default
            use latest. Note that we don't allow mixing format
            specs in the same HDF5 file. So if you want to write
            to a file which already contains data in a given
            format, you have to use that format.

TYPE: "latest" | "v1" DEFAULT: 'latest'

RETURNS DESCRIPTION
Nothing

Examples:

>>> import navis
>>> # First get mesh, skeleton and dotprop representations for some neurons
>>> sk = navis.example_neurons(5, kind='skeleton')
>>> me = navis.example_neurons(5, kind='mesh')
>>> dp = navis.make_dotprops(sk, k=5)
>>> # Write them to a file
>>> navis.write_h5(sk + me + dp, '~/test.h5', overwrite_neurons=True)
>>> # Read back from file
>>> nl = navis.read_h5('~/test.h5')
See Also

navis.read_h5 Read neurons from h5 file.

Source code in navis/io/hdf_io.py
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
def write_h5(n: 'core.NeuronObject',
             filepath: str,
             serialized: bool = True,
             raw: bool = False,
             annotations: Optional[Union[str, list]] = None,
             format: str = 'latest',
             append: bool = True,
             overwrite_neurons: bool = False) -> 'core.NeuronObject':
    """Write Neuron/List to Hdf5 file.

    Parameters
    ----------
    n :                 Neuron | NeuronList
                        Neuron(s) to write to file.
    filepath :          str
                        Path to HDF5 file. Created if it does not exist;
                        otherwise data is added to the existing file.
    serialized :        bool
                        Whether to store a serialized (pickled) copy of each
                        neuron in the file.
    raw :               bool
                        Whether to store the neurons' raw data. Required to
                        re-generate neurons with tools other than `navis`
                        (e.g. R's `nat`). Follows the schema specified
                        [here](https://github.com/flyconnectome/hnf).
    append :            bool
                        If the file already exists: append to it (`True`) or
                        overwrite it entirely (`False`).
    overwrite_neurons : bool
                        If a neuron already exists in the h5 file: overwrite
                        it (`True`) or raise an exception (`False`).

    Only relevant if `raw=True`:

    annotations :       str | list thereof, optional
                        Annotations (e.g. "connectors") to write alongside the
                        neuron(s). Annotations must be pandas DataFrames; any
                        neuron lacking a given annotation is silently skipped.
    format :            "latest" | "v1"
                        Version of the format specs to use (latest by default).
                        Format specs cannot be mixed within one HDF5 file, so
                        writing to an existing file requires its format.

    Returns
    -------
    Nothing

    Examples
    --------
    >>> import navis
    >>> # First get mesh, skeleton and dotprop representations for some neurons
    >>> sk = navis.example_neurons(5, kind='skeleton')
    >>> me = navis.example_neurons(5, kind='mesh')
    >>> dp = navis.make_dotprops(sk, k=5)
    >>> # Write them to a file
    >>> navis.write_h5(sk + me + dp, '~/test.h5', overwrite_neurons=True)
    >>> # Read back from file
    >>> nl = navis.read_h5('~/test.h5')

    See Also
    --------
    [`navis.read_h5`][]
                        Read neurons from h5 file.

    """
    # At least one representation (pickle or raw) has to be written
    if not (serialized or raw):
        raise ValueError('`serialized` and `raw` must not both be False.')

    utils.eval_param(format, name='format',
                     allowed_values=tuple(WRITERS.keys()))

    target = os.path.expanduser(filepath)

    # Look up the writer class for the requested format spec
    writer_cls = WRITERS[format]

    # Append vs truncate depending on `append`
    mode = 'a' if append else 'w'

    # Opening the writer opens the underlying HDF5 file
    with writer_cls(target, mode=mode) as handle:
        handle.write_base_info()
        handle.write_neurons(n,
                             raw=raw,
                             serialized=serialized,
                             overwrite=overwrite_neurons,
                             annotations=annotations)

Save neuron(s) to json-formatted file.

Nodes and connectors are serialised using pandas' to_json(). Most other items in the neuron's dict are serialised using json.dumps(). Properties not serialised: .graph, .igraph.

PARAMETER DESCRIPTION
x
    Neuron(s) to save.

TYPE: TreeNeuron | NeuronList

filepath
    File to save data to. If `None` will return a json-formatted
    string.

TYPE: str

**kwargs
    Parameters passed to `json.dumps()` and
    `pandas.DataFrame.to_json()`.

DEFAULT: {}

RETURNS DESCRIPTION
str

Only if filepath=None.

See Also

navis.read_json Read json back into navis neurons.

Source code in navis/io/json_io.py
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
def write_json(x: 'core.NeuronObject', filepath=None, **kwargs) -> str:
    """Save neuron(s) to json-formatted file.

    Nodes and connectors are serialised using pandas' `to_json()`. Most
    other items in the neuron's __dict__ are serialised using
    `json.dumps()`. Properties not serialised: `.graph`, `.igraph`.

    Parameters
    ----------
    x :         TreeNeuron | NeuronList
                Neuron(s) to save.
    filepath :  str, optional
                File to save data to. If `None` will return a json-formatted
                string.
    **kwargs
                Parameters passed to `json.dumps()` and
                `pandas.DataFrame.to_json()`.

    Returns
    -------
    str
                Only if `filepath=None`.

    See Also
    --------
    [`navis.read_json`][]
                Read json back into navis neurons.

    """
    if not isinstance(x, (core.TreeNeuron, core.NeuronList)):
        raise TypeError(f'Unable to convert data of type "{type(x)}"')

    # Wrap single neurons so we can treat everything as a list below
    if isinstance(x, core.BaseNeuron):
        x = core.NeuronList([x])

    data = []
    for n in x:
        this_data = {'id': n.id}
        for k, v in n.__dict__.items():
            # Only string keys can become JSON object keys
            if not isinstance(k, str):
                continue
            # Skip private attributes except the node/connector tables
            if k.startswith('_') and k not in ['_nodes', '_connectors']:
                continue

            # DataFrames and arrays need explicit conversion; everything
            # else is left to json.dump(s) below
            if isinstance(v, pd.DataFrame):
                this_data[k] = v.to_json()
            elif isinstance(v, np.ndarray):
                this_data[k] = v.tolist()
            else:
                this_data[k] = v

        data.append(this_data)

    if filepath is not None:
        with open(filepath, 'w') as f:
            json.dump(data, f, **kwargs)
    else:
        return json.dumps(data, **kwargs)

Export meshes (MeshNeurons, Volumes, Trimeshes) to disk.

Under the hood this is using trimesh to export meshes.

PARAMETER DESCRIPTION
x
            If multiple objects, will generate a file for each
            neuron (see also `filepath`).

TYPE: MeshNeuron | Volume | Trimesh | NeuronList

filepath
            If `None`, will return byte string or list of
            thereof. If filepath will save to this file. If path
            will save neuron(s) in that path using `{x.id}`
            as filename(s). If list, input must be NeuronList and
            a filepath must be provided for each neuron.

TYPE: None | str | list DEFAULT: None

filetype
            If `filepath` does not include the file extension,
            you need to provide it as `filetype`.

TYPE: stl | ply | obj DEFAULT: None

RETURNS DESCRIPTION
None

If filepath is not None.

bytes

If filepath is None.

See Also

navis.read_mesh : Import neurons.
navis.write_precomputed : Write meshes to Neuroglancer's precomputed format.

Examples:

Write MeshNeurons to folder:

>>> import navis
>>> nl = navis.example_neurons(3, kind='mesh')
>>> navis.write_mesh(nl, tmp_dir, filetype='obj')

Specify the filenames:

>>> import navis
>>> nl = navis.example_neurons(3, kind='mesh')
>>> navis.write_mesh(nl, tmp_dir / '{neuron.name}.obj')

Write directly to zip archive:

>>> import navis
>>> nl = navis.example_neurons(3, kind='mesh')
>>> navis.write_mesh(nl, tmp_dir / 'meshes.zip', filetype='obj')
Source code in navis/io/mesh_io.py
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
def write_mesh(
    x: Union["core.NeuronList", "core.MeshNeuron", "core.Volume", "tm.Trimesh"],
    filepath: Optional[str] = None,
    filetype: str = None,
) -> None:
    """Export meshes (MeshNeurons, Volumes, Trimeshes) to disk.

    Under the hood this is using trimesh to export meshes.

    Parameters
    ----------
    x :                 MeshNeuron | Volume | Trimesh | NeuronList
                        If multiple objects, a file is generated per neuron
                        (see also `filepath`).
    filepath :          None | str | list, optional
                        If `None`, returns byte string(s). If a filename,
                        saves to that file. If a path, saves neuron(s) there
                        using `{x.id}` as filename(s). If a list, input must
                        be a NeuronList with one filepath per neuron.
    filetype :          stl | ply | obj, optional
                        Required if `filepath` does not include the file
                        extension.

    Returns
    -------
    None
                        If filepath is not `None`.
    bytes
                        If filepath is `None`.

    See Also
    --------
    [`navis.read_mesh`][]
                        Import neurons.
    [`navis.write_precomputed`][]
                        Write meshes to Neuroglancer's precomputed format.

    Examples
    --------

    Write `MeshNeurons` to folder:

    >>> import navis
    >>> nl = navis.example_neurons(3, kind='mesh')
    >>> navis.write_mesh(nl, tmp_dir, filetype='obj')

    Specify the filenames:

    >>> import navis
    >>> nl = navis.example_neurons(3, kind='mesh')
    >>> navis.write_mesh(nl, tmp_dir / '{neuron.name}.obj')

    Write directly to zip archive:

    >>> import navis
    >>> nl = navis.example_neurons(3, kind='mesh')
    >>> navis.write_mesh(nl, tmp_dir / 'meshes.zip', filetype='obj')

    """
    if filetype is None:
        # No explicit filetype -> try inferring it from the file extension
        if filepath is not None:
            fp = str(filepath)
            filetype = next(
                (ext for ext in MESH_WRITE_EXT if fp.endswith(f".{ext}")), None
            )

        if not filetype:
            raise ValueError(
                "Must provide mesh type either explicitly via "
                "`filetype` variable or implicitly via the "
                "file extension in `filepath`"
            )
    else:
        # Explicit filetype -> validate against supported formats
        utils.eval_param(filetype, name="filetype", allowed_values=MESH_WRITE_EXT)

    mesh_writer = base.Writer(_write_mesh, ext=f".{filetype}")

    return mesh_writer.write_any(x, filepath=filepath)

Write VoxelNeurons or Dotprops to NRRD file(s).

PARAMETER DESCRIPTION
x
            If multiple neurons, will generate a NRRD file
            for each neuron (see also `filepath`).

TYPE: VoxelNeuron | Dotprops | NeuronList

filepath
            Destination for the NRRD files. See examples for options.
            If `x` is multiple neurons, `filepath` must either
            be a folder, a "formattable" filename (see Examples) or
            a list of filenames (one for each neuron in `x`).
            Existing files will be overwritten!

TYPE: str | pathlib.Path | list thereof

compression_level
            Lower = faster writing but larger files. Higher = slower
            writing but smaller files.

TYPE: int 1-9 DEFAULT: 3

attrs
            Any additional attributes will be written to NRRD header.

TYPE: dict DEFAULT: None

RETURNS DESCRIPTION
Nothing

Examples:

Save a single neuron to a specific file:

>>> import navis
>>> n = navis.example_neurons(1, kind='skeleton')
>>> vx = navis.voxelize(n, pitch='2 microns')
>>> navis.write_nrrd(vx, tmp_dir / 'my_neuron.nrrd')

Save multiple neurons to a folder (must exist). Filenames will be autogenerated as "{neuron.id}.nrrd":

>>> import navis
>>> nl = navis.example_neurons(5, kind='skeleton')
>>> dp = navis.make_dotprops(nl, k=5)
>>> navis.write_nrrd(dp, tmp_dir)

Save multiple neurons to a folder but modify the pattern for the autogenerated filenames:

>>> import navis
>>> nl = navis.example_neurons(5, kind='skeleton')
>>> vx = navis.voxelize(nl, pitch='2 microns')
>>> navis.write_nrrd(vx, tmp_dir / 'voxels-{neuron.name}.nrrd')

Save multiple neurons to a zip file:

>>> import navis
>>> nl = navis.example_neurons(5, kind='skeleton')
>>> vx = navis.voxelize(nl, pitch='2 microns')
>>> navis.write_nrrd(vx, tmp_dir / 'neuronlist.zip')

Save multiple neurons to a zip file but modify the filenames:

>>> import navis
>>> nl = navis.example_neurons(5, kind='skeleton')
>>> vx = navis.voxelize(nl, pitch='2 microns')
>>> navis.write_nrrd(vx, tmp_dir / 'voxels-{neuron.name}.nrrd@neuronlist.zip')
See Also

navis.read_nrrd Import VoxelNeuron from NRRD files.

Source code in navis/io/nrrd_io.py
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
def write_nrrd(
    x: "core.NeuronObject",
    filepath: Union[str, Path],
    compression_level: int = 3,
    attrs: Optional[Dict[str, Any]] = None,
) -> None:
    """Write VoxelNeurons or Dotprops to NRRD file(s).

    Parameters
    ----------
    x :                 VoxelNeuron | Dotprops | NeuronList
                        If multiple neurons, a NRRD file is generated per
                        neuron (see also `filepath`).
    filepath :          str | pathlib.Path | list thereof
                        Destination for the NRRD files. See examples for
                        options. For multiple neurons, `filepath` must be a
                        folder, a "formattable" filename (see Examples) or a
                        list of filenames (one per neuron in `x`). Existing
                        files will be overwritten!
    compression_level : int 1-9
                        Lower = faster writing but larger files. Higher =
                        slower writing but smaller files.
    attrs :             dict
                        Any additional attributes will be written to NRRD
                        header.

    Returns
    -------
    Nothing

    Examples
    --------
    Save a single neuron to a specific file:

    >>> import navis
    >>> n = navis.example_neurons(1, kind='skeleton')
    >>> vx = navis.voxelize(n, pitch='2 microns')
    >>> navis.write_nrrd(vx, tmp_dir / 'my_neuron.nrrd')

    Save multiple neurons to a folder (must exist). Filenames will be
    autogenerated as "{neuron.id}.nrrd":

    >>> import navis
    >>> nl = navis.example_neurons(5, kind='skeleton')
    >>> dp = navis.make_dotprops(nl, k=5)
    >>> navis.write_nrrd(dp, tmp_dir)

    Save multiple neurons to a folder but modify the pattern for the
    autogenerated filenames:

    >>> import navis
    >>> nl = navis.example_neurons(5, kind='skeleton')
    >>> vx = navis.voxelize(nl, pitch='2 microns')
    >>> navis.write_nrrd(vx, tmp_dir / 'voxels-{neuron.name}.nrrd')

    Save multiple neurons to a zip file:

    >>> import navis
    >>> nl = navis.example_neurons(5, kind='skeleton')
    >>> vx = navis.voxelize(nl, pitch='2 microns')
    >>> navis.write_nrrd(vx, tmp_dir / 'neuronlist.zip')

    Save multiple neurons to a zip file but modify the filenames:

    >>> import navis
    >>> nl = navis.example_neurons(5, kind='skeleton')
    >>> vx = navis.voxelize(nl, pitch='2 microns')
    >>> navis.write_nrrd(vx, tmp_dir / 'voxels-{neuron.name}.nrrd@neuronlist.zip')

    See Also
    --------
    [`navis.read_nrrd`][]
                        Import VoxelNeuron from NRRD files.

    """
    # Validate compression level up front (gzip accepts only 1-9)
    level = int(compression_level)
    if not 1 <= level <= 9:
        raise ValueError("`compression_level` must be 1-9, got " f"{level}")

    nrrd_writer = base.Writer(_write_nrrd, ext=".nrrd")

    extra_attrs = attrs or {}
    return nrrd_writer.write_any(
        x, filepath=filepath, compression_level=level, **extra_attrs
    )

Write TreeNeuron(s) or Dotprops to parquet file.

See here for format specifications.

PARAMETER DESCRIPTION
x
            Neuron(s) to save. If NeuronList must contain either
            only TreeNeurons or only Dotprops.

TYPE: TreeNeuron | Dotprop | NeuronList thereof

filepath
            Destination for the file.

TYPE: str | pathlib.Path

write_meta
            Whether to also write neuron properties to file. By
            default this is `.name`, `.units` and `.soma`. You can
            change which properties are written by providing them as
            list of strings.

TYPE: bool | list of str DEFAULT: True

See Also

navis.read_parquet : Import skeleton from parquet file.
navis.scan_parquet : Scan parquet file for its contents.

Examples:

Save a bunch of skeletons:

>>> import navis
>>> nl = navis.example_neurons(3, kind='skeleton')
>>> navis.write_parquet(nl, tmp_dir / 'skeletons.parquet')

Inspect that file's content

>>> import navis
>>> contents = navis.scan_parquet(tmp_dir / 'skeletons.parquet')
>>> contents
           id        units       name    soma
0   722817260  8 nanometer  DA1_lPN_R     NaN
1  1734350908  8 nanometer  DA1_lPN_R     [6]
2  1734350788  8 nanometer  DA1_lPN_R  [4177]

Read the skeletons back in

>>> import navis
>>> nl = navis.read_parquet(tmp_dir / 'skeletons.parquet')
>>> len(nl)
3
Source code in navis/io/pq_io.py
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
def write_parquet(x: 'core.NeuronObject',
                  filepath: Union[str, Path],
                  write_meta: bool = True) -> None:
    """Write TreeNeuron(s) or Dotprops to parquet file.

    See [here](https://github.com/navis-org/navis/blob/master/navis/io/pq_io.md)
    for format specifications.

    Parameters
    ----------
    x :                 TreeNeuron | Dotprop | NeuronList thereof
                        Neuron(s) to save. A NeuronList must contain either
                        only TreeNeurons or only Dotprops.
    filepath :          str | pathlib.Path
                        Destination for the file.
    write_meta :        bool | list of str
                        Whether to also write neuron properties to file
                        (by default `.name`, `.units` and `.soma`). Provide a
                        list of strings to choose which properties are
                        written.

    See Also
    --------
    [`navis.read_parquet`][]
                        Import skeleton from parquet file.
    [`navis.scan_parquet`][]
                        Scan parquet file for its contents.

    Examples
    --------
    Save a bunch of skeletons:

    >>> import navis
    >>> nl = navis.example_neurons(3, kind='skeleton')
    >>> navis.write_parquet(nl, tmp_dir / 'skeletons.parquet')

    Inspect that file's content

    >>> import navis
    >>> contents = navis.scan_parquet(tmp_dir / 'skeletons.parquet')
    >>> contents                                                # doctest: +SKIP
               id        units       name    soma
    0   722817260  8 nanometer  DA1_lPN_R     NaN
    1  1734350908  8 nanometer  DA1_lPN_R     [6]
    2  1734350788  8 nanometer  DA1_lPN_R  [4177]

    Read the skeletons back in

    >>> import navis
    >>> nl = navis.read_parquet(tmp_dir / 'skeletons.parquet')
    >>> len(nl)
    3

    """
    filepath = Path(filepath).expanduser()

    # Dispatch to the skeleton or dotprops writer; anything else is an error
    if isinstance(x, core.NeuronList):
        types = x.types
        if types == (core.TreeNeuron,):
            writer_func = _write_parquet_skeletons
        elif types == (core.Dotprops,):
            writer_func = _write_parquet_dotprops
        else:
            raise TypeError('Can only write either TreeNeurons or Dotprops to '
                            f'parquet but NeuronList contains {types}')
        # IDs are used as keys in the file and therefore must be unique
        if x.is_degenerated:
            raise ValueError('NeuronList must not contain non-unique IDs')
    elif isinstance(x, core.TreeNeuron):
        writer_func = _write_parquet_skeletons
    elif isinstance(x, core.Dotprops):
        writer_func = _write_parquet_dotprops
    else:
        raise TypeError('Can only write TreeNeurons or Dotprops to parquet, '
                        f'got "{type(x)}"')

    return writer_func(x, filepath=filepath, write_meta=write_meta)

Export skeletons or meshes to neuroglancer's (legacy) precomputed format.

Note that you should not mix meshes and skeletons in the same folder!

Follows the formats specified here.

PARAMETER DESCRIPTION
x
            If multiple neurons, will generate a file for each
            neuron (see also `filepath`). For use in neuroglancer
            coordinates should generally be in nanometers.

TYPE: TreeNeuron | MeshNeuron | Volume | Trimesh | NeuronList

filepath
            If `None`, will return byte string or list of
            thereof. If filepath will save to this file. If path
            will save neuron(s) in that path using `{x.id}`
            as filename(s). If list, input must be NeuronList and
            a filepath must be provided for each neuron.

TYPE: None | str | list DEFAULT: None

write_info
            Whether to also write a JSON-formatted `info` file that
            can be parsed by e.g. neuroglancer. This only works if
            inputs are either only skeletons or only meshes!

TYPE: bool DEFAULT: True

write_manifest
            For meshes only: whether to also write manifests. For
            each mesh we will create a JSON-encoded `{id}:0` file
            that contains a "fragments" entry that maps to the
            actual filename. Note that this will not work on Windows
            because colons aren't allowed in file names and on OSX
            the colon will show up as a `/` in the Finder.

TYPE: bool DEFAULT: False

radius
            For TreeNeurons only: whether to write radius as
            additional vertex property.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
None

If filepath is not None.

bytes

If filepath is None.

See Also

navis.read_precomputed : Import neurons from neuroglancer's precomputed format.
navis.write_mesh : Write meshes to generic mesh formats (obj, stl, etc).

Examples:

Write skeletons:

>>> import navis
>>> n = navis.example_neurons(3, kind='skeleton')
>>> navis.write_precomputed(n, tmp_dir)

Write meshes:

>>> import navis
>>> n = navis.example_neurons(3, kind='mesh')
>>> navis.write_precomputed(n, tmp_dir)

Write directly to zip archive:

>>> import navis
>>> n = navis.example_neurons(3, kind='skeleton')
>>> navis.write_precomputed(n, tmp_dir / 'precomputed.zip')
Source code in navis/io/precomputed_io.py
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
def write_precomputed(
    x: Union["core.NeuronList", "core.TreeNeuron", "core.MeshNeuron", "core.Volume"],
    filepath: Optional[str] = None,
    write_info: bool = True,
    write_manifest: bool = False,
    radius: bool = False,
) -> None:
    """Export skeletons or meshes to neuroglancer's (legacy) precomputed format.

    Note that you should not mix meshes and skeletons in the same folder!

    Follows the formats specified
    [here](https://github.com/google/neuroglancer/tree/master/src/neuroglancer/datasource/precomputed).

    Parameters
    ----------
    x :                 TreeNeuron | MeshNeuron | Volume | Trimesh | NeuronList
                        If multiple neurons, a file is generated per neuron
                        (see also `filepath`). For use in neuroglancer,
                        coordinates should generally be in nanometers.
    filepath :          None | str | list, optional
                        If `None`, returns byte string(s). If a filename,
                        saves to that file. If a path, saves neuron(s) there
                        using `{x.id}` as filename(s). If a list, input must
                        be a NeuronList with one filepath per neuron.
    write_info :        bool
                        Whether to also write a JSON-formatted `info` file
                        that can be parsed by e.g. neuroglancer. Only works
                        if inputs are either only skeletons or only meshes!
    write_manifest :    bool
                        For meshes only: whether to also write manifests. For
                        each mesh a JSON-encoded `{id}:0` file is created
                        whose "fragments" entry maps to the actual filename.
                        Does not work on Windows (colons are not allowed in
                        file names) and on OSX the colon shows up as a `/`
                        in the Finder.
    radius :            bool
                        For TreeNeurons only: whether to write radius as
                        additional vertex property.

    Returns
    -------
    None
                        If filepath is not `None`.
    bytes
                        If filepath is `None`.

    See Also
    --------
    [`navis.read_precomputed`][]
                        Import neurons from neuroglancer's precomputed format.
    [`navis.write_mesh`][]
                        Write meshes to generic mesh formats (obj, stl, etc).

    Examples
    --------

    Write skeletons:

    >>> import navis
    >>> n = navis.example_neurons(3, kind='skeleton')
    >>> navis.write_precomputed(n, tmp_dir)

    Write meshes:

    >>> import navis
    >>> n = navis.example_neurons(3, kind='mesh')
    >>> navis.write_precomputed(n, tmp_dir)

    Write directly to zip archive:

    >>> import navis
    >>> n = navis.example_neurons(3, kind='skeleton')
    >>> navis.write_precomputed(n, tmp_dir / 'precomputed.zip')

    """
    # PrecomputedWriter handles both skeleton and mesh payloads; no file
    # extension because the precomputed format uses bare filenames
    pc_writer = PrecomputedWriter(_write_precomputed, ext=None)

    options = dict(
        filepath=filepath,
        write_info=write_info,
        write_manifest=write_manifest,
        radius=radius,
    )
    return pc_writer.write_any(x, **options)

Write TreeNeuron(s) to SWC.

Follows the format specified here.

PARAMETER DESCRIPTION
x
            If multiple neurons, will generate a single SWC file
            for each neuron (see also `filepath`).

TYPE: TreeNeuron | NeuronList

filepath
            Destination for the SWC files. See examples for options.
            If `x` is multiple neurons, `filepath` must either
            be a folder, a "formattable" filename, a filename ending
            in `.zip` or a list of filenames (one for each neuron
            in `x`). Existing files will be overwritten!

TYPE: str | pathlib.Path | list thereof

header
            Header for SWC file. If not provided, will use generic
            header.

TYPE: str | None DEFAULT: None

write_meta
            If not False, will add meta data as JSON-formatted
            string to the header::

               True: adds neuron `id`, `name` and `units`
               list: use to set which properties, e.g. ['id', 'units']
               dict: use to set meta data, e.g. {'template': 'JRC2018F'}

            This parameter is ignored if custom header is provided.

TYPE: bool | list | dict DEFAULT: True

labels
            Node labels. Can be::

                str : column name in node table
                dict: must be of format {node_id: 'label', ...}.
                bool: if True, will generate automatic labels, if False all nodes have label "0".

TYPE: str | dict | bool DEFAULT: True

export_connectors
            If True, will label nodes with pre- ("7") and
            postsynapse ("8"). Because only one label can be given
            this might drop synapses (i.e. in case of multiple
            pre- and/or postsynapses on a single node)! `labels`
            must be `True` for this to have any effect.

TYPE: bool DEFAULT: False

return_node_map
            If True, will return a dictionary mapping the old node
            ID to the new reindexed node IDs in the file.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
node_map

Only if return_node_map=True.

TYPE: dict

See Also

navis.read_swc Import skeleton from SWC files.

Examples:

Save a single neuron to a specific file:

>>> import navis
>>> n = navis.example_neurons(1, kind='skeleton')
>>> navis.write_swc(n, tmp_dir / 'my_neuron.swc')

Save two neurons to specific files:

>>> import navis
>>> nl = navis.example_neurons(2, kind='skeleton')
>>> navis.write_swc(nl, [tmp_dir / 'my_neuron1.swc', tmp_dir / 'my_neuron2.swc'])

Save multiple neurons to a folder (must exist). Filenames will be autogenerated as "{neuron.id}.swc":

>>> import navis
>>> nl = navis.example_neurons(5, kind='skeleton')
>>> navis.write_swc(nl, tmp_dir)

Save multiple neurons to a folder but modify the pattern for the autogenerated filenames:

>>> import navis
>>> nl = navis.example_neurons(5, kind='skeleton')
>>> navis.write_swc(nl, tmp_dir / 'skel-{neuron.name}.swc')

Save multiple neurons to a zip file:

>>> import navis
>>> nl = navis.example_neurons(5, kind='skeleton')
>>> navis.write_swc(nl, tmp_dir / 'neuronlist.zip')

Save multiple neurons to a zip file but modify the filenames:

>>> import navis
>>> nl = navis.example_neurons(5, kind='skeleton')
>>> navis.write_swc(nl, tmp_dir / 'skel-{neuron.name}.swc@neuronlist.zip')
Source code in navis/io/swc_io.py
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
def write_swc(
    x: "core.NeuronObject",
    filepath: Union[str, Path],
    header: Optional[str] = None,
    write_meta: Union[bool, List[str], dict] = True,
    labels: Union[str, dict, bool] = True,
    export_connectors: bool = False,
    return_node_map: bool = False,
) -> None:
    """Write TreeNeuron(s) to SWC.

    Follows the format specified
    [here](http://www.neuronland.org/NLMorphologyConverter/MorphologyFormats/SWC/Spec.html).

    Parameters
    ----------
    x :                 TreeNeuron | NeuronList
                        If multiple neurons, will generate a single SWC file
                        for each neuron (see also `filepath`).
    filepath :          str | pathlib.Path | list thereof
                        Destination for the SWC files. See examples for options.
                        If `x` is multiple neurons, `filepath` must either
                        be a folder, a "formattable" filename, a filename ending
                        in `.zip` or a list of filenames (one for each neuron
                        in `x`). Existing files will be overwritten!
    header :            str | None, optional
                        Header for SWC file. If not provided, will use generic
                        header.
    write_meta :        bool | list | dict
                        If not False, will add meta data as JSON-formatted
                        string to the header::

                           True: adds neuron `id`, `name` and `units`
                           list: use to set which properties, e.g. ['id', 'units']
                           dict: use to set meta data, e.g. {'template': 'JRC2018F'}

                        This parameter is ignored if custom header is provided.
    labels :            str | dict | bool, optional
                        Node labels. Can be::

                            str : column name in node table
                            dict: must be of format {node_id: 'label', ...}.
                            bool: if True, will generate automatic labels, if False all nodes have label "0".

    export_connectors : bool, optional
                        If True, will label nodes with pre- ("7") and
                        postsynapse ("8"). Because only one label can be given
                        this might drop synapses (i.e. in case of multiple
                        pre- and/or postsynapses on a single node)! `labels`
                        must be `True` for this to have any effect.
    return_node_map :   bool
                        If True, will return a dictionary mapping the old node
                        ID to the new reindexed node IDs in the file.

    Returns
    -------
    node_map :          dict
                        Only if `return_node_map=True`.

    See Also
    --------
    [`navis.read_swc`][]
                        Import skeleton from SWC files.

    Examples
    --------
    Save a single neuron to a specific file:

    >>> import navis
    >>> n = navis.example_neurons(1, kind='skeleton')
    >>> navis.write_swc(n, tmp_dir / 'my_neuron.swc')

    Save two neurons to specific files:

    >>> import navis
    >>> nl = navis.example_neurons(2, kind='skeleton')
    >>> navis.write_swc(nl, [tmp_dir / 'my_neuron1.swc', tmp_dir / 'my_neuron2.swc'])

    Save multiple neurons to a folder (must exist). Filenames will be
    autogenerated as "{neuron.id}.swc":

    >>> import navis
    >>> nl = navis.example_neurons(5, kind='skeleton')
    >>> navis.write_swc(nl, tmp_dir)

    Save multiple neurons to a folder but modify the pattern for the
    autogenerated filenames:

    >>> import navis
    >>> nl = navis.example_neurons(5, kind='skeleton')
    >>> navis.write_swc(nl, tmp_dir / 'skel-{neuron.name}.swc')

    Save multiple neurons to a zip file:

    >>> import navis
    >>> nl = navis.example_neurons(5, kind='skeleton')
    >>> navis.write_swc(nl, tmp_dir / 'neuronlist.zip')

    Save multiple neurons to a zip file but modify the filenames:

    >>> import navis
    >>> nl = navis.example_neurons(5, kind='skeleton')
    >>> navis.write_swc(nl, tmp_dir / 'skel-{neuron.name}.swc@neuronlist.zip')

    """

    def _not_a_skeleton_error(obj) -> TypeError:
        """Build an informative TypeError for a non-TreeNeuron input."""
        msg = f'Can only write TreeNeurons to SWC, not "{type(obj)}"'
        if isinstance(obj, core.Dotprops):
            msg += (
                ". For Dotprops, you can use either `navis.write_nrrd`"
                " or `navis.write_parquet`."
            )
        return TypeError(msg)

    # Make sure inputs are only TreeNeurons
    if isinstance(x, core.NeuronList):
        for n in x:
            if not isinstance(n, core.TreeNeuron):
                raise _not_a_skeleton_error(n)
    elif not isinstance(x, core.TreeNeuron):
        # Fix: this branch previously built its message from the undefined
        # name `n` (raising NameError instead of the intended TypeError);
        # the offending object here is `x` itself.
        raise _not_a_skeleton_error(x)

    writer = base.Writer(write_func=_write_swc, ext=".swc")

    return writer.write_any(
        x,
        filepath=filepath,
        header=header,
        write_meta=write_meta,
        labels=labels,
        export_connectors=export_connectors,
        return_node_map=return_node_map,
    )

Apply transform(s) to data.

Notes

For Neurons only: whether there is a change in units during transformation (e.g. nm -> um) is inferred by comparing distances between x/y/z coordinates before and after transform. This guesstimate is then used to convert .units and node/soma radii. This works reasonably well with base 10 increments (e.g. nm -> um) but is off with odd changes in units.

PARAMETER DESCRIPTION
x
            Data to transform. Dataframe must contain `['x', 'y', 'z']`
            columns. Numpy array must be shape `(N, 3)`.

TYPE: Neuron/List | Volume/Trimesh | numpy.ndarray | pandas.DataFrame

transform
            Either a single transform or a transform sequence.

TYPE: Transform/Sequence or list thereof

affine_fallback
            In some cases the non-rigid transformation of points
            can fail - for example if points are outside the
            deformation field. If that happens, they will be
            returned as `NaN`. Unless `affine_fallback` is
            `True`, in which case we will apply only the rigid
            affine part of the transformation to at least get close
            to the correct coordinates.

TYPE: bool DEFAULT: True

caching
            If True, will (pre-)cache data for transforms whenever
            possible. Depending on the data and the type of
            transforms this can tremendously speed things up at the
            cost of increased memory usage:
              - `False` = no upfront cost, lower memory footprint
              - `True` = higher upfront cost, most definitely faster
            Only applies if input is NeuronList and if transforms
            include H5 transform.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
same type as `x`

Copy of input with transformed coordinates.

Examples:

>>> import navis
>>> # Example neurons are in 8nm voxel space
>>> nl = navis.example_neurons()
>>> # Make a simple Affine transform to go from voxel to nanometers
>>> import numpy as np
>>> M = np.diag([8, 8, 8, 8])
>>> tr = navis.transforms.AffineTransform(M)
>>> # Apply the transform
>>> xf = navis.xform(nl, tr)
See Also

navis.xform_brain Higher level function that finds and applies a sequence of transforms to go from one template brain to another.

Source code in navis/transforms/xfm_funcs.py
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
def xform(x: Union['core.NeuronObject', 'pd.DataFrame', 'np.ndarray'],
          transform: Union[BaseTransform, TransformSequence],
          affine_fallback: bool = True,
          caching: bool = True) -> Union['core.NeuronObject',
                                         'pd.DataFrame',
                                         'np.ndarray']:
    """Apply transform(s) to data.

    Notes
    -----
    For Neurons only: whether there is a change in units during transformation
    (e.g. nm -> um) is inferred by comparing distances between x/y/z coordinates
    before and after transform. This guesstimate is then used to convert
    `.units` and node/soma radii. This works reasonably well with base 10
    increments (e.g. nm -> um) but is off with odd changes in units.

    Parameters
    ----------
    x :                 Neuron/List | Volume/Trimesh | numpy.ndarray | pandas.DataFrame
                        Data to transform. Dataframe must contain `['x', 'y', 'z']`
                        columns. Numpy array must be shape `(N, 3)`.
    transform :         Transform/Sequence or list thereof
                        Either a single transform or a transform sequence.
    affine_fallback :   bool
                        In some cases the non-rigid transformation of points
                        can fail - for example if points are outside the
                        deformation field. If that happens, they will be
                        returned as `NaN`. Unless `affine_fallback` is
                        `True`, in which case we will apply only the rigid
                        affine part of the transformation to at least get close
                        to the correct coordinates.
    caching :           bool
                        If True, will (pre-)cache data for transforms whenever
                        possible. Depending on the data and the type of
                        transforms this can tremendously speed things up at the
                        cost of increased memory usage:
                          - `False` = no upfront cost, lower memory footprint
                          - `True` = higher upfront cost, most definitely faster
                        Only applies if input is NeuronList and if transforms
                        include H5 transform.

    Returns
    -------
    same type as `x`
                        Copy of input with transformed coordinates.

    Examples
    --------
    >>> import navis
    >>> # Example neurons are in 8nm voxel space
    >>> nl = navis.example_neurons()
    >>> # Make a simple Affine transform to go from voxel to nanometers
    >>> import numpy as np
    >>> M = np.diag([8, 8, 8, 8])
    >>> tr = navis.transforms.AffineTransform(M)
    >>> # Apply the transform
    >>> xf = navis.xform(nl, tr)

    See Also
    --------
    [`navis.xform_brain`][]
                    Higher level function that finds and applies a sequence of
                    transforms to go from one template brain to another.

    """
    # We need to work with TransformSequence: unpack lists/arrays of
    # transforms, wrap single transforms, reject anything else.
    if isinstance(transform, (list, np.ndarray)):
        transform = TransformSequence(*transform)
    elif isinstance(transform, BaseTransform):
        transform = TransformSequence(transform)
    elif not isinstance(transform, TransformSequence):
        raise TypeError(f'Expected Transform or TransformSequence, got "{type(transform)}"')

    if isinstance(x, core.NeuronList):
        if len(x) == 1:
            # Single-neuron list: unwrap and fall through to the
            # BaseNeuron branch below.
            x = x[0]
        else:
            xf = []
            # Get the transformation sequence
            with TransOptimizer(transform, bbox=x.bbox, caching=caching):
                try:
                    # Recurse into this function once per neuron
                    for i, n in enumerate(config.tqdm(x, desc='Xforming',
                                                      disable=config.pbar_hide,
                                                      leave=config.pbar_leave)):
                        xf.append(xform(n,
                                        transform=transform,
                                        caching=caching,
                                        affine_fallback=affine_fallback))

                        # If not caching we will clear the map cache after
                        # each neuron to free memory
                        if not caching:
                            _get_coordinates_map.cache_clear()
                except BaseException:
                    raise
                finally:
                    # Make sure we clear the coordinate map cache when done
                    _get_coordinates_map.cache_clear()

            # Return as the same container class we received (e.g. NeuronList)
            return x.__class__(xf)

    if isinstance(x, core.BaseNeuron):
        # VoxelNeurons are a special case and have hence their own function
        if isinstance(x, core.VoxelNeuron):
            return _xform_image(x, transform=transform)

        xf = x.copy()
        # We will collate spatial data to reduce overhead from calling
        # R's xform_brain. NOTE: the slicing further down relies on the
        # exact order data is appended to `xyz` here (coordinates first,
        # then optional Dotprops helper points, then connectors last).
        if isinstance(xf, core.TreeNeuron):
            xyz = xf.nodes[['x', 'y', 'z']].values
        elif isinstance(xf, core.MeshNeuron):
            xyz = xf.vertices
        elif isinstance(xf, core.Dotprops):
            xyz = xf.points
            # If this dotprops has a `k`, we only need to transform points and
            # can regenerate the rest. If not, we need to make helper points
            # to carry over vectors
            if isinstance(xf.k, type(None)) or xf.k <= 0:
                # To avoid problems with these helpers we need to make sure
                # they aren't too close to their cognate points (otherwise we'll
                # get NaNs later). We can fix this by scaling the vector by the
                # sampling resolution which should also help make things less
                # noisy.
                hp = xf.points + xf.vect * xf.sampling_resolution
                xyz = np.append(xyz, hp, axis=0)
        else:
            raise TypeError(f"Don't know how to transform neuron of type '{type(xf)}'")

        # Add connectors if they exist
        if xf.has_connectors:
            xyz = np.vstack([xyz, xf.connectors[['x', 'y', 'z']].values])

        # Do the xform of all spatial data
        xyz_xf = xform(xyz,
                       transform=transform,
                       affine_fallback=affine_fallback)

        # Guess change in spatial units (see Notes in docstring)
        if xyz.shape[0] > 1:
            change, magnitude = _guess_change(xyz, xyz_xf, sample=1000)
        else:
            # Can't estimate from a single point -> assume no change
            change, magnitude = 1, 0
            logger.warning(f'Unable to assess change of units for neuron {x.id}: '
                           'must have at least two nodes/points.')

        # Round change -> this rounds to the first non-zero digit
        # change = np.around(change, decimals=-magnitude)

        # Map xformed coordinates back (same order they were collated in)
        if isinstance(xf, core.TreeNeuron):
            xf.nodes[['x', 'y', 'z']] = xyz_xf[:xf.n_nodes]
            # Fix radius based on our best estimate
            if 'radius' in xf.nodes.columns:
                xf.nodes['radius'] *= 10**magnitude
        elif isinstance(xf, core.Dotprops):
            xf.points = xyz_xf[:xf.points.shape[0]]

            # If this dotprops has a `k`, set tangent vectors and alpha to
            # None so they will be regenerated
            if not isinstance(xf.k, type(None)) and xf.k > 0:
                xf._vect = xf._alpha = None
            else:
                # Re-generate vectors from the transformed helper points
                # (second slice of `xyz_xf`, appended above)
                hp = xyz_xf[xf.points.shape[0]: xf.points.shape[0] * 2]
                vect = xf.points - hp
                vect = vect / np.linalg.norm(vect, axis=1).reshape(-1, 1)
                xf._vect = vect
        elif isinstance(xf, core.MeshNeuron):
            xf.vertices = xyz_xf[:xf.vertices.shape[0]]

        if xf.has_connectors:
            # Connectors were appended last -> take from the tail
            xf.connectors[['x', 'y', 'z']] = xyz_xf[-xf.connectors.shape[0]:]

        # Make an educated guess as to whether the units have changed
        if hasattr(xf, 'units') and magnitude != 0:
            if isinstance(xf.units, (config.ureg.Unit, config.ureg.Quantity)):
                xf.units = (xf.units / 10**magnitude).to_compact()

        # Fix soma radius if applicable
        if hasattr(xf, 'soma_radius') and isinstance(xf.soma_radius, numbers.Number):
            xf.soma_radius *= 10**magnitude

        return xf
    elif isinstance(x, pd.DataFrame):
        if any([c not in x.columns for c in ['x', 'y', 'z']]):
            raise ValueError('DataFrame must have x, y and z columns.')
        # Copy to avoid mutating the caller's DataFrame
        x = x.copy()
        x[['x', 'y', 'z']] = xform(x[['x', 'y', 'z']].values,
                                   transform=transform,
                                   affine_fallback=affine_fallback)
        return x
    elif isinstance(x, tm.Trimesh):
        # Copy to avoid mutating the caller's mesh
        x = x.copy()
        x.vertices = xform(x.vertices,
                           transform=transform,
                           affine_fallback=affine_fallback)
        return x
    else:
        try:
            # At this point we expect numpy arrays
            x = np.asarray(x)
        except BaseException:
            raise TypeError(f'Unable to transform data of type "{type(x)}"')

        if not x.ndim == 2 or x.shape[1] != 3:
            raise ValueError('Array must be of shape (N, 3).')

    # Apply transform and return xformed points
    return transform.xform(x, affine_fallback=affine_fallback)

Transform 3D data between template brains.

This requires the appropriate transforms to be registered with navis. See the docs/tutorials for details.

Notes

For Neurons only: transforms can introduce a change in the units (e.g. if the transform goes from micron to nanometer space). Some template brains have their units hard-coded in their meta data (as _navis_units). If that's not the case we fall-back to trying to infer any change in units by comparing distances between x/y/z coordinate before and after the transform. That approach works reasonably well with base 10 increments (e.g. nm -> um) but may be off with odd changes in units (e.g. physical -> voxel space). Regardless of whether hard-coded or inferred, any change in units is used to update the .units property and node/soma radii for TreeNeurons.

PARAMETER DESCRIPTION
x
            Data to transform. Dataframe must contain `['x', 'y', 'z']`
            columns. Numpy array must be shape `(N, 3)`.

TYPE: Neuron/List | numpy.ndarray | pandas.DataFrame

source
            Source template brain that the data currently is in.

TYPE: str

target
            Target template brain that the data should be
            transformed into.

TYPE: str

via
            Optionally set intermediate template(s). This can be
            helpful to force a specific transformation sequence.

TYPE: str | list thereof DEFAULT: None

avoid
            Prohibit going through specific intermediate template(s).

TYPE: str | list thereof DEFAULT: None

affine_fallback
            In some cases the non-rigid transformation of points
            can fail - for example if points are outside the
            deformation field. If that happens, they will be
            returned as `NaN`. If `affine_fallback=True`
            we will apply only the rigid affine part of the
            transformation to those points to get as close as
            possible to the correct coordinates.

TYPE: bool DEFAULT: True

caching
            If True, will (pre-)cache data for transforms whenever
            possible. Depending on the data and the type of
            transforms this can speed things up significantly at the
            cost of increased memory usage:
              - `False` = no upfront cost, lower memory footprint
              - `True` = higher upfront cost, most definitely faster
            Only applies if input is NeuronList and if transforms
            include H5 transform.

TYPE: bool DEFAULT: True

verbose
            If True, will print some useful info on transform.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
same type as `x`

Copy of input with transformed coordinates.

Examples:

This example requires the flybrains library to be installed: pip3 install flybrains

Also, if you haven't already, you will need to have the optional Saalfeld lab (Janelia Research Campus) transforms installed (this is a one-off):

>>> import flybrains
>>> flybrains.download_jrc_transforms()

Once flybrains is installed and you have downloaded the registrations, you can run this:

>>> import navis
>>> import flybrains
>>> # navis example neurons are in raw (8nm voxel) hemibrain (JRCFIB2018Fraw) space
>>> n = navis.example_neurons(1)
>>> # Transform to FAFB14 space
>>> xf = navis.xform_brain(n, source='JRCFIB2018Fraw', target='FAFB14')
See Also

navis.xform Lower level entry point that takes data and applies a given transform or sequence thereof. navis.mirror_brain Uses non-rigid transforms to mirror neurons from the left to the right side of given template brain and vice versa.

Source code in navis/transforms/templates.py
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
def xform_brain(x: Union['core.NeuronObject', 'pd.DataFrame', 'np.ndarray'],
                source: str,
                target: str,
                via: Optional[str] = None,
                avoid: Optional[str] = None,
                affine_fallback: bool = True,
                caching: bool = True,
                verbose: bool = True) -> Union['core.NeuronObject',
                                               'pd.DataFrame',
                                               'np.ndarray']:
    """Transform 3D data between template brains.

    This requires the appropriate transforms to be registered with `navis`.
    See the docs/tutorials for details.

    Notes
    -----
    For Neurons only: transforms can introduce a change in the units (e.g. if
    the transform goes from micron to nanometer space). Some template brains have
    their units hard-coded in their meta data (as `_navis_units`). If that's
    not the case we fall-back to trying to infer any change in units by comparing
    distances between x/y/z coordinate before and after the transform. That
    approach works reasonably well with base 10 increments (e.g. nm -> um) but
    may be off with odd changes in units (e.g. physical -> voxel space).
    Regardless of whether hard-coded or inferred, any change in units is used to
    update the `.units` property and node/soma radii for TreeNeurons.

    Parameters
    ----------
    x :                 Neuron/List | numpy.ndarray | pandas.DataFrame
                        Data to transform. Dataframe must contain `['x', 'y', 'z']`
                        columns. Numpy array must be shape `(N, 3)`.
    source :            str
                        Source template brain that the data currently is in.
    target :            str
                        Target template brain that the data should be
                        transformed into.
    via :               str | list thereof, optional
                        Optionally set intermediate template(s). This can be
                        helpful to force a specific transformation sequence.
    avoid :             str | list thereof, optional
                        Prohibit going through specific intermediate template(s).
    affine_fallback :   bool
                        In some cases the non-rigid transformation of points
                        can fail - for example if points are outside the
                        deformation field. If that happens, they will be
                        returned as `NaN`. If `affine_fallback=True`
                        we will apply only the rigid affine part of the
                        transformation to those points to get as close as
                        possible to the correct coordinates.
    caching :           bool
                        If True, will (pre-)cache data for transforms whenever
                        possible. Depending on the data and the type of
                        transforms this can speed things up significantly at the
                        cost of increased memory usage:
                          - `False` = no upfront cost, lower memory footprint
                          - `True` = higher upfront cost, most definitely faster
                        Only applies if input is NeuronList and if transforms
                        include H5 transform.
    verbose :           bool
                        If True, will print some useful info on transform.

    Returns
    -------
    same type as `x`
                        Copy of input with transformed coordinates.

    Examples
    --------
    This example requires the
    [flybrains](https://github.com/navis-org/navis-flybrains)
    library to be installed: `pip3 install flybrains`

    Also, if you haven't already, you will need to have the optional Saalfeld
    lab (Janelia Research Campus) transforms installed (this is a one-off):

    >>> import flybrains                                        # doctest: +SKIP
    >>> flybrains.download_jrc_transforms()                     # doctest: +SKIP

    Once `flybrains` is installed and you have downloaded the registrations,
    you can run this:

    >>> import navis
    >>> import flybrains
    >>> # navis example neurons are in raw (8nm voxel) hemibrain (JRCFIB2018Fraw) space
    >>> n = navis.example_neurons(1)
    >>> # Transform to FAFB14 space
    >>> xf = navis.xform_brain(n, source='JRCFIB2018Fraw', target='FAFB14') # doctest: +SKIP

    See Also
    --------
    [`navis.xform`][]
                    Lower level entry point that takes data and applies a given
                    transform or sequence thereof.
    [`navis.mirror_brain`][]
                    Uses non-rigid transforms to mirror neurons from the left
                    to the right side of given template brain and vice versa.

    """
    # Fix: these two checks previously constructed the TypeError but never
    # raised it, making the validation a silent no-op.
    if not isinstance(source, str):
        raise TypeError(f'Expected source of type str, got "{type(source)}"')

    if not isinstance(target, str):
        raise TypeError(f'Expected target of type str, got "{type(target)}"')

    # Get the transformation sequence
    path, transforms = registry.find_bridging_path(source, target, via=via, avoid=avoid)

    if verbose:
        # Render the bridging path, e.g. "A -> B = C" ("=" marks aliases)
        path_str = path[0]
        for p, tr in zip(path[1:], transforms):
            if isinstance(tr, AliasTransform):
                link = '='
            else:
                link = '->'
            path_str += f' {link} {p}'

        print('Transform path:', path_str)

    # Combine into transform sequence
    trans_seq = TransformSequence(*transforms)

    # Apply transform and returned xformed points
    xf = xform(x, transform=trans_seq, caching=caching,
               affine_fallback=affine_fallback)

    # We might be able to set the correct units based on the target template's
    # meta data (the "guessed" new units can be off if the transform is
    # not base 10 which happens for e.g. voxels -> physical space)
    if isinstance(xf, (core.NeuronList, core.BaseNeuron)):
        # First we need to find the last non-alias template space
        for tmp, tr in zip(path[::-1], transforms[::-1]):
            if not isinstance(tr, AliasTransform):
                # There is a chance that there is no meta data for this template
                try:
                    last_temp = registry.find_template(tmp)
                except ValueError:
                    break
                except BaseException:
                    raise
                # If this template brain has a property for navis units
                if hasattr(last_temp, '_navis_units'):
                    for n in core.NeuronList(xf):
                        n.units = last_temp._navis_units
                break

    return xf