This commit is contained in:
2026-04-10 15:06:59 +02:00
parent 3031b7153b
commit e5a4711004
7806 changed files with 1918528 additions and 335 deletions

View File

@@ -0,0 +1,84 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Skip pydocstyle checks that erroneously trigger on "example"
# noqa: D405,D214,D407
"""
Utilities
=========

Array operations
----------------
.. autosummary::
    :toctree: generated/

    frame
    pad_center
    expand_to
    fix_length
    fix_frames
    index_to_slice
    softmask
    stack
    sync
    axis_sort
    normalize
    shear
    sparsify_rows
    buf_to_float
    tiny

Matching
--------
.. autosummary::
    :toctree: generated/

    match_intervals
    match_events

Miscellaneous
-------------
.. autosummary::
    :toctree: generated/

    localmax
    localmin
    peak_pick
    nnls
    cyclic_gradient
    dtype_c2r
    dtype_r2c
    count_unique
    is_unique
    abs2
    phasor

Input validation
----------------
.. autosummary::
    :toctree: generated/

    valid_audio
    valid_int
    valid_intervals
    is_positive_int

File operations
---------------
.. autosummary::
    :toctree: generated/

    example
    example_info
    list_examples
    find_files
    cite
"""
# Lazy sub-module loading: attribute access is resolved on demand against
# the adjacent .pyi stub file, so importing this package does not eagerly
# import all of its submodules.
import lazy_loader as lazy

__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)

View File

@@ -0,0 +1,59 @@
from . import decorators
from . import exceptions
from .files import (
find_files as find_files,
example as example,
ex as ex,
list_examples as list_examples,
example_info as example_info,
cite as cite,
)
from .matching import (
match_intervals as match_intervals,
match_events as match_events,
)
from .deprecation import (
Deprecated as Deprecated,
rename_kw as rename_kw,
)
from ._nnls import (
nnls as nnls,
)
from .utils import (
MAX_MEM_BLOCK as MAX_MEM_BLOCK,
frame as frame,
pad_center as pad_center,
expand_to as expand_to,
fix_length as fix_length,
valid_audio as valid_audio,
valid_int as valid_int,
is_positive_int as is_positive_int,
valid_intervals as valid_intervals,
fix_frames as fix_frames,
axis_sort as axis_sort,
localmax as localmax,
localmin as localmin,
normalize as normalize,
peak_pick as peak_pick,
sparsify_rows as sparsify_rows,
shear as shear,
stack as stack,
fill_off_diagonal as fill_off_diagonal,
index_to_slice as index_to_slice,
sync as sync,
softmask as softmask,
buf_to_float as buf_to_float,
tiny as tiny,
cyclic_gradient as cyclic_gradient,
dtype_r2c as dtype_r2c,
dtype_c2r as dtype_c2r,
count_unique as count_unique,
is_unique as is_unique,
abs2 as abs2,
phasor as phasor,
)

View File

@@ -0,0 +1,162 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Non-negative least squares"""
# The scipy library provides an nnls solver, but it does
# not generalize efficiently to matrix-valued problems.
# We therefore provide an alternate solver here.
#
# The vectorized solver uses the L-BFGS-B over blocks of
# data to efficiently solve the constrained least-squares problem.
import numpy as np
import scipy.optimize
from .utils import MAX_MEM_BLOCK
from typing import Any, Optional, Tuple, Sequence
__all__ = ["nnls"]
def _nnls_obj(
    x: np.ndarray, shape: Sequence[int], A: np.ndarray, B: np.ndarray
) -> Tuple[float, np.ndarray]:
    """Evaluate the NNLS objective and its gradient at the iterate ``x``.

    The L-BFGS-B driver works on flattened arrays, so ``x`` is first
    restored to ``shape`` before computing the (normalized) squared
    residual ``0.5/|B| * ||Ax - B||^2`` and its gradient.
    """
    # Restore the flattened iterate to its working shape
    iterate = x.reshape(shape)

    # Residual of the current approximation: A @ x - B
    residual = np.einsum("mf,...ft->...mt", A, iterate, optimize=True) - B

    # Objective: mean-normalized half squared error
    objective = (1 / B.size) * 0.5 * np.sum(residual**2)

    # Gradient with respect to x, returned flat to match the solver's layout
    gradient = (1 / B.size) * np.einsum("mf,...mt->...ft", A, residual, optimize=True)

    return objective, gradient.flatten()
def _nnls_lbfgs_block(
    A: np.ndarray, B: np.ndarray, x_init: Optional[np.ndarray] = None, **kwargs: Any
) -> np.ndarray:
    """Solve ``min ||Ax - B||^2 s.t. x >= 0`` over a single block of data.

    Parameters
    ----------
    A : np.ndarray [shape=(m, d)]
        The basis matrix
    B : np.ndarray [shape=(m, N)]
        The regression targets
    x_init : np.ndarray [shape=(d, N)]
        An initial guess; if omitted, the non-negative projection of the
        unconstrained (pseudo-inverse) least-squares solution is used.
    **kwargs
        Additional keyword arguments to `scipy.optimize.fmin_l_bfgs_b`

    Returns
    -------
    x : np.ndarray [shape=(d, N)]
        Non-negative matrix such that Ax ~= B
    """
    if x_init is None:
        # Warm-start from the projected (clipped at zero) least-squares solution
        x_init = np.einsum("fm,...mt->...ft", np.linalg.pinv(A), B, optimize=True)
        np.clip(x_init, 0, None, out=x_init)

    # Scale the L-BFGS memory parameter to the problem dimension,
    # unless the caller explicitly provided one
    kwargs.setdefault("m", A.shape[1])

    target_shape = x_init.shape

    # Every coefficient is constrained to [0, inf)
    solution, _obj_value, _diagnostics = scipy.optimize.fmin_l_bfgs_b(
        _nnls_obj,
        x_init,
        args=(target_shape, A, B),
        bounds=[(0, None)] * x_init.size,
        **kwargs,
    )
    return solution.reshape(target_shape)
def nnls(A: np.ndarray, B: np.ndarray, **kwargs: Any) -> np.ndarray:
    """Non-negative least squares.
    Given two matrices A and B, find a non-negative matrix X
    that minimizes the sum squared error::
        err(X) = sum_i,j ((AX)[i,j] - B[i, j])^2
    Parameters
    ----------
    A : np.ndarray [shape=(m, n)]
        The basis matrix
    B : np.ndarray [shape=(..., m, N)]
        The target array. Additional leading dimensions are supported.
    **kwargs
        Additional keyword arguments to `scipy.optimize.fmin_l_bfgs_b`
    Returns
    -------
    X : np.ndarray [shape=(..., n, N), non-negative]
        A minimizing solution to ``|AX - B|^2``
    See Also
    --------
    scipy.optimize.nnls
    scipy.optimize.fmin_l_bfgs_b
    Examples
    --------
    Approximate a magnitude spectrum from its mel spectrogram
    >>> y, sr = librosa.load(librosa.ex('trumpet'), duration=3)
    >>> S = np.abs(librosa.stft(y, n_fft=2048))
    >>> M = librosa.feature.melspectrogram(S=S, sr=sr, power=1)
    >>> mel_basis = librosa.filters.mel(sr=sr, n_fft=2048, n_mels=M.shape[0])
    >>> S_recover = librosa.util.nnls(mel_basis, M)
    Plot the results
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots(nrows=3, sharex=True, sharey=True)
    >>> librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max),
    ...                          y_axis='log', x_axis='time', ax=ax[2])
    >>> ax[2].set(title='Original spectrogram (1025 bins)')
    >>> ax[2].label_outer()
    >>> librosa.display.specshow(librosa.amplitude_to_db(M, ref=np.max),
    ...                          y_axis='mel', x_axis='time', ax=ax[0])
    >>> ax[0].set(title='Mel spectrogram (128 bins)')
    >>> ax[0].label_outer()
    >>> img = librosa.display.specshow(librosa.amplitude_to_db(S_recover, ref=np.max(S)),
    ...                                y_axis='log', x_axis='time', ax=ax[1])
    >>> ax[1].set(title='Reconstructed spectrogram (1025 bins)')
    >>> ax[1].label_outer()
    >>> fig.colorbar(img, ax=ax, format="%+2.0f dB")
    """
    # If B is a single vector, punt up to the scipy method
    if B.ndim == 1:
        return scipy.optimize.nnls(A, B)[0]  # type: ignore
    # Budget the block width so that one block of B (all leading dims times
    # n_columns trailing columns) stays within MAX_MEM_BLOCK bytes;
    # always process at least one column at a time.
    n_columns = int(MAX_MEM_BLOCK // (np.prod(B.shape[:-1]) * A.itemsize))
    n_columns = max(n_columns, 1)
    # Process in blocks:
    if B.shape[-1] <= n_columns:
        # The whole problem fits in a single block: solve it directly
        return _nnls_lbfgs_block(A, B, **kwargs).astype(A.dtype)
    x: np.ndarray
    # Warm-start every block from the clipped pseudo-inverse solution.
    # NOTE: x_init aliases x below; each block slice is read (as the initial
    # guess) before that same slice is overwritten, so the aliasing is safe.
    x = np.einsum("fm,...mt->...ft", np.linalg.pinv(A), B, optimize=True)
    np.clip(x, 0, None, out=x)
    x_init = x
    # Solve the constrained problem one block of columns at a time
    for bl_s in range(0, x.shape[-1], n_columns):
        bl_t = min(bl_s + n_columns, B.shape[-1])
        x[..., bl_s:bl_t] = _nnls_lbfgs_block(
            A, B[..., bl_s:bl_t], x_init=x_init[..., bl_s:bl_t], **kwargs
        )
    return x

View File

@@ -0,0 +1,105 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# CREATED:2015-02-15 10:06:03 by Brian McFee <brian.mcfee@nyu.edu>
"""Helpful tools for deprecation"""
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
import warnings
import functools
from decorator import decorator
import numpy as np
from numpy.typing import DTypeLike
from typing_extensions import ParamSpec # Install typing_extensions in Python 3.8
__all__ = ["moved", "deprecated", "vectorize"]
P = ParamSpec("P")
R = TypeVar("R")
def moved(
    *, moved_from: str, version: str, version_removed: str
) -> Callable[[Callable[P, R]], Callable[P, R]]:
    """Mark functions as moved/renamed.

    Using the decorated (old) function will result in a warning.
    """

    def _warn_and_forward(func: Callable[P, R], *args: P.args, **kwargs: P.kwargs) -> R:
        """Emit a FutureWarning pointing at the new location, then delegate."""
        message = (
            f"{moved_from:s}\n\tThis function was moved to "
            f"'{func.__module__:s}.{func.__name__:s}' in "
            f"librosa version {version:s}."
            f"\n\tThis alias will be removed in librosa version "
            f"{version_removed:s}."
        )
        warnings.warn(
            message,
            category=FutureWarning,
            stacklevel=3,  # Would be 2, but the decorator adds a level
        )
        return func(*args, **kwargs)

    return decorator(_warn_and_forward)
def deprecated(
    *, version: str, version_removed: str
) -> Callable[[Callable[P, R]], Callable[P, R]]:
    """Mark a function as deprecated.

    Using the decorated (old) function will result in a warning.
    """

    def _warn_and_forward(func: Callable[P, R], *args: P.args, **kwargs: P.kwargs) -> R:
        """Emit a FutureWarning for the deprecated call, then delegate."""
        message = (
            f"{func.__module__:s}.{func.__name__:s}"
            f"\n\tDeprecated as of librosa version {version:s}."
            f"\n\tIt will be removed in librosa version {version_removed:s}."
        )
        warnings.warn(
            message,
            category=FutureWarning,
            stacklevel=3,  # Would be 2, but the decorator adds a level
        )
        return func(*args, **kwargs)

    return decorator(_warn_and_forward)
_F = TypeVar("_F", bound=Callable[..., Any])


def vectorize(
    *,
    otypes: Optional[Union[str, Iterable[DTypeLike]]] = None,
    doc: Optional[str] = None,
    excluded: Optional[Iterable[Union[int, str]]] = None,
    cache: bool = False,
    signature: Optional[str] = None
) -> Callable[[_F], _F]:
    """Wrap a function for use with np.vectorize.

    This function is not quite a decorator, but is used as a wrapper
    to np.vectorize that preserves scalar behavior: if the first
    positional argument is a scalar, the result is unwrapped to a
    Python scalar instead of a 0-dimensional array.

    Parameters
    ----------
    otypes, doc, excluded, cache, signature
        Forwarded to `numpy.vectorize`.

    Returns
    -------
    decorator : callable
        A decorator that vectorizes the wrapped function.
    """

    def __wrapper(function):
        vecfunc = np.vectorize(
            function,
            otypes=otypes,
            doc=doc,
            excluded=excluded,
            cache=cache,
            signature=signature,
        )

        @functools.wraps(function)
        def _vec(*args, **kwargs):
            y = vecfunc(*args, **kwargs)
            # Guard against empty *args: a keyword-only call previously
            # raised IndexError on args[0].  Only unwrap the result when
            # the first positional argument was a scalar.
            if args and np.isscalar(args[0]):
                return y.item()
            else:
                return y

        return _vec

    return __wrapper

View File

@@ -0,0 +1,70 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Deprecation utilities"""
import inspect
import warnings
from typing import Any
class Deprecated(object):
    """Sentinel type used as the default for deprecated keyword arguments.

    A parameter whose value is still an instance of this class was not
    supplied by the caller.
    """

    def __repr__(self) -> str:
        """Render a recognizable tag for deprecated placeholders."""
        return "<DEPRECATED parameter>"
def rename_kw(
    *,
    old_name: str,
    old_value: Any,
    new_name: str,
    new_value: Any,
    version_deprecated: str,
    version_removed: str
) -> Any:
    """Resolve a renamed keyword argument, warning if the old name was used.

    Parameters
    ----------
    old_name : str
    old_value
        The name and value of the old (deprecated) argument
    new_name : str
    new_value
        The name and value of the new argument
    version_deprecated : str
        The version at which the old name became deprecated
    version_removed : str
        The version at which the old name will be removed

    Returns
    -------
    value
        - ``new_value`` if ``old_value`` of type `Deprecated`
        - ``old_value`` otherwise

    Warnings
    --------
    if ``old_value`` is not of type `Deprecated`
    """
    # If the caller never touched the old keyword, its value is still the
    # Deprecated sentinel: simply forward the new argument's value.
    if isinstance(old_value, Deprecated):
        return new_value

    # Otherwise the old spelling was used: warn, attributing the message
    # to the calling function (recovered from the interpreter stack),
    # and honor the caller-supplied old value.
    caller = inspect.stack()[1]
    warnings.warn(
        "{:s}() keyword argument '{:s}' has been "
        "renamed to '{:s}' in version {:}."
        "\n\tThis alias will be removed in version "
        "{:}.".format(
            caller[3], old_name, new_name, version_deprecated, version_removed
        ),
        category=FutureWarning,
        stacklevel=3,
    )
    return old_value

View File

@@ -0,0 +1 @@
"""Resources for loading example data"""

View File

@@ -0,0 +1 @@
{"vibeace": {"path": "Kevin_MacLeod_-_Vibe_Ace", "desc": "Kevin MacLeod - Vibe Ace"}, "choice": {"path": "admiralbob77_-_Choice_-_Drum-bass", "desc": "Admiral Bob - Choice (drum+bass)"}, "nutcracker": {"path": "Kevin_MacLeod_-_P_I_Tchaikovsky_Dance_of_the_Sugar_Plum_Fairy", "desc": "Tchaikovsky - Dance of the Sugar Plum Fairy"}, "brahms": {"path": "Hungarian_Dance_number_5_-_Allegro_in_F_sharp_minor_(string_orchestra)", "desc": "Brahms - Hungarian Dance #5"}, "trumpet": {"path": "sorohanro_-_solo-trumpet-06", "desc": "Mihai Sorohan - Trumpet loop"}, "fishin": {"path": "Karissa_Hobbs_-_Lets_Go_Fishin", "desc": "Karissa Hobbs - Let's Go Fishin'"}, "sweetwaltz": {"path": "147793__setuniman__sweet-waltz-0i-22mi", "desc": "Setuniman - Sweet Waltz"}, "humpback": {"path": "glacier-bay-humpback", "desc": "Glacier Bay 60-second clip humpback whale song November 2020"}, "libri1": {"path": "5703-47212-0000", "desc": "Ashiel Mystery - A Detective Story, chapter 2, narrated by Garth Comira"}, "libri2": {"path": "3436-172162-0000", "desc": "The Age of Chivalry / Chapter 18: Perceval / Read by Anders Lankford"}, "pistachio": {"path": "442789__lena-orsa__happy-music-pistachio-ice-cream-ragtime", "desc": "The Piano Lady - Pistachio Ice Cream Ragtime"}, "robin": {"path": "456440__inspectorj__bird-whistling-robin-single-13", "desc": "Bird Whistling, Robin, Single, 13.wav / InspectorJ"}, "libri3": {"path": "198-209-0000", "desc": "Sense and Sensibility / Chapter 18 / Jane Austen / Read by Heather Barnett"}, "pibble": {"path": "pibble", "desc": "Who's a good girl?"}}

View File

@@ -0,0 +1,43 @@
147793__setuniman__sweet-waltz-0i-22mi.hq.ogg 248f93b7c729bc0bd6c4cbc107e6b67ec4d60e40c111e313d4d69f77748da26a
147793__setuniman__sweet-waltz-0i-22mi.ogg 4baff8ebf1771c33618b58aa12ac3fbac1e0462894ae74247f2fb8e649d1c63b
147793__setuniman__sweet-waltz-0i-22mi.txt 6e6e9265ae9a9a36033ce305a893abdc15f43bb5a41ec29efba368ad91905f43
198-209-0000.hq.ogg 6a8d2c16e56dcb27b7f5fe5aa99bfd26b4722a6dabca1968eff951b456936514
198-209-0000.ogg 0473ba9613befa1026db5e254546f105aeffba8366b5468fc5e8d474d844ca1d
198-209-0000.txt c5bf58b2a4aa0d6bf1e6051690312f3238960d455776a6d37e469d80db1da167
3436-172162-0000.hq.ogg 8e034529f2f171c7e5e772df6eccc2d169183c39605b45b351c2bfabddf14162
3436-172162-0000.ogg 780e482b52f2c8a500356babf60178ce005449cc171a8537eb3ef2961eb3e855
3436-172162-0000.txt 8cf4b20e148c4366205bc607f447c169256e945d62c64ead43d0a4dfa7d3ab98
442789__lena-orsa__happy-music-pistachio-ice-cream-ragtime.hq.ogg 1cc83b775e640a8dd409da9e29f5b5e0124bc5ec9bb4fda975919d31d1d893b8
442789__lena-orsa__happy-music-pistachio-ice-cream-ragtime.ogg 9617c9be55c128177b13c20fbc52178ed482e3545094517efb30a7db2798991e
442789__lena-orsa__happy-music-pistachio-ice-cream-ragtime.txt 99f9c44368918572ac154ae4a2fec4020d1d4bb418ae7bf241864916d68f9d04
456440__inspectorj__bird-whistling-robin-single-13.hq.ogg a3b3ecf749befde43bdf35f839fdcb8d399a4deb5666de6f399c35ce12936baa
456440__inspectorj__bird-whistling-robin-single-13.ogg 57c2b861d028e25d7c086b48853f20eda1ae9a7a33e1125a1f3bec91539b4208
456440__inspectorj__bird-whistling-robin-single-13.txt 34d25a379408d9144fbc4ce8cceb72be5196aeb6140cec6c90e6bdcb0567cb6e
5703-47212-0000.hq.ogg f09254a0daf4b14b292868d46dc2e3c8e158d19fafff739ad4c3931e2ce7b1b0
5703-47212-0000.ogg a284612b46af0535f7e1873758c4387bb8369f6dbbe192ffdec1f171108f98dd
5703-47212-0000.txt 1c5bd5eb792ff0d2fbe895e3a3dd0acf3beb48ee364e64e12cde3a646304a9f4
Hungarian_Dance_number_5_-_Allegro_in_F_sharp_minor_(string_orchestra).hq.ogg 8e93ff0182a93168b15346c497b164cb49d2a97bf1e987a1149ea579e914532e
Hungarian_Dance_number_5_-_Allegro_in_F_sharp_minor_(string_orchestra).ogg 919b48aa4cc66a0357d2cd5728664c5ab8f15c4b3469460df4b59470d35d3e49
Hungarian_Dance_number_5_-_Allegro_in_F_sharp_minor_(string_orchestra).txt 0c857ef8a7365e3df9d5c25c2b08ec3169c0585e3ec08c9c6423d2a04d72fe5f
Karissa_Hobbs_-_Lets_Go_Fishin.hq.ogg 85901bde0bc5f2cad28acb83487ccf99bd6908d7359f1a449fd799d8bebb3319
Karissa_Hobbs_-_Lets_Go_Fishin.ogg 27b3667c396c1831511aa3c415fcf582b6e8be560cafb844c5b67b76b72c1cb3
Karissa_Hobbs_-_Lets_Go_Fishin.txt 199bf3408b98916cd9d28a22b2b43c1935ab70072b46fa05c0b9f40e7882802e
Kevin_MacLeod_-_P_I_Tchaikovsky_Dance_of_the_Sugar_Plum_Fairy.hq.ogg f062221a56a227cdb7c067cf2e6ac0e250a50012f7693ca0c8e31f05f83e49b1
Kevin_MacLeod_-_P_I_Tchaikovsky_Dance_of_the_Sugar_Plum_Fairy.ogg b5c1a3e26310e6618d3c124f458654cd235650fcb9db7d711302644566600484
Kevin_MacLeod_-_P_I_Tchaikovsky_Dance_of_the_Sugar_Plum_Fairy.txt 059acb340170385d2bfa4c7ab7c2a06b1d8f8af3e0f11cb4f46ff4049e950915
Kevin_MacLeod_-_Vibe_Ace.hq.ogg 73d6443ef90a7c022f164e5aa90e56c2291585930b39b1656d0765abbc1f1779
Kevin_MacLeod_-_Vibe_Ace.ogg 6c23aed3dd5aa57f2b1652ecab68d15d9b82ad257f54e639eb2880ca09bc118a
Kevin_MacLeod_-_Vibe_Ace.txt 6c71e0525cb0452ea74c6d6f5fde6fa1e221223db7b2aa35b9914b98367ee7b9
admiralbob77_-_Choice_-_Drum-bass.hq.ogg 57b4d95473b92a8441c1d3ab20f836a3e0cbf501bf3afe1ce7a5d0d98d7d4576
admiralbob77_-_Choice_-_Drum-bass.ogg ac644f9645e7c15174e4a4f8561e4d1448d7f6e59ff6b0556b310ebbced879bc
admiralbob77_-_Choice_-_Drum-bass.txt 76525d6a4fd135053c5ff7463ae43a8ffcf064b575e8ef2fc3eb37786a45342b
glacier-bay-humpback.hq.ogg c3250ff526898aa5528aeb6b81a49aff58f63400ec2ddb8bcb86caf611d33144
glacier-bay-humpback.ogg 64395c617b28b5e31a03032b99b4617bc96b926f3bc5c141862cd13fb79ccd8e
glacier-bay-humpback.txt bec2c73fe2368604f161ffdd44b7afea5f597473e2d51c03906bb05c22113910
pibble.hq.ogg dfd796becf38b6f5ff606f2c822ce24e81d8df936b1ced9bffd29e0d1f05b800
pibble.ogg c596c027f604defa522a1734f599c6db5624fa62dd3f1be2a933d85b077f7400
pibble.txt 341fdf46cae7c165875667ed729f6d18983e8cfe32e491816705e7be00d57c76
sorohanro_-_solo-trumpet-06.hq.ogg beb954ae2c9c16919b5ca6973d6d5196cdcb196b46a3c2a201dd8861e7e324de
sorohanro_-_solo-trumpet-06.ogg 8374466fd3951d24509da6e799b132a0db0bdeda69d99c69d989a6888d3d727d
sorohanro_-_solo-trumpet-06.txt 750a191b9d0cc94b2f19cfbf11acc13783f75bedcccfcf562f5b076efd068aba
version_index.msgpack 9e06db4cf390d7b308a8e253f138237501b1d1a4cf46e7185f3d7081a71aef1a

View File

@@ -0,0 +1,15 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Exception classes for librosa"""
class LibrosaError(Exception):
    """Base class for all librosa-specific exceptions."""
class ParameterError(LibrosaError):
    """Exception raised when a function receives mal-formed input."""

View File

@@ -0,0 +1,335 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utility functions for dealing with files"""
from __future__ import annotations
from typing import List, Optional, Union, Any, Set
import os
import glob
import json
import msgpack
import contextlib
import sys
from importlib import resources
import pooch
from .exceptions import ParameterError
from ..version import version as librosa_version
__all__ = [
"find_files",
"example",
"ex",
"list_examples",
"example_info",
]
# Instantiate the pooch
# Cache directory for downloaded example audio; the LIBROSA_DATA_DIR
# environment variable (if set) overrides pooch's default cache location.
__data_path = os.environ.get("LIBROSA_DATA_DIR", pooch.os_cache("librosa"))
# registry=None here: the filename->checksum registry is loaded further
# below from the packaged registry.txt resource.
__GOODBOY = pooch.create(
    __data_path, base_url="https://librosa.org/data/audio/", registry=None
)
@contextlib.contextmanager
def _resource_file(package: str, resource: str):
    """Yield a filesystem path for ``resource`` inside ``package``.

    Compatibility shim over ``importlib.resources``: the 3.9 series
    deprecated the ``path`` accessor in favor of ``files``/``as_file``,
    so this selects the appropriate API for the running interpreter.
    """
    if sys.version_info >= (3, 9):
        handle = resources.files(package).joinpath(resource)
        with resources.as_file(handle) as path:
            yield path
    else:
        with resources.path(package, resource) as path:
            yield path
# Populate the pooch registry (filename -> sha256) from the packaged resource
with _resource_file("librosa.util.example_data", "registry.txt") as reg:
    __GOODBOY.load_registry(str(reg))

# We want to bypass version checks here to allow asynchronous updates for new releases
__GOODBOY.registry['version_index.msgpack'] = None

# Load the mapping from example keys to file paths and descriptions
with _resource_file("librosa.util.example_data", "index.json") as index:
    with index.open("r") as _fdesc:
        __TRACKMAP = json.load(_fdesc)
def example(key: str, *, hq: bool = False) -> str:
    """Retrieve the example recording identified by 'key'.

    The first time an example is requested, it will be downloaded from
    the remote repository over HTTPS.  All subsequent requests will use
    a locally cached copy of the recording.

    For a list of examples (and their keys), see `librosa.util.list_examples`.

    By default, local files will be cached in the directory given by
    `pooch.os_cache('librosa')`.  You can override this by setting
    an environment variable ``LIBROSA_DATA_DIR`` prior to importing librosa:

    >>> import os
    >>> os.environ['LIBROSA_DATA_DIR'] = '/path/to/store/data'
    >>> import librosa

    Parameters
    ----------
    key : str
        The identifier for the track to load
    hq : bool
        If ``True``, return the high-quality version of the recording.
        If ``False``, return the 22KHz mono version of the recording.

    Returns
    -------
    path : str
        The path to the requested example file

    Examples
    --------
    Load "Hungarian Dance #5" by Johannes Brahms

    >>> y, sr = librosa.load(librosa.example('brahms'))

    Load "Vibe Ace" by Kevin MacLeod (the example previously packaged with librosa)
    in high-quality mode

    >>> y, sr = librosa.load(librosa.example('vibeace', hq=True))

    See Also
    --------
    librosa.util.list_examples
    pooch.os_cache
    """
    if key not in __TRACKMAP:
        raise ParameterError(f"Unknown example key: {key}")

    # High-quality recordings use a distinct file suffix
    extension = ".hq.ogg" if hq else ".ogg"

    return str(__GOODBOY.fetch(__TRACKMAP[key]["path"] + extension))


ex = example
"""Alias for example"""
def list_examples() -> None:
    """List the available audio recordings included with librosa.

    Each recording is given a unique identifier (e.g., "brahms" or
    "nutcracker") printed in the first column of the output; a brief
    description is printed in the second column.

    See Also
    --------
    util.example
    util.example_info
    """
    print("AVAILABLE EXAMPLES")
    print("-" * 68)
    for key in sorted(__TRACKMAP):
        # Easter-egg entry: intentionally hidden from the listing
        if key == "pibble":
            continue
        print(f"{key:10}\t{__TRACKMAP[key]['desc']}")
def example_info(key: str) -> None:
    """Display licensing and metadata information for the given example recording.

    The first time an example is requested, it will be downloaded from
    the remote repository over HTTPS.  All subsequent requests will use
    a locally cached copy of the recording.

    For a list of examples (and their keys), see `librosa.util.list_examples`.

    By default, local files will be cached in the directory given by
    `pooch.os_cache('librosa')`.  You can override this by setting
    an environment variable ``LIBROSA_DATA_DIR`` prior to importing librosa.

    Parameters
    ----------
    key : str
        The identifier for the recording (see `list_examples`)

    See Also
    --------
    librosa.util.example
    librosa.util.list_examples
    pooch.os_cache
    """
    if key not in __TRACKMAP:
        raise ParameterError(f"Unknown example key: {key}")

    # Fetch (and cache) the license/metadata text that accompanies the track
    license_file = __GOODBOY.fetch(__TRACKMAP[key]["path"] + ".txt")

    with open(license_file, "r") as metadata:
        print(f"{key:10s}\t{__TRACKMAP[key]['desc']:s}")
        print("-" * 68)
        for line in metadata:
            print(line)
def find_files(
    directory: Union[str, os.PathLike[Any]],
    *,
    ext: Optional[Union[str, List[str]]] = None,
    recurse: bool = True,
    case_sensitive: bool = False,
    limit: Optional[int] = None,
    offset: int = 0,
) -> List[str]:
    """Get a sorted list of (audio) files in a directory or directory sub-tree.

    Examples
    --------
    >>> # Get all audio files in a directory sub-tree
    >>> files = librosa.util.find_files('~/Music')

    >>> # Look only within a specific directory, not the sub-tree
    >>> files = librosa.util.find_files('~/Music', recurse=False)

    >>> # Only look for mp3 files
    >>> files = librosa.util.find_files('~/Music', ext='mp3')

    >>> # Or just mp3 and ogg
    >>> files = librosa.util.find_files('~/Music', ext=['mp3', 'ogg'])

    >>> # Only get the first 10 files
    >>> files = librosa.util.find_files('~/Music', limit=10)

    >>> # Or last 10 files
    >>> files = librosa.util.find_files('~/Music', offset=-10)

    >>> # Avoid including search patterns in the path string
    >>> import glob
    >>> directory = '~/[202206] Music'
    >>> directory = glob.escape(directory)  # Escape the special characters
    >>> files = librosa.util.find_files(directory)

    Parameters
    ----------
    directory : str
        Path to look for files
    ext : str or list of str
        A file extension or list of file extensions to include in the search.
        Default: ``['aac', 'au', 'flac', 'm4a', 'mp3', 'ogg', 'wav']``
    recurse : boolean
        If ``True``, then all subfolders of ``directory`` will be searched.
        Otherwise, only ``directory`` will be searched.
    case_sensitive : boolean
        If ``False``, files matching upper-case version of
        extensions will be included.
    limit : int > 0 or None
        Return at most ``limit`` files. If ``None``, all files are returned.
    offset : int
        Return files starting at ``offset`` within the list.
        Use negative values to offset from the end of the list.

    Returns
    -------
    files : list of str
        The list of audio files.
    """
    if ext is None:
        ext = ["aac", "au", "flac", "m4a", "mp3", "ogg", "wav"]
    elif isinstance(ext, str):
        ext = [ext]

    # Normalize to a set of extensions
    extensions = set(ext)
    if not case_sensitive:
        # Fold to lower-case, then include the upper-case variants too
        extensions = {e.lower() for e in extensions}
        extensions |= {e.upper() for e in extensions}

    if recurse:
        fileset = set()
        for current_dir, _subdirs, _names in os.walk(directory):
            fileset |= __get_files(current_dir, extensions)
    else:
        fileset = __get_files(directory, extensions)

    # Sort for deterministic output, then apply offset/limit windowing
    files = sorted(fileset)
    files = files[offset:]
    if limit is not None:
        files = files[:limit]

    return files
def __get_files(dir_name: Union[str, os.PathLike[Any]], extensions: Set[str]):
    """Collect the files in one directory matching any of the given extensions."""
    # Resolve ~ and relative components up front so results are absolute
    root = os.path.abspath(os.path.expanduser(dir_name))

    matched = set()
    for sub_ext in extensions:
        pattern = os.path.join(root, "*" + os.path.extsep + sub_ext)
        matched.update(glob.glob(pattern))

    return matched
def cite(version: Optional[str]=None) -> str:
    """Print the citation information for librosa.

    Parameters
    ----------
    version : str or None
        The version of librosa to cite. If None, the current version is used.

    Returns
    -------
    doi : str
        The DOI for the given version of librosa.

    Raises
    ------
    ParameterError
        If the requested version is not found in the citation index.

    Examples
    --------
    >>> librosa.cite("0.10.1")
    "https://doi.org/10.5281/zenodo.8252662"
    """
    if version is None:
        version = librosa_version

    # Fetch (and cache) the version -> DOI index from the remote repository
    version_data = __GOODBOY.fetch("version_index.msgpack")
    with open(version_data, "rb") as fdesc:
        version_index = msgpack.load(fdesc)

    if version in version_index:
        return f"https://doi.org/{version_index[version]}"

    # Unreleased dev versions never have a DOI; give a specific message
    if "dev" in version:
        raise ParameterError(f"Version {version} is not yet released and therefore does not yet have a citable DOI.")
    raise ParameterError(f"Version {version} not found in the citation index")

View File

@@ -0,0 +1,381 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Matching functions"""
import numpy as np
import numba
from .exceptions import ParameterError
from .utils import valid_intervals
from .._typing import _SequenceLike
__all__ = ["match_intervals", "match_events"]
@numba.jit(nopython=True, cache=True) # type: ignore
def __jaccard(int_a: np.ndarray, int_b: np.ndarray): # pragma: no cover
    """Jaccard similarity between two intervals

    Parameters
    ----------
    int_a, int_b : np.ndarrays, shape=(2,)

    Returns
    -------
    Jaccard similarity between intervals
    """
    # Order the two interval end points so ends[0] <= ends[1]
    ends = [int_a[1], int_b[1]]
    if ends[1] < ends[0]:
        ends.reverse()
    # Likewise for the start points: starts[0] <= starts[1]
    starts = [int_a[0], int_b[0]]
    if starts[1] < starts[0]:
        starts.reverse()
    # |intersection| = earlier end minus later start, clamped at zero
    # for disjoint intervals
    intersection = ends[0] - starts[1]
    if intersection < 0:
        intersection = 0.0
    # |union| = later end minus earlier start
    union = ends[1] - starts[0]
    if union > 0:
        return intersection / union
    # Degenerate case: union has zero (or negative) extent
    return 0.0
@numba.jit(nopython=True, cache=True)
def __match_interval_overlaps(query, intervals_to, candidates): # pragma: no cover
    """Find the best Jaccard match from query to candidates

    Parameters
    ----------
    query : np.ndarray, shape=(2,)
        The query interval
    intervals_to : np.ndarray, shape=(m, 2)
        The set of target intervals
    candidates : set of int
        Indices into ``intervals_to`` to consider

    Returns
    -------
    Index (into ``intervals_to``) of the candidate with maximal
    Jaccard similarity to ``query``
    """
    # Linear scan over the candidate set, tracking the best score so far
    best_score = -1
    best_idx = -1
    for idx in candidates:
        score = __jaccard(query, intervals_to[idx])
        if score > best_score:
            best_score, best_idx = score, idx
    return best_idx
@numba.jit(nopython=True, cache=True) # type: ignore
def __match_intervals(
    intervals_from: np.ndarray, intervals_to: np.ndarray, strict: bool = True
) -> np.ndarray: # pragma: no cover
    """Numba-accelerated interval matching algorithm."""
    # sort index of the interval starts
    start_index = np.argsort(intervals_to[:, 0])
    # sort index of the interval ends
    end_index = np.argsort(intervals_to[:, 1])
    # and sorted values of starts
    start_sorted = intervals_to[start_index, 0]
    # and ends
    end_sorted = intervals_to[end_index, 1]
    # For each query interval, locate where its end (resp. start) falls
    # within the sorted target starts (resp. ends)
    search_ends = np.searchsorted(start_sorted, intervals_from[:, 1], side="right")
    search_starts = np.searchsorted(end_sorted, intervals_from[:, 0], side="left")
    output = np.empty(len(intervals_from), dtype=numba.uint32)
    for i in range(len(intervals_from)):
        query = intervals_from[i]
        # Find the intervals that start after our query ends
        after_query = search_ends[i]
        # And the intervals that end after our query begins
        before_query = search_starts[i]
        # Candidates for overlapping have to (end after we start) and (begin before we end)
        candidates = set(start_index[:after_query]) & set(end_index[before_query:])
        # Proceed as before
        if len(candidates) > 0:
            # Pick the candidate maximizing Jaccard overlap with the query
            output[i] = __match_interval_overlaps(query, intervals_to, candidates)
        elif strict:
            # Numba only lets us use compile-time constants in exception messages
            raise ParameterError
        else:
            # No overlapping interval exists: fall back to the nearest
            # disjoint interval by comparing the gap on either side.
            # (start_index[after_query] - query[1]) is the distance to the next interval
            # (query[0] - end_index[before_query])
            dist_before = np.inf
            dist_after = np.inf
            if search_starts[i] > 0:
                # Gap between the query start and the closest earlier end
                dist_before = query[0] - end_sorted[search_starts[i] - 1]
            if search_ends[i] + 1 < len(intervals_to):
                # Gap between the closest later start and the query end
                dist_after = start_sorted[search_ends[i] + 1] - query[1]
            if dist_before < dist_after:
                output[i] = end_index[search_starts[i] - 1]
            else:
                output[i] = start_index[search_ends[i] + 1]
    return output
def match_intervals(
    intervals_from: np.ndarray, intervals_to: np.ndarray, strict: bool = True
) -> np.ndarray:
    """Match one set of time intervals to another.

    This can be useful for tasks such as mapping beat timings
    to segments.

    Each element ``[a, b]`` of ``intervals_from`` is matched to the
    element ``[c, d]`` of ``intervals_to`` which maximizes the
    Jaccard similarity between the intervals::

        max(0, |min(b, d) - max(a, c)|) / |max(d, b) - min(a, c)|

    In ``strict=True`` mode, if there is no interval with positive
    intersection with ``[a,b]``, an exception is thrown.

    In ``strict=False`` mode, any interval ``[a, b]`` that has no
    intersection with any element of ``intervals_to`` is instead
    matched to the interval ``[c, d]`` which minimizes::

        min(|b - c|, |a - d|)

    that is, the disjoint interval [c, d] with a boundary closest
    to [a, b].

    .. note:: An element of ``intervals_to`` may be matched to multiple
       entries of ``intervals_from``.

    Parameters
    ----------
    intervals_from : np.ndarray [shape=(n, 2)]
        The time range for source intervals.
        The ``i`` th interval spans time ``intervals_from[i, 0]``
        to ``intervals_from[i, 1]``.
        ``intervals_from[0, 0]`` should be 0, ``intervals_from[-1, 1]``
        should be the track duration.
    intervals_to : np.ndarray [shape=(m, 2)]
        Analogous to ``intervals_from``.
    strict : bool
        If ``True``, intervals can only match if they intersect.
        If ``False``, disjoint intervals can match.

    Returns
    -------
    interval_mapping : np.ndarray [shape=(n,)]
        For each interval in ``intervals_from``, the
        corresponding interval in ``intervals_to``.

    See Also
    --------
    match_events

    Raises
    ------
    ParameterError
        If either array of input intervals is not the correct shape

        If ``strict=True`` and some element of ``intervals_from`` is disjoint from
        every element of ``intervals_to``.

    Examples
    --------
    >>> ints_from = np.array([[3, 5], [1, 4], [4, 5]])
    >>> ints_to = np.array([[0, 2], [1, 3], [4, 5], [6, 7]])
    >>> librosa.util.match_intervals(ints_from, ints_to)
    array([2, 1, 2], dtype=uint32)
    >>> # [3, 5] => [4, 5]  (ints_to[2])
    >>> # [1, 4] => [1, 3]  (ints_to[1])
    >>> # [4, 5] => [4, 5]  (ints_to[2])

    The reverse matching of the above is not possible in ``strict`` mode
    because ``[6, 7]`` is disjoint from all intervals in ``ints_from``.
    With ``strict=False``, we get the following:

    >>> librosa.util.match_intervals(ints_to, ints_from, strict=False)
    array([1, 1, 2, 2], dtype=uint32)
    >>> # [0, 2] => [1, 4]  (ints_from[1])
    >>> # [1, 3] => [1, 4]  (ints_from[1])
    >>> # [4, 5] => [4, 5]  (ints_from[2])
    >>> # [6, 7] => [4, 5]  (ints_from[2])
    """
    # Guard: matching against an empty set is undefined
    if not (len(intervals_from) and len(intervals_to)):
        raise ParameterError("Attempting to match empty interval list")

    # Verify that both inputs are well-formed (n, 2) interval arrays
    valid_intervals(intervals_from)
    valid_intervals(intervals_to)

    try:
        # Suppress type check because of numba wrapper
        return __match_intervals(intervals_from, intervals_to, strict=strict)  # type: ignore
    except ParameterError as exc:
        # The jitted kernel cannot carry a formatted message; re-raise with context
        raise ParameterError(f"Unable to match intervals with strict={strict}") from exc
def match_events(
    events_from: _SequenceLike,
    events_to: _SequenceLike,
    left: bool = True,
    right: bool = True,
) -> np.ndarray:
    """Match each event in one list to its nearest event in another.

    Typical uses include snapping beat estimates to detected onsets, or
    aligning frame-based events to the closest zero-crossing.

    .. note:: A single target event may be matched by several source events.

    Parameters
    ----------
    events_from : ndarray [shape=(n,)]
        Source events (e.g., times, sample or frame indices) to be matched.
    events_to : ndarray [shape=(m,)]
        Target events to match against.
    left : bool
        If ``False``, a matched target may not lie to the left of its source.
    right : bool
        If ``False``, a matched target may not lie to the right of its source.

    Returns
    -------
    event_mapping : np.ndarray [shape=(n,)]
        Index into ``events_to`` for each source event::

            event_mapping[i] == arg min |events_from[i] - events_to[:]|

    Raises
    ------
    ParameterError
        If either event list is empty, or if the ``left``/``right``
        constraints make a full matching impossible.

    See Also
    --------
    match_intervals

    Examples
    --------
    >>> # Sources are multiples of 7
    >>> s_from = np.arange(0, 100, 7)
    >>> # Targets are multiples of 10
    >>> s_to = np.arange(0, 100, 10)
    >>> idx = librosa.util.match_events(s_from, s_to)
    >>> idx
    array([0, 1, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 9])
    """
    if min(len(events_from), len(events_to)) == 0:
        raise ParameterError("Attempting to match empty event list")

    # With neither direction permitted, a match exists only when every
    # source event appears verbatim among the targets.
    if (not left) and (not right):
        if not np.all(np.isin(events_from, events_to)):
            raise ParameterError(
                "Cannot match events with left=right=False "
                "and events_from is not contained "
                "in events_to"
            )

    # Leftward matches forbidden: every source needs a target at or above it.
    if not left:
        if max(events_from) > max(events_to):
            raise ParameterError(
                "Cannot match events with left=False "
                "and max(events_to) < max(events_from)"
            )

    # Rightward matches forbidden: every source needs a target at or below it.
    if not right:
        if min(events_from) < min(events_to):
            raise ParameterError(
                "Cannot match events with right=False "
                "and min(events_to) > min(events_from)"
            )

    # Pre-allocate the result buffer; the numba helper fills it in place.
    matches = np.empty_like(events_from, dtype=np.int32)
    # Suppress type check because of numba
    return __match_events_helper(matches, events_from, events_to, left, right)  # type: ignore
@numba.jit(nopython=True, cache=True)  # type: ignore
def __match_events_helper(
    output: np.ndarray,
    events_from: np.ndarray,
    events_to: np.ndarray,
    left: bool = True,
    right: bool = True,
) -> np.ndarray:  # pragma: no cover
    """Numba kernel for `match_events`: fill ``output`` with, for each
    source event, the index of its nearest permitted target event.

    ``output`` must be pre-allocated with the same length as
    ``events_from``; it is written in place and also returned.
    """
    # Sort both event lists so searchsorted can find candidate matches;
    # the argsort permutations are used to map results back at the end.
    from_idx = np.argsort(events_from)
    sorted_from = events_from[from_idx]
    to_idx = np.argsort(events_to)
    sorted_to = events_to[to_idx]
    # For each source event, the insertion point into sorted_to:
    # the first target >= that source (may equal len(sorted_to)).
    matching_indices = np.searchsorted(sorted_to, sorted_from)
    # For each source event, compare the candidate target against its
    # immediate neighbors and keep whichever is closest and permitted.
    for ind, middle_ind in enumerate(matching_indices):
        left_flag = False
        right_flag = False
        left_ind = -1
        right_ind = len(matching_indices)
        left_diff = 0
        right_diff = 0
        mid_diff = 0
        middle_ind = matching_indices[ind]
        sorted_from_num = sorted_from[ind]
        # Prevent oob from chosen index
        if middle_ind == len(sorted_to):
            middle_ind -= 1
        # Permitted to look to the left
        if left and middle_ind > 0:
            left_ind = middle_ind - 1
            left_flag = True
        # Permitted to look to right
        if right and middle_ind < len(sorted_to) - 1:
            right_ind = middle_ind + 1
            right_flag = True
        # Absolute distances from the source to each candidate target.
        mid_diff = abs(sorted_to[middle_ind] - sorted_from_num)
        if left and left_flag:
            left_diff = abs(sorted_to[left_ind] - sorted_from_num)
        if right and right_flag:
            right_diff = abs(sorted_to[right_ind] - sorted_from_num)
        # Choose the left neighbor when: rightward matches are forbidden and
        # the candidate overshoots the source; or the candidate is the only
        # alternative and left is strictly closer; or left beats both the
        # right neighbor and the candidate.  NOTE: the mixed and/or below
        # relies on Python operator precedence (`and` binds tighter than
        # `or`) — do not re-parenthesize.
        if left_flag and (
            not right
            and (sorted_to[middle_ind] > sorted_from_num)
            or (not right_flag and left_diff < mid_diff)
            or (left_diff < right_diff and left_diff < mid_diff)
        ):
            output[ind] = to_idx[left_ind]
        # Check if right should be chosen
        elif right_flag and (right_diff < mid_diff):
            output[ind] = to_idx[right_ind]
        # Selected index wins
        else:
            output[ind] = to_idx[middle_ind]
    # Undo sorting
    solutions = np.empty_like(output)
    solutions[from_idx] = output
    return solutions

File diff suppressed because it is too large Load Diff