This commit is contained in:
2026-04-09 21:54:18 +02:00
commit ad33255b88
8906 changed files with 1437726 additions and 0 deletions

View File

@@ -0,0 +1,542 @@
""":module: watchdog.events
:synopsis: File system events and event handlers.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: contact@tiger-222.fr (Mickaël Schoentgen)
Event Classes
-------------
.. autoclass:: FileSystemEvent
:members:
:show-inheritance:
:inherited-members:
.. autoclass:: FileSystemMovedEvent
:members:
:show-inheritance:
.. autoclass:: FileMovedEvent
:members:
:show-inheritance:
.. autoclass:: DirMovedEvent
:members:
:show-inheritance:
.. autoclass:: FileModifiedEvent
:members:
:show-inheritance:
.. autoclass:: DirModifiedEvent
:members:
:show-inheritance:
.. autoclass:: FileCreatedEvent
:members:
:show-inheritance:
.. autoclass:: FileClosedEvent
:members:
:show-inheritance:
.. autoclass:: FileClosedNoWriteEvent
:members:
:show-inheritance:
.. autoclass:: FileOpenedEvent
:members:
:show-inheritance:
.. autoclass:: DirCreatedEvent
:members:
:show-inheritance:
.. autoclass:: FileDeletedEvent
:members:
:show-inheritance:
.. autoclass:: DirDeletedEvent
:members:
:show-inheritance:
Event Handler Classes
---------------------
.. autoclass:: FileSystemEventHandler
:members:
:show-inheritance:
.. autoclass:: PatternMatchingEventHandler
:members:
:show-inheritance:
.. autoclass:: RegexMatchingEventHandler
:members:
:show-inheritance:
.. autoclass:: LoggingEventHandler
:members:
:show-inheritance:
"""
from __future__ import annotations
import logging
import os.path
import re
from dataclasses import dataclass, field
from typing import TYPE_CHECKING
from watchdog.utils.patterns import match_any_paths
if TYPE_CHECKING:
from collections.abc import Generator
# Canonical event-type identifiers; these are the values that subclasses
# assign to ``FileSystemEvent.event_type``.
EVENT_TYPE_MOVED = "moved"
EVENT_TYPE_DELETED = "deleted"
EVENT_TYPE_CREATED = "created"
EVENT_TYPE_MODIFIED = "modified"
EVENT_TYPE_CLOSED = "closed"
EVENT_TYPE_CLOSED_NO_WRITE = "closed_no_write"
EVENT_TYPE_OPENED = "opened"
@dataclass(unsafe_hash=True)
class FileSystemEvent:
    """Immutable type that represents a file system event that is triggered
    when a change occurs on the monitored file system.

    All FileSystemEvent objects are required to be immutable and hence
    can be used as keys in dictionaries or be added to sets.
    """

    # Path on which the event was observed (bytes or str, as reported by the emitter).
    src_path: bytes | str
    # Destination path for move/rename events; empty string otherwise.
    dest_path: bytes | str = ""
    # One of the EVENT_TYPE_* constants; set by subclasses, not by callers (init=False).
    event_type: str = field(default="", init=False)
    # True for directory events; set by the Dir* subclasses (init=False).
    is_directory: bool = field(default=False, init=False)
    # True if the event was synthesized; False otherwise. Synthetic events are
    # events that weren't actually broadcast by the OS, but are presumed to
    # have happened based on other, actual events.
    is_synthetic: bool = field(default=False)
class FileSystemMovedEvent(FileSystemEvent):
    """File system event representing any kind of file system movement."""

    # Shared by FileMovedEvent and DirMovedEvent, which both subclass this.
    event_type = EVENT_TYPE_MOVED
# File events.


class FileDeletedEvent(FileSystemEvent):
    """File system event representing file deletion on the file system."""

    event_type = EVENT_TYPE_DELETED
class FileModifiedEvent(FileSystemEvent):
    """File system event representing file modification on the file system."""

    event_type = EVENT_TYPE_MODIFIED
class FileCreatedEvent(FileSystemEvent):
    """File system event representing file creation on the file system."""

    event_type = EVENT_TYPE_CREATED
class FileMovedEvent(FileSystemMovedEvent):
    """File system event representing file movement on the file system."""

    # event_type (EVENT_TYPE_MOVED) is inherited from FileSystemMovedEvent.
class FileClosedEvent(FileSystemEvent):
    """File system event representing file close on the file system."""

    event_type = EVENT_TYPE_CLOSED
class FileClosedNoWriteEvent(FileSystemEvent):
    """File system event representing an unmodified file close on the file system."""

    event_type = EVENT_TYPE_CLOSED_NO_WRITE
class FileOpenedEvent(FileSystemEvent):
    # Fixed copy-paste docstring: this is the *opened* event, not "file close".
    """File system event representing file opening on the file system."""

    event_type = EVENT_TYPE_OPENED
# Directory events.


class DirDeletedEvent(FileSystemEvent):
    """File system event representing directory deletion on the file system."""

    event_type = EVENT_TYPE_DELETED
    is_directory = True
class DirModifiedEvent(FileSystemEvent):
    """File system event representing directory modification on the file system."""

    event_type = EVENT_TYPE_MODIFIED
    is_directory = True
class DirCreatedEvent(FileSystemEvent):
    """File system event representing directory creation on the file system."""

    event_type = EVENT_TYPE_CREATED
    is_directory = True
class DirMovedEvent(FileSystemMovedEvent):
    """File system event representing directory movement on the file system."""

    # event_type (EVENT_TYPE_MOVED) is inherited from FileSystemMovedEvent.
    is_directory = True
class FileSystemEventHandler:
    """Base file system event handler that you can override methods from.

    Subclass and override any of the ``on_*`` callbacks; all of them are
    no-ops here.
    """

    def dispatch(self, event: FileSystemEvent) -> None:
        """Dispatch *event*, first to the catch-all :meth:`on_any_event` and
        then to the type-specific ``on_<event_type>`` callback.

        :param event:
            The event object representing the file system event.
        :type event:
            :class:`FileSystemEvent`
        """
        self.on_any_event(event)
        specific_handler = getattr(self, f"on_{event.event_type}")
        specific_handler(event)

    def on_any_event(self, event: FileSystemEvent) -> None:
        """Catch-all event handler, invoked for every event before the
        type-specific callback.

        :param event:
            The event object representing the file system event.
        """

    def on_moved(self, event: DirMovedEvent | FileMovedEvent) -> None:
        """Called when a file or a directory is moved or renamed.

        :param event:
            Event representing file/directory movement.
        """

    def on_created(self, event: DirCreatedEvent | FileCreatedEvent) -> None:
        """Called when a file or directory is created.

        :param event:
            Event representing file/directory creation.
        """

    def on_deleted(self, event: DirDeletedEvent | FileDeletedEvent) -> None:
        """Called when a file or directory is deleted.

        :param event:
            Event representing file/directory deletion.
        """

    def on_modified(self, event: DirModifiedEvent | FileModifiedEvent) -> None:
        """Called when a file or directory is modified.

        :param event:
            Event representing file/directory modification.
        """

    def on_closed(self, event: FileClosedEvent) -> None:
        """Called when a file opened for writing is closed.

        :param event:
            Event representing file closing.
        """

    def on_closed_no_write(self, event: FileClosedNoWriteEvent) -> None:
        """Called when a file opened for reading is closed.

        :param event:
            Event representing file closing.
        """

    def on_opened(self, event: FileOpenedEvent) -> None:
        """Called when a file is opened.

        :param event:
            Event representing file opening.
        """
class PatternMatchingEventHandler(FileSystemEventHandler):
    """Matches given patterns with file paths associated with occurring events.

    Uses pathlib's `PurePath.match()` method. `patterns` and `ignore_patterns`
    are expected to be a list of strings.
    """

    def __init__(
        self,
        *,
        patterns: list[str] | None = None,
        ignore_patterns: list[str] | None = None,
        ignore_directories: bool = False,
        case_sensitive: bool = False,
    ):
        super().__init__()

        self._patterns = patterns
        self._ignore_patterns = ignore_patterns
        self._ignore_directories = ignore_directories
        self._case_sensitive = case_sensitive

    @property
    def patterns(self) -> list[str] | None:
        """(Read-only) Patterns to allow matching event paths."""
        return self._patterns

    @property
    def ignore_patterns(self) -> list[str] | None:
        """(Read-only) Patterns to ignore matching event paths."""
        return self._ignore_patterns

    @property
    def ignore_directories(self) -> bool:
        """(Read-only) ``True`` if directories should be ignored; ``False`` otherwise."""
        return self._ignore_directories

    @property
    def case_sensitive(self) -> bool:
        """(Read-only) ``True`` if path names should be matched sensitive to
        case; ``False`` otherwise."""
        return self._case_sensitive

    def dispatch(self, event: FileSystemEvent) -> None:
        """Dispatch *event* to the appropriate methods, but only when one of
        its paths matches the configured patterns.

        :param event:
            The event object representing the file system event.
        :type event:
            :class:`FileSystemEvent`
        """
        if event.is_directory and self.ignore_directories:
            return

        # Collect the destination path (present on move events) before the
        # source path, mirroring the order patterns are checked against.
        candidate_paths: list[str] = []
        if hasattr(event, "dest_path"):
            candidate_paths.append(os.fsdecode(event.dest_path))
        if event.src_path:
            candidate_paths.append(os.fsdecode(event.src_path))

        is_match = match_any_paths(
            candidate_paths,
            included_patterns=self.patterns,
            excluded_patterns=self.ignore_patterns,
            case_sensitive=self.case_sensitive,
        )
        if is_match:
            super().dispatch(event)
class RegexMatchingEventHandler(FileSystemEventHandler):
    """Matches given regexes with file paths associated with occurring events.

    Uses the `re` module.
    """

    def __init__(
        self,
        *,
        regexes: list[str] | None = None,
        ignore_regexes: list[str] | None = None,
        ignore_directories: bool = False,
        case_sensitive: bool = False,
    ):
        """
        :param regexes:
            Regexes to allow matching event paths. A single pattern string is
            accepted and treated as a one-element list. Defaults to matching
            everything (``[".*"]``).
        :param ignore_regexes:
            Regexes to ignore matching event paths. A single pattern string is
            accepted and treated as a one-element list.
        :param ignore_directories:
            ``True`` to drop directory events entirely; ``False`` otherwise.
        :param case_sensitive:
            ``True`` to match case-sensitively; when ``False`` (default) every
            pattern is compiled with ``re.IGNORECASE``.
        """
        super().__init__()

        if regexes is None:
            regexes = [r".*"]
        elif isinstance(regexes, str):
            regexes = [regexes]
        if ignore_regexes is None:
            ignore_regexes = []
        elif isinstance(ignore_regexes, str):
            # Fix: accept a single pattern string, consistently with `regexes`.
            # Previously a bare string here was iterated character by character.
            ignore_regexes = [ignore_regexes]

        # Compute the flag once instead of duplicating both comprehensions in
        # case-sensitive and case-insensitive branches.
        flags = 0 if case_sensitive else re.IGNORECASE
        self._regexes = [re.compile(r, flags) for r in regexes]
        self._ignore_regexes = [re.compile(r, flags) for r in ignore_regexes]
        self._ignore_directories = ignore_directories
        self._case_sensitive = case_sensitive

    @property
    def regexes(self) -> list[re.Pattern[str]]:
        """(Read-only) Regexes to allow matching event paths."""
        return self._regexes

    @property
    def ignore_regexes(self) -> list[re.Pattern[str]]:
        """(Read-only) Regexes to ignore matching event paths."""
        return self._ignore_regexes

    @property
    def ignore_directories(self) -> bool:
        """(Read-only) ``True`` if directories should be ignored; ``False`` otherwise."""
        return self._ignore_directories

    @property
    def case_sensitive(self) -> bool:
        """(Read-only) ``True`` if path names should be matched sensitive to
        case; ``False`` otherwise."""
        return self._case_sensitive

    def dispatch(self, event: FileSystemEvent) -> None:
        """Dispatches events to the appropriate methods.

        Only events whose paths match at least one of :attr:`regexes` and none
        of :attr:`ignore_regexes` are dispatched.

        :param event:
            The event object representing the file system event.
        :type event:
            :class:`FileSystemEvent`
        """
        if self.ignore_directories and event.is_directory:
            return

        paths = []
        if hasattr(event, "dest_path"):
            paths.append(os.fsdecode(event.dest_path))
        if event.src_path:
            paths.append(os.fsdecode(event.src_path))

        if any(r.match(p) for r in self.ignore_regexes for p in paths):
            return

        if any(r.match(p) for r in self.regexes for p in paths):
            super().dispatch(event)
class LoggingEventHandler(FileSystemEventHandler):
    """Logs all the events captured."""

    def __init__(self, *, logger: logging.Logger | None = None) -> None:
        super().__init__()
        # Use the supplied logger, falling back to the root logger.
        self.logger = logger if logger is not None else logging.root

    @staticmethod
    def _what(event: FileSystemEvent) -> str:
        """Return ``"directory"`` or ``"file"`` for use in log messages."""
        return "directory" if event.is_directory else "file"

    def on_moved(self, event: DirMovedEvent | FileMovedEvent) -> None:
        super().on_moved(event)
        self.logger.info("Moved %s: from %s to %s", self._what(event), event.src_path, event.dest_path)

    def on_created(self, event: DirCreatedEvent | FileCreatedEvent) -> None:
        super().on_created(event)
        self.logger.info("Created %s: %s", self._what(event), event.src_path)

    def on_deleted(self, event: DirDeletedEvent | FileDeletedEvent) -> None:
        super().on_deleted(event)
        self.logger.info("Deleted %s: %s", self._what(event), event.src_path)

    def on_modified(self, event: DirModifiedEvent | FileModifiedEvent) -> None:
        super().on_modified(event)
        self.logger.info("Modified %s: %s", self._what(event), event.src_path)

    def on_closed(self, event: FileClosedEvent) -> None:
        super().on_closed(event)
        self.logger.info("Closed modified file: %s", event.src_path)

    def on_closed_no_write(self, event: FileClosedNoWriteEvent) -> None:
        super().on_closed_no_write(event)
        self.logger.info("Closed read file: %s", event.src_path)

    def on_opened(self, event: FileOpenedEvent) -> None:
        super().on_opened(event)
        self.logger.info("Opened file: %s", event.src_path)
def generate_sub_moved_events(
    src_dir_path: bytes | str,
    dest_dir_path: bytes | str,
) -> Generator[DirMovedEvent | FileMovedEvent]:
    """Generates an event list of :class:`DirMovedEvent` and
    :class:`FileMovedEvent` objects for all the files and directories within
    the given moved directory that were moved along with the directory.

    :param src_dir_path:
        The source path of the moved directory.
    :param dest_dir_path:
        The destination path of the moved directory.
    :returns:
        An iterable of file system events of type :class:`DirMovedEvent` and
        :class:`FileMovedEvent`.
    """
    for root, directories, filenames in os.walk(dest_dir_path):  # type: ignore[type-var]
        for directory in directories:
            moved_to = os.path.join(root, directory)  # type: ignore[call-overload]
            # Reconstruct the pre-move path by swapping the directory prefix.
            moved_from = moved_to.replace(dest_dir_path, src_dir_path) if src_dir_path else ""
            yield DirMovedEvent(moved_from, moved_to, is_synthetic=True)
        for filename in filenames:
            moved_to = os.path.join(root, filename)  # type: ignore[call-overload]
            moved_from = moved_to.replace(dest_dir_path, src_dir_path) if src_dir_path else ""
            yield FileMovedEvent(moved_from, moved_to, is_synthetic=True)
def generate_sub_created_events(src_dir_path: bytes | str) -> Generator[DirCreatedEvent | FileCreatedEvent]:
    """Generates an event list of :class:`DirCreatedEvent` and
    :class:`FileCreatedEvent` objects for all the files and directories within
    the given moved directory that were moved along with the directory.

    :param src_dir_path:
        The source path of the created directory.
    :returns:
        An iterable of file system events of type :class:`DirCreatedEvent` and
        :class:`FileCreatedEvent`.
    """
    for root, directories, filenames in os.walk(src_dir_path):  # type: ignore[type-var]
        for name in directories:
            yield DirCreatedEvent(os.path.join(root, name), is_synthetic=True)  # type: ignore[call-overload]
        for name in filenames:
            yield FileCreatedEvent(os.path.join(root, name), is_synthetic=True)  # type: ignore[call-overload]

View File

@@ -0,0 +1,91 @@
""":module: watchdog.observers
:synopsis: Observer that picks a native implementation if available.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: contact@tiger-222.fr (Mickaël Schoentgen)
Classes
=======
.. autoclass:: Observer
:members:
:show-inheritance:
:inherited-members:
Observer thread that schedules watching directories and dispatches
calls to event handlers.
You can also import platform specific classes directly and use it instead
of :class:`Observer`. Here is a list of implemented observer classes.:
============== ================================ ==============================
Class Platforms Note
============== ================================ ==============================
|Inotify| Linux 2.6.13+ ``inotify(7)`` based observer
|FSEvents| macOS FSEvents based observer
|Kqueue| macOS and BSD with kqueue(2) ``kqueue(2)`` based observer
|WinApi| Microsoft Windows Windows API-based observer
|Polling| Any fallback implementation
============== ================================ ==============================
.. |Inotify| replace:: :class:`.inotify.InotifyObserver`
.. |FSEvents| replace:: :class:`.fsevents.FSEventsObserver`
.. |Kqueue| replace:: :class:`.kqueue.KqueueObserver`
.. |WinApi| replace:: :class:`.read_directory_changes.WindowsApiObserver`
.. |Polling| replace:: :class:`.polling.PollingObserver`
"""
from __future__ import annotations
import contextlib
import warnings
from typing import TYPE_CHECKING, Protocol
from watchdog.utils import UnsupportedLibcError, platform
if TYPE_CHECKING:
from watchdog.observers.api import BaseObserver
class ObserverType(Protocol):
    """Structural type shared by all observer classes: a callable taking a
    keyword-only ``timeout`` and returning a :class:`BaseObserver`."""

    def __call__(self, *, timeout: float = ...) -> BaseObserver: ...
def _get_observer_cls() -> ObserverType:
    """Select the best observer implementation for the current platform.

    Tries the native backend for the detected platform first and falls back to
    :class:`PollingObserver` when the native import fails (or the platform is
    unrecognized).
    """
    if platform.is_linux():
        # An unsupported libc raises UnsupportedLibcError; suppressing it lets
        # control fall through to the polling fallback below.
        with contextlib.suppress(UnsupportedLibcError):
            from watchdog.observers.inotify import InotifyObserver

            return InotifyObserver
    elif platform.is_darwin():
        try:
            from watchdog.observers.fsevents import FSEventsObserver
        except Exception:
            # FSEvents unavailable: try kqueue before giving up on native.
            try:
                from watchdog.observers.kqueue import KqueueObserver
            except Exception:
                warnings.warn("Failed to import fsevents and kqueue. Fall back to polling.", stacklevel=1)
            else:
                warnings.warn("Failed to import fsevents. Fall back to kqueue", stacklevel=1)
                return KqueueObserver
        else:
            return FSEventsObserver
    elif platform.is_windows():
        try:
            from watchdog.observers.read_directory_changes import WindowsApiObserver
        except Exception:
            warnings.warn("Failed to import `read_directory_changes`. Fall back to polling.", stacklevel=1)
        else:
            return WindowsApiObserver
    elif platform.is_bsd():
        from watchdog.observers.kqueue import KqueueObserver

        return KqueueObserver

    # Unrecognized platform, or every native import above failed.
    from watchdog.observers.polling import PollingObserver

    return PollingObserver
# Resolved once at import time: the platform-appropriate observer class.
Observer = _get_observer_cls()

__all__ = ["Observer"]

View File

@@ -0,0 +1,392 @@
from __future__ import annotations
import contextlib
import queue
import threading
from collections import defaultdict
from pathlib import Path
from typing import TYPE_CHECKING
from watchdog.utils import BaseThread
from watchdog.utils.bricks import SkipRepeatsQueue
if TYPE_CHECKING:
from watchdog.events import FileSystemEvent, FileSystemEventHandler
# Default blocking timeouts used by EventEmitter and EventDispatcher below.
DEFAULT_EMITTER_TIMEOUT = 1.0  # in seconds
DEFAULT_OBSERVER_TIMEOUT = 1.0  # in seconds
class EventQueue(SkipRepeatsQueue):
    """Thread-safe event queue based on a special queue that skips adding
    the same event (:class:`FileSystemEvent`) multiple times consecutively.

    Thus avoiding dispatching multiple event handling
    calls when multiple identical events are produced quicker than an observer
    can consume them.
    """
class ObservedWatch:
    """A scheduled watch.

    :param path:
        Path string.
    :param recursive:
        ``True`` if watch is recursive; ``False`` otherwise.
    :param event_filter:
        Optional collection of :class:`watchdog.events.FileSystemEvent` to watch
    """

    def __init__(self, path: str | Path, *, recursive: bool, event_filter: list[type[FileSystemEvent]] | None = None):
        self._path = str(path) if isinstance(path, Path) else path
        self._is_recursive = recursive
        self._event_filter = None if event_filter is None else frozenset(event_filter)

    @property
    def path(self) -> str:
        """The path that this watch monitors."""
        return self._path

    @property
    def is_recursive(self) -> bool:
        """Determines whether subdirectories are watched for the path."""
        return self._is_recursive

    @property
    def event_filter(self) -> frozenset[type[FileSystemEvent]] | None:
        """Collection of event types watched for the path"""
        return self._event_filter

    @property
    def key(self) -> tuple[str, bool, frozenset[type[FileSystemEvent]] | None]:
        """Identity tuple used by equality and hashing."""
        return self.path, self.is_recursive, self.event_filter

    def __eq__(self, watch: object) -> bool:
        if isinstance(watch, ObservedWatch):
            return self.key == watch.key
        return NotImplemented

    def __ne__(self, watch: object) -> bool:
        if isinstance(watch, ObservedWatch):
            return self.key != watch.key
        return NotImplemented

    def __hash__(self) -> int:
        return hash(self.key)

    def __repr__(self) -> str:
        if self.event_filter is None:
            event_filter_str = ""
        else:
            joined = "|".join(sorted(cls.__name__ for cls in self.event_filter))
            event_filter_str = f", event_filter={joined}"
        return f"<{type(self).__name__}: path={self.path!r}, is_recursive={self.is_recursive}{event_filter_str}>"
# Observer classes


class EventEmitter(BaseThread):
    """Producer thread base class subclassed by event emitters
    that generate events and populate a queue with them.

    :param event_queue:
        The event queue to populate with generated events.
    :type event_queue:
        :class:`watchdog.events.EventQueue`
    :param watch:
        The watch to observe and produce events for.
    :type watch:
        :class:`ObservedWatch`
    :param timeout:
        Timeout (in seconds) between successive attempts at reading events.
    :type timeout:
        ``float``
    :param event_filter:
        Collection of event types to emit, or None for no filtering (default).
    :type event_filter:
        Iterable[:class:`watchdog.events.FileSystemEvent`] | None
    """

    def __init__(
        self,
        event_queue: EventQueue,
        watch: ObservedWatch,
        *,
        timeout: float = DEFAULT_EMITTER_TIMEOUT,
        event_filter: list[type[FileSystemEvent]] | None = None,
    ) -> None:
        super().__init__()
        self._event_queue = event_queue
        self._watch = watch
        self._timeout = timeout
        self._event_filter = None if event_filter is None else frozenset(event_filter)

    @property
    def timeout(self) -> float:
        """Blocking timeout for reading events."""
        return self._timeout

    @property
    def watch(self) -> ObservedWatch:
        """The watch associated with this emitter."""
        return self._watch

    def queue_event(self, event: FileSystemEvent) -> None:
        """Queue a single event, honouring the optional event-type filter.

        :param event:
            Event to be queued; an instance of
            :class:`watchdog.events.FileSystemEvent` or a subclass.
        """
        if self._event_filter is not None and not isinstance(event, tuple(self._event_filter)):
            return
        self._event_queue.put((event, self.watch))

    def queue_events(self, timeout: float) -> None:
        """Override this method to populate the event queue with events
        per interval period.

        :param timeout:
            Timeout (in seconds) between successive attempts at
            reading events.
        """

    def run(self) -> None:
        # Poll for events until the thread is asked to stop.
        while self.should_keep_running():
            self.queue_events(self.timeout)
class EventDispatcher(BaseThread):
    """Consumer thread base class subclassed by event observer threads
    that dispatch events from an event queue to appropriate event handlers.

    :param timeout:
        Timeout value (in seconds) passed to emitters
        constructions in the child class BaseObserver.
    :type timeout:
        ``float``
    """

    # Sentinel inserted into the queue to signal a requested stop.
    stop_event = object()

    def __init__(self, *, timeout: float = DEFAULT_OBSERVER_TIMEOUT) -> None:
        super().__init__()
        self._event_queue = EventQueue()
        self._timeout = timeout

    @property
    def timeout(self) -> float:
        """Timeout value to construct emitters with."""
        return self._timeout

    @property
    def event_queue(self) -> EventQueue:
        """The event queue which is populated with file system events
        by emitters and from which events are dispatched by a dispatcher
        thread.
        """
        return self._event_queue

    def stop(self) -> None:
        BaseThread.stop(self)
        # Wake up a dispatcher blocked on event_queue.get(); if the queue is
        # full the dispatcher is not blocked, so the sentinel is unnecessary.
        with contextlib.suppress(queue.Full):
            self.event_queue.put_nowait(EventDispatcher.stop_event)

    def dispatch_events(self, event_queue: EventQueue) -> None:
        """Override this method to consume events from an event queue, blocking
        on the queue for the specified timeout before raising :class:`queue.Empty`.

        :param event_queue:
            Event queue to populate with one set of events.
        :type event_queue:
            :class:`EventQueue`
        :raises:
            :class:`queue.Empty`
        """

    def run(self) -> None:
        # Keep consuming until asked to stop; an empty queue just retries.
        while self.should_keep_running():
            try:
                self.dispatch_events(self.event_queue)
            except queue.Empty:
                continue
class BaseObserver(EventDispatcher):
    """Base observer.

    Maintains the set of scheduled watches, one emitter thread per watch, and
    the handlers registered for each watch. All mutating operations take the
    reentrant lock, so they may be invoked from within event handlers while an
    event is being dispatched.
    """

    def __init__(self, emitter_class: type[EventEmitter], *, timeout: float = DEFAULT_OBSERVER_TIMEOUT) -> None:
        super().__init__(timeout=timeout)
        self._emitter_class = emitter_class
        self._lock = threading.RLock()
        self._watches: set[ObservedWatch] = set()
        self._handlers: defaultdict[ObservedWatch, set[FileSystemEventHandler]] = defaultdict(set)
        self._emitters: set[EventEmitter] = set()
        self._emitter_for_watch: dict[ObservedWatch, EventEmitter] = {}

    def _add_emitter(self, emitter: EventEmitter) -> None:
        # Register the emitter under its watch. Callers hold self._lock.
        self._emitter_for_watch[emitter.watch] = emitter
        self._emitters.add(emitter)

    def _remove_emitter(self, emitter: EventEmitter) -> None:
        # Unregister, stop and join the emitter thread. Callers hold self._lock.
        del self._emitter_for_watch[emitter.watch]
        self._emitters.remove(emitter)
        emitter.stop()
        # join() raises RuntimeError if the thread was never started.
        with contextlib.suppress(RuntimeError):
            emitter.join()

    def _clear_emitters(self) -> None:
        # Stop every emitter first, then join them, so shutdowns overlap.
        for emitter in self._emitters:
            emitter.stop()
        for emitter in self._emitters:
            with contextlib.suppress(RuntimeError):
                emitter.join()
        self._emitters.clear()
        self._emitter_for_watch.clear()

    def _add_handler_for_watch(self, event_handler: FileSystemEventHandler, watch: ObservedWatch) -> None:
        # Callers hold self._lock.
        self._handlers[watch].add(event_handler)

    def _remove_handlers_for_watch(self, watch: ObservedWatch) -> None:
        # Drop every handler registered for the watch. Callers hold self._lock.
        del self._handlers[watch]

    @property
    def emitters(self) -> set[EventEmitter]:
        """Returns event emitter created by this observer."""
        return self._emitters

    def start(self) -> None:
        # Start all emitters scheduled before the observer itself was started;
        # if one fails to start, discard it and propagate the error.
        for emitter in self._emitters.copy():
            try:
                emitter.start()
            except Exception:
                self._remove_emitter(emitter)
                raise
        super().start()

    def schedule(
        self,
        event_handler: FileSystemEventHandler,
        path: str,
        *,
        recursive: bool = False,
        event_filter: list[type[FileSystemEvent]] | None = None,
    ) -> ObservedWatch:
        """Schedules watching a path and calls appropriate methods specified
        in the given event handler in response to file system events.

        :param event_handler:
            An event handler instance that has appropriate event handling
            methods which will be called by the observer in response to
            file system events.
        :type event_handler:
            :class:`watchdog.events.FileSystemEventHandler` or a subclass
        :param path:
            Directory path that will be monitored.
        :type path:
            ``str``
        :param recursive:
            ``True`` if events will be emitted for sub-directories
            traversed recursively; ``False`` otherwise.
        :type recursive:
            ``bool``
        :param event_filter:
            Collection of event types to emit, or None for no filtering (default).
        :type event_filter:
            Iterable[:class:`watchdog.events.FileSystemEvent`] | None
        :return:
            An :class:`ObservedWatch` object instance representing
            a watch.
        """
        with self._lock:
            watch = ObservedWatch(path, recursive=recursive, event_filter=event_filter)
            self._add_handler_for_watch(event_handler, watch)

            # If we don't have an emitter for this watch already, create it.
            if watch not in self._emitter_for_watch:
                emitter = self._emitter_class(self.event_queue, watch, timeout=self.timeout, event_filter=event_filter)
                if self.is_alive():
                    emitter.start()
                self._add_emitter(emitter)
            self._watches.add(watch)
        return watch

    def add_handler_for_watch(self, event_handler: FileSystemEventHandler, watch: ObservedWatch) -> None:
        """Adds a handler for the given watch.

        :param event_handler:
            An event handler instance that has appropriate event handling
            methods which will be called by the observer in response to
            file system events.
        :type event_handler:
            :class:`watchdog.events.FileSystemEventHandler` or a subclass
        :param watch:
            The watch to add a handler for.
        :type watch:
            An instance of :class:`ObservedWatch` or a subclass of
            :class:`ObservedWatch`
        """
        with self._lock:
            self._add_handler_for_watch(event_handler, watch)

    def remove_handler_for_watch(self, event_handler: FileSystemEventHandler, watch: ObservedWatch) -> None:
        """Removes a handler for the given watch.

        :param event_handler:
            An event handler instance that has appropriate event handling
            methods which will be called by the observer in response to
            file system events.
        :type event_handler:
            :class:`watchdog.events.FileSystemEventHandler` or a subclass
        :param watch:
            The watch to remove a handler for.
        :type watch:
            An instance of :class:`ObservedWatch` or a subclass of
            :class:`ObservedWatch`
        """
        with self._lock:
            self._handlers[watch].remove(event_handler)

    def unschedule(self, watch: ObservedWatch) -> None:
        """Unschedules a watch.

        :param watch:
            The watch to unschedule.
        :type watch:
            An instance of :class:`ObservedWatch` or a subclass of
            :class:`ObservedWatch`
        """
        with self._lock:
            emitter = self._emitter_for_watch[watch]
            del self._handlers[watch]
            self._remove_emitter(emitter)
            self._watches.remove(watch)

    def unschedule_all(self) -> None:
        """Unschedules all watches and detaches all associated event handlers."""
        with self._lock:
            self._handlers.clear()
            self._clear_emitters()
            self._watches.clear()

    def on_thread_stop(self) -> None:
        # Tear down every watch/emitter when the observer thread stops.
        self.unschedule_all()

    def dispatch_events(self, event_queue: EventQueue) -> None:
        # Blocks until an entry is available; stop() unblocks us by pushing
        # the stop sentinel onto the queue.
        entry = event_queue.get(block=True)
        if entry is EventDispatcher.stop_event:
            return

        event, watch = entry

        with self._lock:
            # To allow unschedule/stop and safe removal of event handlers
            # within event handlers itself, check if the handler is still
            # registered after every dispatch.
            for handler in self._handlers[watch].copy():
                if handler in self._handlers[watch]:
                    handler.dispatch(event)
        event_queue.task_done()

View File

@@ -0,0 +1,339 @@
""":module: watchdog.observers.fsevents
:synopsis: FSEvents based emitter implementation.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: contact@tiger-222.fr (Mickaël Schoentgen)
:platforms: macOS
"""
from __future__ import annotations
import logging
import os
import threading
import time
import unicodedata
from typing import TYPE_CHECKING
import _watchdog_fsevents as _fsevents
from watchdog.events import (
DirCreatedEvent,
DirDeletedEvent,
DirModifiedEvent,
DirMovedEvent,
FileCreatedEvent,
FileDeletedEvent,
FileModifiedEvent,
FileMovedEvent,
generate_sub_created_events,
generate_sub_moved_events,
)
from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter
from watchdog.utils.dirsnapshot import DirectorySnapshot
if TYPE_CHECKING:
from watchdog.events import FileSystemEvent, FileSystemEventHandler
from watchdog.observers.api import EventQueue, ObservedWatch
logger = logging.getLogger("fsevents")
class FSEventsEmitter(EventEmitter):
"""macOS FSEvents Emitter class.
:param event_queue:
The event queue to fill with events.
:param watch:
A watch object representing the directory to monitor.
:type watch:
:class:`watchdog.observers.api.ObservedWatch`
:param timeout:
Read events blocking timeout (in seconds).
:param event_filter:
Collection of event types to emit, or None for no filtering (default).
:param suppress_history:
The FSEvents API may emit historic events up to 30 sec before the watch was
started. When ``suppress_history`` is ``True``, those events will be suppressed
by creating a directory snapshot of the watched path before starting the stream
as a reference to suppress old events. Warning: This may result in significant
memory usage in case of a large number of items in the watched path.
:type timeout:
``float``
"""
    def __init__(
        self,
        event_queue: EventQueue,
        watch: ObservedWatch,
        *,
        timeout: float = DEFAULT_EMITTER_TIMEOUT,
        event_filter: list[type[FileSystemEvent]] | None = None,
        suppress_history: bool = False,
    ) -> None:
        super().__init__(event_queue, watch, timeout=timeout, event_filter=event_filter)
        # Inodes seen while the stream has been running; consulted by
        # _is_historic_created_event to spot pre-start items.
        self._fs_view: set[int] = set()
        self.suppress_history = suppress_history
        self._start_time = 0.0
        # Snapshot of the watched tree taken before the stream starts, used as
        # a reference to suppress historic events (presumably only populated
        # when suppress_history is True — confirm against the start-up code).
        self._starting_state: DirectorySnapshot | None = None
        self._lock = threading.Lock()
        # Fully resolved watch path; used to decide whether an event occurred
        # below (rather than directly in) a non-recursive watch.
        self._absolute_watch_path = os.path.realpath(os.path.abspath(os.path.expanduser(self.watch.path)))
    def on_thread_stop(self) -> None:
        """Remove the native watch and stop the FSEvents stream on shutdown."""
        _fsevents.remove_watch(self.watch)
        _fsevents.stop(self)
    def queue_event(self, event: FileSystemEvent) -> None:
        # fsevents defaults to be recursive, so if the watch was meant to be non-recursive then we need to drop
        # all the events here which do not have a src_path / dest_path that matches the watched path
        if self._watch.is_recursive or not self._is_recursive_event(event):
            logger.debug("queue_event %s", event)
            EventEmitter.queue_event(self, event)
        else:
            logger.debug("drop event %s", event)
    def _is_recursive_event(self, event: FileSystemEvent) -> bool:
        """Return ``True`` when *event* happened below — not directly in — the
        watched directory (i.e. it would only be seen by a recursive watch)."""
        # For files compare the containing directory; for directories the path itself.
        src_path = event.src_path if event.is_directory else os.path.dirname(event.src_path)
        if src_path == self._absolute_watch_path:
            return False

        if isinstance(event, (FileMovedEvent, DirMovedEvent)):
            # when moving something into the watch path we must always take the dirname,
            # otherwise we miss out on `DirMovedEvent`s
            dest_path = os.path.dirname(event.dest_path)
            if dest_path == self._absolute_watch_path:
                return False

        return True
    def _queue_created_event(self, event: FileSystemEvent, src_path: bytes | str, dirname: bytes | str) -> None:
        """Queue a created event plus a modified event for the parent directory."""
        cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
        self.queue_event(cls(src_path))
        self.queue_event(DirModifiedEvent(dirname))
    def _queue_deleted_event(self, event: FileSystemEvent, src_path: bytes | str, dirname: bytes | str) -> None:
        """Queue a deleted event plus a modified event for the parent directory."""
        cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
        self.queue_event(cls(src_path))
        self.queue_event(DirModifiedEvent(dirname))
def _queue_modified_event(self, event: FileSystemEvent, src_path: bytes | str, dirname: bytes | str) -> None:
cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
self.queue_event(cls(src_path))
def _queue_renamed_event(
self,
src_event: FileSystemEvent,
src_path: bytes | str,
dst_path: bytes | str,
src_dirname: bytes | str,
dst_dirname: bytes | str,
) -> None:
cls = DirMovedEvent if src_event.is_directory else FileMovedEvent
dst_path = self._encode_path(dst_path)
self.queue_event(cls(src_path, dst_path))
self.queue_event(DirModifiedEvent(src_dirname))
self.queue_event(DirModifiedEvent(dst_dirname))
def _is_historic_created_event(self, event: _fsevents.NativeEvent) -> bool:
# We only queue a created event if the item was created after we
# started the FSEventsStream.
in_history = event.inode in self._fs_view
if self._starting_state:
try:
old_inode = self._starting_state.inode(event.path)[0]
before_start = old_inode == event.inode
except KeyError:
before_start = False
else:
before_start = False
return in_history or before_start
@staticmethod
def _is_meta_mod(event: _fsevents.NativeEvent) -> bool:
"""Returns True if the event indicates a change in metadata."""
return event.is_inode_meta_mod or event.is_xattr_mod or event.is_owner_change
    def queue_events(self, timeout: float, events: list[_fsevents.NativeEvent]) -> None:  # type: ignore[override]
        """Translate a batch of (possibly coalesced) native FSEvents into watchdog events.

        Events are consumed front-to-back; rename events may consume a second
        (destination) event from the remaining batch.
        """
        if logger.getEffectiveLevel() <= logging.DEBUG:
            for event in events:
                flags = ", ".join(attr for attr in dir(event) if getattr(event, attr) is True)
                logger.debug("%s: %s", event, flags)
        if time.monotonic() - self._start_time > 60:
            # Event history is no longer needed, let's free some memory.
            self._starting_state = None
        while events:
            event = events.pop(0)
            src_path = self._encode_path(event.path)
            src_dirname = os.path.dirname(src_path)
            try:
                stat = os.stat(src_path)
            except OSError:
                stat = None
            # Truthy only when the same inode still exists at the reported path.
            exists = stat and stat.st_ino == event.inode
            # FSevents may coalesce multiple events for the same item + path into a
            # single event. However, events are never coalesced for different items at
            # the same path or for the same item at different paths. Therefore, the
            # event chains "removed -> created" and "created -> renamed -> removed" will
            # never emit a single native event and a deleted event *always* means that
            # the item no longer existed at the end of the event chain.
            # Some events will have a spurious `is_created` flag set, coalesced from an
            # already emitted and processed CreatedEvent. To filter those, we keep track
            # of all inodes which we know to be already created. This is safer than
            # keeping track of paths since paths are more likely to be reused than
            # inodes.
            # Likewise, some events will have a spurious `is_modified`,
            # `is_inode_meta_mod` or `is_xattr_mod` flag set. We currently do not
            # suppress those but could do so if the item still exists by caching the
            # stat result and verifying that it did change.
            if event.is_created and event.is_removed:
                # Events will only be coalesced for the same item / inode.
                # The sequence deleted -> created therefore cannot occur.
                # Any combination with renamed cannot occur either.
                if not self._is_historic_created_event(event):
                    self._queue_created_event(event, src_path, src_dirname)
                    self._fs_view.add(event.inode)
                if event.is_modified or self._is_meta_mod(event):
                    self._queue_modified_event(event, src_path, src_dirname)
                self._queue_deleted_event(event, src_path, src_dirname)
                self._fs_view.discard(event.inode)
            else:
                if event.is_created and not self._is_historic_created_event(event):
                    self._queue_created_event(event, src_path, src_dirname)
                    self._fs_view.add(event.inode)
                if event.is_modified or self._is_meta_mod(event):
                    self._queue_modified_event(event, src_path, src_dirname)
                if event.is_renamed:
                    # Check if we have a corresponding destination event in the watched path.
                    dst_event = next(
                        iter(e for e in events if e.is_renamed and e.inode == event.inode),
                        None,
                    )
                    if dst_event:
                        # Item was moved within the watched folder.
                        logger.debug("Destination event for rename is %s", dst_event)
                        dst_path = self._encode_path(dst_event.path)
                        dst_dirname = os.path.dirname(dst_path)
                        self._queue_renamed_event(event, src_path, dst_path, src_dirname, dst_dirname)
                        self._fs_view.add(event.inode)
                        for sub_moved_event in generate_sub_moved_events(src_path, dst_path):
                            self.queue_event(sub_moved_event)
                        # Process any coalesced flags for the dst_event.
                        events.remove(dst_event)
                        if dst_event.is_modified or self._is_meta_mod(dst_event):
                            self._queue_modified_event(dst_event, dst_path, dst_dirname)
                        if dst_event.is_removed:
                            self._queue_deleted_event(dst_event, dst_path, dst_dirname)
                            self._fs_view.discard(dst_event.inode)
                    elif exists:
                        # This is the destination event, item was moved into the watched
                        # folder.
                        self._queue_created_event(event, src_path, src_dirname)
                        self._fs_view.add(event.inode)
                        for sub_created_event in generate_sub_created_events(src_path):
                            self.queue_event(sub_created_event)
                    else:
                        # This is the source event, item was moved out of the watched
                        # folder.
                        self._queue_deleted_event(event, src_path, src_dirname)
                        self._fs_view.discard(event.inode)
                        # Skip further coalesced processing.
                        continue
                if event.is_removed:
                    # Won't occur together with renamed.
                    self._queue_deleted_event(event, src_path, src_dirname)
                    self._fs_view.discard(event.inode)
            if event.is_root_changed:
                # This will be set if root or any of its parents is renamed or deleted.
                # TODO: find out new path and generate DirMovedEvent?
                self.queue_event(DirDeletedEvent(self.watch.path))
                logger.debug("Stopping because root path was changed")
                self.stop()
                # The emitter is going away; cached inode history is obsolete.
                self._fs_view.clear()
def events_callback(self, paths: list[bytes], inodes: list[int], flags: list[int], ids: list[int]) -> None:
"""Callback passed to FSEventStreamCreate(), it will receive all
FS events and queue them.
"""
cls = _fsevents.NativeEvent
try:
events = [
cls(path, inode, event_flags, event_id)
for path, inode, event_flags, event_id in zip(paths, inodes, flags, ids)
]
with self._lock:
self.queue_events(self.timeout, events)
except Exception:
logger.exception("Unhandled exception in fsevents callback")
    def run(self) -> None:
        """Thread body: register the native watch and block reading FSEvents."""
        self.pathnames = [self.watch.path]
        # Timestamp used by queue_events() to expire the startup snapshot.
        self._start_time = time.monotonic()
        try:
            _fsevents.add_watch(self, self.watch, self.events_callback, self.pathnames)
            _fsevents.read_events(self)
        except Exception:
            logger.exception("Unhandled exception in FSEventsEmitter")
def on_thread_start(self) -> None:
if self.suppress_history:
watch_path = os.fsdecode(self.watch.path) if isinstance(self.watch.path, bytes) else self.watch.path
self._starting_state = DirectorySnapshot(watch_path)
def _encode_path(self, path: bytes | str) -> bytes | str:
"""Encode path only if bytes were passed to this emitter."""
return os.fsencode(path) if isinstance(self.watch.path, bytes) else path
class FSEventsObserver(BaseObserver):
    """Observer thread that monitors directories via macOS FSEvents."""

    def __init__(self, *, timeout: float = DEFAULT_OBSERVER_TIMEOUT) -> None:
        super().__init__(FSEventsEmitter, timeout=timeout)

    def schedule(
        self,
        event_handler: FileSystemEventHandler,
        path: str,
        *,
        recursive: bool = False,
        event_filter: list[type[FileSystemEvent]] | None = None,
    ) -> ObservedWatch:
        """Schedule *event_handler* for *path*, NFC-normalizing unicode paths.

        Fix for issue #26: Trace/BPT error when given a unicode path
        string. https://github.com/gorakhargosh/watchdog/issues#issue/26
        """
        normalized_path = unicodedata.normalize("NFC", path) if isinstance(path, str) else path
        return super().schedule(event_handler, normalized_path, recursive=recursive, event_filter=event_filter)

View File

@@ -0,0 +1,253 @@
""":module: watchdog.observers.fsevents2
:synopsis: FSEvents based emitter implementation.
:author: thomas.amland@gmail.com (Thomas Amland)
:author: contact@tiger-222.fr (Mickaël Schoentgen)
:platforms: macOS
"""
from __future__ import annotations
import logging
import os
import queue
import unicodedata
import warnings
from threading import Thread
from typing import TYPE_CHECKING
# pyobjc
import AppKit
from FSEvents import (
CFRunLoopGetCurrent,
CFRunLoopRun,
CFRunLoopStop,
FSEventStreamCreate,
FSEventStreamInvalidate,
FSEventStreamRelease,
FSEventStreamScheduleWithRunLoop,
FSEventStreamStart,
FSEventStreamStop,
kCFAllocatorDefault,
kCFRunLoopDefaultMode,
kFSEventStreamCreateFlagFileEvents,
kFSEventStreamCreateFlagNoDefer,
kFSEventStreamEventFlagItemChangeOwner,
kFSEventStreamEventFlagItemCreated,
kFSEventStreamEventFlagItemFinderInfoMod,
kFSEventStreamEventFlagItemInodeMetaMod,
kFSEventStreamEventFlagItemIsDir,
kFSEventStreamEventFlagItemIsSymlink,
kFSEventStreamEventFlagItemModified,
kFSEventStreamEventFlagItemRemoved,
kFSEventStreamEventFlagItemRenamed,
kFSEventStreamEventFlagItemXattrMod,
kFSEventStreamEventIdSinceNow,
)
from watchdog.events import (
DirCreatedEvent,
DirDeletedEvent,
DirModifiedEvent,
DirMovedEvent,
FileCreatedEvent,
FileDeletedEvent,
FileModifiedEvent,
FileMovedEvent,
FileSystemEvent,
)
from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter
if TYPE_CHECKING:
from typing import Callable
from watchdog.observers.api import EventQueue, ObservedWatch
logger = logging.getLogger(__name__)
message = "watchdog.observers.fsevents2 is deprecated and will be removed in a future release."
# Announce the deprecation on both channels: the warnings machinery (so
# filters and tooling can react) and the module logger (visible even when
# warnings output is suppressed).
warnings.warn(message, category=DeprecationWarning, stacklevel=1)
logger.warning(message)
class FSEventsQueue(Thread):
    """Low level FSEvents client.

    Runs a CFRunLoop on a dedicated thread and pushes batches of
    :class:`NativeEvent` objects onto an internal queue. A ``None`` sentinel
    is enqueued when the run loop terminates so blocked readers can unblock.
    """
    def __init__(self, path: bytes | str) -> None:
        Thread.__init__(self)
        # Batches of native events; ``None`` marks shutdown.
        self._queue: queue.Queue[list[NativeEvent] | None] = queue.Queue()
        # Captured by run() on the worker thread; needed by stop().
        self._run_loop = None
        if isinstance(path, bytes):
            path = os.fsdecode(path)
        # Normalize to NFC so path comparisons are consistent.
        self._path = unicodedata.normalize("NFC", path)
        context = None
        latency = 1.0
        self._stream_ref = FSEventStreamCreate(
            kCFAllocatorDefault,
            self._callback,
            context,
            [self._path],
            kFSEventStreamEventIdSinceNow,
            latency,
            kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents,
        )
        if self._stream_ref is None:
            error = "FSEvents. Could not create stream."
            raise OSError(error)
    def run(self) -> None:
        # Autorelease pool scopes any Objective-C objects allocated on this thread.
        pool = AppKit.NSAutoreleasePool.alloc().init()
        self._run_loop = CFRunLoopGetCurrent()
        FSEventStreamScheduleWithRunLoop(self._stream_ref, self._run_loop, kCFRunLoopDefaultMode)
        if not FSEventStreamStart(self._stream_ref):
            FSEventStreamInvalidate(self._stream_ref)
            FSEventStreamRelease(self._stream_ref)
            error = "FSEvents. Could not start stream."
            raise OSError(error)
        # Blocks here until CFRunLoopStop() is invoked from stop().
        CFRunLoopRun()
        # Tear down in the stop -> invalidate -> release order.
        FSEventStreamStop(self._stream_ref)
        FSEventStreamInvalidate(self._stream_ref)
        FSEventStreamRelease(self._stream_ref)
        del pool
        # Make sure waiting thread is notified
        self._queue.put(None)
    def stop(self) -> None:
        # Ask the run loop started in run() to exit.
        if self._run_loop is not None:
            CFRunLoopStop(self._run_loop)
    def _callback(
        self,
        stream_ref: int,
        client_callback_info: Callable,
        num_events: int,
        event_paths: list[bytes],
        event_flags: list[int],
        event_ids: list[int],
    ) -> None:
        # Convert the parallel arrays delivered by FSEvents into one batch.
        events = [NativeEvent(path, flags, _id) for path, flags, _id in zip(event_paths, event_flags, event_ids)]
        logger.debug("FSEvents callback. Got %d events:", num_events)
        for e in events:
            logger.debug(e)
        self._queue.put(events)
    def read_events(self) -> list[NativeEvent] | None:
        """Returns a list of one or more events, or None if there are no more
        events to be read.
        """
        # Blocking get; once the worker thread has died there is nothing to read.
        return self._queue.get() if self.is_alive() else None
class NativeEvent:
    """Decoded FSEvents record: a path, the raw flag bits, and boolean views of them."""

    def __init__(self, path: bytes, flags: int, event_id: int) -> None:
        self.path = path
        self.flags = flags
        self.event_id = event_id
        # Derive one boolean attribute per FSEvents flag bit.
        flag_attrs = (
            ("is_created", kFSEventStreamEventFlagItemCreated),
            ("is_removed", kFSEventStreamEventFlagItemRemoved),
            ("is_renamed", kFSEventStreamEventFlagItemRenamed),
            ("is_modified", kFSEventStreamEventFlagItemModified),
            ("is_change_owner", kFSEventStreamEventFlagItemChangeOwner),
            ("is_inode_meta_mod", kFSEventStreamEventFlagItemInodeMetaMod),
            ("is_finder_info_mod", kFSEventStreamEventFlagItemFinderInfoMod),
            ("is_xattr_mod", kFSEventStreamEventFlagItemXattrMod),
            ("is_symlink", kFSEventStreamEventFlagItemIsSymlink),
            ("is_directory", kFSEventStreamEventFlagItemIsDir),
        )
        for attr, bit in flag_attrs:
            setattr(self, attr, bool(flags & bit))

    @property
    def _event_type(self) -> str:
        """Human-readable label for the first matching flag (used by __repr__)."""
        labels = (
            ("Created", self.is_created),
            ("Removed", self.is_removed),
            ("Renamed", self.is_renamed),
            ("Modified", self.is_modified),
            ("InodeMetaMod", self.is_inode_meta_mod),
            ("XattrMod", self.is_xattr_mod),
        )
        for label, is_set in labels:
            if is_set:
                return label
        return "Unknown"

    def __repr__(self) -> str:
        return (
            f"<{type(self).__name__}: path={self.path!r}, type={self._event_type},"
            f" is_dir={self.is_directory}, flags={hex(self.flags)}, id={self.event_id}>"
        )
class FSEventsEmitter(EventEmitter):
    """FSEvents based event emitter. Handles conversion of native events."""
    def __init__(
        self,
        event_queue: EventQueue,
        watch: ObservedWatch,
        *,
        timeout: float = DEFAULT_EMITTER_TIMEOUT,
        event_filter: list[type[FileSystemEvent]] | None = None,
    ):
        super().__init__(event_queue, watch, timeout=timeout, event_filter=event_filter)
        # Worker thread that owns the CFRunLoop and buffers native events.
        self._fsevents = FSEventsQueue(watch.path)
        self._fsevents.start()
    def on_thread_stop(self) -> None:
        self._fsevents.stop()
    def queue_events(self, timeout: float) -> None:
        """Drain one batch of native events and convert them to watchdog events."""
        events = self._fsevents.read_events()
        if events is None:
            # Queue closed / worker thread gone: nothing to convert.
            return
        i = 0
        while i < len(events):
            event = events[i]
            cls: type[FileSystemEvent]
            # For some reason the create and remove flags are sometimes also
            # set for rename and modify type events, so let those take
            # precedence.
            if event.is_renamed:
                # Internal moves appears to always be consecutive in the same
                # buffer and have IDs differ by exactly one (while others
                # don't) making it possible to pair up the two events coming
                # from a single move operation. (None of this is documented!)
                # Otherwise, guess whether file was moved in or out.
                # TODO: handle id wrapping
                if i + 1 < len(events) and events[i + 1].is_renamed and events[i + 1].event_id == event.event_id + 1:
                    cls = DirMovedEvent if event.is_directory else FileMovedEvent
                    self.queue_event(cls(event.path, events[i + 1].path))
                    self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
                    self.queue_event(DirModifiedEvent(os.path.dirname(events[i + 1].path)))
                    # Consume the paired destination event as well.
                    i += 1
                elif os.path.exists(event.path):
                    # Path exists now -> guessed as a move *into* the watch.
                    cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
                    self.queue_event(cls(event.path))
                    self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
                else:
                    # Path gone -> guessed as a move *out of* the watch.
                    cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
                    self.queue_event(cls(event.path))
                    self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
                # TODO: generate events for tree
            elif event.is_modified or event.is_inode_meta_mod or event.is_xattr_mod:
                cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
                self.queue_event(cls(event.path))
            elif event.is_created:
                cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
                self.queue_event(cls(event.path))
                self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
            elif event.is_removed:
                cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
                self.queue_event(cls(event.path))
                self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
            i += 1
class FSEventsObserver2(BaseObserver):
    """Observer thread backed by the (deprecated) fsevents2 emitter."""
    def __init__(self, *, timeout: float = DEFAULT_OBSERVER_TIMEOUT) -> None:
        super().__init__(FSEventsEmitter, timeout=timeout)

View File

@@ -0,0 +1,252 @@
""":module: watchdog.observers.inotify
:synopsis: ``inotify(7)`` based emitter implementation.
:author: Sebastien Martini <seb@dbzteam.org>
:author: Luke McCarthy <luke@iogopro.co.uk>
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: Tim Cuthbertson <tim+github@gfxmonk.net>
:author: contact@tiger-222.fr (Mickaël Schoentgen)
:platforms: Linux 2.6.13+.
.. ADMONITION:: About system requirements
Recommended minimum kernel version: 2.6.25.
Quote from the inotify(7) man page:
"Inotify was merged into the 2.6.13 Linux kernel. The required library
interfaces were added to glibc in version 2.4. (IN_DONT_FOLLOW,
IN_MASK_ADD, and IN_ONLYDIR were only added in version 2.5.)"
Therefore, you must ensure the system is running at least these versions
appropriate libraries and the kernel.
.. ADMONITION:: About recursiveness, event order, and event coalescing
Quote from the inotify(7) man page:
If successive output inotify events produced on the inotify file
descriptor are identical (same wd, mask, cookie, and name) then they
are coalesced into a single event if the older event has not yet been
read (but see BUGS).
The events returned by reading from an inotify file descriptor form
an ordered queue. Thus, for example, it is guaranteed that when
renaming from one directory to another, events will be produced in
the correct order on the inotify file descriptor.
...
Inotify monitoring of directories is not recursive: to monitor
subdirectories under a directory, additional watches must be created.
This emitter implementation therefore automatically adds watches for
sub-directories if running in recursive mode.
Some extremely useful articles and documentation:
.. _inotify FAQ: http://inotify.aiken.cz/?section=inotify&page=faq&lang=en
.. _intro to inotify: http://www.linuxjournal.com/article/8478
"""
from __future__ import annotations
import logging
import os
import threading
from typing import TYPE_CHECKING
from watchdog.events import (
DirCreatedEvent,
DirDeletedEvent,
DirModifiedEvent,
DirMovedEvent,
FileClosedEvent,
FileClosedNoWriteEvent,
FileCreatedEvent,
FileDeletedEvent,
FileModifiedEvent,
FileMovedEvent,
FileOpenedEvent,
FileSystemEvent,
generate_sub_created_events,
generate_sub_moved_events,
)
from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter
from watchdog.observers.inotify_buffer import InotifyBuffer
from watchdog.observers.inotify_c import InotifyConstants
if TYPE_CHECKING:
from watchdog.observers.api import EventQueue, ObservedWatch
logger = logging.getLogger(__name__)
class InotifyEmitter(EventEmitter):
    """inotify(7)-based event emitter.
    :param event_queue:
        The event queue to fill with events.
    :param watch:
        A watch object representing the directory to monitor.
    :type watch:
        :class:`watchdog.observers.api.ObservedWatch`
    :param timeout:
        Read events blocking timeout (in seconds).
    :type timeout:
        ``float``
    :param event_filter:
        Collection of event types to emit, or None for no filtering (default).
    :type event_filter:
        Iterable[:class:`watchdog.events.FileSystemEvent`] | None
    """
    def __init__(
        self,
        event_queue: EventQueue,
        watch: ObservedWatch,
        *,
        timeout: float = DEFAULT_EMITTER_TIMEOUT,
        event_filter: list[type[FileSystemEvent]] | None = None,
    ) -> None:
        super().__init__(event_queue, watch, timeout=timeout, event_filter=event_filter)
        # Guards _inotify against concurrent teardown (see queue_events/on_thread_stop).
        self._lock = threading.Lock()
        # Created in on_thread_start(); None whenever the thread is inactive.
        self._inotify: InotifyBuffer | None = None
    def on_thread_start(self) -> None:
        # The inotify layer works on bytes paths.
        path = os.fsencode(self.watch.path)
        event_mask = self.get_event_mask_from_filter()
        self._inotify = InotifyBuffer(path, recursive=self.watch.is_recursive, event_mask=event_mask)
    def on_thread_stop(self) -> None:
        # Idempotent: a second call finds _inotify already None.
        if self._inotify:
            self._inotify.close()
            self._inotify = None
    def queue_events(self, timeout: float, *, full_events: bool = False) -> None:
        # If "full_events" is true, then the method will report unmatched move events as separate events
        # This behavior is by default only called by a InotifyFullEmitter
        if self._inotify is None:
            logger.error("InotifyEmitter.queue_events() called when the thread is inactive")
            return
        with self._lock:
            # Re-check under the lock: on_thread_stop() may have run in between.
            if self._inotify is None:
                logger.error("InotifyEmitter.queue_events() called when the thread is inactive")
                return
            event = self._inotify.read_event()
            if event is None:
                return
            cls: type[FileSystemEvent]
            # A tuple is a paired (IN_MOVED_FROM, IN_MOVED_TO) couple built by
            # InotifyBuffer: report it as a single moved event.
            if isinstance(event, tuple):
                move_from, move_to = event
                src_path = self._decode_path(move_from.src_path)
                dest_path = self._decode_path(move_to.src_path)
                cls = DirMovedEvent if move_from.is_directory else FileMovedEvent
                self.queue_event(cls(src_path, dest_path))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
                self.queue_event(DirModifiedEvent(os.path.dirname(dest_path)))
                if move_from.is_directory and self.watch.is_recursive:
                    for sub_moved_event in generate_sub_moved_events(src_path, dest_path):
                        self.queue_event(sub_moved_event)
                return
            src_path = self._decode_path(event.src_path)
            if event.is_moved_to:
                # Unmatched IN_MOVED_TO: the source lies outside the watch.
                if full_events:
                    cls = DirMovedEvent if event.is_directory else FileMovedEvent
                    self.queue_event(cls("", src_path))
                else:
                    cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
                    self.queue_event(cls(src_path))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
                if event.is_directory and self.watch.is_recursive:
                    for sub_created_event in generate_sub_created_events(src_path):
                        self.queue_event(sub_created_event)
            elif event.is_attrib or event.is_modify:
                cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
                self.queue_event(cls(src_path))
            elif event.is_delete or (event.is_moved_from and not full_events):
                # Unmatched IN_MOVED_FROM is reported as a deletion unless the
                # caller asked for full (unmatched) move events.
                cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
                self.queue_event(cls(src_path))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
            elif event.is_moved_from and full_events:
                cls = DirMovedEvent if event.is_directory else FileMovedEvent
                self.queue_event(cls(src_path, ""))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
            elif event.is_create:
                cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
                self.queue_event(cls(src_path))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
            elif event.is_delete_self and src_path == self.watch.path:
                # The watched directory itself vanished: emit and shut down.
                cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
                self.queue_event(cls(src_path))
                self.stop()
            elif not event.is_directory:
                if event.is_open:
                    cls = FileOpenedEvent
                    self.queue_event(cls(src_path))
                elif event.is_close_write:
                    cls = FileClosedEvent
                    self.queue_event(cls(src_path))
                    self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
                elif event.is_close_nowrite:
                    cls = FileClosedNoWriteEvent
                    self.queue_event(cls(src_path))
    def _decode_path(self, path: bytes | str) -> bytes | str:
        """Decode path only if unicode string was passed to this emitter."""
        return path if isinstance(self.watch.path, bytes) else os.fsdecode(path)
    def get_event_mask_from_filter(self) -> int | None:
        """Optimization: Only include events we are filtering in inotify call."""
        if self._event_filter is None:
            return None
        # Always listen to delete self
        event_mask = InotifyConstants.IN_DELETE_SELF
        for cls in self._event_filter:
            if cls in {DirMovedEvent, FileMovedEvent}:
                event_mask |= InotifyConstants.IN_MOVE
            elif cls in {DirCreatedEvent, FileCreatedEvent}:
                # IN_MOVE included because moves into the watch surface as created.
                event_mask |= InotifyConstants.IN_MOVE | InotifyConstants.IN_CREATE
            elif cls is DirModifiedEvent:
                event_mask |= (
                    InotifyConstants.IN_MOVE
                    | InotifyConstants.IN_ATTRIB
                    | InotifyConstants.IN_MODIFY
                    | InotifyConstants.IN_CREATE
                    | InotifyConstants.IN_CLOSE_WRITE
                )
            elif cls is FileModifiedEvent:
                event_mask |= InotifyConstants.IN_ATTRIB | InotifyConstants.IN_MODIFY
            elif cls in {DirDeletedEvent, FileDeletedEvent}:
                event_mask |= InotifyConstants.IN_DELETE
            elif cls is FileClosedEvent:
                event_mask |= InotifyConstants.IN_CLOSE_WRITE
            elif cls is FileClosedNoWriteEvent:
                event_mask |= InotifyConstants.IN_CLOSE_NOWRITE
            elif cls is FileOpenedEvent:
                event_mask |= InotifyConstants.IN_OPEN
        return event_mask
class InotifyFullEmitter(InotifyEmitter):
    """inotify(7)-based event emitter. By default this class produces move events even if they are not matched
    Such move events will have an empty string for the unmatched part.
    """
    def queue_events(self, timeout: float, *, events: bool = True) -> None:  # type: ignore[override]
        # Delegate with full_events=True so unmatched halves of moves are
        # reported as moved events instead of created/deleted events.
        super().queue_events(timeout, full_events=events)
class InotifyObserver(BaseObserver):
    """Observer thread that schedules watching directories and dispatches
    calls to event handlers.
    """
    def __init__(self, *, timeout: float = DEFAULT_OBSERVER_TIMEOUT, generate_full_events: bool = False) -> None:
        # Full emitters also report unmatched halves of move events.
        if generate_full_events:
            emitter_cls: type[InotifyEmitter] = InotifyFullEmitter
        else:
            emitter_cls = InotifyEmitter
        super().__init__(emitter_cls, timeout=timeout)

View File

@@ -0,0 +1,102 @@
""":module: watchdog.observers.inotify_buffer
:synopsis: A wrapper for ``Inotify``.
:author: thomas.amland@gmail.com (Thomas Amland)
:author: contact@tiger-222.fr (Mickaël Schoentgen)
:platforms: linux
"""
from __future__ import annotations
import logging
from watchdog.observers.inotify_c import Inotify, InotifyEvent
from watchdog.utils import BaseThread
from watchdog.utils.delayed_queue import DelayedQueue
logger = logging.getLogger(__name__)
class InotifyBuffer(BaseThread):
    """A wrapper for `Inotify` that holds events for `delay` seconds. During
    this time, IN_MOVED_FROM and IN_MOVED_TO events are paired.
    """
    # Seconds an unmatched IN_MOVED_FROM is held back waiting for its IN_MOVED_TO.
    delay = 0.5
    def __init__(self, path: bytes, *, recursive: bool = False, event_mask: int | None = None) -> None:
        super().__init__()
        # XXX: Remove quotes after Python 3.9 drop
        self._queue = DelayedQueue["InotifyEvent | tuple[InotifyEvent, InotifyEvent]"](self.delay)
        self._inotify = Inotify(path, recursive=recursive, event_mask=event_mask)
        self.start()
    def read_event(self) -> InotifyEvent | tuple[InotifyEvent, InotifyEvent] | None:
        """Returns a single event or a tuple of from/to events in case of a
        paired move event. If this buffer has been closed, immediately return
        None.
        """
        return self._queue.get()
    def on_thread_stop(self) -> None:
        # Close the inotify handle and then the queue; per read_event()'s
        # contract, consumers receive None once the buffer is closed.
        self._inotify.close()
        self._queue.close()
    def close(self) -> None:
        """Stop the worker thread and wait for it to terminate."""
        self.stop()
        self.join()
    def _group_events(self, event_list: list[InotifyEvent]) -> list[InotifyEvent | tuple[InotifyEvent, InotifyEvent]]:
        """Group any matching move events"""
        grouped: list[InotifyEvent | tuple[InotifyEvent, InotifyEvent]] = []
        for inotify_event in event_list:
            logger.debug("in-event %s", inotify_event)
            def matching_from_event(event: InotifyEvent | tuple[InotifyEvent, InotifyEvent]) -> bool:
                # An unpaired IN_MOVED_FROM carrying the same kernel cookie.
                return not isinstance(event, tuple) and event.is_moved_from and event.cookie == inotify_event.cookie
            if inotify_event.is_moved_to:
                # Check if move_from is already in the buffer
                for index, event in enumerate(grouped):
                    if matching_from_event(event):
                        grouped[index] = (event, inotify_event)  # type: ignore[assignment]
                        break
                else:
                    # Check if move_from is in delayqueue already
                    from_event = self._queue.remove(matching_from_event)
                    if from_event is not None:
                        grouped.append((from_event, inotify_event))  # type: ignore[arg-type]
                    else:
                        logger.debug("could not find matching move_from event")
                        grouped.append(inotify_event)
            else:
                grouped.append(inotify_event)
        return grouped
    def run(self) -> None:
        """Read event from `inotify` and add them to `queue`. When reading a
        IN_MOVE_TO event, remove the previous added matching IN_MOVE_FROM event
        and add them back to the queue as a tuple.
        """
        deleted_self = False
        while self.should_keep_running() and not deleted_self:
            inotify_events = self._inotify.read_events()
            grouped_events = self._group_events(inotify_events)
            for inotify_event in grouped_events:
                if not isinstance(inotify_event, tuple) and inotify_event.is_ignored:
                    if inotify_event.src_path == self._inotify.path:
                        # Watch was removed explicitly (inotify_rm_watch(2)) or automatically (file
                        # was deleted, or filesystem was unmounted), stop watching for events
                        deleted_self = True
                    continue
                # Only add delay for unmatched move_from events
                delay = not isinstance(inotify_event, tuple) and inotify_event.is_moved_from
                self._queue.put(inotify_event, delay=delay)
                if (
                    not isinstance(inotify_event, tuple)
                    and inotify_event.is_delete_self
                    and inotify_event.src_path == self._inotify.path
                ):
                    # Deleted the watched directory, stop watching for events
                    deleted_self = True

View File

@@ -0,0 +1,605 @@
from __future__ import annotations
import contextlib
import ctypes
import ctypes.util
import errno
import os
import select
import struct
import threading
from ctypes import c_char_p, c_int, c_uint32
from functools import reduce
from typing import TYPE_CHECKING
from watchdog.utils import UnsupportedLibcError
if TYPE_CHECKING:
from collections.abc import Generator
# Load the running process's C library; the inotify syscall wrappers live there.
libc = ctypes.CDLL(None)
if not hasattr(libc, "inotify_init") or not hasattr(libc, "inotify_add_watch") or not hasattr(libc, "inotify_rm_watch"):
    error = f"Unsupported libc version found: {libc._name}"  # noqa:SLF001
    raise UnsupportedLibcError(error)
# Typed prototypes (with errno capture via use_errno) for the inotify entry points:
# int inotify_add_watch(int fd, const char *pathname, uint32_t mask)
inotify_add_watch = ctypes.CFUNCTYPE(c_int, c_int, c_char_p, c_uint32, use_errno=True)(("inotify_add_watch", libc))
# int inotify_rm_watch(int fd, uint32_t wd)
inotify_rm_watch = ctypes.CFUNCTYPE(c_int, c_int, c_uint32, use_errno=True)(("inotify_rm_watch", libc))
# int inotify_init(void)
inotify_init = ctypes.CFUNCTYPE(c_int, use_errno=True)(("inotify_init", libc))
class InotifyConstants:
    """Bit-mask constants mirroring ``<sys/inotify.h>`` (see inotify(7))."""
    # User-space events
    IN_ACCESS = 0x00000001  # File was accessed.
    IN_MODIFY = 0x00000002  # File was modified.
    IN_ATTRIB = 0x00000004  # Meta-data changed.
    IN_CLOSE_WRITE = 0x00000008  # Writable file was closed.
    IN_CLOSE_NOWRITE = 0x00000010  # Unwritable file closed.
    IN_OPEN = 0x00000020  # File was opened.
    IN_MOVED_FROM = 0x00000040  # File was moved from X.
    IN_MOVED_TO = 0x00000080  # File was moved to Y.
    IN_CREATE = 0x00000100  # Subfile was created.
    IN_DELETE = 0x00000200  # Subfile was deleted.
    IN_DELETE_SELF = 0x00000400  # Self was deleted.
    IN_MOVE_SELF = 0x00000800  # Self was moved.
    # Helper user-space events.
    IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO  # Moves.
    # Events sent by the kernel to a watch.
    IN_UNMOUNT = 0x00002000  # Backing file system was unmounted.
    IN_Q_OVERFLOW = 0x00004000  # Event queued overflowed.
    IN_IGNORED = 0x00008000  # File was ignored.
    # Special flags.
    IN_ONLYDIR = 0x01000000  # Only watch the path if it's a directory.
    IN_DONT_FOLLOW = 0x02000000  # Do not follow a symbolic link.
    IN_EXCL_UNLINK = 0x04000000  # Exclude events on unlinked objects
    IN_MASK_ADD = 0x20000000  # Add to the mask of an existing watch.
    IN_ISDIR = 0x40000000  # Event occurred against directory.
    IN_ONESHOT = 0x80000000  # Only send event once.
    # All user-space events.
    IN_ALL_EVENTS = reduce(
        lambda x, y: x | y,
        [
            IN_ACCESS,
            IN_MODIFY,
            IN_ATTRIB,
            IN_CLOSE_WRITE,
            IN_CLOSE_NOWRITE,
            IN_OPEN,
            IN_MOVED_FROM,
            IN_MOVED_TO,
            IN_DELETE,
            IN_CREATE,
            IN_DELETE_SELF,
            IN_MOVE_SELF,
        ],
    )
    # Flags for ``inotify_init1``
    IN_CLOEXEC = 0x02000000
    IN_NONBLOCK = 0x00004000
# Watchdog's API cares only about these events.
WATCHDOG_ALL_EVENTS = (
    InotifyConstants.IN_MODIFY
    | InotifyConstants.IN_ATTRIB
    | InotifyConstants.IN_MOVED_FROM
    | InotifyConstants.IN_MOVED_TO
    | InotifyConstants.IN_CREATE
    | InotifyConstants.IN_DELETE
    | InotifyConstants.IN_DELETE_SELF
    | InotifyConstants.IN_DONT_FOLLOW
    | InotifyConstants.IN_CLOSE_WRITE
    | InotifyConstants.IN_CLOSE_NOWRITE
    | InotifyConstants.IN_OPEN
)
class InotifyEventStruct(ctypes.Structure):
    """Structure representation of the inotify_event structure
    (used in buffer size calculations)::
        struct inotify_event {
            __s32 wd; /* watch descriptor */
            __u32 mask; /* watch mask */
            __u32 cookie; /* cookie to synchronize two events */
            __u32 len; /* length (including nulls) of name */
            char name[0]; /* stub for possible name */
        };
    """
    # NOTE: only ctypes.sizeof() of this struct is used (see EVENT_SIZE below);
    # the kernel's flexible array member is stood in for by a c_char_p field.
    _fields_ = (
        ("wd", c_int),
        ("mask", c_uint32),
        ("cookie", c_uint32),
        ("len", c_uint32),
        ("name", c_char_p),
    )
# Size (in bytes) of the fixed-length part of a single inotify_event record.
EVENT_SIZE = ctypes.sizeof(InotifyEventStruct)
DEFAULT_NUM_EVENTS = 2048
# Read-buffer sized for DEFAULT_NUM_EVENTS records, each with a 16-byte name.
DEFAULT_EVENT_BUFFER_SIZE = DEFAULT_NUM_EVENTS * (EVENT_SIZE + 16)
class Inotify:
"""Linux inotify(7) API wrapper class.
:param path:
The directory path for which we want an inotify object.
:type path:
:class:`bytes`
:param recursive:
``True`` if subdirectories should be monitored; ``False`` otherwise.
"""
    def __init__(self, path: bytes, *, recursive: bool = False, event_mask: int | None = None) -> None:
        # The file descriptor associated with the inotify instance.
        inotify_fd = inotify_init()
        if inotify_fd == -1:
            Inotify._raise_error()
        self._inotify_fd = inotify_fd
        self._lock = threading.Lock()
        self._closed = False
        self._is_reading = True
        # Self-pipe registered with the poller below so another thread can
        # wake a wait that is blocked on the inotify fd.
        self._kill_r, self._kill_w = os.pipe()
        # _check_inotify_fd will return true if we can read _inotify_fd without blocking
        if hasattr(select, "poll"):
            self._poller = select.poll()
            self._poller.register(self._inotify_fd, select.POLLIN)
            self._poller.register(self._kill_r, select.POLLIN)
            def do_poll() -> bool:
                # True only when the readable fd is the inotify fd itself.
                return any(fd == self._inotify_fd for fd, _ in self._poller.poll())
            self._check_inotify_fd = do_poll
        else:
            # Fallback for platforms lacking poll(2).
            def do_select() -> bool:
                result = select.select([self._inotify_fd, self._kill_r], [], [])
                return self._inotify_fd in result[0]
            self._check_inotify_fd = do_select
        # Stores the watch descriptor for a given path.
        self._wd_for_path: dict[bytes, int] = {}
        self._path_for_wd: dict[int, bytes] = {}
        self._path = path
        # Default to all events
        if event_mask is None:
            event_mask = WATCHDOG_ALL_EVENTS
        self._event_mask = event_mask
        self._is_recursive = recursive
        # Directories get a (optionally recursive) subtree watch; plain files
        # get a single watch.
        if os.path.isdir(path):
            self._add_dir_watch(path, event_mask, recursive=recursive)
        else:
            self._add_watch(path, event_mask)
        # IN_MOVED_FROM events cached by cookie until their IN_MOVED_TO arrives.
        self._moved_from_events: dict[int, InotifyEvent] = {}
@property
def event_mask(self) -> int:
"""The event mask for this inotify instance."""
return self._event_mask
@property
def path(self) -> bytes:
"""The path associated with the inotify instance."""
return self._path
@property
def is_recursive(self) -> bool:
"""Whether we are watching directories recursively."""
return self._is_recursive
@property
def fd(self) -> int:
"""The file descriptor associated with the inotify instance."""
return self._inotify_fd
def clear_move_records(self) -> None:
"""Clear cached records of MOVED_FROM events"""
self._moved_from_events = {}
def source_for_move(self, destination_event: InotifyEvent) -> bytes | None:
"""The source path corresponding to the given MOVED_TO event.
If the source path is outside the monitored directories, None
is returned instead.
"""
if destination_event.cookie in self._moved_from_events:
return self._moved_from_events[destination_event.cookie].src_path
return None
def remember_move_from_event(self, event: InotifyEvent) -> None:
"""Save this event as the source event for future MOVED_TO events to
reference.
"""
self._moved_from_events[event.cookie] = event
def add_watch(self, path: bytes) -> None:
"""Adds a watch for the given path.
:param path:
Path to begin monitoring.
"""
with self._lock:
self._add_watch(path, self._event_mask)
def remove_watch(self, path: bytes) -> None:
"""Removes a watch for the given path.
:param path:
Path string for which the watch will be removed.
"""
with self._lock:
wd = self._wd_for_path.pop(path)
del self._path_for_wd[wd]
if inotify_rm_watch(self._inotify_fd, wd) == -1:
Inotify._raise_error()
def close(self) -> None:
"""Closes the inotify instance and removes all associated watches."""
with self._lock:
if not self._closed:
self._closed = True
if self._path in self._wd_for_path:
wd = self._wd_for_path[self._path]
inotify_rm_watch(self._inotify_fd, wd)
if self._is_reading:
# inotify_rm_watch() should write data to _inotify_fd and wake
# the thread, but writing to the kill channel will gaurentee this
os.write(self._kill_w, b"!")
else:
self._close_resources()
def read_events(self, *, event_buffer_size: int = DEFAULT_EVENT_BUFFER_SIZE) -> list[InotifyEvent]:
"""Reads events from inotify and yields them."""
# HACK: We need to traverse the directory path
# recursively and simulate events for newly
# created subdirectories/files. This will handle
# mkdir -p foobar/blah/bar; touch foobar/afile
def _recursive_simulate(src_path: bytes) -> list[InotifyEvent]:
events = []
for root, dirnames, filenames in os.walk(src_path):
for dirname in dirnames:
with contextlib.suppress(OSError):
full_path = os.path.join(root, dirname)
wd_dir = self._add_watch(full_path, self._event_mask)
e = InotifyEvent(
wd_dir,
InotifyConstants.IN_CREATE | InotifyConstants.IN_ISDIR,
0,
dirname,
full_path,
)
events.append(e)
for filename in filenames:
full_path = os.path.join(root, filename)
wd_parent_dir = self._wd_for_path[os.path.dirname(full_path)]
e = InotifyEvent(
wd_parent_dir,
InotifyConstants.IN_CREATE,
0,
filename,
full_path,
)
events.append(e)
return events
event_buffer = b""
while True:
try:
with self._lock:
if self._closed:
return []
self._is_reading = True
if self._check_inotify_fd():
event_buffer = os.read(self._inotify_fd, event_buffer_size)
with self._lock:
self._is_reading = False
if self._closed:
self._close_resources()
return []
except OSError as e:
if e.errno == errno.EINTR:
continue
if e.errno == errno.EBADF:
return []
raise
break
with self._lock:
event_list = []
for wd, mask, cookie, name in Inotify._parse_event_buffer(event_buffer):
if wd == -1:
continue
wd_path = self._path_for_wd[wd]
src_path = os.path.join(wd_path, name) if name else wd_path # avoid trailing slash
inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
if inotify_event.is_moved_from:
self.remember_move_from_event(inotify_event)
elif inotify_event.is_moved_to:
move_src_path = self.source_for_move(inotify_event)
if move_src_path in self._wd_for_path:
moved_wd = self._wd_for_path[move_src_path]
del self._wd_for_path[move_src_path]
self._wd_for_path[inotify_event.src_path] = moved_wd
self._path_for_wd[moved_wd] = inotify_event.src_path
if self.is_recursive:
for _path in self._wd_for_path.copy():
if _path.startswith(move_src_path + os.path.sep.encode()):
moved_wd = self._wd_for_path.pop(_path)
_move_to_path = _path.replace(move_src_path, inotify_event.src_path)
self._wd_for_path[_move_to_path] = moved_wd
self._path_for_wd[moved_wd] = _move_to_path
src_path = os.path.join(wd_path, name)
inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
if inotify_event.is_ignored:
# Clean up book-keeping for deleted watches.
path = self._path_for_wd.pop(wd)
if self._wd_for_path[path] == wd:
del self._wd_for_path[path]
event_list.append(inotify_event)
if self.is_recursive and inotify_event.is_directory and inotify_event.is_create:
# TODO: When a directory from another part of the
# filesystem is moved into a watched directory, this
# will not generate events for the directory tree.
# We need to coalesce IN_MOVED_TO events and those
# IN_MOVED_TO events which don't pair up with
# IN_MOVED_FROM events should be marked IN_CREATE
# instead relative to this directory.
try:
self._add_watch(src_path, self._event_mask)
except OSError:
continue
event_list.extend(_recursive_simulate(src_path))
return event_list
def _close_resources(self) -> None:
os.close(self._inotify_fd)
os.close(self._kill_r)
os.close(self._kill_w)
# Non-synchronized methods.
def _add_dir_watch(self, path: bytes, mask: int, *, recursive: bool) -> None:
"""Adds a watch (optionally recursively) for the given directory path
to monitor events specified by the mask.
:param path:
Path to monitor
:param recursive:
``True`` to monitor recursively.
:param mask:
Event bit mask.
"""
if not os.path.isdir(path):
raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path)
self._add_watch(path, mask)
if recursive:
for root, dirnames, _ in os.walk(path):
for dirname in dirnames:
full_path = os.path.join(root, dirname)
if os.path.islink(full_path):
continue
self._add_watch(full_path, mask)
def _add_watch(self, path: bytes, mask: int) -> int:
"""Adds a watch for the given path to monitor events specified by the
mask.
:param path:
Path to monitor
:param mask:
Event bit mask.
"""
wd = inotify_add_watch(self._inotify_fd, path, mask)
if wd == -1:
Inotify._raise_error()
self._wd_for_path[path] = wd
self._path_for_wd[wd] = path
return wd
@staticmethod
def _raise_error() -> None:
"""Raises errors for inotify failures."""
err = ctypes.get_errno()
if err == errno.ENOSPC:
raise OSError(errno.ENOSPC, "inotify watch limit reached")
if err == errno.EMFILE:
raise OSError(errno.EMFILE, "inotify instance limit reached")
if err != errno.EACCES:
raise OSError(err, os.strerror(err))
@staticmethod
def _parse_event_buffer(event_buffer: bytes) -> Generator[tuple[int, int, int, bytes]]:
"""Parses an event buffer of ``inotify_event`` structs returned by
inotify::
struct inotify_event {
__s32 wd; /* watch descriptor */
__u32 mask; /* watch mask */
__u32 cookie; /* cookie to synchronize two events */
__u32 len; /* length (including nulls) of name */
char name[0]; /* stub for possible name */
};
The ``cookie`` member of this struct is used to pair two related
events, for example, it pairs an IN_MOVED_FROM event with an
IN_MOVED_TO event.
"""
i = 0
while i + 16 <= len(event_buffer):
wd, mask, cookie, length = struct.unpack_from("iIII", event_buffer, i)
name = event_buffer[i + 16 : i + 16 + length].rstrip(b"\0")
i += 16 + length
yield wd, mask, cookie, name
class InotifyEvent:
    """Inotify event struct wrapper.

    :param wd:
        Watch descriptor
    :param mask:
        Event mask
    :param cookie:
        Event cookie
    :param name:
        Base name of the event source path.
    :param src_path:
        Full event source path.
    """

    def __init__(self, wd: int, mask: int, cookie: int, name: bytes, src_path: bytes) -> None:
        self._wd = wd
        self._mask = mask
        self._cookie = cookie
        self._name = name
        self._src_path = src_path

    def _has_flag(self, flag: int) -> bool:
        """Return True when *flag* is set in this event's mask."""
        return bool(self._mask & flag)

    @property
    def src_path(self) -> bytes:
        return self._src_path

    @property
    def wd(self) -> int:
        return self._wd

    @property
    def mask(self) -> int:
        return self._mask

    @property
    def cookie(self) -> int:
        return self._cookie

    @property
    def name(self) -> bytes:
        return self._name

    @property
    def is_modify(self) -> bool:
        return self._has_flag(InotifyConstants.IN_MODIFY)

    @property
    def is_close_write(self) -> bool:
        return self._has_flag(InotifyConstants.IN_CLOSE_WRITE)

    @property
    def is_close_nowrite(self) -> bool:
        return self._has_flag(InotifyConstants.IN_CLOSE_NOWRITE)

    @property
    def is_open(self) -> bool:
        return self._has_flag(InotifyConstants.IN_OPEN)

    @property
    def is_access(self) -> bool:
        return self._has_flag(InotifyConstants.IN_ACCESS)

    @property
    def is_delete(self) -> bool:
        return self._has_flag(InotifyConstants.IN_DELETE)

    @property
    def is_delete_self(self) -> bool:
        return self._has_flag(InotifyConstants.IN_DELETE_SELF)

    @property
    def is_create(self) -> bool:
        return self._has_flag(InotifyConstants.IN_CREATE)

    @property
    def is_moved_from(self) -> bool:
        return self._has_flag(InotifyConstants.IN_MOVED_FROM)

    @property
    def is_moved_to(self) -> bool:
        return self._has_flag(InotifyConstants.IN_MOVED_TO)

    @property
    def is_move(self) -> bool:
        return self._has_flag(InotifyConstants.IN_MOVE)

    @property
    def is_move_self(self) -> bool:
        return self._has_flag(InotifyConstants.IN_MOVE_SELF)

    @property
    def is_attrib(self) -> bool:
        return self._has_flag(InotifyConstants.IN_ATTRIB)

    @property
    def is_ignored(self) -> bool:
        return self._has_flag(InotifyConstants.IN_IGNORED)

    @property
    def is_directory(self) -> bool:
        # It looks like the kernel does not provide this information for
        # IN_DELETE_SELF and IN_MOVE_SELF. In this case, assume it's a dir.
        # See also: https://github.com/seb-m/pyinotify/blob/2c7e8f8/python2/pyinotify.py#L897
        return self.is_delete_self or self.is_move_self or self._has_flag(InotifyConstants.IN_ISDIR)

    @property
    def key(self) -> tuple[bytes, int, int, int, bytes]:
        # Identity tuple backing __eq__ / __ne__ / __hash__.
        return self._src_path, self._wd, self._mask, self._cookie, self._name

    def __eq__(self, other: object) -> bool:
        return self.key == other.key if isinstance(other, InotifyEvent) else NotImplemented

    def __ne__(self, other: object) -> bool:
        return self.key != other.key if isinstance(other, InotifyEvent) else NotImplemented

    def __hash__(self) -> int:
        return hash(self.key)

    @staticmethod
    def _get_mask_string(mask: int) -> str:
        """Render *mask* as a "|"-joined list of IN_* constant names."""
        excluded = {"IN_ALL_EVENTS", "IN_MOVE"}
        names = [
            attr
            for attr in dir(InotifyConstants)
            if attr.startswith("IN_") and attr not in excluded and mask & getattr(InotifyConstants, attr)
        ]
        return "|".join(names)

    def __repr__(self) -> str:
        return (
            f"<{type(self).__name__}: src_path={self.src_path!r}, wd={self.wd},"
            f" mask={self._get_mask_string(self.mask)}, cookie={self.cookie},"
            f" name={os.fsdecode(self.name)!r}>"
        )

View File

@@ -0,0 +1,655 @@
""":module: watchdog.observers.kqueue
:synopsis: ``kqueue(2)`` based emitter implementation.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: contact@tiger-222.fr (Mickaël Schoentgen)
:platforms: macOS and BSD with kqueue(2).
.. WARNING:: kqueue is a very heavyweight way to monitor file systems.
Each kqueue-detected directory modification triggers
a full directory scan. Traversing the entire directory tree
and opening file descriptors for all files will create
performance problems. We need to find a way to re-scan
only those directories which report changes and do a diff
between two sub-DirectorySnapshots perhaps.
.. ADMONITION:: About OS X performance guidelines
Quote from the `macOS File System Performance Guidelines`_:
"When you only want to track changes on a file or directory, be sure to
open it using the ``O_EVTONLY`` flag. This flag prevents the file or
directory from being marked as open or in use. This is important
if you are tracking files on a removable volume and the user tries to
unmount the volume. With this flag in place, the system knows it can
dismiss the volume. If you had opened the files or directories without
this flag, the volume would be marked as busy and would not be
unmounted."
``O_EVTONLY`` is defined as ``0x8000`` in the OS X header files.
More information here: http://www.mlsite.net/blog/?p=2312
Classes
-------
.. autoclass:: KqueueEmitter
:members:
:show-inheritance:
Collections and Utility Classes
-------------------------------
.. autoclass:: KeventDescriptor
:members:
:show-inheritance:
.. autoclass:: KeventDescriptorSet
:members:
:show-inheritance:
.. _macOS File System Performance Guidelines:
http://developer.apple.com/library/ios/#documentation/Performance/Conceptual/FileSystem/Articles/TrackingChanges.html#//apple_ref/doc/uid/20001993-CJBJFIDD
"""
# The `select` module varies between platforms.
# mypy may complain about missing module attributes depending on which platform it's running on.
# The comment below disables mypy's attribute check.
# mypy: disable-error-code="attr-defined, name-defined"
from __future__ import annotations
import contextlib
import errno
import os
import os.path
import select
import threading
from stat import S_ISDIR
from typing import TYPE_CHECKING
from watchdog.events import (
EVENT_TYPE_CREATED,
EVENT_TYPE_DELETED,
EVENT_TYPE_MOVED,
DirCreatedEvent,
DirDeletedEvent,
DirModifiedEvent,
DirMovedEvent,
FileCreatedEvent,
FileDeletedEvent,
FileModifiedEvent,
FileMovedEvent,
generate_sub_moved_events,
)
from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter
from watchdog.utils import platform
from watchdog.utils.dirsnapshot import DirectorySnapshot
if TYPE_CHECKING:
from collections.abc import Generator
from typing import Callable
from watchdog.events import FileSystemEvent
from watchdog.observers.api import EventQueue, ObservedWatch
# Maximum number of events to process per call to kqueue.control().
MAX_EVENTS = 4096

# O_EVTONLY value from the header files for OS X only.
O_EVTONLY = 0x8000

# Pre-calculated values for the kevent filter, flags, and fflags attributes.
# On Darwin, O_EVTONLY keeps the file from being marked "in use" so removable
# volumes can still be unmounted (see module docstring); elsewhere we fall
# back to a non-blocking read-only open.
WATCHDOG_OS_OPEN_FLAGS = O_EVTONLY if platform.is_darwin() else os.O_RDONLY | os.O_NONBLOCK
WATCHDOG_KQ_FILTER = select.KQ_FILTER_VNODE
WATCHDOG_KQ_EV_FLAGS = select.KQ_EV_ADD | select.KQ_EV_ENABLE | select.KQ_EV_CLEAR
WATCHDOG_KQ_FFLAGS = (
    select.KQ_NOTE_DELETE
    | select.KQ_NOTE_WRITE
    | select.KQ_NOTE_EXTEND
    | select.KQ_NOTE_ATTRIB
    | select.KQ_NOTE_LINK
    | select.KQ_NOTE_RENAME
    | select.KQ_NOTE_REVOKE
)
def absolute_path(path: bytes | str) -> bytes | str:
    """Return *path* normalized and converted to an absolute path."""
    normalized = os.path.normpath(path)
    return os.path.abspath(normalized)
# Flag tests.
def is_deleted(kev: select.kevent) -> bool:
    """Determines whether the given kevent represents deletion."""
    return bool(kev.fflags & select.KQ_NOTE_DELETE)
def is_modified(kev: select.kevent) -> bool:
    """Determines whether the given kevent represents modification."""
    # A write or an extend both count as a content modification.
    return bool(kev.fflags & (select.KQ_NOTE_EXTEND | select.KQ_NOTE_WRITE))
def is_attrib_modified(kev: select.kevent) -> bool:
    """Determines whether the given kevent represents attribute modification."""
    return bool(kev.fflags & select.KQ_NOTE_ATTRIB)
def is_renamed(kev: select.kevent) -> bool:
    """Determines whether the given kevent represents movement."""
    return bool(kev.fflags & select.KQ_NOTE_RENAME)
class KeventDescriptorSet:
    """Thread-safe kevent descriptor collection.

    Maintains three synchronized views over the registered descriptors:
    the descriptor set itself, path -> descriptor and fd -> descriptor
    lookup tables, plus a cached list of the raw kevent objects handed
    to ``kqueue.control()``.
    """

    def __init__(self) -> None:
        self._descriptors: set[KeventDescriptor] = set()
        self._descriptor_for_path: dict[bytes | str, KeventDescriptor] = {}
        self._descriptor_for_fd: dict[int, KeventDescriptor] = {}
        self._kevents: list[select.kevent] = []
        self._lock = threading.Lock()

    @property
    def kevents(self) -> list[select.kevent]:
        """List of kevents monitored."""
        with self._lock:
            # NOTE(review): this returns the internal list itself, not a
            # copy; callers must treat it as read-only.
            return self._kevents

    @property
    def paths(self) -> list[bytes | str]:
        """List of paths for which kevents have been created."""
        with self._lock:
            return list(self._descriptor_for_path.keys())

    def get_for_fd(self, fd: int) -> KeventDescriptor:
        """Given a file descriptor, returns the kevent descriptor object
        for it.

        :param fd:
            OS file descriptor.
        :type fd:
            ``int``
        :returns:
            A :class:`KeventDescriptor` object.
        :raises KeyError:
            If no descriptor is registered for *fd*.
        """
        with self._lock:
            return self._descriptor_for_fd[fd]

    def get(self, path: bytes | str) -> KeventDescriptor:
        """Obtains a :class:`KeventDescriptor` object for the specified path.

        :param path:
            Path for which the descriptor will be obtained.
        """
        with self._lock:
            path = absolute_path(path)
            return self._get(path)

    def __contains__(self, path: bytes | str) -> bool:
        """Determines whether a :class:`KeventDescriptor` has been registered
        for the specified path.

        :param path:
            Path for which the descriptor will be obtained.
        """
        with self._lock:
            path = absolute_path(path)
            return self._has_path(path)

    def add(self, path: bytes | str, *, is_directory: bool) -> None:
        """Adds a :class:`KeventDescriptor` to the collection for the given
        path. Adding an already-registered path is a no-op.

        :param path:
            The path for which a :class:`KeventDescriptor` object will be
            added.
        :param is_directory:
            ``True`` if the path refers to a directory; ``False`` otherwise.
        :type is_directory:
            ``bool``
        """
        with self._lock:
            path = absolute_path(path)
            if not self._has_path(path):
                self._add_descriptor(KeventDescriptor(path, is_directory=is_directory))

    def remove(self, path: bytes | str) -> None:
        """Removes the :class:`KeventDescriptor` object for the given path
        if it already exists. Removing an unregistered path is a no-op.

        :param path:
            Path for which the :class:`KeventDescriptor` object will be
            removed.
        """
        with self._lock:
            path = absolute_path(path)
            if self._has_path(path):
                self._remove_descriptor(self._get(path))

    def clear(self) -> None:
        """Clears the collection and closes all open descriptors."""
        with self._lock:
            for descriptor in self._descriptors:
                descriptor.close()
            self._descriptors.clear()
            self._descriptor_for_fd.clear()
            self._descriptor_for_path.clear()
            self._kevents = []

    # Thread-unsafe methods. Locking is provided at a higher level.

    def _get(self, path: bytes | str) -> KeventDescriptor:
        """Returns a kevent descriptor for a given path."""
        return self._descriptor_for_path[path]

    def _has_path(self, path: bytes | str) -> bool:
        """Determines whether a :class:`KeventDescriptor` for the specified
        path exists already in the collection.
        """
        return path in self._descriptor_for_path

    def _add_descriptor(self, descriptor: KeventDescriptor) -> None:
        """Adds a descriptor to the collection.

        :param descriptor:
            An instance of :class:`KeventDescriptor` to be added.
        """
        self._descriptors.add(descriptor)
        self._kevents.append(descriptor.kevent)
        self._descriptor_for_path[descriptor.path] = descriptor
        self._descriptor_for_fd[descriptor.fd] = descriptor

    def _remove_descriptor(self, descriptor: KeventDescriptor) -> None:
        """Removes a descriptor from the collection and closes its fd.

        :param descriptor:
            An instance of :class:`KeventDescriptor` to be removed.
        """
        self._descriptors.remove(descriptor)
        del self._descriptor_for_fd[descriptor.fd]
        del self._descriptor_for_path[descriptor.path]
        self._kevents.remove(descriptor.kevent)
        descriptor.close()
class KeventDescriptor:
    """A kevent descriptor convenience data structure to keep together:

        * kevent
        * directory status
        * path
        * file descriptor

    :param path:
        Path string for which a kevent descriptor will be created.
    :param is_directory:
        ``True`` if the path refers to a directory; ``False`` otherwise.
    :type is_directory:
        ``bool``
    """

    def __init__(self, path: bytes | str, *, is_directory: bool) -> None:
        # The normalized absolute path is stored; note the *raw* path is what
        # gets opened below.
        self._path = absolute_path(path)
        self._is_directory = is_directory
        # Opened with O_EVTONLY on Darwin so the watched entry is not marked
        # as "in use" (see WATCHDOG_OS_OPEN_FLAGS).
        self._fd = os.open(path, WATCHDOG_OS_OPEN_FLAGS)
        self._kev = select.kevent(
            self._fd,
            filter=WATCHDOG_KQ_FILTER,
            flags=WATCHDOG_KQ_EV_FLAGS,
            fflags=WATCHDOG_KQ_FFLAGS,
        )

    @property
    def fd(self) -> int:
        """OS file descriptor for the kevent descriptor."""
        return self._fd

    @property
    def path(self) -> bytes | str:
        """The path associated with the kevent descriptor."""
        return self._path

    @property
    def kevent(self) -> select.kevent:
        """The kevent object associated with the kevent descriptor."""
        return self._kev

    @property
    def is_directory(self) -> bool:
        """Determines whether the kevent descriptor refers to a directory.

        :returns:
            ``True`` or ``False``
        """
        return self._is_directory

    def close(self) -> None:
        """Closes the file descriptor associated with a kevent descriptor.

        Errors from ``os.close`` (e.g. already closed) are suppressed.
        """
        with contextlib.suppress(OSError):
            os.close(self.fd)

    @property
    def key(self) -> tuple[bytes | str, bool]:
        # Identity tuple backing __eq__ / __ne__ / __hash__.
        return (self.path, self.is_directory)

    def __eq__(self, descriptor: object) -> bool:
        if not isinstance(descriptor, KeventDescriptor):
            return NotImplemented
        return self.key == descriptor.key

    def __ne__(self, descriptor: object) -> bool:
        if not isinstance(descriptor, KeventDescriptor):
            return NotImplemented
        return self.key != descriptor.key

    def __hash__(self) -> int:
        return hash(self.key)

    def __repr__(self) -> str:
        return f"<{type(self).__name__}: path={self.path!r}, is_directory={self.is_directory}>"
class KqueueEmitter(EventEmitter):
    """kqueue(2)-based event emitter.

    .. ADMONITION:: About ``kqueue(2)`` behavior and this implementation

        ``kqueue(2)`` monitors file system events only for
        open descriptors, which means, this emitter does a lot of
        book-keeping behind the scenes to keep track of open
        descriptors for every entry in the monitored directory tree.

        This also means the number of maximum open file descriptors
        on your system must be increased **manually**.
        Usually, issuing a call to ``ulimit`` should suffice::

            ulimit -n 1024

        Ensure that you pick a number that is larger than the
        number of files you expect to be monitored.

        ``kqueue(2)`` does not provide enough information about the
        following things:

        * The destination path of a file or directory that is renamed.
        * Creation of a file or directory within a directory; in this
          case, ``kqueue(2)`` only indicates a modified event on the
          parent directory.

        Therefore, this emitter takes a snapshot of the directory
        tree when ``kqueue(2)`` detects a change on the file system
        to be able to determine the above information.

    :param event_queue:
        The event queue to fill with events.
    :param watch:
        A watch object representing the directory to monitor.
    :type watch:
        :class:`watchdog.observers.api.ObservedWatch`
    :param timeout:
        Read events blocking timeout (in seconds).
    :type timeout:
        ``float``
    :param event_filter:
        Collection of event types to emit, or None for no filtering (default).
    :type event_filter:
        Iterable[:class:`watchdog.events.FileSystemEvent`] | None
    :param stat: stat function. See ``os.stat`` for details.
    """

    def __init__(
        self,
        event_queue: EventQueue,
        watch: ObservedWatch,
        *,
        timeout: float = DEFAULT_EMITTER_TIMEOUT,
        event_filter: list[type[FileSystemEvent]] | None = None,
        stat: Callable[[str], os.stat_result] = os.stat,
    ) -> None:
        super().__init__(event_queue, watch, timeout=timeout, event_filter=event_filter)

        self._kq = select.kqueue()
        self._lock = threading.RLock()

        # A collection of KeventDescriptor.
        self._descriptors = KeventDescriptorSet()

        # Wrap the user-provided stat so that every path visited while taking
        # the initial snapshot also gets a kevent registered for it.
        def custom_stat(path: str, cls: KqueueEmitter = self) -> os.stat_result:
            stat_info = stat(path)
            cls._register_kevent(path, is_directory=S_ISDIR(stat_info.st_mode))
            return stat_info

        self._snapshot = DirectorySnapshot(watch.path, recursive=watch.is_recursive, stat=custom_stat)

    def _register_kevent(self, path: bytes | str, *, is_directory: bool) -> None:
        """Registers a kevent descriptor for the given path.

        :param path:
            Path for which a kevent descriptor will be created.
        :param is_directory:
            ``True`` if the path refers to a directory; ``False`` otherwise.
        :type is_directory:
            ``bool``
        """
        try:
            self._descriptors.add(path, is_directory=is_directory)
        except OSError as e:
            if e.errno == errno.ENOENT:
                # Probably dealing with a temporary file that was created
                # and then quickly deleted before we could open
                # a descriptor for it. Therefore, simply queue a sequence
                # of created and deleted events for the path.
                # TODO: We could simply ignore these files.
                # Locked files cause the python process to die with
                # a bus error when we handle temporary files.
                # eg. .git/index.lock when running tig operations.
                # I don't fully understand this at the moment.
                pass
            elif e.errno == errno.EOPNOTSUPP:
                # Probably dealing with the socket or special file
                # mounted through a file system that does not support
                # access to it (e.g. NFS). On BSD systems look at
                # EOPNOTSUPP in man 2 open.
                pass
            else:
                # All other errors are propagated.
                raise

    def _unregister_kevent(self, path: bytes | str) -> None:
        """Convenience function to close the kevent descriptor for a
        specified kqueue-monitored path.

        :param path:
            Path for which the kevent descriptor will be closed.
        """
        self._descriptors.remove(path)

    def queue_event(self, event: FileSystemEvent) -> None:
        """Handles queueing a single event object.

        :param event:
            An instance of :class:`watchdog.events.FileSystemEvent`
            or a subclass.
        """
        # Handles all the book keeping for queued events.
        # We do not need to fire moved/deleted events for all subitems in
        # a directory tree here, because this function is called by kqueue
        # for all those events anyway.
        EventEmitter.queue_event(self, event)
        if event.event_type == EVENT_TYPE_CREATED:
            self._register_kevent(event.src_path, is_directory=event.is_directory)
        elif event.event_type == EVENT_TYPE_MOVED:
            self._unregister_kevent(event.src_path)
            self._register_kevent(event.dest_path, is_directory=event.is_directory)
        elif event.event_type == EVENT_TYPE_DELETED:
            self._unregister_kevent(event.src_path)

    def _gen_kqueue_events(
        self, kev: select.kevent, ref_snapshot: DirectorySnapshot, new_snapshot: DirectorySnapshot
    ) -> Generator[FileSystemEvent]:
        """Generate events from the kevent list returned from the call to
        :meth:`select.kqueue.control`.

        .. NOTE:: kqueue only tells us about deletions, file modifications,
            attribute modifications. The other events, namely,
            file creation, directory modification, file rename,
            directory rename, directory creation, etc. are
            determined by comparing directory snapshots.
        """
        descriptor = self._descriptors.get_for_fd(kev.ident)
        src_path = descriptor.path

        if is_renamed(kev):
            # Kqueue does not specify the destination names for renames,
            # so we have to process these using a snapshot
            # of the directory.
            yield from self._gen_renamed_events(
                src_path,
                ref_snapshot,
                new_snapshot,
                is_directory=descriptor.is_directory,
            )
        elif is_attrib_modified(kev):
            if descriptor.is_directory:
                yield DirModifiedEvent(src_path)
            else:
                yield FileModifiedEvent(src_path)
        elif is_modified(kev):
            if descriptor.is_directory:
                if self.watch.is_recursive or self.watch.path == src_path:
                    # When a directory is modified, it may be due to
                    # sub-file/directory renames or new file/directory
                    # creation. We determine all this by comparing
                    # snapshots later.
                    yield DirModifiedEvent(src_path)
            else:
                yield FileModifiedEvent(src_path)
        elif is_deleted(kev):
            if descriptor.is_directory:
                yield DirDeletedEvent(src_path)
            else:
                yield FileDeletedEvent(src_path)

    def _parent_dir_modified(self, src_path: bytes | str) -> DirModifiedEvent:
        """Helper to generate a DirModifiedEvent on the parent of src_path."""
        return DirModifiedEvent(os.path.dirname(src_path))

    def _gen_renamed_events(
        self,
        src_path: bytes | str,
        ref_snapshot: DirectorySnapshot,
        new_snapshot: DirectorySnapshot,
        *,
        is_directory: bool,
    ) -> Generator[FileSystemEvent]:
        """Compares information from two directory snapshots (one taken before
        the rename operation and another taken right after) to determine the
        destination path of the file system object renamed, and yields
        the appropriate events to be queued.
        """
        try:
            f_inode = ref_snapshot.inode(src_path)
        except KeyError:
            # Probably caught a temporary file/directory that was renamed
            # and deleted. Fires a sequence of created and deleted events
            # for the path.
            if is_directory:
                yield DirCreatedEvent(src_path)
                yield DirDeletedEvent(src_path)
            else:
                yield FileCreatedEvent(src_path)
                yield FileDeletedEvent(src_path)
            # We don't process any further and bail out assuming
            # the event represents deletion/creation instead of movement.
            return

        dest_path = new_snapshot.path(f_inode)
        if dest_path is not None:
            dest_path = absolute_path(dest_path)
            if is_directory:
                yield DirMovedEvent(src_path, dest_path)
            else:
                yield FileMovedEvent(src_path, dest_path)
            yield self._parent_dir_modified(src_path)
            yield self._parent_dir_modified(dest_path)
            if is_directory and self.watch.is_recursive:
                # TODO: Do we need to fire moved events for the items
                # inside the directory tree? Does kqueue do this
                # all by itself? Check this and then enable this code
                # only if it doesn't already.
                # A: It doesn't. So I've enabled this block.
                yield from generate_sub_moved_events(src_path, dest_path)
        else:
            # If the new snapshot does not have an inode for the
            # old path, we haven't found the new name. Therefore,
            # we mark it as deleted and unregister the path.
            if is_directory:
                yield DirDeletedEvent(src_path)
            else:
                yield FileDeletedEvent(src_path)
            yield self._parent_dir_modified(src_path)

    def _read_events(self, timeout: float) -> list[select.kevent]:
        """Reads events from a call to the blocking
        :meth:`select.kqueue.control()` method.

        :param timeout:
            Blocking timeout for reading events.
        :type timeout:
            ``float`` (seconds)
        """
        return self._kq.control(self._descriptors.kevents, MAX_EVENTS, timeout)

    def queue_events(self, timeout: float) -> None:
        """Queues events by reading them from a call to the blocking
        :meth:`select.kqueue.control()` method.

        :param timeout:
            Blocking timeout for reading events.
        :type timeout:
            ``float`` (seconds)
        """
        with self._lock:
            try:
                event_list = self._read_events(timeout)
                # TODO: investigate why order appears to be reversed
                event_list.reverse()

                # Take a fresh snapshot of the directory and update the
                # saved snapshot.
                new_snapshot = DirectorySnapshot(self.watch.path, recursive=self.watch.is_recursive)
                ref_snapshot = self._snapshot
                self._snapshot = new_snapshot
                diff_events = new_snapshot - ref_snapshot

                # Process events
                for directory_created in diff_events.dirs_created:
                    self.queue_event(DirCreatedEvent(directory_created))
                for file_created in diff_events.files_created:
                    self.queue_event(FileCreatedEvent(file_created))
                for file_modified in diff_events.files_modified:
                    self.queue_event(FileModifiedEvent(file_modified))

                for kev in event_list:
                    for event in self._gen_kqueue_events(kev, ref_snapshot, new_snapshot):
                        self.queue_event(event)
            except OSError as e:
                # EBADF means the kqueue was closed (thread shutdown); anything
                # else is a real error.
                if e.errno != errno.EBADF:
                    raise

    def on_thread_stop(self) -> None:
        # Clean up: close every registered descriptor and the kqueue itself.
        with self._lock:
            self._descriptors.clear()
            self._kq.close()
class KqueueObserver(BaseObserver):
    """Observer thread that schedules watching directories and dispatches
    calls to event handlers.

    :param timeout:
        Observer dispatch timeout (in seconds).
    """

    def __init__(self, *, timeout: float = DEFAULT_OBSERVER_TIMEOUT) -> None:
        # All emitters created by this observer are KqueueEmitter instances.
        super().__init__(KqueueEmitter, timeout=timeout)

View File

@@ -0,0 +1,142 @@
""":module: watchdog.observers.polling
:synopsis: Polling emitter implementation.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: contact@tiger-222.fr (Mickaël Schoentgen)
Classes
-------
.. autoclass:: PollingObserver
:members:
:show-inheritance:
.. autoclass:: PollingObserverVFS
:members:
:show-inheritance:
:special-members:
"""
from __future__ import annotations
import os
import threading
from functools import partial
from typing import TYPE_CHECKING
from watchdog.events import (
DirCreatedEvent,
DirDeletedEvent,
DirModifiedEvent,
DirMovedEvent,
FileCreatedEvent,
FileDeletedEvent,
FileModifiedEvent,
FileMovedEvent,
)
from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter
from watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff, EmptyDirectorySnapshot
if TYPE_CHECKING:
from collections.abc import Iterator
from typing import Callable
from watchdog.events import FileSystemEvent
from watchdog.observers.api import EventQueue, ObservedWatch
class PollingEmitter(EventEmitter):
    """Platform-independent emitter that polls a directory to detect file
    system changes.

    On each ``queue_events`` tick a fresh :class:`DirectorySnapshot` is taken
    and diffed against the previous one; the diff is translated into watchdog
    events.  The ``stat`` and ``listdir`` callables may be swapped out (see
    :class:`PollingObserverVFS`) to poll a virtual file system.
    """

    def __init__(
        self,
        event_queue: EventQueue,
        watch: ObservedWatch,
        *,
        timeout: float = DEFAULT_EMITTER_TIMEOUT,
        event_filter: list[type[FileSystemEvent]] | None = None,
        stat: Callable[[str], os.stat_result] = os.stat,
        listdir: Callable[[str | None], Iterator[os.DirEntry]] = os.scandir,
    ) -> None:
        super().__init__(event_queue, watch, timeout=timeout, event_filter=event_filter)
        # Start from an empty snapshot so the first diff is well-defined even
        # if queue_events() somehow runs before on_thread_start().
        self._snapshot: DirectorySnapshot = EmptyDirectorySnapshot()
        self._lock = threading.Lock()
        # Bind stat/listdir once so every poll uses the same callables.
        self._take_snapshot: Callable[[], DirectorySnapshot] = lambda: DirectorySnapshot(
            self.watch.path,
            recursive=self.watch.is_recursive,
            stat=stat,
            listdir=listdir,
        )

    def on_thread_start(self) -> None:
        # Baseline snapshot: only changes made after this point are reported.
        self._snapshot = self._take_snapshot()

    def queue_events(self, timeout: float) -> None:
        """Poll once (after waiting *timeout* seconds) and queue the diff."""
        # We don't want to hit the disk continuously.
        # timeout behaves like an interval for polling emitters.
        if self.stopped_event.wait(timeout):
            return

        with self._lock:
            if not self.should_keep_running():
                return

            # Get event diff between fresh snapshot and previous snapshot.
            # Update snapshot.
            try:
                new_snapshot = self._take_snapshot()
            except OSError:
                # The watched directory itself is gone: report it and stop.
                self.queue_event(DirDeletedEvent(self.watch.path))
                self.stop()
                return

            events = DirectorySnapshotDiff(self._snapshot, new_snapshot)
            self._snapshot = new_snapshot

            # Files.
            for src_path in events.files_deleted:
                self.queue_event(FileDeletedEvent(src_path))
            for src_path in events.files_modified:
                self.queue_event(FileModifiedEvent(src_path))
            for src_path in events.files_created:
                self.queue_event(FileCreatedEvent(src_path))
            for src_path, dest_path in events.files_moved:
                self.queue_event(FileMovedEvent(src_path, dest_path))

            # Directories.
            for src_path in events.dirs_deleted:
                self.queue_event(DirDeletedEvent(src_path))
            for src_path in events.dirs_modified:
                self.queue_event(DirModifiedEvent(src_path))
            for src_path in events.dirs_created:
                self.queue_event(DirCreatedEvent(src_path))
            for src_path, dest_path in events.dirs_moved:
                self.queue_event(DirMovedEvent(src_path, dest_path))
class PollingObserver(BaseObserver):
    """Observer that detects changes by periodically re-scanning the watched
    directories via :class:`PollingEmitter`; works on any platform and any
    file system, at the cost of disk I/O per poll.
    """

    def __init__(self, *, timeout: float = DEFAULT_OBSERVER_TIMEOUT) -> None:
        super().__init__(PollingEmitter, timeout=timeout)
class PollingObserverVFS(BaseObserver):
    """Polling observer decoupled from the real file system: callers supply
    their own ``stat`` and ``listdir`` implementations, so any virtual file
    system can be watched.
    """

    def __init__(
        self,
        stat: Callable[[str], os.stat_result],
        listdir: Callable[[str | None], Iterator[os.DirEntry]],
        *,
        polling_interval: int = 1,
    ) -> None:
        """:param stat: stat function. See ``os.stat`` for details.
        :param listdir: listdir function. See ``os.scandir`` for details.
        :type polling_interval: int
        :param polling_interval: interval in seconds between polling the file system.
        """
        super().__init__(
            partial(PollingEmitter, stat=stat, listdir=listdir),  # type: ignore[arg-type]
            timeout=polling_interval,
        )

View File

@@ -0,0 +1,109 @@
from __future__ import annotations
import os.path
import platform
import threading
from typing import TYPE_CHECKING
from watchdog.events import (
DirCreatedEvent,
DirDeletedEvent,
DirModifiedEvent,
DirMovedEvent,
FileCreatedEvent,
FileDeletedEvent,
FileModifiedEvent,
FileMovedEvent,
generate_sub_created_events,
generate_sub_moved_events,
)
from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter
from watchdog.observers.winapi import close_directory_handle, get_directory_handle, read_events
if TYPE_CHECKING:
from ctypes.wintypes import HANDLE
from watchdog.events import FileSystemEvent
from watchdog.observers.api import EventQueue, ObservedWatch
from watchdog.observers.winapi import WinAPINativeEvent
class WindowsApiEmitter(EventEmitter):
    """Windows API-based emitter that uses ReadDirectoryChangesW
    to detect file system changes for a watch.
    """

    def __init__(
        self,
        event_queue: EventQueue,
        watch: ObservedWatch,
        *,
        timeout: float = DEFAULT_EMITTER_TIMEOUT,
        event_filter: list[type[FileSystemEvent]] | None = None,
    ) -> None:
        super().__init__(event_queue, watch, timeout=timeout, event_filter=event_filter)
        self._lock = threading.Lock()
        # Win32 HANDLE of the watched directory; opened in on_thread_start().
        self._whandle: HANDLE | None = None

    def on_thread_start(self) -> None:
        self._whandle = get_directory_handle(self.watch.path)

    if platform.python_implementation() == "PyPy":

        def start(self) -> None:
            """PyPy needs some time before receiving events, see #792."""
            from time import sleep

            super().start()
            sleep(0.01)

    def on_thread_stop(self) -> None:
        if self._whandle:
            # Cancels any pending ReadDirectoryChangesW call, then closes.
            close_directory_handle(self._whandle)

    def _read_events(self) -> list[WinAPINativeEvent]:
        # No handle yet (thread not started) means no events.
        if not self._whandle:
            return []
        return read_events(self._whandle, self.watch.path, recursive=self.watch.is_recursive)

    def queue_events(self, timeout: float) -> None:
        # _read_events() blocks until changes arrive or the handle is closed.
        winapi_events = self._read_events()
        with self._lock:
            # A rename arrives as an OLD/NEW pair of records; remember the
            # OLD path until its NEW counterpart shows up.
            last_renamed_src_path = ""
            for winapi_event in winapi_events:
                src_path = os.path.join(self.watch.path, winapi_event.src_path)

                if winapi_event.is_renamed_old:
                    last_renamed_src_path = src_path
                elif winapi_event.is_renamed_new:
                    dest_path = src_path
                    src_path = last_renamed_src_path
                    if os.path.isdir(dest_path):
                        self.queue_event(DirMovedEvent(src_path, dest_path))
                        if self.watch.is_recursive:
                            # Emit synthetic moved events for everything
                            # below the moved directory.
                            for sub_moved_event in generate_sub_moved_events(src_path, dest_path):
                                self.queue_event(sub_moved_event)
                    else:
                        self.queue_event(FileMovedEvent(src_path, dest_path))
                elif winapi_event.is_modified:
                    self.queue_event((DirModifiedEvent if os.path.isdir(src_path) else FileModifiedEvent)(src_path))
                elif winapi_event.is_added:
                    isdir = os.path.isdir(src_path)
                    self.queue_event((DirCreatedEvent if isdir else FileCreatedEvent)(src_path))
                    if isdir and self.watch.is_recursive:
                        for sub_created_event in generate_sub_created_events(src_path):
                            self.queue_event(sub_created_event)
                elif winapi_event.is_removed:
                    # NOTE(review): the path no longer exists at this point,
                    # so file vs directory cannot be distinguished; a
                    # FileDeletedEvent is emitted either way.
                    self.queue_event(FileDeletedEvent(src_path))
                elif winapi_event.is_removed_self:
                    # The watched directory itself was deleted: report it and
                    # stop this emitter.
                    self.queue_event(DirDeletedEvent(self.watch.path))
                    self.stop()
class WindowsApiObserver(BaseObserver):
    """Observer backed by :class:`WindowsApiEmitter`: schedules directory
    watches and dispatches the resulting events to registered handlers.
    """

    def __init__(self, *, timeout: float = DEFAULT_OBSERVER_TIMEOUT) -> None:
        super().__init__(WindowsApiEmitter, timeout=timeout)

View File

@@ -0,0 +1,382 @@
""":module: watchdog.observers.winapi
:synopsis: Windows API-Python interface (removes dependency on ``pywin32``).
:author: theller@ctypes.org (Thomas Heller)
:author: will@willmcgugan.com (Will McGugan)
:author: ryan@rfk.id.au (Ryan Kelly)
:author: yesudeep@gmail.com (Yesudeep Mangalapilly)
:author: thomas.amland@gmail.com (Thomas Amland)
:author: contact@tiger-222.fr (Mickaël Schoentgen)
:platforms: windows
"""
from __future__ import annotations
import contextlib
import ctypes
from ctypes.wintypes import BOOL, DWORD, HANDLE, LPCWSTR, LPVOID, LPWSTR
from dataclasses import dataclass
from functools import reduce
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any
# Invalid handle value.
INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value

# File notification constants (ReadDirectoryChangesW dwNotifyFilter bits).
FILE_NOTIFY_CHANGE_FILE_NAME = 0x01
FILE_NOTIFY_CHANGE_DIR_NAME = 0x02
FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x04
FILE_NOTIFY_CHANGE_SIZE = 0x08
FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010
FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020
FILE_NOTIFY_CHANGE_CREATION = 0x040
FILE_NOTIFY_CHANGE_SECURITY = 0x0100

# CreateFileW flags, desired access and share modes.
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
FILE_FLAG_OVERLAPPED = 0x40000000
FILE_LIST_DIRECTORY = 1
FILE_SHARE_READ = 0x01
FILE_SHARE_WRITE = 0x02
FILE_SHARE_DELETE = 0x04
OPEN_EXISTING = 3
# GetFinalPathNameByHandleW flag: return an NT-namespace path.
VOLUME_NAME_NT = 0x02

# File action constants (FILE_NOTIFY_INFORMATION.Action values).
FILE_ACTION_CREATED = 1
FILE_ACTION_DELETED = 2
FILE_ACTION_MODIFIED = 3
FILE_ACTION_RENAMED_OLD_NAME = 4
FILE_ACTION_RENAMED_NEW_NAME = 5
# NOTE(review): 0xFFFE/0xFFFF look like watchdog-specific sentinels rather
# than standard Win32 action codes — confirm against the Win32 headers.
FILE_ACTION_DELETED_SELF = 0xFFFE
FILE_ACTION_OVERFLOW = 0xFFFF

# Aliases
FILE_ACTION_ADDED = FILE_ACTION_CREATED
FILE_ACTION_REMOVED = FILE_ACTION_DELETED
FILE_ACTION_REMOVED_SELF = FILE_ACTION_DELETED_SELF

THREAD_TERMINATE = 0x0001

# IO waiting constants.
WAIT_ABANDONED = 0x00000080
WAIT_IO_COMPLETION = 0x000000C0
WAIT_OBJECT_0 = 0x00000000
WAIT_TIMEOUT = 0x00000102

# Error codes
ERROR_OPERATION_ABORTED = 995
class OVERLAPPED(ctypes.Structure):
    """ctypes mirror of the Win32 OVERLAPPED structure used for
    asynchronous I/O calls.
    """

    _fields_ = (
        ("Internal", LPVOID),
        ("InternalHigh", LPVOID),
        ("Offset", DWORD),
        ("OffsetHigh", DWORD),
        ("Pointer", LPVOID),
        ("hEvent", HANDLE),
    )
def _errcheck_bool(value: Any | None, func: Any, args: Any) -> Any:
if not value:
raise ctypes.WinError() # type: ignore[attr-defined]
return args
def _errcheck_handle(value: Any | None, func: Any, args: Any) -> Any:
    """ctypes errcheck hook: NULL or INVALID_HANDLE_VALUE results mean the
    call failed, so surface the last Windows error as an OSError.
    """
    if not value or value == INVALID_HANDLE_VALUE:
        raise ctypes.WinError()  # type: ignore[attr-defined]
    return args
def _errcheck_dword(value: Any | None, func: Any, args: Any) -> Any:
if value == 0xFFFFFFFF:
raise ctypes.WinError() # type: ignore[attr-defined]
return args
# ctypes bindings for the kernel32 functions used in this module.  Each
# binding pins restype/argtypes and, where appropriate, installs an errcheck
# hook so Win32 failures surface as OSError (via ctypes.WinError) instead of
# silently returning bad values.
kernel32 = ctypes.WinDLL("kernel32")  # type: ignore[attr-defined]

ReadDirectoryChangesW = kernel32.ReadDirectoryChangesW
ReadDirectoryChangesW.restype = BOOL
ReadDirectoryChangesW.errcheck = _errcheck_bool
ReadDirectoryChangesW.argtypes = (
    HANDLE,  # hDirectory
    LPVOID,  # lpBuffer
    DWORD,  # nBufferLength
    BOOL,  # bWatchSubtree
    DWORD,  # dwNotifyFilter
    ctypes.POINTER(DWORD),  # lpBytesReturned
    ctypes.POINTER(OVERLAPPED),  # lpOverlapped
    LPVOID,  # FileIOCompletionRoutine # lpCompletionRoutine
)

CreateFileW = kernel32.CreateFileW
CreateFileW.restype = HANDLE
CreateFileW.errcheck = _errcheck_handle
CreateFileW.argtypes = (
    LPCWSTR,  # lpFileName
    DWORD,  # dwDesiredAccess
    DWORD,  # dwShareMode
    LPVOID,  # lpSecurityAttributes
    DWORD,  # dwCreationDisposition
    DWORD,  # dwFlagsAndAttributes
    HANDLE,  # hTemplateFile
)

# NOTE: CloseHandle intentionally has no errcheck hook here; callers ignore
# close failures (see close_directory_handle).
CloseHandle = kernel32.CloseHandle
CloseHandle.restype = BOOL
CloseHandle.argtypes = (HANDLE,)  # hObject

CancelIoEx = kernel32.CancelIoEx
CancelIoEx.restype = BOOL
CancelIoEx.errcheck = _errcheck_bool
CancelIoEx.argtypes = (
    HANDLE,  # hObject
    ctypes.POINTER(OVERLAPPED),  # lpOverlapped
)

CreateEvent = kernel32.CreateEventW
CreateEvent.restype = HANDLE
CreateEvent.errcheck = _errcheck_handle
CreateEvent.argtypes = (
    LPVOID,  # lpEventAttributes
    BOOL,  # bManualReset
    BOOL,  # bInitialState
    LPCWSTR,  # lpName
)

SetEvent = kernel32.SetEvent
SetEvent.restype = BOOL
SetEvent.errcheck = _errcheck_bool
SetEvent.argtypes = (HANDLE,)  # hEvent

WaitForSingleObjectEx = kernel32.WaitForSingleObjectEx
WaitForSingleObjectEx.restype = DWORD
WaitForSingleObjectEx.errcheck = _errcheck_dword
WaitForSingleObjectEx.argtypes = (
    HANDLE,  # hObject
    DWORD,  # dwMilliseconds
    BOOL,  # bAlertable
)

CreateIoCompletionPort = kernel32.CreateIoCompletionPort
CreateIoCompletionPort.restype = HANDLE
CreateIoCompletionPort.errcheck = _errcheck_handle
CreateIoCompletionPort.argtypes = (
    HANDLE,  # FileHandle
    HANDLE,  # ExistingCompletionPort
    LPVOID,  # CompletionKey
    DWORD,  # NumberOfConcurrentThreads
)

GetQueuedCompletionStatus = kernel32.GetQueuedCompletionStatus
GetQueuedCompletionStatus.restype = BOOL
GetQueuedCompletionStatus.errcheck = _errcheck_bool
GetQueuedCompletionStatus.argtypes = (
    HANDLE,  # CompletionPort
    LPVOID,  # lpNumberOfBytesTransferred
    LPVOID,  # lpCompletionKey
    ctypes.POINTER(OVERLAPPED),  # lpOverlapped
    DWORD,  # dwMilliseconds
)

PostQueuedCompletionStatus = kernel32.PostQueuedCompletionStatus
PostQueuedCompletionStatus.restype = BOOL
PostQueuedCompletionStatus.errcheck = _errcheck_bool
PostQueuedCompletionStatus.argtypes = (
    HANDLE,  # CompletionPort
    DWORD,  # lpNumberOfBytesTransferred
    DWORD,  # lpCompletionKey
    ctypes.POINTER(OVERLAPPED),  # lpOverlapped
)

GetFinalPathNameByHandleW = kernel32.GetFinalPathNameByHandleW
GetFinalPathNameByHandleW.restype = DWORD
GetFinalPathNameByHandleW.errcheck = _errcheck_dword
GetFinalPathNameByHandleW.argtypes = (
    HANDLE,  # hFile
    LPWSTR,  # lpszFilePath
    DWORD,  # cchFilePath
    DWORD,  # DWORD
)
class FileNotifyInformation(ctypes.Structure):
    """ctypes mirror of the Win32 FILE_NOTIFY_INFORMATION record.

    ``FileName`` is declared as a single char but is in reality a
    variable-length UTF-16 string of ``FileNameLength`` bytes; callers read
    it with ``ctypes.string_at`` using the field offset (see
    ``_parse_event_buffer``).
    """

    _fields_ = (
        ("NextEntryOffset", DWORD),
        ("Action", DWORD),
        ("FileNameLength", DWORD),
        ("FileName", (ctypes.c_char * 1)),
    )


# Pointer type used to reinterpret a raw byte buffer as a record chain.
LPFNI = ctypes.POINTER(FileNotifyInformation)
# We don't need to recalculate these flags every time a call is made to
# the win32 API functions.
WATCHDOG_FILE_FLAGS = FILE_FLAG_BACKUP_SEMANTICS

# Share flags: allow other processes to read, write and delete entries
# while we hold the directory handle open.
# (Was previously built with functools.reduce + a lambda; a plain `|`
# chain computes the same value with less indirection.)
WATCHDOG_FILE_SHARE_FLAGS = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE

# Every change class we ask ReadDirectoryChangesW to report.
WATCHDOG_FILE_NOTIFY_FLAGS = (
    FILE_NOTIFY_CHANGE_FILE_NAME
    | FILE_NOTIFY_CHANGE_DIR_NAME
    | FILE_NOTIFY_CHANGE_ATTRIBUTES
    | FILE_NOTIFY_CHANGE_SIZE
    | FILE_NOTIFY_CHANGE_LAST_WRITE
    | FILE_NOTIFY_CHANGE_SECURITY
    | FILE_NOTIFY_CHANGE_LAST_ACCESS
    | FILE_NOTIFY_CHANGE_CREATION
)

# ReadDirectoryChangesW buffer length.
# To handle cases with lot of changes, this seems the highest safest value we can use.
# Note: it will fail with ERROR_INVALID_PARAMETER when it is greater than 64 KB and
#       the application is monitoring a directory over the network.
#       This is due to a packet size limitation with the underlying file sharing protocols.
#       https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw#remarks
BUFFER_SIZE = 64000

# Buffer length for path-related stuff.
# Introduced to keep the old behavior when we bumped BUFFER_SIZE from 2048 to 64000 in v1.0.0.
PATH_BUFFER_SIZE = 2048
def _parse_event_buffer(read_buffer: bytes, n_bytes: int) -> list[tuple[int, str]]:
    """Decode a raw ReadDirectoryChangesW result into (action, path) tuples.

    Walks the chain of FILE_NOTIFY_INFORMATION records linked by
    ``NextEntryOffset`` (0 marks the last record).  File names are UTF-16
    with an explicit byte length; paths are relative to the watched
    directory.
    """
    results = []
    while n_bytes > 0:
        # Reinterpret the head of the buffer as a FILE_NOTIFY_INFORMATION.
        fni = ctypes.cast(read_buffer, LPFNI)[0]  # type: ignore[arg-type]
        # FileName is variable-length: read FileNameLength bytes at the
        # field's offset instead of trusting the declared c_char * 1.
        ptr = ctypes.addressof(fni) + FileNotifyInformation.FileName.offset
        filename = ctypes.string_at(ptr, fni.FileNameLength)
        results.append((fni.Action, filename.decode("utf-16")))
        num_to_skip = fni.NextEntryOffset
        if num_to_skip <= 0:
            break
        read_buffer = read_buffer[num_to_skip:]
        n_bytes -= num_to_skip  # num_to_skip is long. n_bytes should be long too.
    return results
def _is_observed_path_deleted(handle: HANDLE, path: str) -> bool:
    """Return True when *handle* no longer resolves to the observed *path*.

    NOTE(review): GetFinalPathNameByHandleW is called with VOLUME_NAME_NT,
    which yields an NT-namespace path; the check relies on the returned value
    differing from the observed path once the directory was moved to the
    trash bin or deleted — confirm the comparison semantics for all callers.
    """
    # Comparison of observed path and actual path, returned by
    # GetFinalPathNameByHandleW. If directory moved to the trash bin, or
    # deleted, actual path will not be equal to observed path.
    buff = ctypes.create_unicode_buffer(PATH_BUFFER_SIZE)
    GetFinalPathNameByHandleW(handle, buff, PATH_BUFFER_SIZE, VOLUME_NAME_NT)
    return buff.value != path
def _generate_observed_path_deleted_event() -> tuple[bytes, int]:
    """Build a synthetic (buffer, size) result announcing that the watched
    directory itself was deleted.

    The buffer is shaped like a ReadDirectoryChangesW result holding a single
    FILE_ACTION_DELETED_SELF record, so the regular parsing path can consume
    it unchanged.

    NOTE(review): FileNameLength is set to ``len(path)`` (character count of
    the unicode buffer, including the trailing NUL) rather than a UTF-16 byte
    length, and the name bytes are UTF-8 encoded — _parse_event_buffer still
    decodes the result as ".", but the field values look inconsistent;
    confirm this is intentional.
    """
    # Create synthetic event for notify that observed directory is deleted
    path = ctypes.create_unicode_buffer(".")
    event = FileNotifyInformation(0, FILE_ACTION_DELETED_SELF, len(path), path.value.encode("utf-8"))
    event_size = ctypes.sizeof(event)
    buff = ctypes.create_string_buffer(PATH_BUFFER_SIZE)
    ctypes.memmove(buff, ctypes.addressof(event), event_size)
    return buff.raw, event_size
def get_directory_handle(path: str) -> HANDLE:
    """Returns a Windows handle to the specified directory path.

    Opened with FILE_LIST_DIRECTORY access, full sharing, and
    FILE_FLAG_BACKUP_SEMANTICS (which CreateFileW requires in order to open
    a directory rather than a file).
    """
    return CreateFileW(
        path,
        FILE_LIST_DIRECTORY,
        WATCHDOG_FILE_SHARE_FLAGS,
        None,
        OPEN_EXISTING,
        WATCHDOG_FILE_FLAGS,
        None,
    )
def close_directory_handle(handle: HANDLE) -> None:
    """Close a directory handle, first cancelling any pending I/O so that a
    thread blocked inside ReadDirectoryChangesW returns.

    Best-effort: if cancellation fails, the handle is still closed and any
    error from the fallback close is swallowed.
    """
    try:
        CancelIoEx(handle, None)  # force ReadDirectoryChangesW to return
        CloseHandle(handle)
    except OSError:
        with contextlib.suppress(Exception):
            CloseHandle(handle)
def read_directory_changes(handle: HANDLE, path: str, *, recursive: bool) -> tuple[bytes, int]:
    """Read changes to the directory using the specified directory handle.

    Blocks until changes arrive, then returns the raw event buffer together
    with the number of valid bytes in it.  A cancelled call (handle being
    closed) yields ``(buffer, 0)``; deletion of the watched directory itself
    is turned into a synthetic FILE_ACTION_DELETED_SELF buffer.

    https://timgolden.me.uk/pywin32-docs/win32file__ReadDirectoryChangesW_meth.html
    """
    event_buffer = ctypes.create_string_buffer(BUFFER_SIZE)
    nbytes = DWORD()
    try:
        ReadDirectoryChangesW(
            handle,
            ctypes.byref(event_buffer),
            len(event_buffer),
            recursive,
            WATCHDOG_FILE_NOTIFY_FLAGS,
            ctypes.byref(nbytes),
            None,
            None,
        )
    except OSError as e:
        # close_directory_handle() cancels the pending read; report "no
        # events" rather than an error.
        if e.winerror == ERROR_OPERATION_ABORTED:  # type: ignore[attr-defined]
            return event_buffer.raw, 0

        # Handle the case when the root path is deleted
        if _is_observed_path_deleted(handle, path):
            return _generate_observed_path_deleted_event()

        raise

    return event_buffer.raw, int(nbytes.value)
@dataclass(unsafe_hash=True)
class WinAPINativeEvent:
    """One raw (action, relative path) record parsed from a
    ReadDirectoryChangesW buffer.

    The ``is_*`` properties map the numeric ``action`` code onto the
    FILE_ACTION_* constants defined above.
    """

    # FILE_ACTION_* code reported for this record.
    action: int
    # Path relative to the watched directory.
    src_path: str

    @property
    def is_added(self) -> bool:
        return self.action == FILE_ACTION_CREATED

    @property
    def is_removed(self) -> bool:
        return self.action == FILE_ACTION_REMOVED

    @property
    def is_modified(self) -> bool:
        return self.action == FILE_ACTION_MODIFIED

    @property
    def is_renamed_old(self) -> bool:
        # First half of a rename pair: the old name.
        return self.action == FILE_ACTION_RENAMED_OLD_NAME

    @property
    def is_renamed_new(self) -> bool:
        # Second half of a rename pair: the new name.
        return self.action == FILE_ACTION_RENAMED_NEW_NAME

    @property
    def is_removed_self(self) -> bool:
        # The watched directory itself was deleted (synthetic action code).
        return self.action == FILE_ACTION_REMOVED_SELF
def read_events(handle: HANDLE, path: str, *, recursive: bool) -> list[WinAPINativeEvent]:
    """Read pending changes for *handle* and wrap each (action, path) record
    in a :class:`WinAPINativeEvent`.
    """
    raw_buffer, byte_count = read_directory_changes(handle, path, recursive=recursive)
    return [WinAPINativeEvent(action, name) for action, name in _parse_event_buffer(raw_buffer, byte_count)]

View File

@@ -0,0 +1,293 @@
""":module: watchdog.tricks
:synopsis: Utility event handlers.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: contact@tiger-222.fr (Mickaël Schoentgen)
Classes
-------
.. autoclass:: Trick
:members:
:show-inheritance:
.. autoclass:: LoggerTrick
:members:
:show-inheritance:
.. autoclass:: ShellCommandTrick
:members:
:show-inheritance:
.. autoclass:: AutoRestartTrick
:members:
:show-inheritance:
"""
from __future__ import annotations
import contextlib
import functools
import logging
import os
import signal
import subprocess
import threading
import time
from watchdog.events import EVENT_TYPE_CLOSED_NO_WRITE, EVENT_TYPE_OPENED, FileSystemEvent, PatternMatchingEventHandler
from watchdog.utils import echo, platform
from watchdog.utils.event_debouncer import EventDebouncer
from watchdog.utils.process_watcher import ProcessWatcher
logger = logging.getLogger(__name__)
# Decorator used on event handlers below; presumably echo.echo reports each
# decorated call, redirected here to logger.info — verify against
# watchdog.utils.echo.
echo_events = functools.partial(echo.echo, write=lambda msg: logger.info(msg))
class Trick(PatternMatchingEventHandler):
    """Your tricks should subclass this class."""

    def __repr__(self) -> str:
        return f"<{type(self).__name__}>"

    @classmethod
    def generate_yaml(cls) -> str:
        """Return an example ``tricks.yaml`` stanza for this trick class,
        used as a configuration template by the watchmedo CLI.
        """
        return f"""- {cls.__module__}.{cls.__name__}:
  args:
  - argument1
  - argument2
  kwargs:
    patterns:
    - "*.py"
    - "*.js"
    ignore_patterns:
    - "version.py"
    ignore_directories: false
"""
class LoggerTrick(Trick):
    """A simple trick that only logs events."""

    @echo_events
    def on_any_event(self, event: FileSystemEvent) -> None:
        # No behavior of its own: the @echo_events decorator does the logging.
        pass
class ShellCommandTrick(Trick):
    """Executes shell commands in response to matched events.

    The command may reference ``${watch_src_path}``, ``${watch_dest_path}``,
    ``${watch_event_type}`` and ``${watch_object}``; placeholders are
    substituted per event via :class:`string.Template`.
    """

    def __init__(
        self,
        shell_command: str,
        *,
        patterns: list[str] | None = None,
        ignore_patterns: list[str] | None = None,
        ignore_directories: bool = False,
        wait_for_process: bool = False,
        drop_during_process: bool = False,
    ):
        """:param shell_command: command template; ``None`` selects a default ``echo``.
        :param patterns: patterns that event paths must match.
        :param ignore_patterns: patterns that exclude event paths.
        :param ignore_directories: ``True`` to ignore directory events.
        :param wait_for_process: block until the spawned process terminates.
        :param drop_during_process: drop events arriving while a process is still running.
        """
        super().__init__(
            patterns=patterns,
            ignore_patterns=ignore_patterns,
            ignore_directories=ignore_directories,
        )
        self.shell_command = shell_command
        self.wait_for_process = wait_for_process
        self.drop_during_process = drop_during_process

        # Most recently spawned process, if any.
        self.process: subprocess.Popen[bytes] | None = None
        # Watchers for processes we did not wait on; kept alive until the
        # process terminates (see the discard callback below).
        self._process_watchers: set[ProcessWatcher] = set()

    def on_any_event(self, event: FileSystemEvent) -> None:
        if event.event_type in {EVENT_TYPE_OPENED, EVENT_TYPE_CLOSED_NO_WRITE}:
            # FIXME: see issue #949, and find a way to better handle that scenario
            return

        from string import Template

        if self.drop_during_process and self.is_process_running():
            return

        object_type = "directory" if event.is_directory else "file"
        context = {
            "watch_src_path": event.src_path,
            "watch_dest_path": "",
            "watch_event_type": event.event_type,
            "watch_object": object_type,
        }

        if self.shell_command is None:
            if hasattr(event, "dest_path"):
                # Bug fix: the default template reads ${watch_dest_path}, but
                # the destination used to be stored under an unused
                # "dest_path" key, leaving the substituted value empty.
                context["watch_dest_path"] = event.dest_path
                command = 'echo "${watch_event_type} ${watch_object} from ${watch_src_path} to ${watch_dest_path}"'
            else:
                command = 'echo "${watch_event_type} ${watch_object} ${watch_src_path}"'
        else:
            if hasattr(event, "dest_path"):
                context["watch_dest_path"] = event.dest_path
            command = self.shell_command

        command = Template(command).safe_substitute(**context)
        # shell=True is intentional here: the command is operator-supplied
        # configuration (not untrusted event data beyond the paths).
        self.process = subprocess.Popen(command, shell=True)
        if self.wait_for_process:
            self.process.wait()
        else:
            process_watcher = ProcessWatcher(self.process, None)
            self._process_watchers.add(process_watcher)
            process_watcher.process_termination_callback = functools.partial(
                self._process_watchers.discard,
                process_watcher,
            )
            process_watcher.start()

    def is_process_running(self) -> bool:
        """Return True while a spawned command (or its watcher) is still alive."""
        return bool(self._process_watchers or (self.process is not None and self.process.poll() is None))
class AutoRestartTrick(Trick):
    """Starts a long-running subprocess and restarts it on matched events.

    The command parameter is a list of command arguments, such as
    `['bin/myserver', '-c', 'etc/myconfig.ini']`.

    Call `start()` after creating the Trick. Call `stop()` when stopping
    the process.
    """

    def __init__(
        self,
        command: list[str],
        *,
        patterns: list[str] | None = None,
        ignore_patterns: list[str] | None = None,
        ignore_directories: bool = False,
        stop_signal: signal.Signals | int = signal.SIGINT,
        kill_after: int = 10,
        debounce_interval_seconds: int = 0,
        restart_on_command_exit: bool = True,
    ):
        """:param command: argv list of the process to supervise.
        :param stop_signal: signal sent to stop the process gracefully.
        :param kill_after: seconds to wait after *stop_signal* before force-killing.
        :param debounce_interval_seconds: coalesce events over this window before restarting.
        :param restart_on_command_exit: restart automatically when the process exits by itself.
        :raises ValueError: if *kill_after* or *debounce_interval_seconds* is negative.
        """
        if kill_after < 0:
            error = "kill_after must be non-negative."
            raise ValueError(error)
        if debounce_interval_seconds < 0:
            error = "debounce_interval_seconds must be non-negative."
            raise ValueError(error)

        super().__init__(
            patterns=patterns,
            ignore_patterns=ignore_patterns,
            ignore_directories=ignore_directories,
        )

        self.command = command
        # Normalize to a plain int so kill_process() can use it directly.
        self.stop_signal = stop_signal.value if isinstance(stop_signal, signal.Signals) else stop_signal
        self.kill_after = kill_after
        self.debounce_interval_seconds = debounce_interval_seconds
        self.restart_on_command_exit = restart_on_command_exit

        self.process: subprocess.Popen[bytes] | None = None
        self.process_watcher: ProcessWatcher | None = None
        self.event_debouncer: EventDebouncer | None = None
        # Number of restarts performed so far (informational).
        self.restart_count = 0

        # Guards against re-entrant stop/restart from multiple threads.
        self._is_process_stopping = False
        self._is_trick_stopping = False
        self._stopping_lock = threading.RLock()

    def start(self) -> None:
        """Start the supervised process (and the debouncer, if configured)."""
        if self.debounce_interval_seconds:
            self.event_debouncer = EventDebouncer(
                debounce_interval_seconds=self.debounce_interval_seconds,
                events_callback=lambda events: self._restart_process(),
            )
            self.event_debouncer.start()
        self._start_process()

    def stop(self) -> None:
        """Stop the supervised process and all helper threads."""
        # Ensure the body of the function is only run once.
        with self._stopping_lock:
            if self._is_trick_stopping:
                return
            self._is_trick_stopping = True

        # Capture the watcher before _stop_process() clears the attribute,
        # so we can still join it below.
        process_watcher = self.process_watcher
        if self.event_debouncer is not None:
            self.event_debouncer.stop()
        self._stop_process()

        # Don't leak threads: Wait for background threads to stop.
        if self.event_debouncer is not None:
            self.event_debouncer.join()
        if process_watcher is not None:
            process_watcher.join()

    def _start_process(self) -> None:
        # Spawn the command; no-op once the trick is shutting down.
        if self._is_trick_stopping:
            return

        # windows doesn't have setsid
        self.process = subprocess.Popen(self.command, preexec_fn=getattr(os, "setsid", None))
        if self.restart_on_command_exit:
            self.process_watcher = ProcessWatcher(self.process, self._restart_process)
            self.process_watcher.start()

    def _stop_process(self) -> None:
        # Signal the process, wait up to kill_after seconds, then force-kill.
        # Ensure the body of the function is not run in parallel in different threads.
        with self._stopping_lock:
            if self._is_process_stopping:
                return
            self._is_process_stopping = True

        try:
            if self.process_watcher is not None:
                self.process_watcher.stop()
                self.process_watcher = None

            if self.process is not None:
                try:
                    kill_process(self.process.pid, self.stop_signal)
                except OSError:
                    # Process is already gone
                    pass
                else:
                    kill_time = time.time() + self.kill_after
                    while time.time() < kill_time:
                        if self.process.poll() is not None:
                            break
                        time.sleep(0.25)
                    else:
                        # The process did not exit within kill_after seconds:
                        # force-kill it; OSError means it died in the meantime.
                        with contextlib.suppress(OSError):
                            kill_process(self.process.pid, 9)
                self.process = None
        finally:
            self._is_process_stopping = False

    @echo_events
    def on_any_event(self, event: FileSystemEvent) -> None:
        if event.event_type in {EVENT_TYPE_OPENED, EVENT_TYPE_CLOSED_NO_WRITE}:
            # FIXME: see issue #949, and find a way to better handle that scenario
            return
        if self.event_debouncer is not None:
            self.event_debouncer.handle_event(event)
        else:
            self._restart_process()

    def _restart_process(self) -> None:
        # Stop-then-start cycle; no-op once the trick is shutting down.
        if self._is_trick_stopping:
            return
        self._stop_process()
        self._start_process()
        self.restart_count += 1
if platform.is_windows():

    def kill_process(pid: int, stop_signal: int) -> None:
        """Windows: signal only *pid* itself (POSIX process groups are unavailable)."""
        os.kill(pid, stop_signal)

else:

    def kill_process(pid: int, stop_signal: int) -> None:
        """POSIX: signal the whole process group so child processes are stopped too."""
        os.killpg(os.getpgid(pid), stop_signal)

View File

@@ -0,0 +1,122 @@
""":module: watchdog.utils
:synopsis: Utility classes and functions.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: contact@tiger-222.fr (Mickaël Schoentgen)
Classes
-------
.. autoclass:: BaseThread
:members:
:show-inheritance:
:inherited-members:
"""
from __future__ import annotations
import sys
import threading
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from types import ModuleType
from watchdog.tricks import Trick
class UnsupportedLibcError(Exception):
    """Raised when the host libc lacks functionality required by an observer."""

    pass


class WatchdogShutdownError(Exception):
    """Semantic exception used to signal an external shutdown event."""
class BaseThread(threading.Thread):
    """Convenience class for creating stoppable threads.

    Subclasses override :meth:`on_thread_start` / :meth:`on_thread_stop`
    rather than :meth:`start` / :meth:`stop`; :attr:`stopped_event` signals
    the run loop to exit.
    """

    def __init__(self) -> None:
        threading.Thread.__init__(self)
        # `Thread.daemon` has been a plain attribute on every supported
        # Python; the old hasattr()/setDaemon() fallback was dead code and
        # setDaemon() is deprecated since Python 3.10.
        self.daemon = True
        self._stopped_event = threading.Event()

    @property
    def stopped_event(self) -> threading.Event:
        """Event that is set once :meth:`stop` has been called."""
        return self._stopped_event

    def should_keep_running(self) -> bool:
        """Determines whether the thread should continue running."""
        return not self._stopped_event.is_set()

    def on_thread_stop(self) -> None:
        """Override this method instead of :meth:`stop()`.
        :meth:`stop()` calls this method.

        This method is called immediately after the thread is signaled to stop.
        """

    def stop(self) -> None:
        """Signals the thread to stop."""
        self._stopped_event.set()
        self.on_thread_stop()

    def on_thread_start(self) -> None:
        """Override this method instead of :meth:`start()`. :meth:`start()`
        calls this method.

        This method is called right before this thread is started and this
        object's run() method is invoked.
        """

    def start(self) -> None:
        self.on_thread_start()
        threading.Thread.start(self)
def load_module(module_name: str) -> ModuleType:
    """Import *module_name* (a dotted path) and return the module object."""
    try:
        __import__(module_name)
    except ImportError as exc:
        # Re-raise with a uniform message, keeping the original as the cause.
        msg = f"No module named {module_name}"
        raise ImportError(msg) from exc
    # __import__ returns the top-level package; the fully-qualified module
    # is looked up in sys.modules instead.
    return sys.modules[module_name]
def load_class(dotted_path: str) -> type[Trick]:
    """Resolve a dotted path ``module.name.ClassName`` to a class object.

    The last component is the class name; everything before it must be an
    importable module path.

    Notes
    -----
    The target module must be reachable on the Python path.

    Examples
    --------
    - module.name.ClassName  # Provided module.name is in the Python path.
    - module.ClassName       # Provided module is in the Python path.

    What won't work:

    - ClassName
    - modle.name.ClassName   # Typo in module name.
    - module.name.ClasNam    # Typo in classname.

    :raises ValueError: if the path contains no module component.
    :raises AttributeError: if the module lacks the named class.
    """
    parts = dotted_path.split(".")
    if len(parts) <= 1:
        error = f"Dotted module path {dotted_path} must contain a module name and a classname"
        raise ValueError(error)

    klass_name, module_name = parts[-1], ".".join(parts[:-1])
    module = load_module(module_name)
    if hasattr(module, klass_name):
        return getattr(module, klass_name)

    error = f"Module {module_name} does not have class attribute {klass_name}"
    raise AttributeError(error)

View File

@@ -0,0 +1,90 @@
"""Utility collections or "bricks".
:module: watchdog.utils.bricks
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: lalinsky@gmail.com (Lukáš Lalinský)
:author: python@rcn.com (Raymond Hettinger)
:author: contact@tiger-222.fr (Mickaël Schoentgen)
Classes
=======
.. autoclass:: OrderedSetQueue
:members:
:show-inheritance:
:inherited-members:
.. autoclass:: OrderedSet
"""
from __future__ import annotations
import queue
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any
class SkipRepeatsQueue(queue.Queue):
    """Thread-safe queue variant that silently drops a ``put`` of the item
    most recently enqueued.

    Only the *last* enqueued item is compared against: putting A, A, B
    stores A, B — but putting A, B, A stores all three.  Items must be
    immutable and hashable, with value-based ``__eq__``/``__ne__`` and a
    matching ``__hash__`` (a tuple or frozen dataclass works out of the
    box).

    Locking is inherited from :class:`queue.Queue`; only the storage
    primitives (``_init``/``_put``/``_get``) are overridden, so the class
    stays thread-safe.
    """

    def _init(self, maxsize: int) -> None:
        super()._init(maxsize)
        # Most recently enqueued item, or None once the queue has drained
        # past it; consulted by put() for the duplicate check.
        self._last_item = None

    def put(self, item: Any, block: bool = True, timeout: float | None = None) -> None:  # noqa: FBT001,FBT002
        """This method will be used by `eventlet`, when enabled, so we cannot use force proper keyword-only
        arguments nor touch the signature. Also, the `timeout` argument will be ignored in that case.
        """
        if self._last_item is not None and item == self._last_item:
            # Duplicate of the last enqueued item: drop it.
            return
        super().put(item, block, timeout)

    def _put(self, item: Any) -> None:
        super()._put(item)
        self._last_item = item

    def _get(self) -> Any:
        item = super()._get()
        # Once the last-enqueued item leaves the queue, forget it so an
        # identical item may be enqueued again.
        if item is self._last_item:
            self._last_item = None
        return item

View File

@@ -0,0 +1,77 @@
""":module: watchdog.utils.delayed_queue
:author: thomas.amland@gmail.com (Thomas Amland)
:author: contact@tiger-222.fr (Mickaël Schoentgen)
"""
from __future__ import annotations
import threading
import time
from collections import deque
from typing import Callable, Generic, TypeVar
T = TypeVar("T")


class DelayedQueue(Generic[T]):
    """Queue whose items may be held back for ``delay`` seconds before they
    become retrievable; pending items can be removed early via :meth:`remove`.
    """

    def __init__(self, delay: float) -> None:
        self.delay_sec = delay
        self._lock = threading.Lock()
        self._not_empty = threading.Condition(self._lock)
        # Each entry is (element, insertion time, whether to apply the delay).
        self._queue: deque[tuple[T, float, bool]] = deque()
        self._closed = False

    def put(self, element: T, *, delay: bool = False) -> None:
        """Add element to queue."""
        # `with` releases the lock even if append/notify raises; the previous
        # bare acquire()/release() pair could leak the lock on an exception.
        with self._not_empty:
            self._queue.append((element, time.time(), delay))
            self._not_empty.notify()

    def close(self) -> None:
        """Close queue, indicating no more items will be added."""
        self._closed = True
        # Interrupt the blocking _not_empty.wait() call in get
        with self._not_empty:
            self._not_empty.notify()

    def get(self) -> T | None:
        """Remove and return an element from the queue, or return ``None``
        once the queue has been closed.

        (Fixed docstring: no "Closed exception" is raised — the original
        comment contradicted the code, which returns None.)
        """
        while True:
            # Wait for an element to be added to the queue.
            with self._not_empty:
                while len(self._queue) == 0 and not self._closed:
                    self._not_empty.wait()
                if self._closed:
                    return None
                head, insert_time, delay = self._queue[0]

            # Wait for the delay if required — outside the lock, so that
            # put()/remove() are not blocked while we sleep.
            if delay:
                time_left = insert_time + self.delay_sec - time.time()
                while time_left > 0:
                    time.sleep(time_left)
                    time_left = insert_time + self.delay_sec - time.time()

            # Return the element only if it is still at the front of the
            # queue (remove() may have taken it while we slept).
            with self._lock:
                if len(self._queue) > 0 and self._queue[0][0] is head:
                    self._queue.popleft()
                    return head

    def remove(self, predicate: Callable[[T], bool]) -> T | None:
        """Remove and return the first items for which predicate is True,
        ignoring delay.
        """
        with self._lock:
            for i, (elem, *_) in enumerate(self._queue):
                if predicate(elem):
                    del self._queue[i]
                    return elem
        return None

View File

@@ -0,0 +1,424 @@
""":module: watchdog.utils.dirsnapshot
:synopsis: Directory snapshots and comparison.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: contact@tiger-222.fr (Mickaël Schoentgen)
.. ADMONITION:: Where are the moved events? They "disappeared"
This implementation does not take partition boundaries
into consideration. It will only work when the directory
tree is entirely on the same file system. More specifically,
any part of the code that depends on inode numbers can
break if partition boundaries are crossed. In these cases,
the snapshot diff will represent file/directory movement as
created and deleted events.
Classes
-------
.. autoclass:: DirectorySnapshot
:members:
:show-inheritance:
.. autoclass:: DirectorySnapshotDiff
:members:
:show-inheritance:
.. autoclass:: EmptyDirectorySnapshot
:members:
:show-inheritance:
"""
from __future__ import annotations
import contextlib
import errno
import os
from stat import S_ISDIR
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Iterator
from typing import Any, Callable
class DirectorySnapshotDiff:
    """Compares two directory snapshots and creates an object that represents
    the difference between the two snapshots.

    :param ref:
        The reference directory snapshot.
    :type ref:
        :class:`DirectorySnapshot`
    :param snapshot:
        The directory snapshot which will be compared
        with the reference snapshot.
    :type snapshot:
        :class:`DirectorySnapshot`
    :param ignore_device:
        A boolean indicating whether to ignore the device id or not.
        By default, a file may be uniquely identified by a combination of its first
        inode and its device id. The problem is that the device id may (or may not)
        change between system boots. This problem would cause the DirectorySnapshotDiff
        to think a file has been deleted and created again but it would be the
        exact same file.
        Set to True only if you are sure you will always use the same device.
    :type ignore_device:
        :class:`bool`
    """

    def __init__(
        self,
        ref: DirectorySnapshot,
        snapshot: DirectorySnapshot,
        *,
        ignore_device: bool = False,
    ) -> None:
        # Paths present only in the new snapshot / only in the reference.
        created = snapshot.paths - ref.paths
        deleted = ref.paths - snapshot.paths

        if ignore_device:
            # Identify a file by its inode number alone.
            def get_inode(directory: DirectorySnapshot, full_path: bytes | str) -> int | tuple[int, int]:
                return directory.inode(full_path)[0]

        else:
            # Identify a file by the (inode, device id) pair.
            def get_inode(directory: DirectorySnapshot, full_path: bytes | str) -> int | tuple[int, int]:
                return directory.inode(full_path)

        # check that all unchanged paths have the same inode
        # (a changed inode means the path was replaced: count it as both
        # a deletion and a creation at the same path)
        for path in ref.paths & snapshot.paths:
            if get_inode(ref, path) != get_inode(snapshot, path):
                created.add(path)
                deleted.add(path)

        # find moved paths: a "deleted" path whose inode reappears elsewhere
        # in the new snapshot (or a "created" path whose inode existed in the
        # reference) is actually a move, not a delete/create pair.
        moved: set[tuple[bytes | str, bytes | str]] = set()
        for path in set(deleted):
            inode = ref.inode(path)
            new_path = snapshot.path(inode)
            if new_path:
                # file is not deleted but moved
                deleted.remove(path)
                moved.add((path, new_path))

        for path in set(created):
            inode = snapshot.inode(path)
            old_path = ref.path(inode)
            if old_path:
                created.remove(path)
                moved.add((old_path, path))

        # find modified paths
        # first check paths that have not moved: same identity but a change
        # in mtime or size means the content was modified
        modified: set[bytes | str] = set()
        for path in ref.paths & snapshot.paths:
            if get_inode(ref, path) == get_inode(snapshot, path) and (
                ref.mtime(path) != snapshot.mtime(path) or ref.size(path) != snapshot.size(path)
            ):
                modified.add(path)

        # then check moved paths: compare old location's metadata against the
        # new location's (recorded under the *old* path)
        for old_path, new_path in moved:
            if ref.mtime(old_path) != snapshot.mtime(new_path) or ref.size(old_path) != snapshot.size(new_path):
                modified.add(old_path)

        # Partition every result set into directories vs. regular files.
        self._dirs_created = [path for path in created if snapshot.isdir(path)]
        self._dirs_deleted = [path for path in deleted if ref.isdir(path)]
        self._dirs_modified = [path for path in modified if ref.isdir(path)]
        self._dirs_moved = [(frm, to) for (frm, to) in moved if ref.isdir(frm)]

        self._files_created = list(created - set(self._dirs_created))
        self._files_deleted = list(deleted - set(self._dirs_deleted))
        self._files_modified = list(modified - set(self._dirs_modified))
        self._files_moved = list(moved - set(self._dirs_moved))

    def __str__(self) -> str:
        return self.__repr__()

    def __repr__(self) -> str:
        fmt = (
            "<{0} files(created={1}, deleted={2}, modified={3}, moved={4}),"
            " folders(created={5}, deleted={6}, modified={7}, moved={8})>"
        )
        return fmt.format(
            type(self).__name__,
            len(self._files_created),
            len(self._files_deleted),
            len(self._files_modified),
            len(self._files_moved),
            len(self._dirs_created),
            len(self._dirs_deleted),
            len(self._dirs_modified),
            len(self._dirs_moved),
        )

    @property
    def files_created(self) -> list[bytes | str]:
        """List of files that were created."""
        return self._files_created

    @property
    def files_deleted(self) -> list[bytes | str]:
        """List of files that were deleted."""
        return self._files_deleted

    @property
    def files_modified(self) -> list[bytes | str]:
        """List of files that were modified."""
        return self._files_modified

    @property
    def files_moved(self) -> list[tuple[bytes | str, bytes | str]]:
        """List of files that were moved.

        Each event is a two-tuple the first item of which is the path
        that has been renamed to the second item in the tuple.
        """
        return self._files_moved

    @property
    def dirs_modified(self) -> list[bytes | str]:
        """List of directories that were modified."""
        return self._dirs_modified

    @property
    def dirs_moved(self) -> list[tuple[bytes | str, bytes | str]]:
        """List of directories that were moved.

        Each event is a two-tuple the first item of which is the path
        that has been renamed to the second item in the tuple.
        """
        return self._dirs_moved

    @property
    def dirs_deleted(self) -> list[bytes | str]:
        """List of directories that were deleted."""
        return self._dirs_deleted

    @property
    def dirs_created(self) -> list[bytes | str]:
        """List of directories that were created."""
        return self._dirs_created
class ContextManager:
    """Context manager that creates two directory snapshots and a
    diff object that represents the difference between the two snapshots.

    On exit, the result is available as the ``diff`` attribute
    (a :class:`DirectorySnapshotDiff`).

    :param path:
        The directory path for which a snapshot should be taken.
    :type path:
        ``str``
    :param recursive:
        ``True`` if the entire directory tree should be included in the
        snapshot; ``False`` otherwise.
    :type recursive:
        ``bool``
    :param stat:
        Use custom stat function that returns a stat structure for path.
        Currently only st_dev, st_ino, st_mode and st_mtime are needed.

        A function taking a ``path`` as argument which will be called
        for every entry in the directory tree.
    :param listdir:
        Use custom listdir function. For details see ``os.scandir``.
    :param ignore_device:
        A boolean indicating whether to ignore the device id or not.
        By default, a file may be uniquely identified by a combination of its first
        inode and its device id. The problem is that the device id may (or may not)
        change between system boots. This problem would cause the DirectorySnapshotDiff
        to think a file has been deleted and created again but it would be the
        exact same file.
        Set to True only if you are sure you will always use the same device.
    :type ignore_device:
        :class:`bool`
    """

    def __init__(
        self,
        path: str,
        *,
        recursive: bool = True,
        stat: Callable[[str], os.stat_result] = os.stat,
        listdir: Callable[[str | None], Iterator[os.DirEntry]] = os.scandir,
        ignore_device: bool = False,
    ) -> None:
        self.path = path
        self.recursive = recursive
        self.stat = stat
        self.listdir = listdir
        self.ignore_device = ignore_device

    def __enter__(self) -> None:
        """Take the "before" snapshot."""
        self.pre_snapshot = self.get_snapshot()

    def __exit__(self, *args: object) -> None:
        """Take the "after" snapshot and compute ``self.diff``."""
        self.post_snapshot = self.get_snapshot()
        self.diff = DirectorySnapshotDiff(
            self.pre_snapshot,
            self.post_snapshot,
            ignore_device=self.ignore_device,
        )

    def get_snapshot(self) -> DirectorySnapshot:
        """Build a snapshot of ``self.path`` with the configured options."""
        return DirectorySnapshot(
            path=self.path,
            recursive=self.recursive,
            stat=self.stat,
            listdir=self.listdir,
        )
class DirectorySnapshot:
    """A snapshot of stat information of files in a directory.

    :param path:
        The directory path for which a snapshot should be taken.
    :type path:
        ``str``
    :param recursive:
        ``True`` if the entire directory tree should be included in the
        snapshot; ``False`` otherwise.
    :type recursive:
        ``bool``
    :param stat:
        Use custom stat function that returns a stat structure for path.
        Currently only st_dev, st_ino, st_mode and st_mtime are needed.

        A function taking a ``path`` as argument which will be called
        for every entry in the directory tree.
    :param listdir:
        Use custom listdir function. For details see ``os.scandir``.
    """

    def __init__(
        self,
        path: str,
        *,
        recursive: bool = True,
        stat: Callable[[str], os.stat_result] = os.stat,
        listdir: Callable[[str | None], Iterator[os.DirEntry]] = os.scandir,
    ) -> None:
        self.recursive = recursive
        self.stat = stat
        self.listdir = listdir

        # path -> stat_result, and (inode, device) -> path reverse index.
        self._stat_info: dict[bytes | str, os.stat_result] = {}
        self._inode_to_path: dict[tuple[int, int], bytes | str] = {}

        # The root itself is part of the snapshot.
        st = self.stat(path)
        self._stat_info[path] = st
        self._inode_to_path[(st.st_ino, st.st_dev)] = path

        for p, st in self.walk(path):
            i = (st.st_ino, st.st_dev)
            self._inode_to_path[i] = p
            self._stat_info[p] = st

    def walk(self, root: str) -> Iterator[tuple[str, os.stat_result]]:
        """Yield ``(path, stat_result)`` pairs for entries under *root*,
        recursing into subdirectories when ``self.recursive`` is set.
        Entries that vanish mid-scan are silently skipped.
        """
        try:
            paths = [os.path.join(root, entry.name) for entry in self.listdir(root)]
        except OSError as e:
            # Directory may have been deleted between finding it in the directory
            # list of its parent and trying to delete its contents. If this
            # happens we treat it as empty. Likewise if the directory was replaced
            # with a file of the same name (less likely, but possible).
            if e.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                return
            else:
                raise

        entries = []
        for p in paths:
            with contextlib.suppress(OSError):
                # An entry that disappears between listdir() and stat()
                # is neither recorded nor yielded.
                entry = (p, self.stat(p))
                entries.append(entry)
                yield entry

        if self.recursive:
            for path, st in entries:
                with contextlib.suppress(PermissionError):
                    if S_ISDIR(st.st_mode):
                        yield from self.walk(path)

    @property
    def paths(self) -> set[bytes | str]:
        """Set of file/directory paths in the snapshot."""
        return set(self._stat_info.keys())

    def path(self, uid: tuple[int, int]) -> bytes | str | None:
        """Returns path for id. None if id is unknown to this snapshot."""
        return self._inode_to_path.get(uid)

    def inode(self, path: bytes | str) -> tuple[int, int]:
        """Returns an id for path: the ``(st_ino, st_dev)`` pair."""
        st = self._stat_info[path]
        return (st.st_ino, st.st_dev)

    def isdir(self, path: bytes | str) -> bool:
        """Whether *path* was a directory when the snapshot was taken."""
        return S_ISDIR(self._stat_info[path].st_mode)

    def mtime(self, path: bytes | str) -> float:
        """Modification time of *path* as recorded in the snapshot."""
        return self._stat_info[path].st_mtime

    def size(self, path: bytes | str) -> int:
        """Size in bytes of *path* as recorded in the snapshot."""
        return self._stat_info[path].st_size

    def stat_info(self, path: bytes | str) -> os.stat_result:
        """Returns a stat information object for the specified path from
        the snapshot.

        Attached information is subject to change. Do not use unless
        you specify `stat` in constructor. Use :func:`inode`, :func:`mtime`,
        :func:`isdir` instead.

        :param path:
            The path for which stat information should be obtained
            from a snapshot.
        """
        return self._stat_info[path]

    def __sub__(self, previous_dirsnap: DirectorySnapshot) -> DirectorySnapshotDiff:
        """Allow subtracting a DirectorySnapshot object instance from
        another.

        :returns:
            A :class:`DirectorySnapshotDiff` object.
        """
        return DirectorySnapshotDiff(previous_dirsnap, self)

    def __str__(self) -> str:
        return self.__repr__()

    def __repr__(self) -> str:
        return str(self._stat_info)
class EmptyDirectorySnapshot(DirectorySnapshot):
    """A snapshot that is always empty.

    Diffing a real :class:`DirectorySnapshot` against this one reports every
    file and folder in the directory as created.
    """

    def __init__(self) -> None:
        # Deliberately do not call DirectorySnapshot.__init__: nothing is
        # scanned, the snapshot simply has no entries.
        pass

    @staticmethod
    def path(_: Any) -> None:
        """Mock up method to return the path of the received inode. As the snapshot
        is intended to be empty, it always returns None.

        :returns:
            None.
        """
        return None

    @property
    def paths(self) -> set:
        """Mock up method to return a set of file/directory paths in the snapshot. As
        the snapshot is intended to be empty, it always returns an empty set.

        :returns:
            An empty set.
        """
        return set()

View File

@@ -0,0 +1,68 @@
# echo.py: Tracing function calls using Python decorators.
#
# Written by Thomas Guest <tag@wordaligned.org>
# Please see http://wordaligned.org/articles/echo
#
# Place into the public domain.
"""Echo calls made to functions in a module.
"Echoing" a function call means printing out the name of the function
and the values of its arguments before making the call (which is more
commonly referred to as "tracing", but Python already has a trace module).
Alternatively, echo.echo can be used to decorate functions. Calls to the
decorated function will be echoed.
Example:
-------
@echo.echo
def my_function(args):
pass
"""
from __future__ import annotations
import functools
import sys
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Callable
def format_arg_value(arg_val: tuple[str, Any]) -> str:
    """Return a ``name=value`` string for a ``(name, value)`` pair.

    The value is rendered with :func:`repr`, so strings keep their quotes.
    Note: the second tuple element may be any object, hence ``Any`` (the
    previous annotation ``tuple[Any, ...]`` wrongly claimed it was a tuple).
    """
    arg, val = arg_val
    return f"{arg}={val!r}"
def echo(fn: Callable, write: Callable[[str], int | None] = sys.stdout.write) -> Callable:
    """Echo calls to a function.

    Returns a decorated version of the input function which "echoes" calls
    made to it by writing out the function's name and the arguments it was
    called with.

    :param fn:
        The function to wrap.
    :param write:
        Sink for the trace lines (defaults to ``sys.stdout.write``).
    """
    # Unpack function's arg count, arg names, arg defaults
    code = fn.__code__
    argcount = code.co_argcount
    argnames = code.co_varnames[:argcount]
    # Fixed annotation: a variable-length tuple is ``tuple[Any, ...]``,
    # not ``tuple[Any]`` (which means a 1-tuple).
    fn_defaults: tuple[Any, ...] = fn.__defaults__ or ()
    # Defaults align with the *last* len(fn_defaults) positional parameters.
    argdefs = dict(list(zip(argnames[-len(fn_defaults) :], fn_defaults)))

    @functools.wraps(fn)
    def wrapped(*v: Any, **k: Any) -> Any:
        # Collect function arguments by chaining together positional,
        # defaulted, extra positional and keyword arguments.
        positional = list(map(format_arg_value, list(zip(argnames, v))))
        defaulted = [format_arg_value((a, argdefs[a])) for a in argnames[len(v) :] if a not in k]
        nameless = list(map(repr, v[argcount:]))
        keyword = list(map(format_arg_value, list(k.items())))
        args = positional + defaulted + nameless + keyword
        write(f"{fn.__name__}({', '.join(args)})\n")
        # Fixed annotation: wrapped returns fn's result, not a Callable.
        return fn(*v, **k)

    return wrapped

View File

@@ -0,0 +1,66 @@
from __future__ import annotations
import logging
import threading
from typing import TYPE_CHECKING
from watchdog.utils import BaseThread
if TYPE_CHECKING:
from typing import Callable
from watchdog.events import FileSystemEvent
logger = logging.getLogger(__name__)
class EventDebouncer(BaseThread):
    """Background thread for debouncing event handling.

    When an event is received, wait until the configured debounce interval
    passes before calling the callback. If additional events are received
    before the interval passes, reset the timer and keep waiting. When the
    debouncing interval passes, the callback will be called with a list of
    events in the order in which they were received.
    """

    def __init__(
        self,
        debounce_interval_seconds: int,
        events_callback: Callable[[list[FileSystemEvent]], None],
    ) -> None:
        super().__init__()
        self.debounce_interval_seconds = debounce_interval_seconds
        self.events_callback = events_callback
        # Events buffered since the last callback; guarded by self._cond.
        self._events: list[FileSystemEvent] = []
        self._cond = threading.Condition()

    def handle_event(self, event: FileSystemEvent) -> None:
        """Buffer *event* and wake the worker thread."""
        with self._cond:
            self._events.append(event)
            self._cond.notify()

    def stop(self) -> None:
        """Request shutdown and wake the worker so it can exit promptly."""
        with self._cond:
            super().stop()
            self._cond.notify()

    def run(self) -> None:
        # NOTE(review): events still buffered when stop() arrives are
        # discarded by the `break` below — confirm this is intended.
        with self._cond:
            while True:
                # Wait for first event (or shutdown).
                self._cond.wait()

                if self.debounce_interval_seconds:
                    # Wait for additional events (or shutdown) until the debounce interval passes.
                    while self.should_keep_running():
                        if not self._cond.wait(timeout=self.debounce_interval_seconds):
                            # Timed out: the interval elapsed with no new events.
                            break

                if not self.should_keep_running():
                    break

                # Hand off the current batch and start a fresh buffer.
                events = self._events
                self._events = []
                self.events_callback(events)

View File

@@ -0,0 +1,99 @@
""":module: watchdog.utils.patterns
:synopsis: Common wildcard searching/filtering functionality for files.
:author: boris.staletic@gmail.com (Boris Staletic)
:author: yesudeep@gmail.com (Yesudeep Mangalapilly)
:author: contact@tiger-222.fr (Mickaël Schoentgen)
"""
from __future__ import annotations
# Non-pure path objects are only allowed on their respective OS's.
# Thus, these utilities require "pure" path objects that don't access the filesystem.
# Since pathlib doesn't have a `case_sensitive` parameter, we have to approximate it
# by converting input paths to `PureWindowsPath` and `PurePosixPath` where:
# - `PureWindowsPath` is always case-insensitive.
# - `PurePosixPath` is always case-sensitive.
# Reference: https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.match
from pathlib import PurePosixPath, PureWindowsPath
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Iterator
def _match_path(
raw_path: str,
included_patterns: set[str],
excluded_patterns: set[str],
*,
case_sensitive: bool,
) -> bool:
"""Internal function same as :func:`match_path` but does not check arguments."""
path: PurePosixPath | PureWindowsPath
if case_sensitive:
path = PurePosixPath(raw_path)
else:
included_patterns = {pattern.lower() for pattern in included_patterns}
excluded_patterns = {pattern.lower() for pattern in excluded_patterns}
path = PureWindowsPath(raw_path)
common_patterns = included_patterns & excluded_patterns
if common_patterns:
error = f"conflicting patterns `{common_patterns}` included and excluded"
raise ValueError(error)
return any(path.match(p) for p in included_patterns) and not any(path.match(p) for p in excluded_patterns)
def filter_paths(
    paths: list[str],
    *,
    included_patterns: list[str] | None = None,
    excluded_patterns: list[str] | None = None,
    case_sensitive: bool = True,
) -> Iterator[str]:
    """Filters from a set of paths based on acceptable patterns and
    ignorable patterns.

    :param paths:
        A list of path names that will be filtered based on matching and
        ignored patterns.
    :param included_patterns:
        Allow filenames matching wildcard patterns specified in this list.
        If no pattern list is specified, ["*"] is used as the default pattern,
        which matches all files.
    :param excluded_patterns:
        Ignores filenames matching wildcard patterns specified in this list.
        If no pattern list is specified, no files are ignored.
    :param case_sensitive:
        ``True`` if matching should be case-sensitive; ``False`` otherwise.
    :returns:
        A list of pathnames that matched the allowable patterns and passed
        through the ignored patterns.
    """
    # Normalize the optional pattern lists to sets, applying the defaults.
    included = {"*"} if included_patterns is None else set(included_patterns)
    excluded = set() if excluded_patterns is None else set(excluded_patterns)

    for candidate in paths:
        if _match_path(candidate, included, excluded, case_sensitive=case_sensitive):
            yield candidate
def match_any_paths(
    paths: list[str],
    *,
    included_patterns: list[str] | None = None,
    excluded_patterns: list[str] | None = None,
    case_sensitive: bool = True,
) -> bool:
    """Matches from a set of paths based on acceptable patterns and
    ignorable patterns.

    See ``filter_paths()`` for signature details.
    """
    # filter_paths is lazy, so any() stops at the first surviving path.
    survivors = filter_paths(
        paths,
        included_patterns=included_patterns,
        excluded_patterns=excluded_patterns,
        case_sensitive=case_sensitive,
    )
    return any(survivors)

View File

@@ -0,0 +1,44 @@
from __future__ import annotations
import sys
PLATFORM_WINDOWS = "windows"
PLATFORM_LINUX = "linux"
PLATFORM_BSD = "bsd"
PLATFORM_DARWIN = "darwin"
PLATFORM_UNKNOWN = "unknown"


def get_platform_name() -> str:
    """Classify ``sys.platform`` into one of the ``PLATFORM_*`` constants."""
    # Checked in order; "win" must precede the BSD prefixes so that e.g.
    # "win32" is never misclassified.
    prefix_table: tuple[tuple[tuple[str, ...], str], ...] = (
        (("win",), PLATFORM_WINDOWS),
        (("darwin",), PLATFORM_DARWIN),
        (("linux",), PLATFORM_LINUX),
        (("dragonfly", "freebsd", "netbsd", "openbsd", "bsd"), PLATFORM_BSD),
    )
    for prefixes, name in prefix_table:
        if sys.platform.startswith(prefixes):
            return name
    return PLATFORM_UNKNOWN
# Cached platform name, resolved once at import time.
__platform__ = get_platform_name()
def is_linux() -> bool:
    """Whether the running platform was detected as Linux."""
    return __platform__ == PLATFORM_LINUX
def is_bsd() -> bool:
    """Whether the running platform was detected as a BSD variant."""
    return __platform__ == PLATFORM_BSD
def is_darwin() -> bool:
    """Whether the running platform was detected as macOS."""
    return __platform__ == PLATFORM_DARWIN
def is_windows() -> bool:
    """Whether the running platform was detected as Windows."""
    return __platform__ == PLATFORM_WINDOWS

View File

@@ -0,0 +1,30 @@
from __future__ import annotations
import logging
from typing import TYPE_CHECKING
from watchdog.utils import BaseThread
if TYPE_CHECKING:
import subprocess
from typing import Callable
logger = logging.getLogger(__name__)
class ProcessWatcher(BaseThread):
    """Thread that waits for a subprocess to exit and then invokes an
    optional callback — unless the watcher itself was stopped first.
    """

    def __init__(self, popen_obj: subprocess.Popen, process_termination_callback: Callable[[], None] | None) -> None:
        super().__init__()
        self.popen_obj = popen_obj
        self.process_termination_callback = process_termination_callback

    def run(self) -> None:
        # Poll the child every 100 ms; bail out early if stop() was called.
        while self.popen_obj.poll() is None:
            if self.stopped_event.wait(timeout=0.1):
                return

        try:
            # Only fire the callback for natural termination, not when the
            # watcher itself was asked to stop.
            if not self.stopped_event.is_set() and self.process_termination_callback:
                self.process_termination_callback()
        except Exception:
            logger.exception("Error calling process termination callback")

View File

@@ -0,0 +1,11 @@
from __future__ import annotations
# When updating this version number, please update the
# ``docs/source/global.rst.inc`` file as well.
VERSION_MAJOR = 6
VERSION_MINOR = 0
VERSION_BUILD = 0
# Machine-readable version: (major, minor, build).
VERSION_INFO = (VERSION_MAJOR, VERSION_MINOR, VERSION_BUILD)
# Human-readable version string, e.g. "6.0.0".
VERSION_STRING = f"{VERSION_MAJOR}.{VERSION_MINOR}.{VERSION_BUILD}"
# NOTE(review): __version__ is the tuple, not the string — unconventional
# (most packages expose a string); confirm consumers expect a tuple before changing.
__version__ = VERSION_INFO

View File

@@ -0,0 +1,807 @@
""":module: watchdog.watchmedo
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: contact@tiger-222.fr (Mickaël Schoentgen)
:synopsis: ``watchmedo`` shell script utility.
"""
from __future__ import annotations
import errno
import logging
import os
import os.path
import sys
import time
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from io import StringIO
from textwrap import dedent
from typing import TYPE_CHECKING, Any
from watchdog.utils import WatchdogShutdownError, load_class, platform
from watchdog.version import VERSION_STRING
if TYPE_CHECKING:
from argparse import Namespace, _SubParsersAction
from typing import Callable
from watchdog.events import FileSystemEventHandler
from watchdog.observers import ObserverType
from watchdog.observers.api import BaseObserver
# Default CLI logging: INFO to stderr via the root logger.
logging.basicConfig(level=logging.INFO)

# Top-level keys recognized in a tricks YAML configuration file.
CONFIG_KEY_TRICKS = "tricks"
CONFIG_KEY_PYTHON_PATH = "python-path"
class HelpFormatter(RawDescriptionHelpFormatter):
    """A nicer help formatter.

    Help for arguments can be indented and contain new lines.
    It will be de-dented and arguments in the help
    will be separated by a blank line for better readability.

    Source: https://github.com/httpie/httpie/blob/2423f89/httpie/cli/argparser.py#L31
    """

    def __init__(self, *args: Any, max_help_position: int = 6, **kwargs: Any) -> None:
        # Tighten the indentation column used for argument help text.
        kwargs["max_help_position"] = max_help_position
        super().__init__(*args, **kwargs)

    def __repr__(self) -> str:
        return f"<{type(self).__name__}>"

    def _split_lines(self, text: str, width: int) -> list[str]:
        # De-dent the help text and force a trailing blank line so that
        # consecutive argument helps are visually separated.
        cleaned = dedent(text).strip() + "\n\n"
        return cleaned.splitlines()
# Shown at the bottom of ``watchmedo --help``.
epilog = """\
Copyright 2018-2024 Mickaël Schoentgen & contributors
Copyright 2014-2018 Thomas Amland & contributors
Copyright 2012-2014 Google, Inc.
Copyright 2011-2012 Yesudeep Mangalapilly
Licensed under the terms of the Apache license, version 2.0. Please see
LICENSE in the source code for more information."""

# Top-level argument parser; sub-commands are registered via @command.
cli = ArgumentParser(epilog=epilog, formatter_class=HelpFormatter)
cli.add_argument("--version", action="version", version=VERSION_STRING)
subparsers = cli.add_subparsers(dest="top_command")
# Maps sub-command name -> its ArgumentParser, filled in by @command.
command_parsers = {}

# An argparse argument spec: (name_or_flags list, add_argument kwargs).
Argument = tuple[list[str], Any]
def argument(*name_or_flags: str, **kwargs: Any) -> Argument:
    """Bundle ``add_argument`` parameters for the :func:`command` decorator.

    Returns the ``(name_or_flags, kwargs)`` pair that :func:`command`
    forwards to ``ArgumentParser.add_argument``.
    """
    flags = list(name_or_flags)
    return flags, kwargs
def command(
    args: list[Argument],
    *,
    parent: _SubParsersAction[ArgumentParser] = subparsers,
    cmd_aliases: list[str] | None = None,
) -> Callable:
    """Decorator to define a new command in a sanity-preserving way.

    The function will be stored in the ``func`` variable when the parser
    parses arguments so that it can be called directly like so::

        >>> args = cli.parse_args()
        >>> args.func(args)
    """

    def decorator(func: Callable) -> Callable:
        # Sub-command name is derived from the function name: foo_bar -> foo-bar.
        name = func.__name__.replace("_", "-")
        # The function's docstring becomes the sub-command description.
        desc = dedent(func.__doc__ or "")
        parser = parent.add_parser(name, aliases=cmd_aliases or [], description=desc, formatter_class=HelpFormatter)
        command_parsers[name] = parser
        # Every command gets mutually exclusive -q/-v verbosity counters.
        verbosity_group = parser.add_mutually_exclusive_group()
        verbosity_group.add_argument("-q", "--quiet", dest="verbosity", action="append_const", const=-1)
        verbosity_group.add_argument("-v", "--verbose", dest="verbosity", action="append_const", const=1)
        for name_or_flags, kwargs in args:
            parser.add_argument(*name_or_flags, **kwargs)
        # Invoked later as ``args.func(args)`` after parsing.
        parser.set_defaults(func=func)
        return func

    return decorator
def path_split(pathname_spec: str, *, separator: str = os.pathsep) -> list[str]:
    """Splits a pathname specification separated by an OS-dependent separator.

    :param pathname_spec:
        The pathname specification.
    :param separator:
        (OS Dependent) `:` on Unix and `;` on Windows or user-specified.
    :returns:
        The list of path components.
    """
    parts = pathname_spec.split(separator)
    return parts
def add_to_sys_path(pathnames: list[str], *, index: int = 0) -> None:
    """Adds specified paths at specified index into the sys.path list.

    :param pathnames:
        A list of paths to add to the sys.path. Their relative order is
        preserved: the first entry ends up closest to *index*.
        (Docstring fixed: the parameter is ``pathnames``, not ``paths``.)
    :param index:
        (Default 0) The index in the sys.path list where the paths will be
        added.
    """
    # Insert in reverse so the final order in sys.path matches `pathnames`.
    for pathname in reversed(pathnames):
        sys.path.insert(index, pathname)
def load_config(tricks_file_pathname: str) -> dict:
    """Loads the YAML configuration from the specified file.

    :param tricks_file_pathname:
        The path to the tricks configuration file.
    :returns:
        A dictionary of configuration information.
    """
    # Imported lazily: PyYAML is only required for the tricks commands.
    import yaml

    with open(tricks_file_pathname, "rb") as f:
        return yaml.safe_load(f.read())
def parse_patterns(
    patterns_spec: str, ignore_patterns_spec: str, *, separator: str = ";"
) -> tuple[list[str], list[str]]:
    """Parses pattern argument specs and returns a two-tuple of
    (patterns, ignore_patterns).

    An empty ``ignore_patterns_spec`` yields an empty ignore list rather
    than ``[""]``.
    """
    patterns = patterns_spec.split(separator)
    # str.split on an empty string returns [""], which we treat as "none".
    ignore_patterns = ignore_patterns_spec.split(separator) if ignore_patterns_spec else []
    return patterns, ignore_patterns
def observe_with(
    observer: BaseObserver,
    event_handler: FileSystemEventHandler,
    pathnames: list[str],
    *,
    recursive: bool,
) -> None:
    """Single observer thread with a scheduled path and event handler.

    Blocks until a :class:`WatchdogShutdownError` is raised (e.g. by a
    signal handler), then stops and joins the observer.

    :param observer:
        The observer thread.
    :param event_handler:
        Event handler which will be called in response to file system events.
    :param pathnames:
        A list of pathnames to monitor.
    :param recursive:
        ``True`` if recursive; ``False`` otherwise.
    """
    # set() deduplicates so the same path is never scheduled twice.
    for pathname in set(pathnames):
        observer.schedule(event_handler, pathname, recursive=recursive)
    observer.start()
    try:
        # Idle loop: the observer does its work on its own thread.
        while True:
            time.sleep(1)
    except WatchdogShutdownError:
        observer.stop()
    observer.join()
def schedule_tricks(observer: BaseObserver, tricks: list[dict], pathname: str, *, recursive: bool) -> None:
    """Schedules tricks with the specified observer and for the given watch
    path.

    :param observer:
        The observer thread into which to schedule the trick and watch.
    :param tricks:
        A list of tricks. Each trick is a single-entry mapping of a dotted
        class path to the keyword arguments for that trick class.
    :param pathname:
        A path name which should be watched.
    :param recursive:
        ``True`` if recursive; ``False`` otherwise.
    """
    for trick in tricks:
        for name, value in trick.items():
            # Resolve the dotted class path and instantiate with its kwargs.
            trick_cls = load_class(name)
            handler = trick_cls(**value)
            # A trick may declare its own source_directory; fall back to
            # the directory of the tricks file otherwise.
            trick_pathname = getattr(handler, "source_directory", None) or pathname
            observer.schedule(handler, trick_pathname, recursive=recursive)
@command(
    [
        argument("files", nargs="*", help="perform tricks from given file"),
        argument(
            "--python-path",
            default=".",
            help=f"Paths separated by {os.pathsep!r} to add to the Python path.",
        ),
        argument(
            "--interval",
            "--timeout",
            dest="timeout",
            default=1.0,
            type=float,
            help="Use this as the polling interval/blocking timeout (in seconds).",
        ),
        argument(
            "--recursive",
            action="store_true",
            default=True,
            help="Recursively monitor paths (defaults to True).",
        ),
        argument("--debug-force-polling", action="store_true", help="[debug] Forces polling."),
        argument(
            "--debug-force-kqueue",
            action="store_true",
            help="[debug] Forces BSD kqueue(2).",
        ),
        argument(
            "--debug-force-winapi",
            action="store_true",
            help="[debug] Forces Windows API.",
        ),
        argument(
            "--debug-force-fsevents",
            action="store_true",
            help="[debug] Forces macOS FSEvents.",
        ),
        argument(
            "--debug-force-inotify",
            action="store_true",
            help="[debug] Forces Linux inotify(7).",
        ),
    ],
    cmd_aliases=["tricks"],
)
def tricks_from(args: Namespace) -> None:
    """Command to execute tricks from a tricks configuration file."""
    # Select the observer implementation: any --debug-force-* flag wins,
    # otherwise the platform default Observer is used.
    observer_cls: ObserverType
    if args.debug_force_polling:
        from watchdog.observers.polling import PollingObserver

        observer_cls = PollingObserver
    elif args.debug_force_kqueue:
        from watchdog.observers.kqueue import KqueueObserver

        observer_cls = KqueueObserver
    # The TYPE_CHECKING halves make mypy analyse the Windows branch on every
    # platform while at runtime only the --debug-force-winapi flag matters.
    elif (not TYPE_CHECKING and args.debug_force_winapi) or (TYPE_CHECKING and platform.is_windows()):
        from watchdog.observers.read_directory_changes import WindowsApiObserver

        observer_cls = WindowsApiObserver
    elif args.debug_force_inotify:
        from watchdog.observers.inotify import InotifyObserver

        observer_cls = InotifyObserver
    elif args.debug_force_fsevents:
        from watchdog.observers.fsevents import FSEventsObserver

        observer_cls = FSEventsObserver
    else:
        # Automatically picks the most appropriate observer for the platform
        # on which it is running.
        from watchdog.observers import Observer

        observer_cls = Observer

    add_to_sys_path(path_split(args.python_path))
    observers = []
    # One observer per tricks file; each file supplies its own tricks list.
    for tricks_file in args.files:
        observer = observer_cls(timeout=args.timeout)

        if not os.path.exists(tricks_file):
            raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), tricks_file)

        config = load_config(tricks_file)

        try:
            tricks = config[CONFIG_KEY_TRICKS]
        except KeyError as e:
            error = f"No {CONFIG_KEY_TRICKS!r} key specified in {tricks_file!r}."
            raise KeyError(error) from e

        # The config file may extend the Python path for its trick classes.
        if CONFIG_KEY_PYTHON_PATH in config:
            add_to_sys_path(config[CONFIG_KEY_PYTHON_PATH])

        # Watch the directory containing the tricks file by default.
        dir_path = os.path.dirname(tricks_file) or os.path.relpath(os.getcwd())
        schedule_tricks(observer, tricks, dir_path, recursive=args.recursive)
        observer.start()
        observers.append(observer)

    try:
        # Idle until interrupted; the observers run on their own threads.
        while True:
            time.sleep(1)
    except WatchdogShutdownError:
        for o in observers:
            o.unschedule_all()
            o.stop()
    for o in observers:
        o.join()
@command(
    [
        argument(
            "trick_paths",
            nargs="*",
            help="Dotted paths for all the tricks you want to generate.",
        ),
        argument(
            "--python-path",
            default=".",
            help=f"Paths separated by {os.pathsep!r} to add to the Python path.",
        ),
        argument(
            "--append-to-file",
            default=None,
            help="""
            Appends the generated tricks YAML to a file.

            If not specified, prints to standard output.""",
        ),
        argument(
            "-a",
            "--append-only",
            dest="append_only",
            action="store_true",
            help="""
            If --append-to-file is not specified, produces output for
            appending instead of a complete tricks YAML file.""",
        ),
    ],
    cmd_aliases=["generate-tricks-yaml"],
)
def tricks_generate_yaml(args: Namespace) -> None:
    """Command to generate Yaml configuration for tricks named on the command line."""
    # PyYAML is only needed for this command; import lazily.
    import yaml

    python_paths = path_split(args.python_path)
    add_to_sys_path(python_paths)
    output = StringIO()

    # Each trick class knows how to describe its own YAML snippet.
    for trick_path in args.trick_paths:
        trick_cls = load_class(trick_path)
        output.write(trick_cls.generate_yaml())

    content = output.getvalue()
    output.close()

    # File header: the python-path entry followed by the tricks key.
    header = yaml.dump({CONFIG_KEY_PYTHON_PATH: python_paths})
    header += f"{CONFIG_KEY_TRICKS}:\n"
    if args.append_to_file is None:
        # Output to standard output.
        if not args.append_only:
            content = header + content
        sys.stdout.write(content)
    else:
        # Only prepend the header when creating the file from scratch.
        if not os.path.exists(args.append_to_file):
            content = header + content
        with open(args.append_to_file, "a", encoding="utf-8") as file:
            file.write(content)
@command(
    [
        argument(
            "directories",
            nargs="*",
            default=".",
            help="Directories to watch. (default: '.').",
        ),
        argument(
            "-p",
            "--pattern",
            "--patterns",
            dest="patterns",
            default="*",
            help="Matches event paths with these patterns (separated by ;).",
        ),
        argument(
            "-i",
            "--ignore-pattern",
            "--ignore-patterns",
            dest="ignore_patterns",
            default="",
            help="Ignores event paths with these patterns (separated by ;).",
        ),
        argument(
            "-D",
            "--ignore-directories",
            dest="ignore_directories",
            action="store_true",
            help="Ignores events for directories.",
        ),
        argument(
            "-R",
            "--recursive",
            dest="recursive",
            action="store_true",
            help="Monitors the directories recursively.",
        ),
        argument(
            "--interval",
            "--timeout",
            dest="timeout",
            default=1.0,
            type=float,
            help="Use this as the polling interval/blocking timeout.",
        ),
        argument("--debug-force-polling", action="store_true", help="[debug] Forces polling."),
        argument(
            "--debug-force-kqueue",
            action="store_true",
            help="[debug] Forces BSD kqueue(2).",
        ),
        argument(
            "--debug-force-winapi",
            action="store_true",
            help="[debug] Forces Windows API.",
        ),
        argument(
            "--debug-force-fsevents",
            action="store_true",
            help="[debug] Forces macOS FSEvents.",
        ),
        argument(
            "--debug-force-inotify",
            action="store_true",
            help="[debug] Forces Linux inotify(7).",
        ),
    ],
)
def log(args: Namespace) -> None:
    """Command to log file system events to the console."""
    from watchdog.tricks import LoggerTrick

    patterns, ignore_patterns = parse_patterns(args.patterns, args.ignore_patterns)
    handler = LoggerTrick(
        patterns=patterns,
        ignore_patterns=ignore_patterns,
        ignore_directories=args.ignore_directories,
    )
    # Select the observer implementation: any --debug-force-* flag wins,
    # otherwise the platform default Observer is used.
    observer_cls: ObserverType
    if args.debug_force_polling:
        from watchdog.observers.polling import PollingObserver

        observer_cls = PollingObserver
    elif args.debug_force_kqueue:
        from watchdog.observers.kqueue import KqueueObserver

        observer_cls = KqueueObserver
    # The TYPE_CHECKING halves make mypy analyse the Windows branch on every
    # platform while at runtime only the --debug-force-winapi flag matters.
    elif (not TYPE_CHECKING and args.debug_force_winapi) or (TYPE_CHECKING and platform.is_windows()):
        from watchdog.observers.read_directory_changes import WindowsApiObserver

        observer_cls = WindowsApiObserver
    elif args.debug_force_inotify:
        from watchdog.observers.inotify import InotifyObserver

        observer_cls = InotifyObserver
    elif args.debug_force_fsevents:
        from watchdog.observers.fsevents import FSEventsObserver

        observer_cls = FSEventsObserver
    else:
        # Automatically picks the most appropriate observer for the platform
        # on which it is running.
        from watchdog.observers import Observer

        observer_cls = Observer
    observer = observer_cls(timeout=args.timeout)
    # Blocks until shutdown (e.g. Ctrl+C).
    observe_with(observer, handler, args.directories, recursive=args.recursive)
@command(
    [
        argument("directories", nargs="*", default=".", help="Directories to watch."),
        argument(
            "-c",
            "--command",
            dest="command",
            default=None,
            help="""
    Shell command executed in response to matching events.
    These interpolation variables are available to your command string:
        ${watch_src_path}   - event source path
        ${watch_dest_path}  - event destination path (for moved events)
        ${watch_event_type} - event type
        ${watch_object}     - 'file' or 'directory'
    Note:
        Please ensure you do not use double quotes (") to quote
        your command string. That will force your shell to
        interpolate before the command is processed by this
        command.
    Example:
        --command='echo "${watch_src_path}"'
    """,
        ),
        argument(
            "-p",
            "--pattern",
            "--patterns",
            dest="patterns",
            default="*",
            help="Matches event paths with these patterns (separated by ;).",
        ),
        argument(
            "-i",
            "--ignore-pattern",
            "--ignore-patterns",
            dest="ignore_patterns",
            default="",
            help="Ignores event paths with these patterns (separated by ;).",
        ),
        argument(
            "-D",
            "--ignore-directories",
            dest="ignore_directories",
            default=False,
            action="store_true",
            help="Ignores events for directories.",
        ),
        argument(
            "-R",
            "--recursive",
            dest="recursive",
            action="store_true",
            help="Monitors the directories recursively.",
        ),
        argument(
            "--interval",
            "--timeout",
            dest="timeout",
            default=1.0,
            type=float,
            help="Use this as the polling interval/blocking timeout.",
        ),
        argument(
            "-w",
            "--wait",
            dest="wait_for_process",
            action="store_true",
            help="Wait for process to finish to avoid multiple simultaneous instances.",
        ),
        argument(
            "-W",
            "--drop",
            dest="drop_during_process",
            action="store_true",
            help="Ignore events that occur while command is still being"
            " executed to avoid multiple simultaneous instances.",
        ),
        argument("--debug-force-polling", action="store_true", help="[debug] Forces polling."),
    ],
)
def shell_command(args: Namespace) -> None:
    """Command to execute shell commands in response to file system events.

    Wraps the user's ``--command`` template in a ``ShellCommandTrick``
    (event paths are interpolated via the ``${watch_*}`` variables shown in
    the ``-c`` help text) and runs it through ``observe_with``.
    """
    # Imported lazily so merely loading this module does not pull in tricks.
    from watchdog.tricks import ShellCommandTrick

    # Normalize a falsy value (e.g. empty string) to None; presumably
    # ShellCommandTrick substitutes a default behavior for None — confirm
    # against watchdog.tricks.
    if not args.command:
        args.command = None
    # Only two backends here: forced polling for debugging, else the
    # platform-appropriate default observer. Imports are lazy per branch.
    observer_cls: ObserverType
    if args.debug_force_polling:
        from watchdog.observers.polling import PollingObserver
        observer_cls = PollingObserver
    else:
        from watchdog.observers import Observer
        observer_cls = Observer
    patterns, ignore_patterns = parse_patterns(args.patterns, args.ignore_patterns)
    handler = ShellCommandTrick(
        args.command,
        patterns=patterns,
        ignore_patterns=ignore_patterns,
        ignore_directories=args.ignore_directories,
        wait_for_process=args.wait_for_process,
        drop_during_process=args.drop_during_process,
    )
    observer = observer_cls(timeout=args.timeout)
    observe_with(observer, handler, args.directories, recursive=args.recursive)
@command(
    [
        argument("command", help="Long-running command to run in a subprocess."),
        argument(
            "command_args",
            metavar="arg",
            nargs="*",
            help="""
    Command arguments.
    Note: Use -- before the command arguments, otherwise watchmedo will
    try to interpret them.
    """,
        ),
        argument(
            "-d",
            "--directory",
            dest="directories",
            metavar="DIRECTORY",
            action="append",
            help="Directory to watch. Use another -d or --directory option for each directory.",
        ),
        argument(
            "-p",
            "--pattern",
            "--patterns",
            dest="patterns",
            default="*",
            help="Matches event paths with these patterns (separated by ;).",
        ),
        argument(
            "-i",
            "--ignore-pattern",
            "--ignore-patterns",
            dest="ignore_patterns",
            default="",
            help="Ignores event paths with these patterns (separated by ;).",
        ),
        argument(
            "-D",
            "--ignore-directories",
            dest="ignore_directories",
            default=False,
            action="store_true",
            help="Ignores events for directories.",
        ),
        argument(
            "-R",
            "--recursive",
            dest="recursive",
            action="store_true",
            help="Monitors the directories recursively.",
        ),
        argument(
            "--interval",
            "--timeout",
            dest="timeout",
            default=1.0,
            type=float,
            help="Use this as the polling interval/blocking timeout.",
        ),
        argument(
            "--signal",
            dest="signal",
            default="SIGINT",
            help="Stop the subprocess with this signal (default SIGINT).",
        ),
        argument("--debug-force-polling", action="store_true", help="[debug] Forces polling."),
        argument(
            "--kill-after",
            dest="kill_after",
            default=10.0,
            type=float,
            help="When stopping, kill the subprocess after the specified timeout in seconds (default 10.0).",
        ),
        argument(
            "--debounce-interval",
            dest="debounce_interval",
            default=0.0,
            type=float,
            help="After a file change, Wait until the specified interval (in "
            "seconds) passes with no file changes, and only then restart.",
        ),
        argument(
            "--no-restart-on-command-exit",
            dest="restart_on_command_exit",
            default=True,
            action="store_false",
            help="Don't auto-restart the command after it exits.",
        ),
    ],
)
def auto_restart(args: Namespace) -> None:
    """Command to start a long-running subprocess and restart it on matched events.

    Starts the subprocess via an ``AutoRestartTrick`` handler, installs
    termination-signal handlers so Ctrl-C / SIGTERM unwind cleanly, then
    watches the requested directories; the handler is always stopped on exit.
    """
    # Backend selection first: forced polling for debugging, else the
    # platform default. Imports are lazy per branch.
    observer_cls: ObserverType
    if args.debug_force_polling:
        from watchdog.observers.polling import PollingObserver
        observer_cls = PollingObserver
    else:
        from watchdog.observers import Observer
        observer_cls = Observer
    import signal
    from watchdog.tricks import AutoRestartTrick

    # -d/--directory uses action="append", so it is None when never given.
    if not args.directories:
        args.directories = ["."]
    # Allow either signal name or number.
    stop_signal = getattr(signal, args.signal) if args.signal.startswith("SIG") else int(args.signal)
    # Handle termination signals by raising a semantic exception which will
    # allow us to gracefully unwind and stop the observer
    termination_signals = {signal.SIGTERM, signal.SIGINT}
    # SIGHUP is not defined on every platform (guarded with hasattr here).
    if hasattr(signal, "SIGHUP"):
        termination_signals.add(signal.SIGHUP)
    def handler_termination_signal(_signum: signal._SIGNUM, _frame: object) -> None:
        # Neuter all signals so that we don't attempt a double shutdown
        for signum in termination_signals:
            signal.signal(signum, signal.SIG_IGN)
        raise WatchdogShutdownError
    # Handlers must be registered before the (blocking) observe_with call so
    # a signal delivered while watching triggers the graceful unwind.
    for signum in termination_signals:
        signal.signal(signum, handler_termination_signal)
    patterns, ignore_patterns = parse_patterns(args.patterns, args.ignore_patterns)
    # Full argv for the subprocess: the command followed by its arguments.
    command = [args.command]
    command.extend(args.command_args)
    handler = AutoRestartTrick(
        command,
        patterns=patterns,
        ignore_patterns=ignore_patterns,
        ignore_directories=args.ignore_directories,
        stop_signal=stop_signal,
        kill_after=args.kill_after,
        debounce_interval_seconds=args.debounce_interval,
        restart_on_command_exit=args.restart_on_command_exit,
    )
    # Launch the subprocess before watching; stop it no matter how we exit.
    handler.start()
    observer = observer_cls(timeout=args.timeout)
    try:
        observe_with(observer, handler, args.directories, recursive=args.recursive)
    except WatchdogShutdownError:
        # Raised by handler_termination_signal: expected, clean shutdown.
        pass
    finally:
        handler.stop()
class LogLevelError(Exception):
    """Raised when the -q/-v verbosity flags are combined in an unsupported way."""
def _get_log_level_from_args(args: Namespace) -> str:
verbosity = sum(args.verbosity or [])
if verbosity < -1:
error = "-q/--quiet may be specified only once."
raise LogLevelError(error)
if verbosity > 2:
error = "-v/--verbose may be specified up to 2 times."
raise LogLevelError(error)
return ["ERROR", "WARNING", "INFO", "DEBUG"][1 + verbosity]
def main() -> int:
    """Entry-point function.

    Parses the CLI arguments, configures the watchdog logger level from the
    -q/-v flags, and dispatches to the selected sub-command. Returns a
    process exit code: 0 on success, 1 on usage errors, 130 on Ctrl-C.
    """
    args = cli.parse_args()

    # No sub-command given: show the top-level usage and fail.
    if args.top_command is None:
        cli.print_help()
        return 1

    try:
        level = _get_log_level_from_args(args)
    except LogLevelError as err:
        print(f"Error: {err.args[0]}", file=sys.stderr)  # noqa:T201
        # Point the user at the help of the sub-command they invoked.
        command_parsers[args.top_command].print_help()
        return 1
    logging.getLogger("watchdog").setLevel(level)

    try:
        args.func(args)
    except KeyboardInterrupt:
        # 128 + SIGINT(2): the conventional exit code for Ctrl-C.
        return 130
    else:
        return 0
# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    sys.exit(main())