[ty] Sync vendored typeshed stubs (#19334)

Co-authored-by: typeshedbot <>
Co-authored-by: Alex Waygood <alex.waygood@gmail.com>
github-actions[bot] 2025-07-14 17:34:09 +01:00 committed by GitHub
parent 059e90a98f
commit 4f60f0e925
649 changed files with 86333 additions and 8607 deletions


@@ -423,14 +423,14 @@ mod tests {
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type-definition]: Type definition
--> stdlib/builtins.pyi:461:7
--> stdlib/builtins.pyi:1120:7
|
459 | def __getitem__(self, key: int, /) -> str | int | None: ...
460 |
461 | class str(Sequence[str]):
1118 | def __getitem__(self, key: int, /) -> str | int | None: ...
1119 |
1120 | class str(Sequence[str]):
| ^^^
462 | @overload
463 | def __new__(cls, object: object = ...) -> Self: ...
1121 | """
1122 | str(object='') -> str
|
info: Source
--> main.py:4:13
@@ -452,14 +452,14 @@ mod tests {
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type-definition]: Type definition
--> stdlib/builtins.pyi:461:7
--> stdlib/builtins.pyi:1120:7
|
459 | def __getitem__(self, key: int, /) -> str | int | None: ...
460 |
461 | class str(Sequence[str]):
1118 | def __getitem__(self, key: int, /) -> str | int | None: ...
1119 |
1120 | class str(Sequence[str]):
| ^^^
462 | @overload
463 | def __new__(cls, object: object = ...) -> Self: ...
1121 | """
1122 | str(object='') -> str
|
info: Source
--> main.py:2:22
@@ -568,14 +568,14 @@ mod tests {
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type-definition]: Type definition
--> stdlib/builtins.pyi:461:7
--> stdlib/builtins.pyi:1120:7
|
459 | def __getitem__(self, key: int, /) -> str | int | None: ...
460 |
461 | class str(Sequence[str]):
1118 | def __getitem__(self, key: int, /) -> str | int | None: ...
1119 |
1120 | class str(Sequence[str]):
| ^^^
462 | @overload
463 | def __new__(cls, object: object = ...) -> Self: ...
1121 | """
1122 | str(object='') -> str
|
info: Source
--> main.py:4:18
@@ -601,16 +601,16 @@ mod tests {
// TODO: This should jump to `str` and not `int` because
// the keyword is typed as a string. It's only the passed argument that
// is an int. Navigating to `str` would match pyright's behavior.
assert_snapshot!(test.goto_type_definition(), @r"
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type-definition]: Type definition
--> stdlib/builtins.pyi:244:7
--> stdlib/builtins.pyi:365:7
|
242 | _LiteralInteger = _PositiveInteger | _NegativeInteger | Literal[0] # noqa: Y026 # TODO: Use TypeAlias once mypy bugs are fixed
243 |
244 | class int:
363 | _LiteralInteger = _PositiveInteger | _NegativeInteger | Literal[0] # noqa: Y026 # TODO: Use TypeAlias once mypy bugs are fixed
364 |
365 | class int:
| ^^^
245 | @overload
246 | def __new__(cls, x: ConvertibleToInt = ..., /) -> Self: ...
366 | """
367 | int([x]) -> integer
|
info: Source
--> main.py:4:18
@@ -620,7 +620,7 @@ mod tests {
4 | test(a= 123)
| ^
|
");
"#);
}
#[test]
@@ -637,14 +637,14 @@ f(**kwargs<CURSOR>)
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type-definition]: Type definition
--> stdlib/builtins.pyi:1136:7
--> stdlib/builtins.pyi:3458:7
|
1134 | def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
1135 |
1136 | class dict(MutableMapping[_KT, _VT]):
3456 | """
3457 |
3458 | class dict(MutableMapping[_KT, _VT]):
| ^^^^
1137 | # __init__ should be kept roughly in line with `collections.UserDict.__init__`, which has similar semantics
1138 | # Also multiprocessing.managers.SyncManager.dict()
3459 | """
3460 | dict() -> new empty dictionary
|
info: Source
--> main.py:6:5
@@ -666,16 +666,16 @@ f(**kwargs<CURSOR>)
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type-definition]: Type definition
--> stdlib/builtins.pyi:461:7
--> stdlib/builtins.pyi:1120:7
|
459 | def __getitem__(self, key: int, /) -> str | int | None: ...
460 |
461 | class str(Sequence[str]):
1118 | def __getitem__(self, key: int, /) -> str | int | None: ...
1119 |
1120 | class str(Sequence[str]):
| ^^^
462 | @overload
463 | def __new__(cls, object: object = ...) -> Self: ...
1121 | """
1122 | str(object='') -> str
|
info: Source
--> main.py:3:17
@@ -684,7 +684,7 @@ f(**kwargs<CURSOR>)
3 | a
| ^
|
");
"#);
}
#[test]
@@ -759,16 +759,16 @@ f(**kwargs<CURSOR>)
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type-definition]: Type definition
--> stdlib/builtins.pyi:461:7
--> stdlib/builtins.pyi:1120:7
|
459 | def __getitem__(self, key: int, /) -> str | int | None: ...
460 |
461 | class str(Sequence[str]):
1118 | def __getitem__(self, key: int, /) -> str | int | None: ...
1119 |
1120 | class str(Sequence[str]):
| ^^^
462 | @overload
463 | def __new__(cls, object: object = ...) -> Self: ...
1121 | """
1122 | str(object='') -> str
|
info: Source
--> main.py:4:27
@@ -778,7 +778,7 @@ f(**kwargs<CURSOR>)
4 | print(a)
| ^
|
");
"#);
}
#[test]
@@ -790,15 +790,16 @@ f(**kwargs<CURSOR>)
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
assert_snapshot!(test.goto_type_definition(), @r#"
info[goto-type-definition]: Type definition
--> stdlib/types.pyi:691:11
--> stdlib/types.pyi:1073:11
|
689 | if sys.version_info >= (3, 10):
690 | @final
691 | class NoneType:
1071 | if sys.version_info >= (3, 10):
1072 | @final
1073 | class NoneType:
| ^^^^^^^^
692 | def __bool__(self) -> Literal[False]: ...
1074 | """
1075 | The type of the None singleton.
|
info: Source
--> main.py:3:17
@@ -809,14 +810,14 @@ f(**kwargs<CURSOR>)
|
info[goto-type-definition]: Type definition
--> stdlib/builtins.pyi:461:7
--> stdlib/builtins.pyi:1120:7
|
459 | def __getitem__(self, key: int, /) -> str | int | None: ...
460 |
461 | class str(Sequence[str]):
1118 | def __getitem__(self, key: int, /) -> str | int | None: ...
1119 |
1120 | class str(Sequence[str]):
| ^^^
462 | @overload
463 | def __new__(cls, object: object = ...) -> Self: ...
1121 | """
1122 | str(object='') -> str
|
info: Source
--> main.py:3:17
@ -825,7 +826,7 @@ f(**kwargs<CURSOR>)
3 | a
| ^
|
");
"#);
}
impl CursorTest {


@@ -29,16 +29,16 @@ error[invalid-argument-type]: Argument to function `loads` is incorrect
| ^ Expected `str | bytes | bytearray`, found `Literal[5]`
|
info: Function defined here
--> stdlib/json/__init__.pyi:39:5
--> stdlib/json/__init__.pyi:221:5
|
37 | **kwds: Any,
38 | ) -> None: ...
39 | def loads(
219 | """
220 |
221 | def loads(
| ^^^^^
40 | s: str | bytes | bytearray,
222 | s: str | bytes | bytearray,
| -------------------------- Parameter declared here
41 | *,
42 | cls: type[JSONDecoder] | None = None,
223 | *,
224 | cls: type[JSONDecoder] | None = None,
|
info: rule `invalid-argument-type` is enabled by default
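
The snapshot above records ty's diagnostic for passing an `int` literal to `json.loads`. A minimal reproduction, assuming a `main.py` along these lines (the test input itself is not shown in this diff):

```python
import json

# ty flags the argument: `loads` expects `str | bytes | bytearray`,
# but `5` is inferred as `Literal[5]`.
json.loads(5)
```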


@@ -1 +1 @@
f64707592dd3c32f756ddeebd012acb2b072aa0d
997284534f8be3159aa56c7d102ada07c2ff6f48


@@ -1,11 +1,73 @@
"""
Record of phased-in incompatible language changes.
Each line is of the form:
FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
CompilerFlag ")"
where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
of the same form as sys.version_info:
(PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
PY_MINOR_VERSION, # the 1; an int
PY_MICRO_VERSION, # the 0; an int
PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
PY_RELEASE_SERIAL # the 3; an int
)
OptionalRelease records the first release in which
from __future__ import FeatureName
was accepted.
In the case of MandatoryReleases that have not yet occurred,
MandatoryRelease predicts the release in which the feature will become part
of the language.
Else MandatoryRelease records when the feature became part of the language;
in releases at or after that, modules no longer need
from __future__ import FeatureName
to use the feature in question, but may continue to use such imports.
MandatoryRelease may also be None, meaning that a planned feature got
dropped or that the release version is undetermined.
Instances of class _Feature have two corresponding methods,
.getOptionalRelease() and .getMandatoryRelease().
CompilerFlag is the (bitfield) flag that should be passed in the fourth
argument to the builtin function compile() to enable the feature in
dynamically compiled code. This flag is stored in the .compiler_flag
attribute on _Future instances. These values must match the appropriate
#defines of CO_xxx flags in Include/cpython/compile.h.
No feature line is ever to be deleted from this file.
"""
from typing_extensions import TypeAlias
_VersionInfo: TypeAlias = tuple[int, int, int, str, int]
class _Feature:
def __init__(self, optionalRelease: _VersionInfo, mandatoryRelease: _VersionInfo | None, compiler_flag: int) -> None: ...
def getOptionalRelease(self) -> _VersionInfo: ...
def getMandatoryRelease(self) -> _VersionInfo | None: ...
def getOptionalRelease(self) -> _VersionInfo:
"""
Return first release in which this feature was recognized.
This is a 5-tuple, of the same form as sys.version_info.
"""
def getMandatoryRelease(self) -> _VersionInfo | None:
"""
Return release in which this feature will become mandatory.
This is a 5-tuple, of the same form as sys.version_info, or, if
the feature was dropped, or the release date is undetermined, is None.
"""
compiler_flag: int
absolute_import: _Feature
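
The `_Feature` API stubbed above is easy to exercise at runtime; a quick sketch against the real `__future__` module:

```python
import __future__

feat = __future__.absolute_import
print(feat.getOptionalRelease())   # (2, 5, 0, 'alpha', 1) -- first release accepting the import
print(feat.getMandatoryRelease())  # (3, 0, 0, 'alpha', 0) -- when it became the default
print(feat.compiler_flag)          # bitfield for compile(..., flags=...)
```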


@@ -1,3 +1,7 @@
"""
Accelerator module for asyncio
"""
import sys
from asyncio.events import AbstractEventLoop
from collections.abc import Awaitable, Callable, Coroutine, Generator
@@ -11,6 +15,21 @@ _T_co = TypeVar("_T_co", covariant=True)
_TaskYieldType: TypeAlias = Future[object] | None
class Future(Awaitable[_T]):
"""
This class is *almost* compatible with concurrent.futures.Future.
Differences:
- result() and exception() do not take a timeout argument and
raise an exception when the future isn't done yet.
- Callbacks registered with add_done_callback() are always called
via the event loop's call_soon_threadsafe().
- This class is not compatible with the wait() and as_completed()
methods in the concurrent.futures package.
"""
_state: str
@property
def _exception(self) -> BaseException | None: ...
@@ -21,24 +40,107 @@ class Future(Awaitable[_T]):
def _log_traceback(self, val: Literal[False]) -> None: ...
_asyncio_future_blocking: bool # is a part of duck-typing contract for `Future`
def __init__(self, *, loop: AbstractEventLoop | None = ...) -> None: ...
def __del__(self) -> None: ...
def get_loop(self) -> AbstractEventLoop: ...
def __del__(self) -> None:
"""
Called when the instance is about to be destroyed.
"""
def get_loop(self) -> AbstractEventLoop:
"""
Return the event loop the Future is bound to.
"""
@property
def _callbacks(self) -> list[tuple[Callable[[Self], Any], Context]]: ...
def add_done_callback(self, fn: Callable[[Self], object], /, *, context: Context | None = None) -> None: ...
def cancel(self, msg: Any | None = None) -> bool: ...
def cancelled(self) -> bool: ...
def done(self) -> bool: ...
def result(self) -> _T: ...
def exception(self) -> BaseException | None: ...
def remove_done_callback(self, fn: Callable[[Self], object], /) -> int: ...
def set_result(self, result: _T, /) -> None: ...
def set_exception(self, exception: type | BaseException, /) -> None: ...
def __iter__(self) -> Generator[Any, None, _T]: ...
def __await__(self) -> Generator[Any, None, _T]: ...
def add_done_callback(self, fn: Callable[[Self], object], /, *, context: Context | None = None) -> None:
"""
Add a callback to be run when the future becomes done.
The callback is called with a single argument - the future object. If
the future is already done when this is called, the callback is
scheduled with call_soon.
"""
def cancel(self, msg: Any | None = None) -> bool:
"""
Cancel the future and schedule callbacks.
If the future is already done or cancelled, return False. Otherwise,
change the future's state to cancelled, schedule the callbacks and
return True.
"""
def cancelled(self) -> bool:
"""
Return True if the future was cancelled.
"""
def done(self) -> bool:
"""
Return True if the future is done.
Done means either that a result / exception are available, or that the
future was cancelled.
"""
def result(self) -> _T:
"""
Return the result this future represents.
If the future has been cancelled, raises CancelledError. If the
future's result isn't yet available, raises InvalidStateError. If
the future is done and has an exception set, this exception is raised.
"""
def exception(self) -> BaseException | None:
"""
Return the exception that was set on this future.
The exception (or None if no exception was set) is returned only if
the future is done. If the future has been cancelled, raises
CancelledError. If the future isn't done yet, raises
InvalidStateError.
"""
def remove_done_callback(self, fn: Callable[[Self], object], /) -> int:
"""
Remove all instances of a callback from the "call when done" list.
Returns the number of callbacks removed.
"""
def set_result(self, result: _T, /) -> None:
"""
Mark the future done and set its result.
If the future is already done when this method is called, raises
InvalidStateError.
"""
def set_exception(self, exception: type | BaseException, /) -> None:
"""
Mark the future done and set an exception.
If the future is already done when this method is called, raises
InvalidStateError.
"""
def __iter__(self) -> Generator[Any, None, _T]:
"""
Implement iter(self).
"""
def __await__(self) -> Generator[Any, None, _T]:
"""
Return an iterator to be used in await expression.
"""
@property
def _loop(self) -> AbstractEventLoop: ...
def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
def __class_getitem__(cls, item: Any, /) -> GenericAlias:
"""
See PEP 585
"""
if sys.version_info >= (3, 12):
_TaskCompatibleCoro: TypeAlias = Coroutine[Any, Any, _T_co]
@@ -50,6 +152,10 @@ else:
# since the only reason why `asyncio.Future` is invariant is the `set_result()` method,
# and `asyncio.Task.set_result()` always raises.
class Task(Future[_T_co]): # type: ignore[type-var] # pyright: ignore[reportInvalidTypeArguments]
"""
A coroutine wrapped in a Future.
"""
if sys.version_info >= (3, 12):
def __init__(
self,
@@ -84,27 +190,144 @@ class Task(Future[_T_co]): # type: ignore[type-var] # pyright: ignore[reportIn
if sys.version_info >= (3, 12):
def get_context(self) -> Context: ...
def get_stack(self, *, limit: int | None = None) -> list[FrameType]: ...
def print_stack(self, *, limit: int | None = None, file: TextIO | None = None) -> None: ...
def get_stack(self, *, limit: int | None = None) -> list[FrameType]:
"""
Return the list of stack frames for this task's coroutine.
If the coroutine is not done, this returns the stack where it is
suspended. If the coroutine has completed successfully or was
cancelled, this returns an empty list. If the coroutine was
terminated by an exception, this returns the list of traceback
frames.
The frames are always ordered from oldest to newest.
The optional limit gives the maximum number of frames to
return; by default all available frames are returned. Its
meaning differs depending on whether a stack or a traceback is
returned: the newest frames of a stack are returned, but the
oldest frames of a traceback are returned. (This matches the
behavior of the traceback module.)
For reasons beyond our control, only one stack frame is
returned for a suspended coroutine.
"""
def print_stack(self, *, limit: int | None = None, file: TextIO | None = None) -> None:
"""
Print the stack or traceback for this task's coroutine.
This produces output similar to that of the traceback module,
for the frames retrieved by get_stack(). The limit argument
is passed to get_stack(). The file argument is an I/O stream
to which the output is written; by default output is written
to sys.stderr.
"""
if sys.version_info >= (3, 11):
def cancelling(self) -> int: ...
def uncancel(self) -> int: ...
def cancelling(self) -> int:
"""
Return the count of the task's cancellation requests.
def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
This count is incremented when .cancel() is called
and may be decremented using .uncancel().
"""
def get_event_loop() -> AbstractEventLoop: ...
def get_running_loop() -> AbstractEventLoop: ...
def _set_running_loop(loop: AbstractEventLoop | None, /) -> None: ...
def _get_running_loop() -> AbstractEventLoop: ...
def _register_task(task: Task[Any]) -> None: ...
def _unregister_task(task: Task[Any]) -> None: ...
def _enter_task(loop: AbstractEventLoop, task: Task[Any]) -> None: ...
def _leave_task(loop: AbstractEventLoop, task: Task[Any]) -> None: ...
def uncancel(self) -> int:
"""
Decrement the task's count of cancellation requests.
This should be used by tasks that catch CancelledError
and wish to continue indefinitely until they are cancelled again.
Returns the remaining number of cancellation requests.
"""
def __class_getitem__(cls, item: Any, /) -> GenericAlias:
"""
See PEP 585
"""
def get_event_loop() -> AbstractEventLoop:
"""
Return an asyncio event loop.
When called from a coroutine or a callback (e.g. scheduled with
call_soon or similar API), this function will always return the
running event loop.
If there is no running event loop set, the function will return
the result of `get_event_loop_policy().get_event_loop()` call.
"""
def get_running_loop() -> AbstractEventLoop:
"""
Return the running event loop. Raise a RuntimeError if there is none.
This function is thread-specific.
"""
def _set_running_loop(loop: AbstractEventLoop | None, /) -> None:
"""
Set the running event loop.
This is a low-level function intended to be used by event loops.
This function is thread-specific.
"""
def _get_running_loop() -> AbstractEventLoop:
"""
Return the running event loop or None.
This is a low-level function intended to be used by event loops.
This function is thread-specific.
"""
def _register_task(task: Task[Any]) -> None:
"""
Register a new task in asyncio as executed by loop.
Returns None.
"""
def _unregister_task(task: Task[Any]) -> None:
"""
Unregister a task.
Returns None.
"""
def _enter_task(loop: AbstractEventLoop, task: Task[Any]) -> None:
"""
Enter into task execution or resume suspended task.
Task belongs to loop.
Returns None.
"""
def _leave_task(loop: AbstractEventLoop, task: Task[Any]) -> None:
"""
Leave task execution or suspend a task.
Task belongs to loop.
Returns None.
"""
if sys.version_info >= (3, 12):
def current_task(loop: AbstractEventLoop | None = None) -> Task[Any] | None: ...
def current_task(loop: AbstractEventLoop | None = None) -> Task[Any] | None:
"""
Return a currently executed task.
"""
if sys.version_info >= (3, 14):
def future_discard_from_awaited_by(future: Future[Any], waiter: Future[Any], /) -> None: ...
def future_add_to_awaited_by(future: Future[Any], waiter: Future[Any], /) -> None: ...
def all_tasks(loop: AbstractEventLoop | None = None) -> set[Task[Any]]: ...
def future_add_to_awaited_by(future: Future[Any], waiter: Future[Any], /) -> None:
"""
Record that `fut` is awaited on by `waiter`.
"""
def all_tasks(loop: AbstractEventLoop | None = None) -> set[Task[Any]]:
"""
Return a set of all tasks for the loop.
"""


@@ -1,3 +1,12 @@
"""
Bisection algorithms.
This module provides support for maintaining a list in sorted order without
having to sort the list after each insertion. For long lists of items with
expensive comparison operations, this can be an improvement over the more
common approach.
"""
import sys
from _typeshed import SupportsLenAndGetItem, SupportsRichComparisonT
from collections.abc import Callable, MutableSequence
@@ -14,7 +23,20 @@ if sys.version_info >= (3, 10):
hi: int | None = None,
*,
key: None = None,
) -> int: ...
) -> int:
"""
Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e < x, and all e in
a[i:] have e >= x. So if x already appears in the list, a.insert(i, x) will
insert just before the leftmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
A custom key function can be supplied to customize the sort order.
"""
@overload
def bisect_left(
a: SupportsLenAndGetItem[_T],
@@ -32,7 +54,20 @@ if sys.version_info >= (3, 10):
hi: int | None = None,
*,
key: None = None,
) -> int: ...
) -> int:
"""
Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e <= x, and all e in
a[i:] have e > x. So if x already appears in the list, a.insert(i, x) will
insert just after the rightmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
A custom key function can be supplied to customize the sort order.
"""
@overload
def bisect_right(
a: SupportsLenAndGetItem[_T],
@@ -50,7 +85,18 @@ if sys.version_info >= (3, 10):
hi: int | None = None,
*,
key: None = None,
) -> None: ...
) -> None:
"""
Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the left of the leftmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
A custom key function can be supplied to customize the sort order.
"""
@overload
def insort_left(
a: MutableSequence[_T], x: _T, lo: int = 0, hi: int | None = None, *, key: Callable[[_T], SupportsRichComparisonT]
@@ -63,7 +109,18 @@ if sys.version_info >= (3, 10):
hi: int | None = None,
*,
key: None = None,
) -> None: ...
) -> None:
"""
Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the right of the rightmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
A custom key function can be supplied to customize the sort order.
"""
@overload
def insort_right(
a: MutableSequence[_T], x: _T, lo: int = 0, hi: int | None = None, *, key: Callable[[_T], SupportsRichComparisonT]
@@ -72,13 +129,52 @@ if sys.version_info >= (3, 10):
else:
def bisect_left(
a: SupportsLenAndGetItem[SupportsRichComparisonT], x: SupportsRichComparisonT, lo: int = 0, hi: int | None = None
) -> int: ...
) -> int:
"""
Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e < x, and all e in
a[i:] have e >= x. So if x already appears in the list, i points just
before the leftmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
def bisect_right(
a: SupportsLenAndGetItem[SupportsRichComparisonT], x: SupportsRichComparisonT, lo: int = 0, hi: int | None = None
) -> int: ...
) -> int:
"""
Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e <= x, and all e in
a[i:] have e > x. So if x already appears in the list, i points just
beyond the rightmost x already there
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
def insort_left(
a: MutableSequence[SupportsRichComparisonT], x: SupportsRichComparisonT, lo: int = 0, hi: int | None = None
) -> None: ...
) -> None:
"""
Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the left of the leftmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
def insort_right(
a: MutableSequence[SupportsRichComparisonT], x: SupportsRichComparisonT, lo: int = 0, hi: int | None = None
) -> None: ...
) -> None:
"""
Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the right of the rightmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""


@@ -1,3 +1,7 @@
"""
_blake2b provides BLAKE2b for hashlib
"""
from _typeshed import ReadableBuffer
from typing import ClassVar, final
from typing_extensions import Self
@@ -13,6 +17,10 @@ BLAKE2S_SALT_SIZE: int = 8
@final
class blake2b:
"""
Return a new BLAKE2b hash object.
"""
MAX_DIGEST_SIZE: ClassVar[int] = 64
MAX_KEY_SIZE: ClassVar[int] = 64
PERSON_SIZE: ClassVar[int] = 16
@@ -38,13 +46,32 @@
last_node: bool = False,
usedforsecurity: bool = True,
) -> Self: ...
def copy(self) -> Self: ...
def digest(self) -> bytes: ...
def hexdigest(self) -> str: ...
def update(self, data: ReadableBuffer, /) -> None: ...
def copy(self) -> Self:
"""
Return a copy of the hash object.
"""
def digest(self) -> bytes:
"""
Return the digest value as a bytes object.
"""
def hexdigest(self) -> str:
"""
Return the digest value as a string of hexadecimal digits.
"""
def update(self, data: ReadableBuffer, /) -> None:
"""
Update this hash object's state with the provided bytes-like object.
"""
@final
class blake2s:
"""
Return a new BLAKE2s hash object.
"""
MAX_DIGEST_SIZE: ClassVar[int] = 32
MAX_KEY_SIZE: ClassVar[int] = 32
PERSON_SIZE: ClassVar[int] = 8
@@ -70,7 +97,22 @@ class blake2s:
last_node: bool = False,
usedforsecurity: bool = True,
) -> Self: ...
def copy(self) -> Self: ...
def digest(self) -> bytes: ...
def hexdigest(self) -> str: ...
def update(self, data: ReadableBuffer, /) -> None: ...
def copy(self) -> Self:
"""
Return a copy of the hash object.
"""
def digest(self) -> bytes:
"""
Return the digest value as a bytes object.
"""
def hexdigest(self) -> str:
"""
Return the digest value as a string of hexadecimal digits.
"""
def update(self, data: ReadableBuffer, /) -> None:
"""
Update this hash object's state with the provided bytes-like object.
"""


@@ -5,20 +5,79 @@ from typing_extensions import Self
@final
class BZ2Compressor:
"""
Create a compressor object for compressing data incrementally.
compresslevel
Compression level, as a number between 1 and 9.
For one-shot compression, use the compress() function instead.
"""
if sys.version_info >= (3, 12):
def __new__(cls, compresslevel: int = 9, /) -> Self: ...
else:
def __init__(self, compresslevel: int = 9, /) -> None: ...
def compress(self, data: ReadableBuffer, /) -> bytes: ...
def flush(self) -> bytes: ...
def compress(self, data: ReadableBuffer, /) -> bytes:
"""
Provide data to the compressor object.
Returns a chunk of compressed data if possible, or b'' otherwise.
When you have finished providing data to the compressor, call the
flush() method to finish the compression process.
"""
def flush(self) -> bytes:
"""
Finish the compression process.
Returns the compressed data left in internal buffers.
The compressor object may not be used after this method is called.
"""
@final
class BZ2Decompressor:
def decompress(self, data: ReadableBuffer, max_length: int = -1) -> bytes: ...
"""
Create a decompressor object for decompressing data incrementally.
For one-shot decompression, use the decompress() function instead.
"""
def decompress(self, data: ReadableBuffer, max_length: int = -1) -> bytes:
"""
Decompress *data*, returning uncompressed data as bytes.
If *max_length* is nonnegative, returns at most *max_length* bytes of
decompressed data. If this limit is reached and further output can be
produced, *self.needs_input* will be set to ``False``. In this case, the next
call to *decompress()* may provide *data* as b'' to obtain more of the output.
If all of the input data was decompressed and returned (either because this
was less than *max_length* bytes, or because *max_length* was negative),
*self.needs_input* will be set to True.
Attempting to decompress data after the end of stream is reached raises an
EOFError. Any data found after the end of the stream is ignored and saved in
the unused_data attribute.
"""
@property
def eof(self) -> bool: ...
def eof(self) -> bool:
"""
True if the end-of-stream marker has been reached.
"""
@property
def needs_input(self) -> bool: ...
def needs_input(self) -> bool:
"""
True if more input is needed before more decompressed data can be produced.
"""
@property
def unused_data(self) -> bytes: ...
def unused_data(self) -> bytes:
"""
Data found after the end of the compressed stream.
"""


@@ -16,13 +16,39 @@ _CharMap: TypeAlias = dict[int, int] | _EncodingMap
_Handler: TypeAlias = Callable[[UnicodeError], tuple[str | bytes, int]]
_SearchFunction: TypeAlias = Callable[[str], codecs.CodecInfo | None]
def register(search_function: _SearchFunction, /) -> None: ...
def register(search_function: _SearchFunction, /) -> None:
"""
Register a codec search function.
Search functions are expected to take one argument, the encoding name in
all lower case letters, and either return None, or a tuple of functions
(encoder, decoder, stream_reader, stream_writer) (or a CodecInfo object).
"""
if sys.version_info >= (3, 10):
def unregister(search_function: _SearchFunction, /) -> None: ...
def unregister(search_function: _SearchFunction, /) -> None:
"""
Unregister a codec search function and clear the registry's cache.
def register_error(errors: str, handler: _Handler, /) -> None: ...
def lookup_error(name: str, /) -> _Handler: ...
If the search function is not registered, do nothing.
"""
def register_error(errors: str, handler: _Handler, /) -> None:
"""
Register the specified error handler under the name errors.
handler must be a callable object, that will be called with an exception
instance containing information about the location of the encoding/decoding
error and must return a (replacement, new position) tuple.
"""
def lookup_error(name: str, /) -> _Handler:
"""
lookup_error(errors) -> handler
Return the error handler for the specified error handling name or raise a
LookupError, if no handler exists under this name.
"""
# The type ignore on `encode` and `decode` is to avoid issues with overlapping overloads, for more details, see #300
# https://docs.python.org/3/library/codecs.html#binary-transforms
@@ -48,13 +74,33 @@ _BytesToBytesEncoding: TypeAlias = Literal[
_StrToStrEncoding: TypeAlias = Literal["rot13", "rot_13"]
@overload
def encode(obj: ReadableBuffer, encoding: _BytesToBytesEncoding, errors: str = "strict") -> bytes: ...
def encode(obj: ReadableBuffer, encoding: _BytesToBytesEncoding, errors: str = "strict") -> bytes:
"""
Encodes obj using the codec registered for encoding.
The default encoding is 'utf-8'. errors may be given to set a
different error handling scheme. Default is 'strict' meaning that encoding
errors raise a ValueError. Other possible values are 'ignore', 'replace'
and 'backslashreplace' as well as any other name registered with
codecs.register_error that can handle ValueErrors.
"""
@overload
def encode(obj: str, encoding: _StrToStrEncoding, errors: str = "strict") -> str: ... # type: ignore[overload-overlap]
@overload
def encode(obj: str, encoding: str = "utf-8", errors: str = "strict") -> bytes: ...
@overload
def decode(obj: ReadableBuffer, encoding: _BytesToBytesEncoding, errors: str = "strict") -> bytes: ... # type: ignore[overload-overlap]
def decode(obj: ReadableBuffer, encoding: _BytesToBytesEncoding, errors: str = "strict") -> bytes: # type: ignore[overload-overlap]
"""
Decodes obj using the codec registered for encoding.
Default encoding is 'utf-8'. errors may be given to set a
different error handling scheme. Default is 'strict' meaning that encoding
errors raise a ValueError. Other possible values are 'ignore', 'replace'
and 'backslashreplace' as well as any other name registered with
codecs.register_error that can handle ValueErrors.
"""
@overload
def decode(obj: str, encoding: _StrToStrEncoding, errors: str = "strict") -> str: ...
@@ -71,7 +117,11 @@ def decode(
def decode(obj: str, encoding: Literal["hex", "hex_codec"], errors: str = "strict") -> bytes: ...
@overload
def decode(obj: ReadableBuffer, encoding: str = "utf-8", errors: str = "strict") -> str: ...
def lookup(encoding: str, /) -> codecs.CodecInfo: ...
def lookup(encoding: str, /) -> codecs.CodecInfo:
"""
Looks up a codec tuple in the Python codec registry and returns a CodecInfo object.
"""
def charmap_build(map: str, /) -> _CharMap: ...
def ascii_decode(data: ReadableBuffer, errors: str | None = None, /) -> tuple[str, int]: ...
def ascii_encode(str: str, errors: str | None = None, /) -> tuple[bytes, int]: ...
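
`encode`/`decode` and the error-handler registry above, exercised through the public `codecs` module:

```python
import codecs

print(codecs.encode("café", "utf-8"))          # b'caf\xc3\xa9'
print(codecs.decode(b"caf\xc3\xa9", "utf-8"))  # 'café'
print(codecs.encode("secret", "rot13"))        # str -> str transform: 'frperg'
replace = codecs.lookup_error("replace")       # handler registered under 'replace'
```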


@@ -1,3 +1,9 @@
"""
Abstract Base Classes (ABCs) for collections, according to PEP 3119.
Unit tests are in test_collections.
"""
import sys
from abc import abstractmethod
from types import MappingProxyType
@@ -74,31 +80,55 @@ _VT_co = TypeVar("_VT_co", covariant=True) # Value type covariant containers.
@final
class dict_keys(KeysView[_KT_co], Generic[_KT_co, _VT_co]): # undocumented
def __eq__(self, value: object, /) -> bool: ...
def __reversed__(self) -> Iterator[_KT_co]: ...
def __reversed__(self) -> Iterator[_KT_co]:
"""
Return a reverse iterator over the dict keys.
"""
__hash__: ClassVar[None] # type: ignore[assignment]
if sys.version_info >= (3, 13):
def isdisjoint(self, other: Iterable[_KT_co], /) -> bool: ...
def isdisjoint(self, other: Iterable[_KT_co], /) -> bool:
"""
Return True if the view and the given iterable have a null intersection.
"""
if sys.version_info >= (3, 10):
@property
def mapping(self) -> MappingProxyType[_KT_co, _VT_co]: ...
def mapping(self) -> MappingProxyType[_KT_co, _VT_co]:
"""
dictionary that this view refers to
"""
@final
class dict_values(ValuesView[_VT_co], Generic[_KT_co, _VT_co]): # undocumented
def __reversed__(self) -> Iterator[_VT_co]: ...
def __reversed__(self) -> Iterator[_VT_co]:
"""
Return a reverse iterator over the dict values.
"""
if sys.version_info >= (3, 10):
@property
def mapping(self) -> MappingProxyType[_KT_co, _VT_co]: ...
def mapping(self) -> MappingProxyType[_KT_co, _VT_co]:
"""
dictionary that this view refers to
"""
@final
class dict_items(ItemsView[_KT_co, _VT_co]): # undocumented
def __eq__(self, value: object, /) -> bool: ...
def __reversed__(self) -> Iterator[tuple[_KT_co, _VT_co]]: ...
def __reversed__(self) -> Iterator[tuple[_KT_co, _VT_co]]:
"""
Return a reverse iterator over the dict items.
"""
__hash__: ClassVar[None] # type: ignore[assignment]
if sys.version_info >= (3, 13):
def isdisjoint(self, other: Iterable[tuple[_KT_co, _VT_co]], /) -> bool: ...
def isdisjoint(self, other: Iterable[tuple[_KT_co, _VT_co]], /) -> bool:
"""
Return True if the view and the given iterable have a null intersection.
"""
if sys.version_info >= (3, 10):
@property
def mapping(self) -> MappingProxyType[_KT_co, _VT_co]: ...
def mapping(self) -> MappingProxyType[_KT_co, _VT_co]:
"""
dictionary that this view refers to
"""
if sys.version_info >= (3, 12):
@runtime_checkable
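
The dict-view behaviors documented above in action (`.mapping` needs Python 3.10+):

```python
d = {"a": 1, "b": 2}
print(list(reversed(d.keys())))    # ['b', 'a']
print(d.keys().isdisjoint({"c"}))  # True -- no keys in common
print(d.items().mapping)           # mappingproxy({'a': 1, 'b': 2})
```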


@@ -1,3 +1,7 @@
"""
Internal classes used by the gzip, lzma and bz2 modules
"""
# _compression is replaced by compression._common._streams on Python 3.14+ (PEP-784)
from _typeshed import Incomplete, WriteableBuffer
@@ -12,9 +16,16 @@ class _Reader(Protocol):
def seekable(self) -> bool: ...
def seek(self, n: int, /) -> Any: ...
class BaseStream(BufferedIOBase): ...
class BaseStream(BufferedIOBase):
"""
Mode-checking helper functions.
"""
class DecompressReader(RawIOBase):
"""
Adapts the decompressor API to a RawIOBase reader API
"""
def __init__(
self,
fp: _Reader,


@@ -1,3 +1,7 @@
"""
Context Variables
"""
import sys
from collections.abc import Callable, Iterator, Mapping
from types import GenericAlias, TracebackType
@@ -18,14 +22,43 @@ class ContextVar(Generic[_T]):
@property
def name(self) -> str: ...
@overload
def get(self) -> _T: ...
def get(self) -> _T:
"""
Return a value for the context variable for the current context.
If there is no value for the variable in the current context, the method will:
* return the value of the default argument of the method, if provided; or
* return the default value for the context variable, if it was created
with one; or
* raise a LookupError.
"""
@overload
def get(self, default: _T, /) -> _T: ...
@overload
def get(self, default: _D, /) -> _D | _T: ...
def set(self, value: _T, /) -> Token[_T]: ...
def reset(self, token: Token[_T], /) -> None: ...
def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
def set(self, value: _T, /) -> Token[_T]:
"""
Call to set a new value for the context variable in the current context.
The required value argument is the new value for the context variable.
Returns a Token object that can be used to restore the variable to its previous
value via the `ContextVar.reset()` method.
"""
def reset(self, token: Token[_T], /) -> None:
"""
Reset the context variable.
The variable is reset to the value it had before the `ContextVar.set()` that
created the token was used.
"""
def __class_getitem__(cls, item: Any, /) -> GenericAlias:
"""
See PEP 585
"""
@final
class Token(Generic[_T]):
@@ -35,12 +68,22 @@ class Token(Generic[_T]):
def old_value(self) -> Any: ... # returns either _T or MISSING, but that's hard to express
MISSING: ClassVar[object]
__hash__: ClassVar[None] # type: ignore[assignment]
def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
def __class_getitem__(cls, item: Any, /) -> GenericAlias:
"""
See PEP 585
"""
if sys.version_info >= (3, 14):
def __enter__(self) -> Self: ...
def __enter__(self) -> Self:
"""
Enter into Token context manager.
"""
def __exit__(
self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None
) -> None: ...
) -> None:
"""
Exit from Token context manager, restore the linked ContextVar.
"""
def copy_context() -> Context: ...
@@ -50,15 +93,37 @@ def copy_context() -> Context: ...
class Context(Mapping[ContextVar[Any], Any]):
def __init__(self) -> None: ...
@overload
def get(self, key: ContextVar[_T], default: None = None, /) -> _T | None: ...
def get(self, key: ContextVar[_T], default: None = None, /) -> _T | None:
"""
Return the value for `key` if `key` has the value in the context object.
If `key` does not exist, return `default`. If `default` is not given,
return None.
"""
@overload
def get(self, key: ContextVar[_T], default: _T, /) -> _T: ...
@overload
def get(self, key: ContextVar[_T], default: _D, /) -> _T | _D: ...
def run(self, callable: Callable[_P, _T], *args: _P.args, **kwargs: _P.kwargs) -> _T: ...
def copy(self) -> Context: ...
def copy(self) -> Context:
"""
Return a shallow copy of the context object.
"""
__hash__: ClassVar[None] # type: ignore[assignment]
def __getitem__(self, key: ContextVar[_T], /) -> _T: ...
def __iter__(self) -> Iterator[ContextVar[Any]]: ...
def __len__(self) -> int: ...
def __getitem__(self, key: ContextVar[_T], /) -> _T:
"""
Return self[key].
"""
def __iter__(self) -> Iterator[ContextVar[Any]]:
"""
Implement iter(self).
"""
def __len__(self) -> int:
"""
Return len(self).
"""
def __eq__(self, value: object, /) -> bool: ...
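
A set/reset round-trip covering the `ContextVar` and `Context` docstrings above:

```python
from contextvars import ContextVar, copy_context

var: ContextVar[str] = ContextVar("var", default="default")
print(var.get())            # 'default' -- falls back to the declared default

token = var.set("override")
print(var.get())            # 'override'
print(copy_context()[var])  # 'override' -- Context maps explicitly set vars

var.reset(token)            # restore the value from before set()
print(var.get())            # 'default'
```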


@@ -1,3 +1,7 @@
"""
CSV parsing and writing.
"""
import csv
import sys
from _typeshed import SupportsWrite
@@ -25,6 +29,12 @@ class Error(Exception): ...
_DialectLike: TypeAlias = str | Dialect | csv.Dialect | type[Dialect | csv.Dialect]
class Dialect:
"""
CSV dialect
The Dialect type records CSV parsing and generation options.
"""
delimiter: str
quotechar: str | None
escapechar: str | None
@@ -49,22 +59,69 @@ class Dialect:
if sys.version_info >= (3, 10):
# This class calls itself _csv.reader.
class Reader:
"""
CSV reader
Reader objects are responsible for reading and parsing tabular data
in CSV format.
"""
@property
def dialect(self) -> Dialect: ...
line_num: int
def __iter__(self) -> Self: ...
def __next__(self) -> list[str]: ...
def __iter__(self) -> Self:
"""
Implement iter(self).
"""
def __next__(self) -> list[str]:
"""
Implement next(self).
"""
# This class calls itself _csv.writer.
class Writer:
"""
CSV writer
Writer objects are responsible for generating tabular data
in CSV format from sequence input.
"""
@property
def dialect(self) -> Dialect: ...
if sys.version_info >= (3, 13):
def writerow(self, row: Iterable[Any], /) -> Any: ...
def writerows(self, rows: Iterable[Iterable[Any]], /) -> None: ...
def writerow(self, row: Iterable[Any], /) -> Any:
"""
writerow(iterable)
Construct and write a CSV record from an iterable of fields. Non-string
elements will be converted to string.
"""
def writerows(self, rows: Iterable[Iterable[Any]], /) -> None:
"""
writerows(iterable of iterables)
Construct and write a series of iterables to a csv file. Non-string
elements will be converted to string.
"""
else:
def writerow(self, row: Iterable[Any]) -> Any: ...
def writerows(self, rows: Iterable[Iterable[Any]]) -> None: ...
def writerow(self, row: Iterable[Any]) -> Any:
"""
writerow(iterable)
Construct and write a CSV record from an iterable of fields. Non-string
elements will be converted to string.
"""
def writerows(self, rows: Iterable[Iterable[Any]]) -> None:
"""
writerows(iterable of iterables)
Construct and write a series of iterables to a csv file. Non-string
elements will be converted to string.
"""
# For the return types below.
# These aliases can be removed when typeshed drops support for 3.9.
@@ -101,7 +158,22 @@ def writer(
lineterminator: str = "\r\n",
quoting: _QuotingType = 0,
strict: bool = False,
) -> _writer: ...
) -> _writer:
"""
csv_writer = csv.writer(fileobj [, dialect='excel']
[optional keyword args])
for row in sequence:
csv_writer.writerow(row)
[or]
csv_writer = csv.writer(fileobj [, dialect='excel']
[optional keyword args])
csv_writer.writerows(rows)
The "fileobj" argument can be any object that supports the file API.
"""
def reader(
csvfile: Iterable[str],
/,
@@ -115,7 +187,23 @@ def reader(
lineterminator: str = "\r\n",
quoting: _QuotingType = 0,
strict: bool = False,
) -> _reader: ...
) -> _reader:
"""
csv_reader = reader(iterable [, dialect='excel']
[optional keyword args])
for row in csv_reader:
process(row)
The "iterable" argument can be any object that returns a line
of input for each iteration, such as a file object or a list. The
optional "dialect" parameter is discussed below. The function
also accepts optional keyword arguments which override settings
provided by the dialect.
The returned object is an iterator. Each iteration returns a row
of the CSV file (which can span multiple input lines).
"""
def register_dialect(
name: str,
dialect: type[Dialect | csv.Dialect] = ...,
@@ -128,8 +216,39 @@ def register_dialect(
lineterminator: str = "\r\n",
quoting: _QuotingType = 0,
strict: bool = False,
) -> None: ...
def unregister_dialect(name: str) -> None: ...
def get_dialect(name: str) -> Dialect: ...
def list_dialects() -> list[str]: ...
def field_size_limit(new_limit: int = ...) -> int: ...
) -> None:
"""
Create a mapping from a string name to a dialect class.
dialect = csv.register_dialect(name[, dialect[, **fmtparams]])
"""
def unregister_dialect(name: str) -> None:
"""
Delete the name/dialect mapping associated with a string name.
csv.unregister_dialect(name)
"""
def get_dialect(name: str) -> Dialect:
"""
Return the dialect instance associated with name.
dialect = csv.get_dialect(name)
"""
def list_dialects() -> list[str]:
"""
Return a list of all known dialect names.
names = csv.list_dialects()
"""
def field_size_limit(new_limit: int = ...) -> int:
"""
Sets an upper limit on parsed fields.
csv.field_size_limit([limit])
Returns old limit. If limit is not given, no new limit is set and
the old limit is returned
"""


@@ -1,3 +1,7 @@
"""
Create and manipulate C compatible data types in Python.
"""
import _typeshed
import sys
from _typeshed import ReadableBuffer, StrOrBytesPath, WriteableBuffer
@@ -47,10 +51,20 @@ if sys.platform == "win32":
def FreeLibrary(handle: int, /) -> None: ...
else:
def dlclose(handle: int, /) -> None: ...
def dlclose(handle: int, /) -> None:
"""
dlclose a library
"""
# The default for flag is RTLD_GLOBAL|RTLD_LOCAL, which is platform dependent.
def dlopen(name: StrOrBytesPath, flag: int = ..., /) -> int: ...
def dlsym(handle: int, name: str, /) -> int: ...
def dlopen(name: StrOrBytesPath, flag: int = ..., /) -> int:
"""
dlopen(name, flag={RTLD_GLOBAL|RTLD_LOCAL}) open a shared library
"""
def dlsym(handle: int, name: str, /) -> int:
"""
find symbol in shared library
"""
if sys.version_info >= (3, 13):
# This class is not exposed. It calls itself _ctypes.CType_Type.
@@ -97,6 +111,10 @@ class _PyCSimpleType(_CTypeBaseType):
def __rmul__(self: type[_CT], value: int, /) -> type[Array[_CT]]: ... # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues]
class _SimpleCData(_CData, Generic[_T], metaclass=_PyCSimpleType):
"""
XXX to be provided
"""
value: _T
# The TypeVar can be unsolved here,
# but we can't use overloads without creating many, many mypy false-positive errors
@@ -121,6 +139,10 @@ class _PyCPointerType(_CTypeBaseType):
def __rmul__(cls: type[_CT], other: int) -> type[Array[_CT]]: ... # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues]
class _Pointer(_PointerLike, _CData, Generic[_CT], metaclass=_PyCPointerType):
"""
XXX to be provided
"""
_type_: type[_CT]
contents: _CT
@overload
@@ -128,17 +150,41 @@ class _Pointer(_PointerLike, _CData, Generic[_CT], metaclass=_PyCPointerType):
@overload
def __init__(self, arg: _CT) -> None: ...
@overload
def __getitem__(self, key: int, /) -> Any: ...
def __getitem__(self, key: int, /) -> Any:
"""
Return self[key].
"""
@overload
def __getitem__(self, key: slice, /) -> list[Any]: ...
def __setitem__(self, key: int, value: Any, /) -> None: ...
def __setitem__(self, key: int, value: Any, /) -> None:
"""
Set self[key] to value.
"""
if sys.version_info < (3, 14):
@overload
def POINTER(type: None, /) -> type[c_void_p]: ...
def POINTER(type: None, /) -> type[c_void_p]:
"""
Create and return a new ctypes pointer type.
type
A ctypes type.
Pointer types are cached and reused internally,
so calling this function repeatedly is cheap.
"""
@overload
def POINTER(type: type[_CT], /) -> type[_Pointer[_CT]]: ...
def pointer(obj: _CT, /) -> _Pointer[_CT]: ...
def pointer(obj: _CT, /) -> _Pointer[_CT]:
"""
Create a new pointer instance, pointing to 'obj'.
The returned object is of the type POINTER(type(obj)). Note that if you
just want to pass a pointer to an object to a foreign function call, you
should use byref(obj) which is much faster.
"""
# This class is not exposed. It calls itself _ctypes.CArgObject.
@final
@@ -146,10 +192,18 @@ if sys.version_info < (3, 14):
class _CArgObject: ...
if sys.version_info >= (3, 14):
def byref(obj: _CData | _CDataType, offset: int = 0, /) -> _CArgObject: ...
def byref(obj: _CData | _CDataType, offset: int = 0, /) -> _CArgObject:
"""
Return a pointer lookalike to a C instance, only usable as function argument.
"""
else:
def byref(obj: _CData | _CDataType, offset: int = 0) -> _CArgObject: ...
def byref(obj: _CData | _CDataType, offset: int = 0) -> _CArgObject:
"""
byref(C instance[, offset=0]) -> byref-object
Return a pointer lookalike to a C instance, only usable
as function argument
"""
_ECT: TypeAlias = Callable[[_CData | _CDataType | None, CFuncPtr, tuple[_CData | _CDataType, ...]], _CDataType]
_PF: TypeAlias = tuple[int] | tuple[int, str | None] | tuple[int, str | None, Any]
@@ -168,6 +222,10 @@ class _PyCFuncPtrType(_CTypeBaseType):
def __rmul__(cls: type[_CT], other: int) -> type[Array[_CT]]: ... # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues]
class CFuncPtr(_PointerLike, _CData, metaclass=_PyCFuncPtrType):
"""
Function Pointer
"""
restype: type[_CDataType] | Callable[[int], Any] | None
argtypes: Sequence[type[_CDataType]]
errcheck: _ECT
@@ -187,7 +245,10 @@ class CFuncPtr(_PointerLike, _CData, metaclass=_PyCFuncPtrType):
cls, vtbl_index: int, name: str, paramflags: tuple[_PF, ...] | None = ..., iid: _CData | _CDataType | None = ..., /
) -> Self: ...
def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
def __call__(self, *args: Any, **kwargs: Any) -> Any:
"""
Call self as a function.
"""
_GetT = TypeVar("_GetT")
_SetT = TypeVar("_SetT")
@@ -229,6 +290,10 @@ class _UnionType(_CTypeBaseType):
def __rmul__(cls: type[_CT], other: int) -> type[Array[_CT]]: ... # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues]
class Union(_CData, metaclass=_UnionType):
"""
Union base class
"""
_fields_: ClassVar[Sequence[tuple[str, type[_CDataType]] | tuple[str, type[_CDataType], int]]]
_pack_: ClassVar[int]
_anonymous_: ClassVar[Sequence[str]]
@@ -257,6 +322,10 @@ class _PyCStructType(_CTypeBaseType):
def __rmul__(cls: type[_CT], other: int) -> type[Array[_CT]]: ... # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues]
class Structure(_CData, metaclass=_PyCStructType):
"""
Structure base class
"""
_fields_: ClassVar[Sequence[tuple[str, type[_CDataType]] | tuple[str, type[_CDataType], int]]]
_pack_: ClassVar[int]
_anonymous_: ClassVar[Sequence[str]]
@@ -281,6 +350,16 @@ class _PyCArrayType(_CTypeBaseType):
def __rmul__(cls: type[_CT], other: int) -> type[Array[_CT]]: ... # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues]
class Array(_CData, Generic[_CT], metaclass=_PyCArrayType):
"""
Abstract base class for arrays.
The recommended way to create concrete array types is by multiplying any
ctypes data type with a non-negative integer. Alternatively, you can subclass
this type and define _length_ and _type_ class variables. Array elements can
be read and written using standard subscript and slice accesses for slice
reads, the resulting object is not itself an Array.
"""
@property
@abstractmethod
def _length_(self) -> int: ...
@@ -311,28 +390,65 @@ class Array(_CData, Generic[_CT], metaclass=_PyCArrayType):
# the array element type would belong are annotated with Any instead.
def __init__(self, *args: Any) -> None: ...
@overload
def __getitem__(self, key: int, /) -> Any: ...
def __getitem__(self, key: int, /) -> Any:
"""
Return self[key].
"""
@overload
def __getitem__(self, key: slice, /) -> list[Any]: ...
@overload
def __setitem__(self, key: int, value: Any, /) -> None: ...
def __setitem__(self, key: int, value: Any, /) -> None:
"""
Set self[key] to value.
"""
@overload
def __setitem__(self, key: slice, value: Iterable[Any], /) -> None: ...
def __iter__(self) -> Iterator[Any]: ...
# Can't inherit from Sized because the metaclass conflict between
# Sized and _CData prevents using _CDataMeta.
def __len__(self) -> int: ...
def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
def __len__(self) -> int:
"""
Return len(self).
"""
def __class_getitem__(cls, item: Any, /) -> GenericAlias:
"""
See PEP 585
"""
def addressof(obj: _CData | _CDataType, /) -> int:
"""
Return the address of the C instance internal buffer
"""
def alignment(obj_or_type: _CData | _CDataType | type[_CData | _CDataType], /) -> int:
"""
alignment(C type) -> integer
alignment(C instance) -> integer
Return the alignment requirements of a C instance
"""
def addressof(obj: _CData | _CDataType, /) -> int: ...
def alignment(obj_or_type: _CData | _CDataType | type[_CData | _CDataType], /) -> int: ...
def get_errno() -> int: ...
def resize(obj: _CData | _CDataType, size: int, /) -> None: ...
def resize(obj: _CData | _CDataType, size: int, /) -> None:
"""
Resize the memory buffer of a ctypes instance
"""
def set_errno(value: int, /) -> int: ...
def sizeof(obj_or_type: _CData | _CDataType | type[_CData | _CDataType], /) -> int: ...
def sizeof(obj_or_type: _CData | _CDataType | type[_CData | _CDataType], /) -> int:
"""
Return the size in bytes of a C instance.
"""
def PyObj_FromPtr(address: int, /) -> Any: ...
def Py_DECREF(o: _T, /) -> _T: ...
def Py_INCREF(o: _T, /) -> _T: ...
def buffer_info(o: _CData | _CDataType | type[_CData | _CDataType], /) -> tuple[str, int, tuple[int, ...]]: ...
def buffer_info(o: _CData | _CDataType | type[_CData | _CDataType], /) -> tuple[str, int, tuple[int, ...]]:
"""
Return buffer interface information
"""
def call_cdeclfunction(address: int, arguments: tuple[Any, ...], /) -> Any: ...
def call_function(address: int, arguments: tuple[Any, ...], /) -> Any: ...
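
A few of the stubbed primitives (`pointer`, `sizeof`, `alignment`, array types) in a runnable sketch:

```python
import ctypes

n = ctypes.c_int(42)
p = ctypes.pointer(n)              # _Pointer[c_int] pointing at n
print(p.contents.value)            # 42
print(ctypes.sizeof(n), ctypes.alignment(n))

IntArray5 = ctypes.c_int * 5       # multiplying a type builds an Array type
arr = IntArray5(1, 2, 3, 4, 5)
print(arr[1], arr[1:3], len(arr))  # 2 [2, 3] 5
```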

File diff suppressed because it is too large.


@@ -8,20 +8,86 @@ class error(Exception): ...
@final
class panel:
def above(self) -> panel: ...
def below(self) -> panel: ...
def bottom(self) -> None: ...
def hidden(self) -> bool: ...
def hide(self) -> None: ...
def move(self, y: int, x: int, /) -> None: ...
def replace(self, win: window, /) -> None: ...
def set_userptr(self, obj: object, /) -> None: ...
def show(self) -> None: ...
def top(self) -> None: ...
def userptr(self) -> object: ...
def window(self) -> window: ...
def above(self) -> panel:
"""
Return the panel above the current panel.
"""
def bottom_panel() -> panel: ...
def new_panel(win: window, /) -> panel: ...
def top_panel() -> panel: ...
def update_panels() -> panel: ...
def below(self) -> panel:
"""
Return the panel below the current panel.
"""
def bottom(self) -> None:
"""
Push the panel to the bottom of the stack.
"""
def hidden(self) -> bool:
"""
Return True if the panel is hidden (not visible), False otherwise.
"""
def hide(self) -> None:
"""
Hide the panel.
This does not delete the object, it just makes the window on screen invisible.
"""
def move(self, y: int, x: int, /) -> None:
"""
Move the panel to the screen coordinates (y, x).
"""
def replace(self, win: window, /) -> None:
"""
Change the window associated with the panel to the window win.
"""
def set_userptr(self, obj: object, /) -> None:
"""
Set the panel's user pointer to obj.
"""
def show(self) -> None:
"""
Display the panel (which might have been hidden).
"""
def top(self) -> None:
"""
Push panel to the top of the stack.
"""
def userptr(self) -> object:
"""
Return the user pointer for the panel.
"""
def window(self) -> window:
"""
Return the window object associated with the panel.
"""
def bottom_panel() -> panel:
"""
Return the bottom panel in the panel stack.
"""
def new_panel(win: window, /) -> panel:
"""
Return a panel object, associating it with the given window win.
"""
def top_panel() -> panel:
"""
Return the top panel in the panel stack.
"""
def update_panels() -> panel:
"""
Updates the virtual screen after changes in the panel stack.
This does not call curses.doupdate(), so you'll have to do this yourself.
"""


@@ -39,6 +39,28 @@ if sys.platform != "win32":
__init__: None # type: ignore[assignment]
if sys.version_info >= (3, 11):
def open(filename: StrOrBytesPath, flags: str = "r", mode: int = 0o666, /) -> _dbm: ...
def open(filename: StrOrBytesPath, flags: str = "r", mode: int = 0o666, /) -> _dbm:
"""
Return a database object.
filename
The filename to open.
flags
How to open the file. "r" for reading, "w" for writing, etc.
mode
If creating a new file, the mode bits for the new file
(e.g. os.O_RDWR).
"""
else:
def open(filename: str, flags: str = "r", mode: int = 0o666, /) -> _dbm: ...
def open(filename: str, flags: str = "r", mode: int = 0o666, /) -> _dbm:
"""
Return a database object.
filename
The filename to open.
flags
How to open the file. "r" for reading, "w" for writing, etc.
mode
If creating a new file, the mode bits for the new file
(e.g. os.O_RDWR).
"""


@@ -1,3 +1,7 @@
"""
C decimal arithmetic module
"""
import sys
from decimal import (
Clamped as Clamped,
@@ -44,8 +48,15 @@ MIN_ETINY: Final[int]
if sys.version_info >= (3, 14):
IEEE_CONTEXT_MAX_BITS: Final[int]
def setcontext(context: Context, /) -> None: ...
def getcontext() -> Context: ...
def setcontext(context: Context, /) -> None:
"""
Set a new default context.
"""
def getcontext() -> Context:
"""
Get the current default context.
"""
if sys.version_info >= (3, 11):
def localcontext(
@@ -59,13 +70,30 @@ if sys.version_info >= (3, 11):
clamp: int | None = ...,
traps: dict[_TrapType, bool] | None = ...,
flags: dict[_TrapType, bool] | None = ...,
) -> _ContextManager: ...
) -> _ContextManager:
"""
Return a context manager that will set the default context to a copy of ctx
on entry to the with-statement and restore the previous default context when
exiting the with-statement. If no context is specified, a copy of the current
default context is used.
"""
else:
def localcontext(ctx: Context | None = None) -> _ContextManager: ...
def localcontext(ctx: Context | None = None) -> _ContextManager:
"""
Return a context manager that will set the default context to a copy of ctx
on entry to the with-statement and restore the previous default context when
exiting the with-statement. If no context is specified, a copy of the current
default context is used.
"""
if sys.version_info >= (3, 14):
def IEEEContext(bits: int, /) -> Context: ...
def IEEEContext(bits: int, /) -> Context:
"""
Return a context object initialized to the proper values for one of the
IEEE interchange formats. The argument must be a multiple of 32 and less
than IEEE_CONTEXT_MAX_BITS.
"""
DefaultContext: Context
BasicContext: Context
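A short sketch of the localcontext() contract documented above: the default context is swapped for a copy inside the with-block and restored on exit.

    from decimal import Decimal, getcontext, localcontext

    with localcontext() as ctx:
        ctx.prec = 5                    # affects only this block
        print(Decimal(1) / Decimal(7))  # 0.14286 -- five significant digits
    print(getcontext().prec)            # the default precision (28) is restored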


@ -1,3 +1,12 @@
"""
Core implementation of import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
import importlib.abc
import importlib.machinery
import sys
@ -14,16 +23,71 @@ def __import__(
locals: Mapping[str, object] | None = None,
fromlist: Sequence[str] = (),
level: int = 0,
) -> ModuleType: ...
) -> ModuleType:
"""
Import a module.
The 'globals' argument is used to infer where the import is occurring from
to handle relative imports. The 'locals' argument is ignored. The
'fromlist' argument specifies what should exist as attributes on the module
being imported (e.g. ``from module import <fromlist>``). The 'level'
argument represents the package location to import from in a relative
import (e.g. ``from ..pkg import mod`` would have a 'level' of 2).
"""
def spec_from_loader(
name: str, loader: LoaderProtocol | None, *, origin: str | None = None, is_package: bool | None = None
) -> importlib.machinery.ModuleSpec | None: ...
def module_from_spec(spec: importlib.machinery.ModuleSpec) -> types.ModuleType: ...
) -> importlib.machinery.ModuleSpec | None:
"""
Return a module spec based on various loader methods.
"""
def module_from_spec(spec: importlib.machinery.ModuleSpec) -> types.ModuleType:
"""
Create a module based on the provided spec.
"""
def _init_module_attrs(
spec: importlib.machinery.ModuleSpec, module: types.ModuleType, *, override: bool = False
) -> types.ModuleType: ...
class ModuleSpec:
"""
The specification for a module, used for loading.
A module's spec is the source for information about the module. For
data associated with the module, including source, use the spec's
loader.
`name` is the absolute name of the module. `loader` is the loader
to use when loading the module. `parent` is the name of the
package the module is in. The parent is derived from the name.
`is_package` determines if the module is considered a package or
not. On modules this is reflected by the `__path__` attribute.
`origin` is the specific location used by the loader from which to
load the module, if that information is available. When filename is
set, origin will match.
`has_location` indicates that a spec's "origin" reflects a location.
When this is True, `__file__` attribute of the module is set.
`cached` is the location of the cached bytecode file, if any. It
corresponds to the `__cached__` attribute.
`submodule_search_locations` is the sequence of path entries to
search when importing submodules. If set, is_package should be
True--and False otherwise.
Packages are simply modules that (may) have submodules. If a spec
has a non-None value in `submodule_search_locations`, the import
system will consider modules loaded from the spec as packages.
Only finders (see importlib.abc.MetaPathFinder and
importlib.abc.PathEntryFinder) should modify ModuleSpec instances.
"""
def __init__(
self,
name: str,
@ -40,12 +104,22 @@ class ModuleSpec:
loader_state: Any
cached: str | None
@property
def parent(self) -> str | None: ...
def parent(self) -> str | None:
"""
The name of the module's parent.
"""
has_location: bool
def __eq__(self, other: object) -> bool: ...
__hash__: ClassVar[None] # type: ignore[assignment]
class BuiltinImporter(importlib.abc.MetaPathFinder, importlib.abc.InspectLoader):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
# MetaPathFinder
if sys.version_info < (3, 12):
@classmethod
@ -57,29 +131,67 @@ class BuiltinImporter(importlib.abc.MetaPathFinder, importlib.abc.InspectLoader)
) -> ModuleSpec | None: ...
# InspectLoader
@classmethod
def is_package(cls, fullname: str) -> bool: ...
def is_package(cls, fullname: str) -> bool:
"""
Return False as built-in modules are never packages.
"""
@classmethod
def load_module(cls, fullname: str) -> types.ModuleType: ...
def load_module(cls, fullname: str) -> types.ModuleType:
"""
Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module() instead.
"""
@classmethod
def get_code(cls, fullname: str) -> None: ...
def get_code(cls, fullname: str) -> None:
"""
Return None as built-in modules do not have code objects.
"""
@classmethod
def get_source(cls, fullname: str) -> None: ...
def get_source(cls, fullname: str) -> None:
"""
Return None as built-in modules do not have source code.
"""
# Loader
if sys.version_info < (3, 12):
@staticmethod
def module_repr(module: types.ModuleType) -> str: ...
if sys.version_info >= (3, 10):
@staticmethod
def create_module(spec: ModuleSpec) -> types.ModuleType | None: ...
def create_module(spec: ModuleSpec) -> types.ModuleType | None:
"""
Create a built-in module
"""
@staticmethod
def exec_module(module: types.ModuleType) -> None: ...
def exec_module(module: types.ModuleType) -> None:
"""
Exec a built-in module
"""
else:
@classmethod
def create_module(cls, spec: ModuleSpec) -> types.ModuleType | None: ...
def create_module(cls, spec: ModuleSpec) -> types.ModuleType | None:
"""
Create a built-in module
"""
@classmethod
def exec_module(cls, module: types.ModuleType) -> None: ...
def exec_module(cls, module: types.ModuleType) -> None:
"""
Exec a built-in module
"""
class FrozenImporter(importlib.abc.MetaPathFinder, importlib.abc.InspectLoader):
"""
Meta path import for frozen modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
# MetaPathFinder
if sys.version_info < (3, 12):
@classmethod
@ -91,23 +203,46 @@ class FrozenImporter(importlib.abc.MetaPathFinder, importlib.abc.InspectLoader):
) -> ModuleSpec | None: ...
# InspectLoader
@classmethod
def is_package(cls, fullname: str) -> bool: ...
def is_package(cls, fullname: str) -> bool:
"""
Return True if the frozen module is a package.
"""
@classmethod
def load_module(cls, fullname: str) -> types.ModuleType: ...
def load_module(cls, fullname: str) -> types.ModuleType:
"""
Load a frozen module.
This method is deprecated. Use exec_module() instead.
"""
@classmethod
def get_code(cls, fullname: str) -> None: ...
def get_code(cls, fullname: str) -> None:
"""
Return the code object for the frozen module.
"""
@classmethod
def get_source(cls, fullname: str) -> None: ...
def get_source(cls, fullname: str) -> None:
"""
Return None as frozen modules do not have source code.
"""
# Loader
if sys.version_info < (3, 12):
@staticmethod
def module_repr(m: types.ModuleType) -> str: ...
if sys.version_info >= (3, 10):
@staticmethod
def create_module(spec: ModuleSpec) -> types.ModuleType | None: ...
def create_module(spec: ModuleSpec) -> types.ModuleType | None:
"""
Set __file__, if able.
"""
else:
@classmethod
def create_module(cls, spec: ModuleSpec) -> types.ModuleType | None: ...
def create_module(cls, spec: ModuleSpec) -> types.ModuleType | None:
"""
Set __file__, if able.
"""
@staticmethod
def exec_module(module: types.ModuleType) -> None: ...
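module_from_spec() and friends above are re-exported through importlib.util; a hedged sketch of the standard recipe, plus the fromlist behaviour documented for __import__():

    import importlib.util

    spec = importlib.util.find_spec("json")      # locate an installed module
    mod = importlib.util.module_from_spec(spec)  # create it without executing
    spec.loader.exec_module(mod)                 # run it in the fresh namespace
    print(mod.dumps({"ok": True}))               # {"ok": true}

    pkg = __import__("os.path", fromlist=["join"])  # non-empty fromlist returns os.path itself
    print(pkg.join("a", "b"))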


@ -1,3 +1,12 @@
"""
Core implementation of path-based import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
import _ast
import _io
import importlib.abc
@ -26,21 +35,69 @@ else:
MAGIC_NUMBER: bytes
def cache_from_source(path: StrPath, debug_override: bool | None = None, *, optimization: Any | None = None) -> str: ...
def source_from_cache(path: StrPath) -> str: ...
def decode_source(source_bytes: ReadableBuffer) -> str: ...
def cache_from_source(path: StrPath, debug_override: bool | None = None, *, optimization: Any | None = None) -> str:
"""
Given the path to a .py file, return the path to its .pyc file.
The .py file does not need to exist; this simply returns the path to the
.pyc file calculated as if the .py file were imported.
The 'optimization' parameter controls the presumed optimization level of
the bytecode file. If 'optimization' is not None, the string representation
of the argument is taken and verified to be alphanumeric (else ValueError
is raised).
The debug_override parameter is deprecated. If debug_override is not None,
a True value is the same as setting 'optimization' to the empty string
while a False value is equivalent to setting 'optimization' to '1'.
If sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
def source_from_cache(path: StrPath) -> str:
"""
Given the path to a .pyc. file, return the path to its .py file.
The .pyc file does not need to exist; this simply returns the path to
the .py file calculated to correspond to the .pyc file. If path does
not conform to PEP 3147/488 format, ValueError will be raised. If
sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
def decode_source(source_bytes: ReadableBuffer) -> str:
"""
Decode bytes representing source code and return the string.
Universal newline support is used in the decoding.
"""
def spec_from_file_location(
name: str,
location: StrOrBytesPath | None = None,
*,
loader: LoaderProtocol | None = None,
submodule_search_locations: list[str] | None = ...,
) -> importlib.machinery.ModuleSpec | None: ...
) -> importlib.machinery.ModuleSpec | None:
"""
Return a module spec based on a file location.
To indicate that the module is a package, set
submodule_search_locations to a list of directory paths. An
empty list is sufficient, though it's not otherwise useful to the
import system.
The loader must take a spec as its only __init__() arg.
"""
@deprecated(
"Deprecated as of Python 3.6: Use site configuration instead. "
"Future versions of Python may not enable this finder by default."
)
class WindowsRegistryFinder(importlib.abc.MetaPathFinder):
"""
Meta path finder for modules declared in the Windows registry.
"""
if sys.version_info < (3, 12):
@classmethod
def find_module(cls, fullname: str, path: Sequence[str] | None = None) -> importlib.abc.Loader | None: ...
@ -51,23 +108,56 @@ class WindowsRegistryFinder(importlib.abc.MetaPathFinder):
) -> ModuleSpec | None: ...
class PathFinder(importlib.abc.MetaPathFinder):
"""
Meta path finder for sys.path and package __path__ attributes.
"""
if sys.version_info >= (3, 10):
@staticmethod
def invalidate_caches() -> None: ...
def invalidate_caches() -> None:
"""
Call the invalidate_caches() method on all path entry finders
stored in sys.path_importer_cache (where implemented).
"""
else:
@classmethod
def invalidate_caches(cls) -> None: ...
def invalidate_caches(cls) -> None:
"""
Call the invalidate_caches() method on all path entry finders
stored in sys.path_importer_cache (where implemented).
"""
if sys.version_info >= (3, 10):
@staticmethod
def find_distributions(context: DistributionFinder.Context = ...) -> Iterable[PathDistribution]: ...
def find_distributions(context: DistributionFinder.Context = ...) -> Iterable[PathDistribution]:
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching ``context.name``
(or all names if ``None`` indicated) along the paths in the list
of directories ``context.path``.
"""
else:
@classmethod
def find_distributions(cls, context: DistributionFinder.Context = ...) -> Iterable[PathDistribution]: ...
def find_distributions(cls, context: DistributionFinder.Context = ...) -> Iterable[PathDistribution]:
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching ``context.name``
(or all names if ``None`` indicated) along the paths in the list
of directories ``context.path``.
"""
@classmethod
def find_spec(
cls, fullname: str, path: Sequence[str] | None = None, target: types.ModuleType | None = None
) -> ModuleSpec | None: ...
) -> ModuleSpec | None:
"""
Try to find a spec for 'fullname' on sys.path or 'path'.
The search is based on sys.path_hooks and sys.path_importer_cache.
"""
if sys.version_info < (3, 12):
@classmethod
def find_module(cls, fullname: str, path: Sequence[str] | None = None) -> importlib.abc.Loader | None: ...
@ -79,36 +169,143 @@ BYTECODE_SUFFIXES: list[str]
EXTENSION_SUFFIXES: list[str]
class FileFinder(importlib.abc.PathEntryFinder):
"""
File-based finder.
Interactions with the file system are cached for performance, being
refreshed when the directory the finder is handling has been modified.
"""
path: str
def __init__(self, path: str, *loader_details: tuple[type[importlib.abc.Loader], list[str]]) -> None: ...
def __init__(self, path: str, *loader_details: tuple[type[importlib.abc.Loader], list[str]]) -> None:
"""
Initialize with the path to search on and a variable number of
2-tuples containing the loader and the file suffixes the loader
recognizes.
"""
@classmethod
def path_hook(
cls, *loader_details: tuple[type[importlib.abc.Loader], list[str]]
) -> Callable[[str], importlib.abc.PathEntryFinder]: ...
) -> Callable[[str], importlib.abc.PathEntryFinder]:
"""
A class method which returns a closure to use on sys.path_hook
which will return an instance using the specified loaders and the path
called on the closure.
If the path called on the closure is not a directory, ImportError is
raised.
"""
class _LoaderBasics:
def is_package(self, fullname: str) -> bool: ...
def create_module(self, spec: ModuleSpec) -> types.ModuleType | None: ...
def exec_module(self, module: types.ModuleType) -> None: ...
def load_module(self, fullname: str) -> types.ModuleType: ...
"""
Base class of common code needed by both SourceLoader and
SourcelessFileLoader.
"""
def is_package(self, fullname: str) -> bool:
"""
Concrete implementation of InspectLoader.is_package by checking if
the path returned by get_filename has a filename of '__init__.py'.
"""
def create_module(self, spec: ModuleSpec) -> types.ModuleType | None:
"""
Use default semantics for module creation.
"""
def exec_module(self, module: types.ModuleType) -> None:
"""
Execute the module.
"""
def load_module(self, fullname: str) -> types.ModuleType:
"""
This method is deprecated.
"""
class SourceLoader(_LoaderBasics):
def path_mtime(self, path: str) -> float: ...
def set_data(self, path: str, data: bytes) -> None: ...
def get_source(self, fullname: str) -> str | None: ...
def path_stats(self, path: str) -> Mapping[str, Any]: ...
def path_mtime(self, path: str) -> float:
"""
Optional method that returns the modification time (an int) for the
specified path (a str).
Raises OSError when the path cannot be handled.
"""
def set_data(self, path: str, data: bytes) -> None:
"""
Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
"""
def get_source(self, fullname: str) -> str | None:
"""
Concrete implementation of InspectLoader.get_source.
"""
def path_stats(self, path: str) -> Mapping[str, Any]:
"""
Optional method returning a metadata dict for the specified
path (a str).
Possible keys:
- 'mtime' (mandatory) is the numeric timestamp of last source
code modification;
- 'size' (optional) is the size in bytes of the source code.
Implementing this method allows the loader to read bytecode files.
Raises OSError when the path cannot be handled.
"""
def source_to_code(
self, data: ReadableBuffer | str | _ast.Module | _ast.Expression | _ast.Interactive, path: ReadableBuffer | StrPath
) -> types.CodeType: ...
def get_code(self, fullname: str) -> types.CodeType | None: ...
) -> types.CodeType:
"""
Return the code object compiled from source.
The 'data' argument can be any object type that compile() supports.
"""
def get_code(self, fullname: str) -> types.CodeType | None:
"""
Concrete implementation of InspectLoader.get_code.
Reading of bytecode requires path_stats to be implemented. To write
bytecode, set_data must also be implemented.
"""
class FileLoader:
"""
Base file loader class which implements the loader protocol methods that
require file system usage.
"""
name: str
path: str
def __init__(self, fullname: str, path: str) -> None: ...
def get_data(self, path: str) -> bytes: ...
def get_filename(self, name: str | None = None) -> str: ...
def load_module(self, name: str | None = None) -> types.ModuleType: ...
def __init__(self, fullname: str, path: str) -> None:
"""
Cache the module name and the path to the file found by the
finder.
"""
def get_data(self, path: str) -> bytes:
"""
Return the data from path as raw bytes.
"""
def get_filename(self, name: str | None = None) -> str:
"""
Return the path to the source file as found by the finder.
"""
def load_module(self, name: str | None = None) -> types.ModuleType:
"""
Load a module from a file.
This method is deprecated. Use exec_module() instead.
"""
if sys.version_info >= (3, 10):
def get_resource_reader(self, name: str | None = None) -> importlib.readers.FileReader: ...
else:
@ -119,27 +316,77 @@ class FileLoader:
def contents(self) -> Iterator[str]: ...
class SourceFileLoader(importlib.abc.FileLoader, FileLoader, importlib.abc.SourceLoader, SourceLoader): # type: ignore[misc] # incompatible method arguments in base classes
def set_data(self, path: str, data: ReadableBuffer, *, _mode: int = 0o666) -> None: ...
def path_stats(self, path: str) -> Mapping[str, Any]: ...
"""
Concrete implementation of SourceLoader using the file system.
"""
def set_data(self, path: str, data: ReadableBuffer, *, _mode: int = 0o666) -> None:
"""
Write bytes data to a file.
"""
def path_stats(self, path: str) -> Mapping[str, Any]:
"""
Return the metadata for the path.
"""
def source_to_code( # type: ignore[override] # incompatible with InspectLoader.source_to_code
self,
data: ReadableBuffer | str | _ast.Module | _ast.Expression | _ast.Interactive,
path: ReadableBuffer | StrPath,
*,
_optimize: int = -1,
) -> types.CodeType: ...
) -> types.CodeType:
"""
Return the code object compiled from source.
The 'data' argument can be any object type that compile() supports.
"""
class SourcelessFileLoader(importlib.abc.FileLoader, FileLoader, _LoaderBasics):
"""
Loader which handles sourceless file imports.
"""
def get_code(self, fullname: str) -> types.CodeType | None: ...
def get_source(self, fullname: str) -> None: ...
def get_source(self, fullname: str) -> None:
"""
Return None as there is no source code.
"""
class ExtensionFileLoader(FileLoader, _LoaderBasics, importlib.abc.ExecutionLoader):
"""
Loader for extension modules.
The constructor is designed to work with FileFinder.
"""
def __init__(self, name: str, path: str) -> None: ...
def get_filename(self, name: str | None = None) -> str: ...
def get_source(self, fullname: str) -> None: ...
def create_module(self, spec: ModuleSpec) -> types.ModuleType: ...
def exec_module(self, module: types.ModuleType) -> None: ...
def get_code(self, fullname: str) -> None: ...
def get_filename(self, name: str | None = None) -> str:
"""
Return the path to the source file as found by the finder.
"""
def get_source(self, fullname: str) -> None:
"""
Return None as extension modules have no source code.
"""
def create_module(self, spec: ModuleSpec) -> types.ModuleType:
"""
Create an uninitialized extension module
"""
def exec_module(self, module: types.ModuleType) -> None:
"""
Initialize an extension module
"""
def get_code(self, fullname: str) -> None:
"""
Return None as an extension module cannot create a code object.
"""
def __eq__(self, other: object) -> bool: ...
def __hash__(self) -> int: ...
@ -151,15 +398,30 @@ if sys.version_info >= (3, 11):
def is_package(self, fullname: str) -> Literal[True]: ...
def get_source(self, fullname: str) -> Literal[""]: ...
def get_code(self, fullname: str) -> types.CodeType: ...
def create_module(self, spec: ModuleSpec) -> None: ...
def create_module(self, spec: ModuleSpec) -> None:
"""
Use default semantics for module creation.
"""
def exec_module(self, module: types.ModuleType) -> None: ...
@deprecated("load_module() is deprecated; use exec_module() instead")
def load_module(self, fullname: str) -> types.ModuleType: ...
def load_module(self, fullname: str) -> types.ModuleType:
"""
Load a namespace module.
This method is deprecated. Use exec_module() instead.
"""
def get_resource_reader(self, module: types.ModuleType) -> importlib.readers.NamespaceReader: ...
if sys.version_info < (3, 12):
@staticmethod
@deprecated("module_repr() is deprecated, and has been removed in Python 3.12")
def module_repr(module: types.ModuleType) -> str: ...
def module_repr(module: types.ModuleType) -> str:
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
_NamespaceLoader = NamespaceLoader
else:
@ -170,19 +432,43 @@ else:
def is_package(self, fullname: str) -> Literal[True]: ...
def get_source(self, fullname: str) -> Literal[""]: ...
def get_code(self, fullname: str) -> types.CodeType: ...
def create_module(self, spec: ModuleSpec) -> None: ...
def create_module(self, spec: ModuleSpec) -> None:
"""
Use default semantics for module creation.
"""
def exec_module(self, module: types.ModuleType) -> None: ...
@deprecated("load_module() is deprecated; use exec_module() instead")
def load_module(self, fullname: str) -> types.ModuleType: ...
def load_module(self, fullname: str) -> types.ModuleType:
"""
Load a namespace module.
This method is deprecated. Use exec_module() instead.
"""
if sys.version_info >= (3, 10):
@staticmethod
@deprecated("module_repr() is deprecated, and has been removed in Python 3.12")
def module_repr(module: types.ModuleType) -> str: ...
def module_repr(module: types.ModuleType) -> str:
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
def get_resource_reader(self, module: types.ModuleType) -> importlib.readers.NamespaceReader: ...
else:
@classmethod
@deprecated("module_repr() is deprecated, and has been removed in Python 3.12")
def module_repr(cls, module: types.ModuleType) -> str: ...
def module_repr(cls, module: types.ModuleType) -> str:
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
if sys.version_info >= (3, 13):
class AppleFrameworkLoader(ExtensionFileLoader, importlib.abc.ExecutionLoader): ...
class AppleFrameworkLoader(ExtensionFileLoader, importlib.abc.ExecutionLoader):
"""
A loader for modules that have been packaged as frameworks for
compatibility with Apple's iOS App Store policies.
"""


@ -1,3 +1,16 @@
"""
This module provides an interface to the GNU DBM (GDBM) library.
This module is quite similar to the dbm module, but uses GDBM instead to
provide some additional functionality. Please note that the file formats
created by GDBM and dbm are incompatible.
GDBM objects behave like mappings (dictionaries), except that keys and
values are always immutable bytes-like objects or strings. Printing
a GDBM object doesn't print the keys and values, and the items() and
values() methods are not supported.
"""
import sys
from _typeshed import ReadOnlyBuffer, StrOrBytesPath
from types import TracebackType
@ -42,6 +55,30 @@ if sys.platform != "win32":
__init__: None # type: ignore[assignment]
if sys.version_info >= (3, 11):
def open(filename: StrOrBytesPath, flags: str = "r", mode: int = 0o666, /) -> _gdbm: ...
def open(filename: StrOrBytesPath, flags: str = "r", mode: int = 0o666, /) -> _gdbm:
"""
Open a dbm database and return a dbm object.
The filename argument is the name of the database file.
The optional flags argument can be 'r' (to open an existing database
for reading only -- default), 'w' (to open an existing database for
reading and writing), 'c' (which creates the database if it doesn't
exist), or 'n' (which always creates a new empty database).
Some versions of gdbm support additional flags which must be
appended to one of the flags described above. The module constant
'open_flags' is a string of valid additional flags. The 'f' flag
opens the database in fast mode; altered data will not automatically
be written to the disk after every change. This results in faster
writes to the database, but may result in an inconsistent database
if the program crashes while the database is still open. Use the
sync() method to force any unwritten data to be written to the disk.
The 's' flag causes all database operations to be synchronized to
disk. The 'u' flag disables locking of the database file.
The optional mode argument is the Unix mode of the file, used only
when the database has to be created. It defaults to octal 0o666.
"""
else:
def open(filename: str, flags: str = "r", mode: int = 0o666, /) -> _gdbm: ...
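A sketch of the flag handling described above, through the public dbm.gnu module (Unix-only): "cf" creates the database if needed and opens it in fast mode, so sync() is called before closing.

    import dbm.gnu

    db = dbm.gnu.open("cache", "cf")  # create if missing, fast (buffered) mode
    db[b"k"] = b"v"
    db.sync()                         # flush deferred writes to disk
    db.close()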


@ -1,3 +1,7 @@
"""
OpenSSL interface for hashlib module
"""
import sys
from _typeshed import ReadableBuffer
from collections.abc import Callable
@ -23,104 +27,327 @@ class _HashObject(Protocol):
def update(self, obj: ReadableBuffer, /) -> None: ...
class HASH:
"""
A hash is an object used to calculate a checksum of a string of information.
Methods:
update() -- updates the current digest with an additional string
digest() -- return the current digest value
hexdigest() -- return the current digest as a string of hexadecimal digits
copy() -- return a copy of the current hash object
Attributes:
name -- the hash algorithm being used by this object
digest_size -- number of bytes in this hashes output
"""
@property
def digest_size(self) -> int: ...
@property
def block_size(self) -> int: ...
@property
def name(self) -> str: ...
def copy(self) -> Self: ...
def digest(self) -> bytes: ...
def hexdigest(self) -> str: ...
def update(self, obj: ReadableBuffer, /) -> None: ...
def copy(self) -> Self:
"""
Return a copy of the hash object.
"""
def digest(self) -> bytes:
"""
Return the digest value as a bytes object.
"""
def hexdigest(self) -> str:
"""
Return the digest value as a string of hexadecimal digits.
"""
def update(self, obj: ReadableBuffer, /) -> None:
"""
Update this hash object's state with the provided string.
"""
if sys.version_info >= (3, 10):
class UnsupportedDigestmodError(ValueError): ...
class HASHXOF(HASH):
def digest(self, length: int) -> bytes: ... # type: ignore[override]
def hexdigest(self, length: int) -> str: ... # type: ignore[override]
"""
A hash is an object used to calculate a checksum of a string of information.
Methods:
update() -- updates the current digest with an additional string
digest(length) -- return the current digest value
hexdigest(length) -- return the current digest as a string of hexadecimal digits
copy() -- return a copy of the current hash object
Attributes:
name -- the hash algorithm being used by this object
digest_size -- number of bytes in this hash's output
"""
def digest(self, length: int) -> bytes: # type: ignore[override]
"""
Return the digest value as a bytes object.
"""
def hexdigest(self, length: int) -> str: # type: ignore[override]
"""
Return the digest value as a string of hexadecimal digits.
"""
@final
class HMAC:
"""
The object used to calculate HMAC of a message.
Methods:
update() -- updates the current digest with an additional string
digest() -- return the current digest value
hexdigest() -- return the current digest as a string of hexadecimal digits
copy() -- return a copy of the current hash object
Attributes:
name -- the name, including the hash algorithm used by this object
digest_size -- number of bytes in digest() output
"""
@property
def digest_size(self) -> int: ...
@property
def block_size(self) -> int: ...
@property
def name(self) -> str: ...
def copy(self) -> Self: ...
def digest(self) -> bytes: ...
def hexdigest(self) -> str: ...
def update(self, msg: ReadableBuffer) -> None: ...
def copy(self) -> Self:
"""
Return a copy ("clone") of the HMAC object.
"""
def digest(self) -> bytes:
"""
Return the digest of the bytes passed to the update() method so far.
"""
def hexdigest(self) -> str:
"""
Return hexadecimal digest of the bytes passed to the update() method so far.
This may be used to exchange the value safely in email or other non-binary
environments.
"""
def update(self, msg: ReadableBuffer) -> None:
"""
Update the HMAC object with msg.
"""
@overload
def compare_digest(a: ReadableBuffer, b: ReadableBuffer, /) -> bool: ...
def compare_digest(a: ReadableBuffer, b: ReadableBuffer, /) -> bool:
"""
Return 'a == b'.
This function uses an approach designed to prevent
timing analysis, making it appropriate for cryptography.
a and b must both be of the same type: either str (ASCII only),
or any bytes-like object.
Note: If a and b are of different lengths, or if an error occurs,
a timing attack could theoretically reveal information about the
types and lengths of a and b--but not their values.
"""
@overload
def compare_digest(a: AnyStr, b: AnyStr, /) -> bool: ...
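compare_digest() is exposed publicly as hmac.compare_digest; a quick illustration of the constant-time contract on same-type arguments:

    import hmac

    expected = b"a1b2c3"
    print(hmac.compare_digest(expected, b"a1b2c3"))  # True
    print(hmac.compare_digest(expected, b"a1b2c4"))  # False, in time independent of where they differ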
def get_fips_mode() -> int: ...
def hmac_new(key: bytes | bytearray, msg: ReadableBuffer = b"", digestmod: _DigestMod = None) -> HMAC: ...
def get_fips_mode() -> int:
"""
Determine the OpenSSL FIPS mode of operation.
For OpenSSL 3.0.0 and newer it returns the state of the default provider
in the default OSSL context. It's not quite the same as FIPS_mode() but good
enough for unittests.
Effectively any non-zero return value indicates FIPS mode;
values other than 1 may have additional significance.
"""
def hmac_new(key: bytes | bytearray, msg: ReadableBuffer = b"", digestmod: _DigestMod = None) -> HMAC:
"""
Return a new hmac object.
"""
if sys.version_info >= (3, 13):
def new(
name: str, data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None
) -> HASH: ...
def openssl_md5(
data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None
) -> HASH: ...
def openssl_sha1(
data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None
) -> HASH: ...
def openssl_sha224(
data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None
) -> HASH: ...
def openssl_sha256(
data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None
) -> HASH: ...
def openssl_sha384(
data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None
) -> HASH: ...
def openssl_sha512(
data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None
) -> HASH: ...
def new(name: str, data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None) -> HASH:
"""
Return a new hash object using the named algorithm.
An optional string argument may be provided and will be
automatically hashed.
The MD5 and SHA1 algorithms are always supported.
"""
def openssl_md5(data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None) -> HASH:
"""
Returns a md5 hash object; optionally initialized with a string
"""
def openssl_sha1(data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None) -> HASH:
"""
Returns a sha1 hash object; optionally initialized with a string
"""
def openssl_sha224(data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None) -> HASH:
"""
Returns a sha224 hash object; optionally initialized with a string
"""
def openssl_sha256(data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None) -> HASH:
"""
Returns a sha256 hash object; optionally initialized with a string
"""
def openssl_sha384(data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None) -> HASH:
"""
Returns a sha384 hash object; optionally initialized with a string
"""
def openssl_sha512(data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None) -> HASH:
"""
Returns a sha512 hash object; optionally initialized with a string
"""
def openssl_sha3_224(
data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None
) -> HASH: ...
) -> HASH:
"""
Returns a sha3-224 hash object; optionally initialized with a string
"""
def openssl_sha3_256(
data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None
) -> HASH: ...
) -> HASH:
"""
Returns a sha3-256 hash object; optionally initialized with a string
"""
def openssl_sha3_384(
data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None
) -> HASH: ...
) -> HASH:
"""
Returns a sha3-384 hash object; optionally initialized with a string
"""
def openssl_sha3_512(
data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None
) -> HASH: ...
) -> HASH:
"""
Returns a sha3-512 hash object; optionally initialized with a string
"""
def openssl_shake_128(
data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None
) -> HASHXOF: ...
) -> HASHXOF:
"""
Returns a shake-128 variable hash object; optionally initialized with a string
"""
def openssl_shake_256(
data: ReadableBuffer = b"", *, usedforsecurity: bool = True, string: ReadableBuffer | None = None
) -> HASHXOF: ...
) -> HASHXOF:
"""
Returns a shake-256 variable hash object; optionally initialized with a string
"""
else:
def new(name: str, string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH: ...
def openssl_md5(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH: ...
def openssl_sha1(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH: ...
def openssl_sha224(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH: ...
def openssl_sha256(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH: ...
def openssl_sha384(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH: ...
def openssl_sha512(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH: ...
def openssl_sha3_224(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH: ...
def openssl_sha3_256(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH: ...
def openssl_sha3_384(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH: ...
def openssl_sha3_512(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH: ...
def openssl_shake_128(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASHXOF: ...
def openssl_shake_256(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASHXOF: ...
def new(name: str, string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH:
"""
Return a new hash object using the named algorithm.
An optional string argument may be provided and will be
automatically hashed.
The MD5 and SHA1 algorithms are always supported.
"""
def openssl_md5(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH:
"""
Returns a md5 hash object; optionally initialized with a string
"""
def openssl_sha1(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH:
"""
Returns a sha1 hash object; optionally initialized with a string
"""
def openssl_sha224(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH:
"""
Returns a sha224 hash object; optionally initialized with a string
"""
def openssl_sha256(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH:
"""
Returns a sha256 hash object; optionally initialized with a string
"""
def openssl_sha384(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH:
"""
Returns a sha384 hash object; optionally initialized with a string
"""
def openssl_sha512(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH:
"""
Returns a sha512 hash object; optionally initialized with a string
"""
def openssl_sha3_224(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH:
"""
Returns a sha3-224 hash object; optionally initialized with a string
"""
def openssl_sha3_256(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH:
"""
Returns a sha3-256 hash object; optionally initialized with a string
"""
def openssl_sha3_384(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH:
"""
Returns a sha3-384 hash object; optionally initialized with a string
"""
def openssl_sha3_512(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASH:
"""
Returns a sha3-512 hash object; optionally initialized with a string
"""
def openssl_shake_128(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASHXOF:
"""
Returns a shake-128 variable hash object; optionally initialized with a string
"""
def openssl_shake_256(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> HASHXOF:
"""
Returns a shake-256 variable hash object; optionally initialized with a string
"""
def hmac_digest(key: bytes | bytearray, msg: ReadableBuffer, digest: str) -> bytes:
"""
Single-shot HMAC.
"""
def hmac_digest(key: bytes | bytearray, msg: ReadableBuffer, digest: str) -> bytes: ...
def pbkdf2_hmac(
hash_name: str, password: ReadableBuffer, salt: ReadableBuffer, iterations: int, dklen: int | None = None
) -> bytes: ...
def scrypt(
password: ReadableBuffer, *, salt: ReadableBuffer, n: int, r: int, p: int, maxmem: int = 0, dklen: int = 64
) -> bytes: ...
) -> bytes:
"""
Password based key derivation function 2 (PKCS #5 v2.0) with HMAC as pseudorandom function.
"""
def scrypt(password: ReadableBuffer, *, salt: ReadableBuffer, n: int, r: int, p: int, maxmem: int = 0, dklen: int = 64) -> bytes:
"""
scrypt password-based key derivation function.
"""


@ -1,3 +1,34 @@
"""
Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
import sys
from typing import Any, Final, TypeVar
@ -5,15 +36,67 @@ _T = TypeVar("_T") # list items must be comparable
__about__: Final[str]
def heapify(heap: list[Any], /) -> None: ... # list items must be comparable
def heappop(heap: list[_T], /) -> _T: ...
def heappush(heap: list[_T], item: _T, /) -> None: ...
def heappushpop(heap: list[_T], item: _T, /) -> _T: ...
def heapreplace(heap: list[_T], item: _T, /) -> _T: ...
def heapify(heap: list[Any], /) -> None: # list items must be comparable
"""
Transform list into a heap, in-place, in O(len(heap)) time.
"""
def heappop(heap: list[_T], /) -> _T:
"""
Pop the smallest item off the heap, maintaining the heap invariant.
"""
def heappush(heap: list[_T], item: _T, /) -> None:
"""
Push item onto heap, maintaining the heap invariant.
"""
def heappushpop(heap: list[_T], item: _T, /) -> _T:
"""
Push item on the heap, then pop and return the smallest item from the heap.
The combined action runs more efficiently than heappush() followed by
a separate call to heappop().
"""
def heapreplace(heap: list[_T], item: _T, /) -> _T:
"""
Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
if sys.version_info >= (3, 14):
def heapify_max(heap: list[Any], /) -> None: ... # list items must be comparable
def heappop_max(heap: list[_T], /) -> _T: ...
def heappush_max(heap: list[_T], item: _T, /) -> None: ...
def heappushpop_max(heap: list[_T], item: _T, /) -> _T: ...
def heapreplace_max(heap: list[_T], item: _T, /) -> _T: ...
def heapify_max(heap: list[Any], /) -> None: # list items must be comparable
"""
Maxheap variant of heapify.
"""
def heappop_max(heap: list[_T], /) -> _T:
"""
Maxheap variant of heappop.
"""
def heappush_max(heap: list[_T], item: _T, /) -> None:
"""
Push item onto max heap, maintaining the heap invariant.
"""
def heappushpop_max(heap: list[_T], item: _T, /) -> _T:
"""
Maxheap variant of heappushpop.
The combined action runs more efficiently than heappush_max() followed by
a separate call to heappop_max().
"""
def heapreplace_max(heap: list[_T], item: _T, /) -> _T:
"""
Maxheap variant of heapreplace.
"""


@ -1,3 +1,7 @@
"""
(Extremely) low-level import machinery bits as used by importlib.
"""
import sys
import types
from _typeshed import ReadableBuffer
@ -9,22 +13,94 @@ if sys.version_info >= (3, 14):
pyc_magic_number_token: int
def source_hash(key: int, source: ReadableBuffer) -> bytes: ...
def create_builtin(spec: ModuleSpec, /) -> types.ModuleType: ...
def create_dynamic(spec: ModuleSpec, file: Any = None, /) -> types.ModuleType: ...
def acquire_lock() -> None: ...
def exec_builtin(mod: types.ModuleType, /) -> int: ...
def exec_dynamic(mod: types.ModuleType, /) -> int: ...
def extension_suffixes() -> list[str]: ...
def init_frozen(name: str, /) -> types.ModuleType: ...
def is_builtin(name: str, /) -> int: ...
def is_frozen(name: str, /) -> bool: ...
def is_frozen_package(name: str, /) -> bool: ...
def lock_held() -> bool: ...
def release_lock() -> None: ...
def create_builtin(spec: ModuleSpec, /) -> types.ModuleType:
"""
Create an extension module.
"""
def create_dynamic(spec: ModuleSpec, file: Any = None, /) -> types.ModuleType:
"""
Create an extension module.
"""
def acquire_lock() -> None:
"""
Acquires the interpreter's import lock for the current thread.
This lock should be used by import hooks to ensure thread-safety when importing
modules. On platforms without threads, this function does nothing.
"""
def exec_builtin(mod: types.ModuleType, /) -> int:
"""
Initialize a built-in module.
"""
def exec_dynamic(mod: types.ModuleType, /) -> int:
"""
Initialize an extension module.
"""
def extension_suffixes() -> list[str]:
"""
Returns the list of file suffixes used to identify extension modules.
"""
def init_frozen(name: str, /) -> types.ModuleType:
"""
Initializes a frozen module.
"""
def is_builtin(name: str, /) -> int:
"""
Returns True if the module name corresponds to a built-in module.
"""
def is_frozen(name: str, /) -> bool:
"""
Returns True if the module name corresponds to a frozen module.
"""
def is_frozen_package(name: str, /) -> bool:
"""
Returns True if the module name is of a frozen package.
"""
def lock_held() -> bool:
"""
Return True if the import lock is currently held, else False.
On platforms without threads, return False.
"""
def release_lock() -> None:
"""
Release the interpreter's import lock.
On platforms without threads, this function does nothing.
"""
if sys.version_info >= (3, 11):
def find_frozen(name: str, /, *, withdata: bool = False) -> tuple[memoryview | None, bool, str | None] | None: ...
def get_frozen_object(name: str, data: ReadableBuffer | None = None, /) -> types.CodeType: ...
def find_frozen(name: str, /, *, withdata: bool = False) -> tuple[memoryview | None, bool, str | None] | None:
"""
Return info about the corresponding frozen module (if there is one) or None.
The returned info (a 3-tuple):
* data the raw marshalled bytes
* is_package whether or not it is a package
* origname the originally frozen module's name, or None if not
a stdlib module (this will usually be the same as
the module's current name)
"""
def get_frozen_object(name: str, data: ReadableBuffer | None = None, /) -> types.CodeType:
"""
Create a code object for a frozen module.
"""
else:
def get_frozen_object(name: str, /) -> types.CodeType: ...
def get_frozen_object(name: str, /) -> types.CodeType:
"""
Create a code object for a frozen module.
"""


@ -1,3 +1,8 @@
"""
This module provides primitive operations to manage Python interpreters.
The 'interpreters' module provides a more convenient interface.
"""
from _typeshed import structseq
from typing import Any, Final, Literal, SupportsIndex, final
from typing_extensions import Buffer, Self
@ -11,24 +16,54 @@ class ChannelNotFoundError(ChannelError): ...
# Mark as final, since instantiating ChannelID is not supported.
@final
class ChannelID:
"""
A channel ID identifies a channel and may be used as an int.
"""
@property
def end(self) -> Literal["send", "recv", "both"]: ...
def end(self) -> Literal["send", "recv", "both"]:
"""
'send', 'recv', or 'both'
"""
@property
def send(self) -> Self: ...
def send(self) -> Self:
"""
the 'send' end of the channel
"""
@property
def recv(self) -> Self: ...
def recv(self) -> Self:
"""
the 'recv' end of the channel
"""
def __eq__(self, other: object) -> bool: ...
def __ge__(self, other: ChannelID) -> bool: ...
def __gt__(self, other: ChannelID) -> bool: ...
def __hash__(self) -> int: ...
def __index__(self) -> int: ...
def __int__(self) -> int: ...
def __index__(self) -> int:
"""
Return self converted to an integer, if self is suitable for use as an index into a list.
"""
def __int__(self) -> int:
"""
int(self)
"""
def __le__(self, other: ChannelID) -> bool: ...
def __lt__(self, other: ChannelID) -> bool: ...
def __ne__(self, other: object) -> bool: ...
@final
class ChannelInfo(structseq[int], tuple[bool, bool, bool, int, int, int, int, int]):
"""
ChannelInfo
A named tuple of a channel's state.
"""
__match_args__: Final = (
"open",
"closing",
@ -40,47 +75,214 @@ class ChannelInfo(structseq[int], tuple[bool, bool, bool, int, int, int, int, in
"num_interp_recv_released",
)
@property
def open(self) -> bool: ...
@property
def closing(self) -> bool: ...
@property
def closed(self) -> bool: ...
@property
def count(self) -> int: ... # type: ignore[override]
@property
def num_interp_send(self) -> int: ...
@property
def num_interp_send_released(self) -> int: ...
@property
def num_interp_recv(self) -> int: ...
@property
def num_interp_recv_released(self) -> int: ...
@property
def num_interp_both(self) -> int: ...
@property
def num_interp_both_recv_released(self) -> int: ...
@property
def num_interp_both_send_released(self) -> int: ...
@property
def num_interp_both_released(self) -> int: ...
@property
def recv_associated(self) -> bool: ...
@property
def recv_released(self) -> bool: ...
@property
def send_associated(self) -> bool: ...
@property
def send_released(self) -> bool: ...
def open(self) -> bool:
"""
both ends are open
"""
def create(unboundop: Literal[1, 2, 3]) -> ChannelID: ...
def destroy(cid: SupportsIndex) -> None: ...
def list_all() -> list[ChannelID]: ...
def list_interpreters(cid: SupportsIndex, *, send: bool) -> list[int]: ...
def send(cid: SupportsIndex, obj: object, *, blocking: bool = True, timeout: float | None = None) -> None: ...
def send_buffer(cid: SupportsIndex, obj: Buffer, *, blocking: bool = True, timeout: float | None = None) -> None: ...
def recv(cid: SupportsIndex, default: object = ...) -> tuple[Any, Literal[1, 2, 3]]: ...
def close(cid: SupportsIndex, *, send: bool = False, recv: bool = False) -> None: ...
def get_count(cid: SupportsIndex) -> int: ...
def get_info(cid: SupportsIndex) -> ChannelInfo: ...
def get_channel_defaults(cid: SupportsIndex) -> Literal[1, 2, 3]: ...
def release(cid: SupportsIndex, *, send: bool = False, recv: bool = False, force: bool = False) -> None: ...
@property
def closing(self) -> bool:
"""
send is closed, recv is non-empty
"""
@property
def closed(self) -> bool:
"""
both ends are closed
"""
@property
def count(self) -> int: # type: ignore[override]
"""
queued objects
"""
@property
def num_interp_send(self) -> int:
"""
interpreters bound to the send end
"""
@property
def num_interp_send_released(self) -> int:
"""
interpreters bound to the send end and released
"""
@property
def num_interp_recv(self) -> int:
"""
interpreters bound to the recv end
"""
@property
def num_interp_recv_released(self) -> int:
"""
interpreters bound to the recv end and released
"""
@property
def num_interp_both(self) -> int:
"""
interpreters bound to both ends
"""
@property
def num_interp_both_recv_released(self) -> int:
"""
interpreters bound to both ends and released from the recv end
"""
@property
def num_interp_both_send_released(self) -> int:
"""
interpreters bound to both ends and released from the send end
"""
@property
def num_interp_both_released(self) -> int:
"""
interpreters bound to both ends and released from both
"""
@property
def recv_associated(self) -> bool:
"""
current interpreter is bound to the recv end
"""
@property
def recv_released(self) -> bool:
"""
current interpreter *was* bound to the recv end
"""
@property
def send_associated(self) -> bool:
"""
current interpreter is bound to the send end
"""
@property
def send_released(self) -> bool:
"""
current interpreter *was* bound to the send end
"""
def create(unboundop: Literal[1, 2, 3]) -> ChannelID:
"""
channel_create(unboundop) -> cid
Create a new cross-interpreter channel and return a unique generated ID.
"""
def destroy(cid: SupportsIndex) -> None:
"""
channel_destroy(cid)
Close and finalize the channel. Afterward attempts to use the channel
will behave as though it never existed.
"""
def list_all() -> list[ChannelID]:
"""
channel_list_all() -> [cid]
Return the list of all IDs for active channels.
"""
def list_interpreters(cid: SupportsIndex, *, send: bool) -> list[int]:
"""
channel_list_interpreters(cid, *, send) -> [id]
Return the list of all interpreter IDs associated with an end of the channel.
The 'send' argument should be a boolean indicating whether to use the send or
receive end.
"""
def send(cid: SupportsIndex, obj: object, *, blocking: bool = True, timeout: float | None = None) -> None:
"""
channel_send(cid, obj, *, blocking=True, timeout=None)
Add the object's data to the channel's queue.
By default this waits for the object to be received.
"""
def send_buffer(cid: SupportsIndex, obj: Buffer, *, blocking: bool = True, timeout: float | None = None) -> None:
"""
channel_send_buffer(cid, obj, *, blocking=True, timeout=None)
Add the object's buffer to the channel's queue.
By default this waits for the object to be received.
"""
def recv(cid: SupportsIndex, default: object = ...) -> tuple[Any, Literal[1, 2, 3]]:
"""
channel_recv(cid, [default]) -> (obj, unboundop)
Return a new object from the data at the front of the channel's queue.
If there is nothing to receive then raise ChannelEmptyError, unless
a default value is provided. In that case return it.
"""
def close(cid: SupportsIndex, *, send: bool = False, recv: bool = False) -> None:
"""
channel_close(cid, *, send=None, recv=None, force=False)
Close the channel for all interpreters.
If the channel is empty then the keyword args are ignored and both
ends are immediately closed. Otherwise, if 'force' is True then
all queued items are released and both ends are immediately
closed.
If the channel is not empty *and* 'force' is False then the following
happens:
* recv is True (regardless of send):
- raise ChannelNotEmptyError
* recv is None and send is None:
- raise ChannelNotEmptyError
* send is True and recv is not True:
- fully close the 'send' end
- close the 'recv' end to interpreters not already receiving
- fully close it once empty
Closing an already closed channel results in a ChannelClosedError.
Once the channel's ID has no more ref counts in any interpreter
the channel will be destroyed.
"""
def get_count(cid: SupportsIndex) -> int:
"""
get_count(cid)
Return the number of items in the channel.
"""
def get_info(cid: SupportsIndex) -> ChannelInfo:
"""
get_info(cid)
Return details about the channel.
"""
def get_channel_defaults(cid: SupportsIndex) -> Literal[1, 2, 3]:
"""
get_channel_defaults(cid)
Return the channel's default values, set when it was created.
"""
def release(cid: SupportsIndex, *, send: bool = False, recv: bool = False, force: bool = False) -> None:
"""
channel_release(cid, *, send=None, recv=None, force=True)
Close the channel for the current interpreter. 'send' and 'recv'
(bool) may be used to indicate the ends to close. By default both
ends are closed. Closing an already closed end is a noop.
"""


@ -1,19 +1,104 @@
"""
This module provides primitive operations to manage Python interpreters.
The 'interpreters' module provides a more convenient interface.
"""
from typing import Any, Literal, SupportsIndex
from typing_extensions import TypeAlias
_UnboundOp: TypeAlias = Literal[1, 2, 3]
class QueueError(RuntimeError): ...
class QueueError(RuntimeError):
"""
Indicates that a queue-related error happened.
"""
class QueueNotFoundError(QueueError): ...
def bind(qid: SupportsIndex) -> None: ...
def create(maxsize: SupportsIndex, fmt: SupportsIndex, unboundop: _UnboundOp) -> int: ...
def destroy(qid: SupportsIndex) -> None: ...
def get(qid: SupportsIndex) -> tuple[Any, int, _UnboundOp | None]: ...
def get_count(qid: SupportsIndex) -> int: ...
def get_maxsize(qid: SupportsIndex) -> int: ...
def get_queue_defaults(qid: SupportsIndex) -> tuple[int, _UnboundOp]: ...
def is_full(qid: SupportsIndex) -> bool: ...
def list_all() -> list[tuple[int, int, _UnboundOp]]: ...
def put(qid: SupportsIndex, obj: Any, fmt: SupportsIndex, unboundop: _UnboundOp) -> None: ...
def release(qid: SupportsIndex) -> None: ...
def bind(qid: SupportsIndex) -> None:
"""
bind(qid)
Take a reference to the identified queue.
The queue is not destroyed until there are no references left.
"""
def create(maxsize: SupportsIndex, fmt: SupportsIndex, unboundop: _UnboundOp) -> int:
"""
create(maxsize, unboundop, fallback) -> qid
Create a new cross-interpreter queue and return its unique generated ID.
It is a new reference as though bind() had been called on the queue.
The caller is responsible for calling destroy() for the new queue
before the runtime is finalized.
"""
def destroy(qid: SupportsIndex) -> None:
"""
destroy(qid)
Clear and destroy the queue. Afterward attempts to use the queue
will behave as though it never existed.
"""
def get(qid: SupportsIndex) -> tuple[Any, int, _UnboundOp | None]:
"""
get(qid) -> (obj, unboundop)
Return a new object from the data at the front of the queue.
The unbound op is also returned.
If there is nothing to receive then raise QueueEmpty.
"""
def get_count(qid: SupportsIndex) -> int:
"""
get_count(qid)
Return the number of items in the queue.
"""
def get_maxsize(qid: SupportsIndex) -> int:
"""
get_maxsize(qid)
Return the maximum number of items in the queue.
"""
def get_queue_defaults(qid: SupportsIndex) -> tuple[int, _UnboundOp]:
"""
get_queue_defaults(qid)
Return the queue's default values, set when it was created.
"""
def is_full(qid: SupportsIndex) -> bool:
"""
is_full(qid)
Return true if the queue has a maxsize and has reached it.
"""
def list_all() -> list[tuple[int, int, _UnboundOp]]:
"""
list_all() -> [(qid, unboundop, fallback)]
Return the list of IDs for all queues.
Each corresponding default unbound op and fallback is also included.
"""
def put(qid: SupportsIndex, obj: Any, fmt: SupportsIndex, unboundop: _UnboundOp) -> None:
"""
put(qid, obj)
Add the object's data to the queue.
"""
def release(qid: SupportsIndex) -> None:
"""
release(qid)
Release a reference to the queue.
The queue is destroyed once there are no references left.
"""


@ -1,29 +1,140 @@
"""
This module provides primitive operations to manage Python interpreters.
The 'interpreters' module provides a more convenient interface.
"""
import types
from collections.abc import Callable, Mapping
from typing import Final, Literal, SupportsIndex
from collections.abc import Callable
from typing import Any, Final, Literal, SupportsIndex
from typing_extensions import TypeAlias
_Configs: TypeAlias = Literal["default", "isolated", "legacy", "empty", ""]
_SharedDict: TypeAlias = dict[str, Any] # many objects can be shared
class InterpreterError(Exception): ...
class InterpreterNotFoundError(InterpreterError): ...
class InterpreterError(Exception):
"""
A cross-interpreter operation failed
"""
class InterpreterNotFoundError(InterpreterError):
"""
An interpreter was not found
"""
class NotShareableError(ValueError): ...
class CrossInterpreterBufferView:
def __buffer__(self, flags: int, /) -> memoryview: ...
def __buffer__(self, flags: int, /) -> memoryview:
"""
Return a buffer object that exposes the underlying memory of the object.
"""
def new_config(name: _Configs = "isolated", /, **overides: object) -> types.SimpleNamespace:
"""
new_config(name='isolated', /, **overrides) -> type.SimpleNamespace
Return a representation of a new PyInterpreterConfig.
The name determines the initial values of the config. Supported named
configs are: default, isolated, legacy, and empty.
Any keyword arguments are set on the corresponding config fields,
overriding the initial values.
"""
def create(config: types.SimpleNamespace | _Configs | None = "isolated", *, reqrefs: bool = False) -> int:
"""
create([config], *, reqrefs=False) -> ID
Create a new interpreter and return a unique generated ID.
The caller is responsible for destroying the interpreter before exiting,
typically by using _interpreters.destroy(). This can be managed
automatically by passing "reqrefs=True" and then using _incref() and
_decref() appropriately.
"config" must be a valid interpreter config or the name of a
predefined config ("isolated" or "legacy"). The default
is "isolated".
"""
def destroy(id: SupportsIndex, *, restrict: bool = False) -> None:
"""
destroy(id, *, restrict=False)
Destroy the identified interpreter.
Attempting to destroy the current interpreter raises InterpreterError.
So does an unrecognized ID.
"""
def list_all(*, require_ready: bool) -> list[tuple[int, int]]:
"""
list_all() -> [(ID, whence)]
Return a list containing the ID of every existing interpreter.
"""
def get_current() -> tuple[int, int]:
"""
get_current() -> (ID, whence)
Return the ID of current interpreter.
"""
def get_main() -> tuple[int, int]:
"""
get_main() -> (ID, whence)
Return the ID of main interpreter.
"""
def is_running(id: SupportsIndex, *, restrict: bool = False) -> bool:
"""
is_running(id, *, restrict=False) -> bool
Return whether or not the identified interpreter is running.
"""
def get_config(id: SupportsIndex, *, restrict: bool = False) -> types.SimpleNamespace:
"""
get_config(id, *, restrict=False) -> types.SimpleNamespace
Return a representation of the config used to initialize the interpreter.
"""
def whence(id: SupportsIndex) -> int:
"""
whence(id) -> int
Return an identifier for where the interpreter was created.
"""
def new_config(name: _Configs = "isolated", /, **overides: object) -> types.SimpleNamespace: ...
def create(config: types.SimpleNamespace | _Configs | None = "isolated", *, reqrefs: bool = False) -> int: ...
def destroy(id: SupportsIndex, *, restrict: bool = False) -> None: ...
def list_all(*, require_ready: bool) -> list[tuple[int, int]]: ...
def get_current() -> tuple[int, int]: ...
def get_main() -> tuple[int, int]: ...
def is_running(id: SupportsIndex, *, restrict: bool = False) -> bool: ...
def get_config(id: SupportsIndex, *, restrict: bool = False) -> types.SimpleNamespace: ...
def whence(id: SupportsIndex) -> int: ...
def exec(
id: SupportsIndex, code: str | types.CodeType | Callable[[], object], shared: bool | None = None, *, restrict: bool = False
) -> None | types.SimpleNamespace: ...
id: SupportsIndex,
code: str | types.CodeType | Callable[[], object],
shared: _SharedDict | None = None,
*,
restrict: bool = False,
) -> None | types.SimpleNamespace:
"""
exec(id, code, shared=None, *, restrict=False)
Execute the provided code in the identified interpreter.
This is equivalent to running the builtin exec() under the target
interpreter, using the __dict__ of its __main__ module as both
globals and locals.
"code" may be a string containing the text of a Python script.
Functions (and code objects) are also supported, with some restrictions.
The code/function must not take any arguments or be a closure
(i.e. have cell vars). Methods and other callables are not supported.
If a function is provided, its code object is used and all its state
is ignored, including its __globals__ dict.
"""
def call(
id: SupportsIndex,
callable: Callable[..., object],
@ -31,18 +142,68 @@ def call(
kwargs: dict[str, object] | None = None,
*,
restrict: bool = False,
) -> object: ...
) -> object:
"""
call(id, callable, args=None, kwargs=None, *, restrict=False)
Call the provided object in the identified interpreter.
Pass the given args and kwargs, if possible.
"""
def run_string(
id: SupportsIndex, script: str | types.CodeType | Callable[[], object], shared: bool | None = None, *, restrict: bool = False
) -> None: ...
id: SupportsIndex,
script: str | types.CodeType | Callable[[], object],
shared: _SharedDict | None = None,
*,
restrict: bool = False,
) -> None:
"""
run_string(id, script, shared=None, *, restrict=False)
Execute the provided string in the identified interpreter.
(See _interpreters.exec().
"""
def run_func(
id: SupportsIndex, func: types.CodeType | Callable[[], object], shared: bool | None = None, *, restrict: bool = False
) -> None: ...
def set___main___attrs(id: SupportsIndex, updates: Mapping[str, object], *, restrict: bool = False) -> None: ...
id: SupportsIndex, func: types.CodeType | Callable[[], object], shared: _SharedDict | None = None, *, restrict: bool = False
) -> None:
"""
run_func(id, func, shared=None, *, restrict=False)
Execute the body of the provided function in the identified interpreter.
Code objects are also supported. In both cases, closures and args
are not supported. Methods and other callables are not supported either.
(See _interpreters.exec().
"""
def set___main___attrs(id: SupportsIndex, updates: _SharedDict, *, restrict: bool = False) -> None:
"""
set___main___attrs(id, ns, *, restrict=False)
Bind the given attributes in the interpreter's __main__ module.
"""
def incref(id: SupportsIndex, *, implieslink: bool = False, restrict: bool = False) -> None: ...
def decref(id: SupportsIndex, *, restrict: bool = False) -> None: ...
def is_shareable(obj: object) -> bool: ...
def capture_exception(exc: BaseException | None = None) -> types.SimpleNamespace: ...
def is_shareable(obj: object) -> bool:
"""
is_shareable(obj) -> bool
Return True if the object's data may be shared between interpreters and
False otherwise.
"""
def capture_exception(exc: BaseException | None = None) -> types.SimpleNamespace:
"""
capture_exception(exc=None) -> types.SimpleNamespace
Return a snapshot of an exception. If "exc" is None
then the current exception, if any, is used (but not cleared).
The returned snapshot is the same as what _interpreters.exec() returns.
"""
WHENCE_UNKNOWN: Final = 0
WHENCE_RUNTIME: Final = 1
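For orientation, a hedged sketch of the create/exec/destroy lifecycle these stubs describe (CPython 3.13+ private API; not a stable interface):

import _interpreters

interp_id = _interpreters.create()    # defaults to the "isolated" config
try:
    # exec() runs in the target interpreter's __main__ namespace and returns
    # None on success, or a SimpleNamespace snapshot of the exception.
    excinfo = _interpreters.exec(interp_id, "x = 6 * 7")
    assert excinfo is None
finally:
    _interpreters.destroy(interp_id)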


@ -1,3 +1,38 @@
"""
The io module provides the Python interfaces to stream handling. The
builtin open function is defined in this module.
At the top of the I/O hierarchy is the abstract base class IOBase. It
defines the basic interface to a stream. Note, however, that there is no
separation between reading and writing to streams; implementations are
allowed to raise an OSError if they do not support a given operation.
Extending IOBase is RawIOBase which deals simply with the reading and
writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide
an interface to OS files.
BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its
subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer
streams that are readable, writable, and both respectively.
BufferedRandom provides a buffered interface to random access
streams. BytesIO is a simple stream of in-memory bytes.
Another IOBase subclass, TextIOBase, deals with the encoding and decoding
of streams into text. TextIOWrapper, which extends it, is a buffered text
interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO
is an in-memory stream for text.
Argument names are not part of the specification, and only the arguments
of open() are intended to be used as keyword arguments.
data:
DEFAULT_BUFFER_SIZE
An int containing the default buffer size used by the module's buffered
I/O classes.
"""
import builtins
import codecs
import sys
@ -15,39 +50,191 @@ DEFAULT_BUFFER_SIZE: Final = 8192
open = builtins.open
def open_code(path: str) -> IO[bytes]: ...
def open_code(path: str) -> IO[bytes]:
"""
Opens the provided file with the intent to import the contents.
This may perform extra validation beyond open(), but is otherwise interchangeable
with calling open(path, 'rb').
"""
BlockingIOError = builtins.BlockingIOError
class _IOBase:
def __iter__(self) -> Iterator[bytes]: ...
def __next__(self) -> bytes: ...
"""
The abstract base class for all I/O classes.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read, readinto, or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise UnsupportedOperation when operations they do not support are
called.
The basic type used for binary data read from or written to a file is
bytes. Other bytes-like objects are accepted as method arguments too.
In some cases (such as readinto), a writable object is required. Text
I/O classes work with str data.
Note that calling any method (except additional calls to close(),
which are ignored) on a closed stream should raise a ValueError.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
with open('spam.txt', 'r') as fp:
fp.write('Spam and eggs!')
"""
def __iter__(self) -> Iterator[bytes]:
"""
Implement iter(self).
"""
def __next__(self) -> bytes:
"""
Implement next(self).
"""
def __enter__(self) -> Self: ...
def __exit__(
self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
) -> None: ...
def close(self) -> None: ...
def fileno(self) -> int: ...
def flush(self) -> None: ...
def isatty(self) -> bool: ...
def readable(self) -> bool: ...
def close(self) -> None:
"""
Flush and close the IO object.
This method has no effect if the file is already closed.
"""
def fileno(self) -> int:
"""
Return underlying file descriptor if one exists.
Raise OSError if the IO object does not use a file descriptor.
"""
def flush(self) -> None:
"""
Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
def isatty(self) -> bool:
"""
Return whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
def readable(self) -> bool:
"""
Return whether object was opened for reading.
If False, read() will raise OSError.
"""
read: Callable[..., Any]
def readlines(self, hint: int = -1, /) -> list[bytes]: ...
def seek(self, offset: int, whence: int = 0, /) -> int: ...
def seekable(self) -> bool: ...
def tell(self) -> int: ...
def truncate(self, size: int | None = None, /) -> int: ...
def writable(self) -> bool: ...
def readlines(self, hint: int = -1, /) -> list[bytes]:
"""
Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
def seek(self, offset: int, whence: int = 0, /) -> int:
"""
Change the stream position to the given byte offset.
offset
The stream position, relative to 'whence'.
whence
The relative position to seek from.
The offset is interpreted relative to the position indicated by whence.
Values for whence are:
* os.SEEK_SET or 0 -- start of stream (the default); offset should be zero or positive
* os.SEEK_CUR or 1 -- current stream position; offset may be negative
* os.SEEK_END or 2 -- end of stream; offset is usually negative
Return the new absolute position.
"""
def seekable(self) -> bool:
"""
Return whether object supports random access.
If False, seek(), tell() and truncate() will raise OSError.
This method may need to do a test seek().
"""
def tell(self) -> int:
"""
Return current stream position.
"""
def truncate(self, size: int | None = None, /) -> int:
"""
Truncate file to size bytes.
File pointer is left unchanged. Size defaults to the current IO position
as reported by tell(). Return the new size.
"""
def writable(self) -> bool:
"""
Return whether object was opened for writing.
If False, write() will raise OSError.
"""
write: Callable[..., Any]
def writelines(self, lines: Iterable[ReadableBuffer], /) -> None: ...
def readline(self, size: int | None = -1, /) -> bytes: ...
def __del__(self) -> None: ...
def writelines(self, lines: Iterable[ReadableBuffer], /) -> None:
"""
Write a list of lines to stream.
Line separators are not added, so it is usual for each of the
lines provided to have a line separator at the end.
"""
def readline(self, size: int | None = -1, /) -> bytes:
"""
Read and return a line from the stream.
If size is specified, at most size bytes will be read.
The line terminator is always b'\\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
def __del__(self) -> None:
"""
Called when the instance is about to be destroyed.
"""
@property
def closed(self) -> bool: ...
def _checkClosed(self) -> None: ... # undocumented
class _RawIOBase(_IOBase):
def readall(self) -> bytes: ...
"""
Base class for raw binary I/O.
"""
def readall(self) -> bytes:
"""
Read until EOF, using multiple read() call.
"""
# The following methods can return None if the file is in non-blocking mode
# and no data is available.
def readinto(self, buffer: WriteableBuffer, /) -> int | MaybeNone: ...
@ -55,14 +242,88 @@ class _RawIOBase(_IOBase):
def read(self, size: int = -1, /) -> bytes | MaybeNone: ...
class _BufferedIOBase(_IOBase):
def detach(self) -> RawIOBase: ...
"""
Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def detach(self) -> RawIOBase:
"""
Disconnect this buffer from its underlying raw stream and return it.
After the raw stream has been detached, the buffer is in an unusable
state.
"""
def readinto(self, buffer: WriteableBuffer, /) -> int: ...
def write(self, buffer: ReadableBuffer, /) -> int: ...
def write(self, buffer: ReadableBuffer, /) -> int:
"""
Write buffer b to the IO stream.
Return the number of bytes written, which is always
the length of b in bytes.
Raise BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
def readinto1(self, buffer: WriteableBuffer, /) -> int: ...
def read(self, size: int | None = -1, /) -> bytes: ...
def read1(self, size: int = -1, /) -> bytes: ...
def read(self, size: int | None = -1, /) -> bytes:
"""
Read and return up to n bytes.
If the size argument is omitted, None, or negative, read and
return all data until EOF.
If the size argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first).
However, for interactive raw streams (as well as sockets and pipes),
at most one raw read will be issued, and a short result does not
imply that EOF is imminent.
Return an empty bytes object on EOF.
Return None if the underlying raw stream was open in non-blocking
mode and no data is available at the moment.
"""
def read1(self, size: int = -1, /) -> bytes:
"""
Read and return up to size bytes, with at most one read() call to the underlying raw stream.
Return an empty bytes object on EOF.
A short result does not imply that EOF is imminent.
"""
class FileIO(RawIOBase, _RawIOBase, BinaryIO): # type: ignore[misc] # incompatible definitions of writelines in the base classes
"""
Open a file.
The mode can be 'r' (default), 'w', 'x' or 'a' for reading,
writing, exclusive creation or appending. The file will be created if it
doesn't exist when opened for writing or appending; it will be truncated
when opened for writing. A FileExistsError will be raised if it already
exists when opened for creating. Opening a file for creating implies
writing so this mode behaves in a similar way to 'w'. Add a '+' to the mode
to allow simultaneous reading and writing. A custom opener can be used by
passing a callable as *opener*. The underlying file descriptor for the file
object is then obtained by calling opener with (*name*, *flags*).
*opener* must return an open file descriptor (passing os.open as *opener*
results in functionality similar to passing None).
"""
mode: str
# The type of "name" equals the argument passed in to the constructor,
# but that can make FileIO incompatible with other I/O types that assume
@ -72,21 +333,85 @@ class FileIO(RawIOBase, _RawIOBase, BinaryIO): # type: ignore[misc] # incompat
self, file: FileDescriptorOrPath, mode: str = "r", closefd: bool = True, opener: _Opener | None = None
) -> None: ...
@property
def closefd(self) -> bool: ...
def seek(self, pos: int, whence: int = 0, /) -> int: ...
def read(self, size: int | None = -1, /) -> bytes | MaybeNone: ...
def closefd(self) -> bool:
"""
True if the file descriptor will be closed by close().
"""
def seek(self, pos: int, whence: int = 0, /) -> int:
"""
Move to new file position and return the file position.
Argument offset is a byte count. Optional argument whence defaults to
SEEK_SET or 0 (offset from start of file, offset should be >= 0); other values
are SEEK_CUR or 1 (move relative to current position, positive or negative),
and SEEK_END or 2 (move relative to end of file, usually negative, although
many platforms allow seeking beyond the end of a file).
Note that not all file objects are seekable.
"""
def read(self, size: int | None = -1, /) -> bytes | MaybeNone:
"""
Read at most size bytes, returned as bytes.
If size is less than 0, read all bytes in the file making multiple read calls.
See ``FileIO.readall``.
Attempts to make only one system call, retrying only per PEP 475 (EINTR). This
means less data may be returned than requested.
In non-blocking mode, returns None if no data is available. Return an empty
bytes object at EOF.
"""
class BytesIO(BufferedIOBase, _BufferedIOBase, BinaryIO): # type: ignore[misc] # incompatible definitions of methods in the base classes
"""
Buffered I/O implementation using an in-memory bytes buffer.
"""
def __init__(self, initial_bytes: ReadableBuffer = b"") -> None: ...
# BytesIO does not contain a "name" field. This workaround is necessary
# to allow BytesIO sub-classes to add this field, as it is defined
# as a read-only property on IO[].
name: Any
def getvalue(self) -> bytes: ...
def getbuffer(self) -> memoryview: ...
def read1(self, size: int | None = -1, /) -> bytes: ...
def readlines(self, size: int | None = None, /) -> list[bytes]: ...
def seek(self, pos: int, whence: int = 0, /) -> int: ...
def getvalue(self) -> bytes:
"""
Retrieve the entire contents of the BytesIO object.
"""
def getbuffer(self) -> memoryview:
"""
Get a read-write view over the contents of the BytesIO object.
"""
def read1(self, size: int | None = -1, /) -> bytes:
"""
Read at most size bytes, returned as a bytes object.
If the size argument is negative or omitted, read until EOF is reached.
Return an empty bytes object at EOF.
"""
def readlines(self, size: int | None = None, /) -> list[bytes]:
"""
List of bytes objects, each a line from the file.
Call readline() repeatedly and return a list of the lines so read.
The optional size argument, if given, is an approximate bound on the
total number of bytes in the lines returned.
"""
def seek(self, pos: int, whence: int = 0, /) -> int:
"""
Change stream position.
Seek to byte offset pos relative to position indicated by whence:
0 Start of stream (the default). pos should be >= 0;
1 Current position - pos may be negative;
2 End of stream - pos usually negative.
Returns the new absolute position.
"""
class _BufferedReaderStream(Protocol):
def read(self, n: int = ..., /) -> bytes: ...
@ -116,6 +441,10 @@ class _BufferedReaderStream(Protocol):
_BufferedReaderStreamT = TypeVar("_BufferedReaderStreamT", bound=_BufferedReaderStream, default=_BufferedReaderStream)
class BufferedReader(BufferedIOBase, _BufferedIOBase, BinaryIO, Generic[_BufferedReaderStreamT]): # type: ignore[misc] # incompatible definitions of methods in the base classes
"""
Create a new buffered reader using the given readable raw IO object.
"""
raw: _BufferedReaderStreamT
def __init__(self, raw: _BufferedReaderStreamT, buffer_size: int = 8192) -> None: ...
def peek(self, size: int = 0, /) -> bytes: ...
@ -123,6 +452,14 @@ class BufferedReader(BufferedIOBase, _BufferedIOBase, BinaryIO, Generic[_Buffere
def truncate(self, pos: int | None = None, /) -> int: ...
class BufferedWriter(BufferedIOBase, _BufferedIOBase, BinaryIO): # type: ignore[misc] # incompatible definitions of writelines in the base classes
"""
A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE.
"""
raw: RawIOBase
def __init__(self, raw: RawIOBase, buffer_size: int = 8192) -> None: ...
def write(self, buffer: ReadableBuffer, /) -> int: ...
@ -130,6 +467,14 @@ class BufferedWriter(BufferedIOBase, _BufferedIOBase, BinaryIO): # type: ignore
def truncate(self, pos: int | None = None, /) -> int: ...
class BufferedRandom(BufferedIOBase, _BufferedIOBase, BinaryIO): # type: ignore[misc] # incompatible definitions of methods in the base classes
"""
A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE.
"""
mode: str
name: Any
raw: RawIOBase
@ -139,21 +484,90 @@ class BufferedRandom(BufferedIOBase, _BufferedIOBase, BinaryIO): # type: ignore
def truncate(self, pos: int | None = None, /) -> int: ...
class BufferedRWPair(BufferedIOBase, _BufferedIOBase, Generic[_BufferedReaderStreamT]):
"""
A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE.
"""
def __init__(self, reader: _BufferedReaderStreamT, writer: RawIOBase, buffer_size: int = 8192, /) -> None: ...
def peek(self, size: int = 0, /) -> bytes: ...
class _TextIOBase(_IOBase):
"""
Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable.
"""
encoding: str
errors: str | None
newlines: str | tuple[str, ...] | None
def __iter__(self) -> Iterator[str]: ... # type: ignore[override]
def __next__(self) -> str: ... # type: ignore[override]
def detach(self) -> BinaryIO: ...
def write(self, s: str, /) -> int: ...
def writelines(self, lines: Iterable[str], /) -> None: ... # type: ignore[override]
def readline(self, size: int = -1, /) -> str: ... # type: ignore[override]
def readlines(self, hint: int = -1, /) -> list[str]: ... # type: ignore[override]
def read(self, size: int | None = -1, /) -> str: ...
def __iter__(self) -> Iterator[str]: # type: ignore[override]
"""
Implement iter(self).
"""
def __next__(self) -> str: # type: ignore[override]
"""
Implement next(self).
"""
def detach(self) -> BinaryIO:
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an unusable state.
"""
def write(self, s: str, /) -> int:
"""
Write string s to stream.
Return the number of characters written
(which is always equal to the length of the string).
"""
def writelines(self, lines: Iterable[str], /) -> None: # type: ignore[override]
"""
Write a list of lines to stream.
Line separators are not added, so it is usual for each of the
lines provided to have a line separator at the end.
"""
def readline(self, size: int = -1, /) -> str: # type: ignore[override]
"""
Read until newline or EOF.
Return an empty string if EOF is hit immediately.
If size is specified, at most size characters will be read.
"""
def readlines(self, hint: int = -1, /) -> list[str]: # type: ignore[override]
"""
Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
def read(self, size: int | None = -1, /) -> str:
"""
Read at most size characters from stream.
Read from underlying buffer until we have size characters or we hit EOF.
If size is negative or omitted, read until EOF.
"""
@type_check_only
class _WrappedBuffer(Protocol):
@ -181,6 +595,37 @@ class _WrappedBuffer(Protocol):
_BufferT_co = TypeVar("_BufferT_co", bound=_WrappedBuffer, default=_WrappedBuffer, covariant=True)
class TextIOWrapper(TextIOBase, _TextIOBase, TextIO, Generic[_BufferT_co]): # type: ignore[misc] # incompatible definitions of write in the base classes
"""
Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getencoding().
errors determines the strictness of encoding and decoding (see
help(codecs.Codec) or the documentation for codecs.register) and
defaults to "strict".
newline controls how line endings are handled. It can be None, '',
'\\n', '\\r', and '\\r\\n'. It works as follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\\n', '\\r', or '\\r\\n', and
these are translated into '\\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '' or '\\n', no translation takes place. If newline is any
of the other legal values, any '\\n' characters written are translated
to the given string.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
def __init__(
self,
buffer: _BufferT_co,
@ -205,28 +650,94 @@ class TextIOWrapper(TextIOBase, _TextIOBase, TextIO, Generic[_BufferT_co]): # t
newline: str | None = None,
line_buffering: bool | None = None,
write_through: bool | None = None,
) -> None: ...
) -> None:
"""
Reconfigure the text stream with new parameters.
This also does an implicit stream flush.
"""
def readline(self, size: int = -1, /) -> str: ... # type: ignore[override]
# Equals the "buffer" argument passed in to the constructor.
def detach(self) -> _BufferT_co: ... # type: ignore[override]
# TextIOWrapper's version of seek only supports a limited subset of
# operations.
def seek(self, cookie: int, whence: int = 0, /) -> int: ...
def seek(self, cookie: int, whence: int = 0, /) -> int:
"""
Set the stream position, and return the new stream position.
cookie
Zero or an opaque number returned by tell().
whence
The relative position to seek from.
Four operations are supported, given by the following argument
combinations:
- seek(0, SEEK_SET): Rewind to the start of the stream.
- seek(cookie, SEEK_SET): Restore a previous position;
'cookie' must be a number returned by tell().
- seek(0, SEEK_END): Fast-forward to the end of the stream.
- seek(0, SEEK_CUR): Leave the current stream position unchanged.
Any other argument combinations are invalid,
and may raise exceptions.
"""
def truncate(self, pos: int | None = None, /) -> int: ...
class StringIO(TextIOBase, _TextIOBase, TextIO): # type: ignore[misc] # incompatible definitions of write in the base classes
"""
Text I/O implementation using an in-memory buffer.
The initial_value argument sets the value of object. The newline
argument is like the one of TextIOWrapper's constructor.
"""
def __init__(self, initial_value: str | None = "", newline: str | None = "\n") -> None: ...
# StringIO does not contain a "name" field. This workaround is necessary
# to allow StringIO sub-classes to add this field, as it is defined
# as a read-only property on IO[].
name: Any
def getvalue(self) -> str: ...
def getvalue(self) -> str:
"""
Retrieve the entire contents of the object.
"""
@property
def line_buffering(self) -> bool: ...
def seek(self, pos: int, whence: int = 0, /) -> int: ...
def truncate(self, pos: int | None = None, /) -> int: ...
def seek(self, pos: int, whence: int = 0, /) -> int:
"""
Change stream position.
Seek to character offset pos relative to position indicated by whence:
0 Start of stream (the default). pos should be >= 0;
1 Current position - pos must be 0;
2 End of stream - pos must be 0.
Returns the new absolute position.
"""
def truncate(self, pos: int | None = None, /) -> int:
"""
Truncate size to pos.
The pos argument defaults to the current file position, as
returned by tell(). The current file position is unchanged.
Returns the new absolute position.
"""
class IncrementalNewlineDecoder:
"""
Codec used when reading a file in universal newlines mode.
It wraps another incremental decoder, translating \\r\\n and \\r into \\n.
It also records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece. When used with decoder=None, it expects unicode strings as
decode input and translates newlines without first invoking an external
decoder.
"""
def __init__(self, decoder: codecs.IncrementalDecoder | None, translate: bool, errors: str = "strict") -> None: ...
def decode(self, input: ReadableBuffer | str, final: bool = False) -> str: ...
@property
@ -237,6 +748,20 @@ class IncrementalNewlineDecoder:
if sys.version_info >= (3, 10):
@overload
def text_encoding(encoding: None, stacklevel: int = 2, /) -> Literal["locale", "utf-8"]: ...
def text_encoding(encoding: None, stacklevel: int = 2, /) -> Literal["locale", "utf-8"]:
"""
A helper function to choose the text encoding.
When encoding is not None, this function returns it.
Otherwise, this function returns the default text encoding
(i.e. "locale" or "utf-8" depends on UTF-8 mode).
This function emits an EncodingWarning if encoding is None and
sys.flags.warn_default_encoding is true.
This can be used in APIs with an encoding=None parameter.
However, please consider using encoding="utf-8" for new APIs.
"""
@overload
def text_encoding(encoding: _T, stacklevel: int = 2, /) -> _T: ...
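How an API with an encoding=None parameter might apply this helper, per the docstring above (a hedged sketch; read_config is a made-up function):

import io

def read_config(path, encoding=None):
    # None resolves to "locale" or "utf-8" (and may emit EncodingWarning);
    # an explicit encoding is passed through unchanged.
    encoding = io.text_encoding(encoding)
    with open(path, encoding=encoding) as f:
        return f.read()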


@ -1,41 +1,88 @@
"""
json speedups
"""
from collections.abc import Callable
from typing import Any, final
from typing_extensions import Self
@final
class make_encoder:
"""
Encoder(markers, default, encoder, indent, key_separator, item_separator, sort_keys, skipkeys, allow_nan)
"""
@property
def sort_keys(self) -> bool: ...
def sort_keys(self) -> bool:
"""
sort_keys
"""
@property
def skipkeys(self) -> bool: ...
def skipkeys(self) -> bool:
"""
skipkeys
"""
@property
def key_separator(self) -> str: ...
def key_separator(self) -> str:
"""
key_separator
"""
@property
def indent(self) -> int | None: ...
def indent(self) -> str | None:
"""
indent
"""
@property
def markers(self) -> dict[int, Any] | None: ...
def markers(self) -> dict[int, Any] | None:
"""
markers
"""
@property
def default(self) -> Callable[[Any], Any]: ...
def default(self) -> Callable[[Any], Any]:
"""
default
"""
@property
def encoder(self) -> Callable[[str], str]: ...
def encoder(self) -> Callable[[str], str]:
"""
encoder
"""
@property
def item_separator(self) -> str: ...
def item_separator(self) -> str:
"""
item_separator
"""
def __new__(
cls,
markers: dict[int, Any] | None,
default: Callable[[Any], Any],
encoder: Callable[[str], str],
indent: int | None,
indent: str | None,
key_separator: str,
item_separator: str,
sort_keys: bool,
skipkeys: bool,
allow_nan: bool,
) -> Self: ...
def __call__(self, obj: object, _current_indent_level: int) -> Any: ...
def __call__(self, obj: object, _current_indent_level: int) -> Any:
"""
Call self as a function.
"""
@final
class make_scanner:
"""
JSON scanner object
"""
object_hook: Any
object_pairs_hook: Any
parse_int: Any
@ -44,8 +91,35 @@ class make_scanner:
strict: bool
# TODO: 'context' needs the attrs above (ducktype), but not __call__.
def __new__(cls, context: make_scanner) -> Self: ...
def __call__(self, string: str, index: int) -> tuple[Any, int]: ...
def __call__(self, string: str, index: int) -> tuple[Any, int]:
"""
Call self as a function.
"""
def encode_basestring(s: str, /) -> str: ...
def encode_basestring_ascii(s: str, /) -> str: ...
def scanstring(string: str, end: int, strict: bool = ...) -> tuple[str, int]: ...
def encode_basestring(s: str, /) -> str:
"""
encode_basestring(string) -> string
Return a JSON representation of a Python string
"""
def encode_basestring_ascii(s: str, /) -> str:
"""
encode_basestring_ascii(string) -> string
Return an ASCII-only JSON representation of a Python string
"""
def scanstring(string: str, end: int, strict: bool = ...) -> tuple[str, int]:
"""
scanstring(string, end, strict=True) -> (string, end)
Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote.
"""


@ -1,3 +1,7 @@
"""
Support for POSIX locales.
"""
import sys
from _typeshed import StrPath
from typing import Final, Literal, TypedDict, type_check_only
@ -31,14 +35,31 @@ LC_NUMERIC: Final[int]
LC_ALL: Final[int]
CHAR_MAX: Final = 127
def setlocale(category: int, locale: str | None = None, /) -> str: ...
def localeconv() -> _LocaleConv: ...
def setlocale(category: int, locale: str | None = None, /) -> str:
"""
Activates/queries locale processing.
"""
def localeconv() -> _LocaleConv:
"""
Returns numeric and monetary locale-specific parameters.
"""
if sys.version_info >= (3, 11):
def getencoding() -> str: ...
def getencoding() -> str:
"""
Get the current locale encoding.
"""
def strcoll(os1: str, os2: str, /) -> int: ...
def strxfrm(string: str, /) -> str: ...
def strcoll(os1: str, os2: str, /) -> int:
"""
Compares two strings according to the locale.
"""
def strxfrm(string: str, /) -> str:
"""
Return a string that can be used as a key for locale-aware comparisons.
"""
# native gettext functions
# https://docs.python.org/3/library/locale.html#access-to-message-catalogs
@ -108,14 +129,43 @@ if sys.platform != "win32":
CRNCYSTR: Final[int]
ALT_DIGITS: Final[int]
def nl_langinfo(key: int, /) -> str: ...
def nl_langinfo(key: int, /) -> str:
"""
Return the value for the locale information associated with key.
"""
# This is dependent on `libintl.h` which is a part of `gettext`
# system dependency. These functions might be missing.
# But, we always say that they are present.
def gettext(msg: str, /) -> str: ...
def dgettext(domain: str | None, msg: str, /) -> str: ...
def dcgettext(domain: str | None, msg: str, category: int, /) -> str: ...
def textdomain(domain: str | None, /) -> str: ...
def bindtextdomain(domain: str, dir: StrPath | None, /) -> str: ...
def bind_textdomain_codeset(domain: str, codeset: str | None, /) -> str | None: ...
def gettext(msg: str, /) -> str:
"""
gettext(msg) -> string
Return translation of msg.
"""
def dgettext(domain: str | None, msg: str, /) -> str:
"""
dgettext(domain, msg) -> string
Return translation of msg in domain.
"""
def dcgettext(domain: str | None, msg: str, category: int, /) -> str:
"""
Return translation of msg in domain and category.
"""
def textdomain(domain: str | None, /) -> str:
"""
Set the C library's textdmain to domain, returning the new domain.
"""
def bindtextdomain(domain: str, dir: StrPath | None, /) -> str:
"""
Bind the C library's domain to dir.
"""
def bind_textdomain_codeset(domain: str, codeset: str | None, /) -> str | None:
"""
Bind the C library's domain to codeset.
"""


@ -1,3 +1,7 @@
"""
Fast profiler
"""
import sys
from _typeshed import structseq
from collections.abc import Callable
@ -5,13 +9,66 @@ from types import CodeType
from typing import Any, Final, final
class Profiler:
"""
Build a profiler object using the specified timer function.
The default timer is a fast built-in one based on real time.
For custom timer functions returning integers, 'timeunit' can
be a float specifying a scale (that is, how long each integer unit
is, in seconds).
"""
def __init__(
self, timer: Callable[[], float] | None = None, timeunit: float = 0.0, subcalls: bool = True, builtins: bool = True
) -> None: ...
def getstats(self) -> list[profiler_entry]: ...
def enable(self, subcalls: bool = True, builtins: bool = True) -> None: ...
def disable(self) -> None: ...
def clear(self) -> None: ...
def getstats(self) -> list[profiler_entry]:
"""
list of profiler_entry objects.
getstats() -> list of profiler_entry objects
Return all information collected by the profiler.
Each profiler_entry is a tuple-like object with the
following attributes:
code code object
callcount how many times this was called
reccallcount how many times called recursively
totaltime total time in this entry
inlinetime inline time in this entry (not in subcalls)
calls details of the calls
The calls attribute is either None or a list of
profiler_subentry objects:
code called code object
callcount how many times this is called
reccallcount how many times this is called recursively
totaltime total time spent in this call
inlinetime inline time (not in further subcalls)
"""
def enable(self, subcalls: bool = True, builtins: bool = True) -> None:
"""
Start collecting profiling information.
subcalls
If True, also records for each function
statistics separated according to its current caller.
builtins
If True, records the time spent in
built-in functions separately from their caller.
"""
def disable(self) -> None:
"""
Stop collecting profiling information.
"""
def clear(self) -> None:
"""
Clear all profiling information collected so far.
"""
@final
class profiler_entry(structseq[Any], tuple[CodeType | str, int, int, float, float, list[profiler_subentry]]):


@ -37,23 +37,105 @@ PRESET_EXTREME: int # v big number
@final
class LZMADecompressor:
"""
Create a decompressor object for decompressing data incrementally.
format
Specifies the container format of the input stream. If this is
FORMAT_AUTO (the default), the decompressor will automatically detect
whether the input is FORMAT_XZ or FORMAT_ALONE. Streams created with
FORMAT_RAW cannot be autodetected.
memlimit
Limit the amount of memory used by the decompressor. This will cause
decompression to fail if the input cannot be decompressed within the
given limit.
filters
A custom filter chain. This argument is required for FORMAT_RAW, and
not accepted with any other format. When provided, this should be a
sequence of dicts, each indicating the ID and options for a single
filter.
For one-shot decompression, use the decompress() function instead.
"""
if sys.version_info >= (3, 12):
def __new__(cls, format: int | None = ..., memlimit: int | None = ..., filters: _FilterChain | None = ...) -> Self: ...
else:
def __init__(self, format: int | None = ..., memlimit: int | None = ..., filters: _FilterChain | None = ...) -> None: ...
def decompress(self, data: ReadableBuffer, max_length: int = -1) -> bytes: ...
def decompress(self, data: ReadableBuffer, max_length: int = -1) -> bytes:
"""
Decompress *data*, returning uncompressed data as bytes.
If *max_length* is nonnegative, returns at most *max_length* bytes of
decompressed data. If this limit is reached and further output can be
produced, *self.needs_input* will be set to ``False``. In this case, the next
call to *decompress()* may provide *data* as b'' to obtain more of the output.
If all of the input data was decompressed and returned (either because this
was less than *max_length* bytes, or because *max_length* was negative),
*self.needs_input* will be set to True.
Attempting to decompress data after the end of stream is reached raises an
EOFError. Any data found after the end of the stream is ignored and saved in
the unused_data attribute.
"""
@property
def check(self) -> int: ...
def check(self) -> int:
"""
ID of the integrity check used by the input stream.
"""
@property
def eof(self) -> bool: ...
def eof(self) -> bool:
"""
True if the end-of-stream marker has been reached.
"""
@property
def unused_data(self) -> bytes: ...
def unused_data(self) -> bytes:
"""
Data found after the end of the compressed stream.
"""
@property
def needs_input(self) -> bool: ...
def needs_input(self) -> bool:
"""
True if more input is needed before more decompressed data can be produced.
"""
@final
class LZMACompressor:
"""
LZMACompressor(format=FORMAT_XZ, check=-1, preset=None, filters=None)
Create a compressor object for compressing data incrementally.
format specifies the container format to use for the output. This can
be FORMAT_XZ (default), FORMAT_ALONE, or FORMAT_RAW.
check specifies the integrity check to use. For FORMAT_XZ, the default
is CHECK_CRC64. FORMAT_ALONE and FORMAT_RAW do not support integrity
checks; for these formats, check must be omitted, or be CHECK_NONE.
The settings used by the compressor can be specified either as a
preset compression level (with the 'preset' argument), or in detail
as a custom filter chain (with the 'filters' argument). For FORMAT_XZ
and FORMAT_ALONE, the default is to use the PRESET_DEFAULT preset
level. For FORMAT_RAW, the caller must always specify a filter chain;
the raw compressor does not support preset compression levels.
preset (if provided) should be an integer in the range 0-9, optionally
OR-ed with the constant PRESET_EXTREME.
filters (if provided) should be a sequence of dicts. Each dict should
have an entry for "id" indicating the ID of the filter, plus
additional entries for options to the filter.
For one-shot compression, use the compress() function instead.
"""
if sys.version_info >= (3, 12):
def __new__(
cls, format: int | None = ..., check: int = ..., preset: int | None = ..., filters: _FilterChain | None = ...
@ -63,9 +145,33 @@ class LZMACompressor:
self, format: int | None = ..., check: int = ..., preset: int | None = ..., filters: _FilterChain | None = ...
) -> None: ...
def compress(self, data: ReadableBuffer, /) -> bytes: ...
def flush(self) -> bytes: ...
class LZMAError(Exception): ...
def is_check_supported(check_id: int, /) -> bool: ...
def compress(self, data: ReadableBuffer, /) -> bytes:
"""
Provide data to the compressor object.
Returns a chunk of compressed data if possible, or b'' otherwise.
When you have finished providing data to the compressor, call the
flush() method to finish the compression process.
"""
def flush(self) -> bytes:
"""
Finish the compression process.
Returns the compressed data left in internal buffers.
The compressor object may not be used after this method is called.
"""
class LZMAError(Exception):
"""
Call to liblzma failed.
"""
def is_check_supported(check_id: int, /) -> bool:
"""
Test whether the given integrity check is supported.
Always returns True for CHECK_NONE and CHECK_CRC32.
"""


@ -1,13 +1,29 @@
"""
Shared support for scanning document type declarations in HTML and XHTML.
This module is used as a foundation for the html.parser module. It has no
documented public API and should not be used directly.
"""
import sys
from typing import Any
class ParserBase:
"""
Parser base class which provides some common support methods used
by the SGML/HTML and XHTML parsers.
"""
def reset(self) -> None: ...
def getpos(self) -> tuple[int, int]: ...
def getpos(self) -> tuple[int, int]:
"""
Return current line number and offset.
"""
def unknown_decl(self, data: str) -> None: ...
def parse_comment(self, i: int, report: int = 1) -> int: ... # undocumented
def parse_comment(self, i: int, report: bool = True) -> int: ... # undocumented
def parse_declaration(self, i: int) -> int: ... # undocumented
def parse_marked_section(self, i: int, report: int = 1) -> int: ... # undocumented
def parse_marked_section(self, i: int, report: bool = True) -> int: ... # undocumented
def updatepos(self, i: int, j: int) -> int: ... # undocumented
if sys.version_info < (3, 10):
# Removed from ParserBase: https://bugs.python.org/issue31844
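ParserBase is consumed through html.parser.HTMLParser; getpos() reports the current (line, offset) while feeding data:

from html.parser import HTMLParser

class Tracker(HTMLParser):
    def handle_starttag(self, tag, attrs):
        print(tag, self.getpos())    # e.g. ('p', (1, 0))

Tracker().feed("<p>hi</p>")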


@ -1,3 +1,13 @@
"""
Operator interface.
This module exports a set of functions implemented in C corresponding
to the intrinsic operators of Python. For example, operator.add(x, y)
is equivalent to the expression x+y. The function names are those
used for special methods; variants without leading and trailing
'__' are also provided for convenience.
"""
import sys
from _typeshed import SupportsGetItem
from collections.abc import Callable, Container, Iterable, MutableMapping, MutableSequence, Sequence
@ -40,76 +50,308 @@ class _SupportsPos(Protocol[_T_co]):
def __pos__(self) -> _T_co: ...
# All four comparison functions must have the same signature, or we get false-positive errors
def lt(a: _SupportsComparison, b: _SupportsComparison, /) -> Any: ...
def le(a: _SupportsComparison, b: _SupportsComparison, /) -> Any: ...
def eq(a: object, b: object, /) -> Any: ...
def ne(a: object, b: object, /) -> Any: ...
def ge(a: _SupportsComparison, b: _SupportsComparison, /) -> Any: ...
def gt(a: _SupportsComparison, b: _SupportsComparison, /) -> Any: ...
def not_(a: object, /) -> bool: ...
def truth(a: object, /) -> bool: ...
def is_(a: object, b: object, /) -> bool: ...
def is_not(a: object, b: object, /) -> bool: ...
def abs(a: SupportsAbs[_T], /) -> _T: ...
def add(a: Any, b: Any, /) -> Any: ...
def and_(a: Any, b: Any, /) -> Any: ...
def floordiv(a: Any, b: Any, /) -> Any: ...
def index(a: SupportsIndex, /) -> int: ...
def inv(a: _SupportsInversion[_T_co], /) -> _T_co: ...
def invert(a: _SupportsInversion[_T_co], /) -> _T_co: ...
def lshift(a: Any, b: Any, /) -> Any: ...
def mod(a: Any, b: Any, /) -> Any: ...
def mul(a: Any, b: Any, /) -> Any: ...
def matmul(a: Any, b: Any, /) -> Any: ...
def neg(a: _SupportsNeg[_T_co], /) -> _T_co: ...
def or_(a: Any, b: Any, /) -> Any: ...
def pos(a: _SupportsPos[_T_co], /) -> _T_co: ...
def pow(a: Any, b: Any, /) -> Any: ...
def rshift(a: Any, b: Any, /) -> Any: ...
def sub(a: Any, b: Any, /) -> Any: ...
def truediv(a: Any, b: Any, /) -> Any: ...
def xor(a: Any, b: Any, /) -> Any: ...
def concat(a: Sequence[_T], b: Sequence[_T], /) -> Sequence[_T]: ...
def contains(a: Container[object], b: object, /) -> bool: ...
def countOf(a: Iterable[object], b: object, /) -> int: ...
def lt(a: _SupportsComparison, b: _SupportsComparison, /) -> Any:
"""
Same as a < b.
"""
def le(a: _SupportsComparison, b: _SupportsComparison, /) -> Any:
"""
Same as a <= b.
"""
def eq(a: object, b: object, /) -> Any:
"""
Same as a == b.
"""
def ne(a: object, b: object, /) -> Any:
"""
Same as a != b.
"""
def ge(a: _SupportsComparison, b: _SupportsComparison, /) -> Any:
"""
Same as a >= b.
"""
def gt(a: _SupportsComparison, b: _SupportsComparison, /) -> Any:
"""
Same as a > b.
"""
def not_(a: object, /) -> bool:
"""
Same as not a.
"""
def truth(a: object, /) -> bool:
"""
Return True if a is true, False otherwise.
"""
def is_(a: object, b: object, /) -> bool:
"""
Same as a is b.
"""
def is_not(a: object, b: object, /) -> bool:
"""
Same as a is not b.
"""
def abs(a: SupportsAbs[_T], /) -> _T:
"""
Same as abs(a).
"""
def add(a: Any, b: Any, /) -> Any:
"""
Same as a + b.
"""
def and_(a: Any, b: Any, /) -> Any:
"""
Same as a & b.
"""
def floordiv(a: Any, b: Any, /) -> Any:
"""
Same as a // b.
"""
def index(a: SupportsIndex, /) -> int:
"""
Same as a.__index__()
"""
def inv(a: _SupportsInversion[_T_co], /) -> _T_co:
"""
Same as ~a.
"""
def invert(a: _SupportsInversion[_T_co], /) -> _T_co:
"""
Same as ~a.
"""
def lshift(a: Any, b: Any, /) -> Any:
"""
Same as a << b.
"""
def mod(a: Any, b: Any, /) -> Any:
"""
Same as a % b.
"""
def mul(a: Any, b: Any, /) -> Any:
"""
Same as a * b.
"""
def matmul(a: Any, b: Any, /) -> Any:
"""
Same as a @ b.
"""
def neg(a: _SupportsNeg[_T_co], /) -> _T_co:
"""
Same as -a.
"""
def or_(a: Any, b: Any, /) -> Any:
"""
Same as a | b.
"""
def pos(a: _SupportsPos[_T_co], /) -> _T_co:
"""
Same as +a.
"""
def pow(a: Any, b: Any, /) -> Any:
"""
Same as a ** b.
"""
def rshift(a: Any, b: Any, /) -> Any:
"""
Same as a >> b.
"""
def sub(a: Any, b: Any, /) -> Any:
"""
Same as a - b.
"""
def truediv(a: Any, b: Any, /) -> Any:
"""
Same as a / b.
"""
def xor(a: Any, b: Any, /) -> Any:
"""
Same as a ^ b.
"""
def concat(a: Sequence[_T], b: Sequence[_T], /) -> Sequence[_T]:
"""
Same as a + b, for a and b sequences.
"""
def contains(a: Container[object], b: object, /) -> bool:
"""
Same as b in a (note reversed operands).
"""
def countOf(a: Iterable[object], b: object, /) -> int:
"""
Return the number of items in a which are, or which equal, b.
"""
@overload
def delitem(a: MutableSequence[Any], b: SupportsIndex, /) -> None: ...
def delitem(a: MutableSequence[Any], b: SupportsIndex, /) -> None:
"""
Same as del a[b].
"""
@overload
def delitem(a: MutableSequence[Any], b: slice, /) -> None: ...
@overload
def delitem(a: MutableMapping[_K, Any], b: _K, /) -> None: ...
@overload
def getitem(a: Sequence[_T], b: slice, /) -> Sequence[_T]: ...
def getitem(a: Sequence[_T], b: slice, /) -> Sequence[_T]:
"""
Same as a[b].
"""
@overload
def getitem(a: SupportsGetItem[_K, _V], b: _K, /) -> _V: ...
def indexOf(a: Iterable[_T], b: _T, /) -> int: ...
def indexOf(a: Iterable[_T], b: _T, /) -> int:
"""
Return the first index of b in a.
"""
@overload
def setitem(a: MutableSequence[_T], b: SupportsIndex, c: _T, /) -> None: ...
def setitem(a: MutableSequence[_T], b: SupportsIndex, c: _T, /) -> None:
"""
Same as a[b] = c.
"""
@overload
def setitem(a: MutableSequence[_T], b: slice, c: Sequence[_T], /) -> None: ...
@overload
def setitem(a: MutableMapping[_K, _V], b: _K, c: _V, /) -> None: ...
def length_hint(obj: object, default: int = 0, /) -> int: ...
def iadd(a: Any, b: Any, /) -> Any: ...
def iand(a: Any, b: Any, /) -> Any: ...
def iconcat(a: Any, b: Any, /) -> Any: ...
def ifloordiv(a: Any, b: Any, /) -> Any: ...
def ilshift(a: Any, b: Any, /) -> Any: ...
def imod(a: Any, b: Any, /) -> Any: ...
def imul(a: Any, b: Any, /) -> Any: ...
def imatmul(a: Any, b: Any, /) -> Any: ...
def ior(a: Any, b: Any, /) -> Any: ...
def ipow(a: Any, b: Any, /) -> Any: ...
def irshift(a: Any, b: Any, /) -> Any: ...
def isub(a: Any, b: Any, /) -> Any: ...
def itruediv(a: Any, b: Any, /) -> Any: ...
def ixor(a: Any, b: Any, /) -> Any: ...
def length_hint(obj: object, default: int = 0, /) -> int:
"""
Return an estimate of the number of items in obj.
This is useful for presizing containers when building from an iterable.
If the object supports len(), the result will be exact.
Otherwise, it may over- or under-estimate by an arbitrary amount.
The result will be an integer >= 0.
"""
def iadd(a: Any, b: Any, /) -> Any:
"""
Same as a += b.
"""
def iand(a: Any, b: Any, /) -> Any:
"""
Same as a &= b.
"""
def iconcat(a: Any, b: Any, /) -> Any:
"""
Same as a += b, for a and b sequences.
"""
def ifloordiv(a: Any, b: Any, /) -> Any:
"""
Same as a //= b.
"""
def ilshift(a: Any, b: Any, /) -> Any:
"""
Same as a <<= b.
"""
def imod(a: Any, b: Any, /) -> Any:
"""
Same as a %= b.
"""
def imul(a: Any, b: Any, /) -> Any:
"""
Same as a *= b.
"""
def imatmul(a: Any, b: Any, /) -> Any:
"""
Same as a @= b.
"""
def ior(a: Any, b: Any, /) -> Any:
"""
Same as a |= b.
"""
def ipow(a: Any, b: Any, /) -> Any:
"""
Same as a **= b.
"""
def irshift(a: Any, b: Any, /) -> Any:
"""
Same as a >>= b.
"""
def isub(a: Any, b: Any, /) -> Any:
"""
Same as a -= b.
"""
def itruediv(a: Any, b: Any, /) -> Any:
"""
Same as a /= b.
"""
def ixor(a: Any, b: Any, /) -> Any:
"""
Same as a ^= b.
"""
if sys.version_info >= (3, 11):
def call(obj: Callable[_P, _R], /, *args: _P.args, **kwargs: _P.kwargs) -> _R: ...
def call(obj: Callable[_P, _R], /, *args: _P.args, **kwargs: _P.kwargs) -> _R:
"""
Same as obj(*args, **kwargs).
"""
def _compare_digest(a: AnyStr, b: AnyStr, /) -> bool: ...
def _compare_digest(a: AnyStr, b: AnyStr, /) -> bool:
"""
Return 'a == b'.
This function uses an approach designed to prevent
timing analysis, making it appropriate for cryptography.
a and b must both be of the same type: either str (ASCII only),
or any bytes-like object.
Note: If a and b are of different lengths, or if an error occurs,
a timing attack could theoretically reveal information about the
types and lengths of a and b--but not their values.
"""
if sys.version_info >= (3, 14):
def is_none(a: object, /) -> TypeIs[None]: ...
def is_not_none(a: _T | None, /) -> TypeIs[_T]: ...
def is_none(a: object, /) -> TypeIs[None]:
"""
Same as a is None.
"""
def is_not_none(a: _T | None, /) -> TypeIs[_T]:
"""
Same as a is not None.
"""


@ -1,3 +1,7 @@
"""
Shared OS X support functions.
"""
from collections.abc import Iterable, Sequence
from typing import Final, TypeVar
@ -11,24 +15,117 @@ _UNIVERSAL_CONFIG_VARS: Final[tuple[str, ...]] # undocumented
_COMPILER_CONFIG_VARS: Final[tuple[str, ...]] # undocumented
_INITPRE: Final[str] # undocumented
def _find_executable(executable: str, path: str | None = None) -> str | None: ... # undocumented
def _read_output(commandstring: str, capture_stderr: bool = False) -> str | None: ... # undocumented
def _find_build_tool(toolname: str) -> str: ... # undocumented
def _find_executable(executable: str, path: str | None = None) -> str | None: # undocumented
"""
Tries to find 'executable' in the directories listed in 'path'.
A string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']. Returns the complete filename or None if not found.
"""
def _read_output(commandstring: str, capture_stderr: bool = False) -> str | None: # undocumented
"""
Output from successful command execution or None
"""
def _find_build_tool(toolname: str) -> str: # undocumented
"""
Find a build tool on current path or using xcrun
"""
_SYSTEM_VERSION: Final[str | None] # undocumented
def _get_system_version() -> str: ... # undocumented
def _remove_original_values(_config_vars: dict[str, str]) -> None: ... # undocumented
def _save_modified_value(_config_vars: dict[str, str], cv: str, newvalue: str) -> None: ... # undocumented
def _supports_universal_builds() -> bool: ... # undocumented
def _find_appropriate_compiler(_config_vars: dict[str, str]) -> dict[str, str]: ... # undocumented
def _remove_universal_flags(_config_vars: dict[str, str]) -> dict[str, str]: ... # undocumented
def _remove_unsupported_archs(_config_vars: dict[str, str]) -> dict[str, str]: ... # undocumented
def _override_all_archs(_config_vars: dict[str, str]) -> dict[str, str]: ... # undocumented
def _check_for_unavailable_sdk(_config_vars: dict[str, str]) -> dict[str, str]: ... # undocumented
def compiler_fixup(compiler_so: Iterable[str], cc_args: Sequence[str]) -> list[str]: ...
def customize_config_vars(_config_vars: dict[str, str]) -> dict[str, str]: ...
def customize_compiler(_config_vars: dict[str, str]) -> dict[str, str]: ...
def get_platform_osx(
_config_vars: dict[str, str], osname: _T, release: _K, machine: _V
) -> tuple[str | _T, str | _K, str | _V]: ...
def _get_system_version() -> str: # undocumented
"""
Return the OS X system version as a string
"""
def _remove_original_values(_config_vars: dict[str, str]) -> None: # undocumented
"""
Remove original unmodified values for testing
"""
def _save_modified_value(_config_vars: dict[str, str], cv: str, newvalue: str) -> None: # undocumented
"""
Save modified and original unmodified value of configuration var
"""
def _supports_universal_builds() -> bool: # undocumented
"""
Returns True if universal builds are supported on this system
"""
def _find_appropriate_compiler(_config_vars: dict[str, str]) -> dict[str, str]: # undocumented
"""
Find appropriate C compiler for extension module builds
"""
def _remove_universal_flags(_config_vars: dict[str, str]) -> dict[str, str]: # undocumented
"""
Remove all universal build arguments from config vars
"""
def _remove_unsupported_archs(_config_vars: dict[str, str]) -> dict[str, str]: # undocumented
"""
Remove any unsupported archs from config vars
"""
def _override_all_archs(_config_vars: dict[str, str]) -> dict[str, str]: # undocumented
"""
Allow override of all archs with ARCHFLAGS env var
"""
def _check_for_unavailable_sdk(_config_vars: dict[str, str]) -> dict[str, str]: # undocumented
"""
Remove references to any SDKs not available
"""
def compiler_fixup(compiler_so: Iterable[str], cc_args: Sequence[str]) -> list[str]:
"""
This function will strip '-isysroot PATH' and '-arch ARCH' from the
compile flags if the user has specified one them in extra_compile_flags.
This is needed because '-arch ARCH' adds another architecture to the
build, without a way to remove an architecture. Furthermore GCC will
barf if multiple '-isysroot' arguments are present.
"""
def customize_config_vars(_config_vars: dict[str, str]) -> dict[str, str]:
"""
Customize Python build configuration variables.
Called internally from sysconfig with a mutable mapping
containing name/value pairs parsed from the configured
makefile used to build this interpreter. Returns
the mapping updated as needed to reflect the environment
in which the interpreter is running; in the case of
a Python from a binary installer, the installed
environment may be very different from the build
environment, i.e. different OS levels, different
built tools, different available CPU architectures.
This customization is performed whenever
distutils.sysconfig.get_config_vars() is first
called. It may be used in environments where no
compilers are present, i.e. when installing pure
Python dists. Customization of compiler paths
and detection of unavailable archs is deferred
until the first extension module build is
requested (in distutils.sysconfig.customize_compiler).
Currently called from distutils.sysconfig
"""
def customize_compiler(_config_vars: dict[str, str]) -> dict[str, str]:
"""
Customize compiler path and configuration variables.
This customization is performed when the first
extension module build is requested
in distutils.sysconfig.customize_compiler.
"""
def get_platform_osx(_config_vars: dict[str, str], osname: _T, release: _K, machine: _V) -> tuple[str | _T, str | _K, str | _V]:
"""
Filter values for get_platform()
"""


@@ -1,3 +1,7 @@
"""
Optimized C implementation for the Python pickle module.
"""
from _typeshed import ReadableBuffer, SupportsWrite
from collections.abc import Callable, Iterable, Iterator, Mapping
from pickle import PickleBuffer as PickleBuffer
@@ -25,10 +29,58 @@ def dump(
*,
fix_imports: bool = True,
buffer_callback: _BufferCallback = None,
) -> None: ...
def dumps(
obj: Any, protocol: int | None = None, *, fix_imports: bool = True, buffer_callback: _BufferCallback = None
) -> bytes: ...
) -> None:
"""
Write a pickled representation of obj to the open file object file.
This is equivalent to ``Pickler(file, protocol).dump(obj)``, but may
be more efficient.
The optional *protocol* argument tells the pickler to use the given
protocol; supported protocols are 0, 1, 2, 3, 4 and 5. The default
protocol is 5. It was introduced in Python 3.8, and is incompatible
with previous versions.
Specifying a negative protocol version selects the highest protocol
version supported. The higher the protocol used, the more recent the
version of Python needed to read the pickle produced.
The *file* argument must have a write() method that accepts a single
bytes argument. It can thus be a file object opened for binary
writing, an io.BytesIO instance, or any other custom object that meets
this interface.
If *fix_imports* is True and protocol is less than 3, pickle will try
to map the new Python 3 names to the old module names used in Python
2, so that the pickle data stream is readable with Python 2.
If *buffer_callback* is None (the default), buffer views are serialized
into *file* as part of the pickle stream. It is an error if
*buffer_callback* is not None and *protocol* is None or smaller than 5.
"""
def dumps(obj: Any, protocol: int | None = None, *, fix_imports: bool = True, buffer_callback: _BufferCallback = None) -> bytes:
"""
Return the pickled representation of the object as a bytes object.
The optional *protocol* argument tells the pickler to use the given
protocol; supported protocols are 0, 1, 2, 3, 4 and 5. The default
protocol is 5. It was introduced in Python 3.8, and is incompatible
with previous versions.
Specifying a negative protocol version selects the highest protocol
version supported. The higher the protocol used, the more recent the
version of Python needed to read the pickle produced.
If *fix_imports* is True and *protocol* is less than 3, pickle will
try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
If *buffer_callback* is None (the default), buffer views are serialized
into *file* as part of the pickle stream. It is an error if
*buffer_callback* is not None and *protocol* is None or smaller than 5.
"""
def load(
file: _ReadableFileobj,
*,
@@ -36,7 +88,33 @@ def load(
encoding: str = "ASCII",
errors: str = "strict",
buffers: Iterable[Any] | None = (),
) -> Any: ...
) -> Any:
"""
Read and return an object from the pickle data stored in a file.
This is equivalent to ``Unpickler(file).load()``, but may be more
efficient.
The protocol version of the pickle is detected automatically, so no
protocol argument is needed. Bytes past the pickled object's
representation are ignored.
The argument *file* must have two methods, a read() method that takes
an integer argument, and a readline() method that requires no
arguments. Both methods should return bytes. Thus *file* can be a
binary file object opened for reading, an io.BytesIO object, or any
other custom object that meets this interface.
Optional keyword arguments are *fix_imports*, *encoding* and *errors*,
which are used to control compatibility support for pickle stream
generated by Python 2. If *fix_imports* is True, pickle will try to
map the old Python 2 names to the new names used in Python 3. The
*encoding* and *errors* tell pickle how to decode 8-bit string
instances pickled by Python 2; these default to 'ASCII' and 'strict',
respectively. The *encoding* can be 'bytes' to read these 8-bit
string instances as bytes objects.
"""
def loads(
data: ReadableBuffer,
/,
@@ -45,7 +123,23 @@ def loads(
encoding: str = "ASCII",
errors: str = "strict",
buffers: Iterable[Any] | None = (),
) -> Any: ...
) -> Any:
"""
Read and return an object from the given pickle data.
The protocol version of the pickle is detected automatically, so no
protocol argument is needed. Bytes past the pickled object's
representation are ignored.
Optional keyword arguments are *fix_imports*, *encoding* and *errors*,
which are used to control compatibility support for pickle stream
generated by Python 2. If *fix_imports* is True, pickle will try to
map the old Python 2 names to the new names used in Python 3. The
*encoding* and *errors* tell pickle how to decode 8-bit string
instances pickled by Python 2; these default to 'ASCII' and 'strict',
respectively. The *encoding* can be 'bytes' to read these 8-bit
string instances as bytes objects.
"""
class PickleError(Exception): ...
class PicklingError(PickleError): ...
@@ -57,6 +151,39 @@ class PicklerMemoProxy:
def copy(self, /) -> dict[int, tuple[int, Any]]: ...
class Pickler:
"""
This takes a binary file for writing a pickle data stream.
The optional *protocol* argument tells the pickler to use the given
protocol; supported protocols are 0, 1, 2, 3, 4 and 5. The default
protocol is 5. It was introduced in Python 3.8, and is incompatible
with previous versions.
Specifying a negative protocol version selects the highest protocol
version supported. The higher the protocol used, the more recent the
version of Python needed to read the pickle produced.
The *file* argument must have a write() method that accepts a single
bytes argument. It can thus be a file object opened for binary
writing, an io.BytesIO instance, or any other custom object that meets
this interface.
If *fix_imports* is True and protocol is less than 3, pickle will try
to map the new Python 3 names to the old module names used in Python
2, so that the pickle data stream is readable with Python 2.
If *buffer_callback* is None (the default), buffer views are
serialized into *file* as part of the pickle stream.
If *buffer_callback* is not None, then it can be called any number
of times with a buffer view. If the callback returns a false value
(such as None), the given buffer is out-of-band; otherwise the
buffer is serialized in-band, i.e. inside the pickle stream.
It is an error if *buffer_callback* is not None and *protocol*
is None or smaller than 5.
"""
fast: bool
dispatch_table: Mapping[type, Callable[[Any], _ReducedType]]
reducer_override: Callable[[Any], Any]
@@ -72,9 +199,20 @@ class Pickler:
def memo(self) -> PicklerMemoProxy: ...
@memo.setter
def memo(self, value: PicklerMemoProxy | dict[int, tuple[int, Any]]) -> None: ...
def dump(self, obj: Any, /) -> None: ...
def clear_memo(self) -> None: ...
def dump(self, obj: Any, /) -> None:
"""
Write a pickled representation of the given object to the open file.
"""
def clear_memo(self) -> None:
"""
Clears the pickler's "memo".
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects are
pickled by reference and not by value. This method is useful when
re-using picklers.
"""
# this method has no default implementation for Python < 3.13
def persistent_id(self, obj: Any, /) -> Any: ...
@@ -84,6 +222,29 @@ class UnpicklerMemoProxy:
def copy(self, /) -> dict[int, tuple[int, Any]]: ...
class Unpickler:
"""
This takes a binary file for reading a pickle data stream.
The protocol version of the pickle is detected automatically, so no
protocol argument is needed. Bytes past the pickled object's
representation are ignored.
The argument *file* must have two methods, a read() method that takes
an integer argument, and a readline() method that requires no
arguments. Both methods should return bytes. Thus *file* can be a
binary file object opened for reading, an io.BytesIO object, or any
other custom object that meets this interface.
Optional keyword arguments are *fix_imports*, *encoding* and *errors*,
which are used to control compatibility support for pickle stream
generated by Python 2. If *fix_imports* is True, pickle will try to
map the old Python 2 names to the new names used in Python 3. The
*encoding* and *errors* tell pickle how to decode 8-bit string
instances pickled by Python 2; these default to 'ASCII' and 'strict',
respectively. The *encoding* can be 'bytes' to read these 8-bit
string instances as bytes objects.
"""
def __init__(
self,
file: _ReadableFileobj,
@@ -97,8 +258,25 @@ class Unpickler:
def memo(self) -> UnpicklerMemoProxy: ...
@memo.setter
def memo(self, value: UnpicklerMemoProxy | dict[int, tuple[int, Any]]) -> None: ...
def load(self) -> Any: ...
def find_class(self, module_name: str, global_name: str, /) -> Any: ...
def load(self) -> Any:
"""
Load a pickle.
Read a pickled object representation from the open file object given
in the constructor, and return the reconstituted object hierarchy
specified therein.
"""
def find_class(self, module_name: str, global_name: str, /) -> Any:
"""
Return an object from a specified module.
If necessary, the module will be imported. Subclasses may override
this method (e.g. to restrict unpickling of arbitrary classes and
functions).
This method is called whenever a class or a function object is
needed. Both arguments passed are str objects.
"""
# this method has no default implementation for Python < 3.13
def persistent_load(self, pid: Any, /) -> Any: ...


@@ -1,3 +1,7 @@
"""
A POSIX helper for the subprocess module.
"""
import sys
from _typeshed import StrOrBytesPath
from collections.abc import Callable, Sequence
@@ -29,7 +33,30 @@ if sys.platform != "win32":
child_umask: int,
preexec_fn: Callable[[], None],
/,
) -> int: ...
) -> int:
"""
Spawn a fresh new child process.
Fork a child process, close parent file descriptors as appropriate in the
child and duplicate the few that are needed before calling exec() in the
child process.
If close_fds is True, close file descriptors 3 and higher, except those listed
in the sorted tuple pass_fds.
The preexec_fn, if supplied, will be called immediately before closing file
descriptors and exec.
WARNING: preexec_fn is NOT SAFE if your application uses threads.
It may trigger infrequent, difficult to debug deadlocks.
If an error occurs in the child process before the exec, it is
serialized and written to the errpipe_write fd per subprocess.py.
Returns: the child process's PID.
Raises: Only on an error in the parent process.
"""
else:
def fork_exec(
args: Sequence[StrOrBytesPath] | None,
@@ -56,4 +83,27 @@ if sys.platform != "win32":
preexec_fn: Callable[[], None],
allow_vfork: bool,
/,
) -> int: ...
) -> int:
"""
Spawn a fresh new child process.
Fork a child process, close parent file descriptors as appropriate in the
child and duplicate the few that are needed before calling exec() in the
child process.
If close_fds is True, close file descriptors 3 and higher, except those listed
in the sorted tuple pass_fds.
The preexec_fn, if supplied, will be called immediately before closing file
descriptors and exec.
WARNING: preexec_fn is NOT SAFE if your application uses threads.
It may trigger infrequent, difficult to debug deadlocks.
If an error occurs in the child process before the exec, it is
serialized and written to the errpipe_write fd per subprocess.py.
Returns: the child process's PID.
Raises: Only on an error in the parent process.
"""


@@ -5,10 +5,36 @@ _T = TypeVar("_T")
_CacheToken = NewType("_CacheToken", int)
def get_cache_token() -> _CacheToken: ...
def get_cache_token() -> _CacheToken:
"""
Returns the current ABC cache token.
The token is an opaque object (supporting equality testing) identifying the
current version of the ABC cache for virtual subclasses. The token changes
with every call to ``register()`` on any ABC.
"""
class ABCMeta(type):
"""
Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
def __new__(
mcls: type[_typeshed.Self], name: str, bases: tuple[type[Any], ...], namespace: dict[str, Any], /
) -> _typeshed.Self: ...
def register(cls, subclass: type[_T]) -> type[_T]: ...
def register(cls, subclass: type[_T]) -> type[_T]:
"""
Register a virtual subclass of an ABC.
Returns the subclass, to allow usage as a class decorator.
"""


@@ -1,3 +1,7 @@
"""
Python decimal arithmetic module
"""
# This is a slight lie, the implementations aren't exactly identical
# However, in all likelihood, the differences are inconsequential
import sys


@@ -1,16 +1,72 @@
"""
C implementation of the Python queue module.
This module is an implementation detail, please do not use it directly.
"""
from types import GenericAlias
from typing import Any, Generic, TypeVar
_T = TypeVar("_T")
class Empty(Exception): ...
class Empty(Exception):
"""
Exception raised by Queue.get(block=0)/get_nowait().
"""
class SimpleQueue(Generic[_T]):
"""
Simple, unbounded, reentrant FIFO queue.
"""
def __init__(self) -> None: ...
def empty(self) -> bool: ...
def get(self, block: bool = True, timeout: float | None = None) -> _T: ...
def get_nowait(self) -> _T: ...
def put(self, item: _T, block: bool = True, timeout: float | None = None) -> None: ...
def put_nowait(self, item: _T) -> None: ...
def qsize(self) -> int: ...
def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
def empty(self) -> bool:
"""
Return True if the queue is empty, False otherwise (not reliable!).
"""
def get(self, block: bool = True, timeout: float | None = None) -> _T:
"""
Remove and return an item from the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Empty exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
in that case).
"""
def get_nowait(self) -> _T:
"""
Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the Empty exception.
"""
def put(self, item: _T, block: bool = True, timeout: float | None = None) -> None:
"""
Put the item on the queue.
The optional 'block' and 'timeout' arguments are ignored, as this method
never blocks. They are provided for compatibility with the Queue class.
"""
def put_nowait(self, item: _T) -> None:
"""
Put an item into the queue without blocking.
This is exactly equivalent to `put(item)` and is only provided
for compatibility with the Queue class.
"""
def qsize(self) -> int:
"""
Return the approximate size of the queue (not reliable!).
"""
def __class_getitem__(cls, item: Any, /) -> GenericAlias:
"""
See PEP 585
"""


@@ -1,12 +1,42 @@
"""
Module implements the Mersenne Twister random number generator.
"""
from typing_extensions import TypeAlias
# Actually Tuple[(int,) * 625]
_State: TypeAlias = tuple[int, ...]
class Random:
"""
Random() -> create a random number generator with its own internal state.
"""
def __init__(self, seed: object = ...) -> None: ...
def seed(self, n: object = None, /) -> None: ...
def getstate(self) -> _State: ...
def setstate(self, state: _State, /) -> None: ...
def random(self) -> float: ...
def getrandbits(self, k: int, /) -> int: ...
def seed(self, n: object = None, /) -> None:
"""
seed([n]) -> None.
Defaults to use urandom and falls back to a combination
of the current time and the process identifier.
"""
def getstate(self) -> _State:
"""
getstate() -> tuple containing the current state.
"""
def setstate(self, state: _State, /) -> None:
"""
setstate(state) -> None. Restores generator state.
"""
def random(self) -> float:
"""
random() -> x in the interval [0, 1).
"""
def getrandbits(self, k: int, /) -> int:
"""
getrandbits(k) -> x. Generates an int with k random bits.
"""


@@ -1,3 +1,7 @@
"""
The objects used by the site module to add custom builtins.
"""
import sys
from collections.abc import Iterable
from typing import ClassVar, Literal, NoReturn
@ -9,9 +13,24 @@ class Quitter:
def __call__(self, code: sys._ExitCode = None) -> NoReturn: ...
class _Printer:
"""
interactive prompt objects for printing the license text, a list of
contributors and the copyright notice.
"""
MAXLINES: ClassVar[Literal[23]]
def __init__(self, name: str, data: str, files: Iterable[str] = (), dirs: Iterable[str] = ()) -> None: ...
def __call__(self) -> None: ...
class _Helper:
"""
Define the builtin 'help'.
This is a wrapper around pydoc.help that provides a helpful message
when 'help' is typed at the Python interactive prompt.
Calling help() at the Python prompt starts an interactive help session.
Calling help(thing) prints help for the python object 'thing'.
"""
def __call__(self, request: object = ...) -> None: ...


@@ -1,3 +1,9 @@
"""
Implementation module for socket operations.
See the socket module for documentation.
"""
import sys
from _typeshed import ReadableBuffer, WriteableBuffer
from collections.abc import Iterable
@@ -732,15 +738,76 @@ if sys.platform != "win32" and sys.platform != "darwin":
# ===== Classes =====
class socket:
"""
socket(family=AF_INET, type=SOCK_STREAM, proto=0) -> socket object
socket(family=-1, type=-1, proto=-1, fileno=None) -> socket object
Open a socket of the given type. The family argument specifies the
address family; it defaults to AF_INET. The type argument specifies
whether this is a stream (SOCK_STREAM, this is the default)
or datagram (SOCK_DGRAM) socket. The protocol argument defaults to 0,
specifying the default protocol. Keyword arguments are accepted.
The socket is created as non-inheritable.
When a fileno is passed in, family, type and proto are auto-detected,
unless they are explicitly set.
A socket object represents one endpoint of a network connection.
Methods of socket objects (keyword arguments not allowed):
_accept() -- accept connection, returning new socket fd and client address
bind(addr) -- bind the socket to a local address
close() -- close the socket
connect(addr) -- connect the socket to a remote address
connect_ex(addr) -- connect, return an error code instead of an exception
dup() -- return a new socket fd duplicated from fileno()
fileno() -- return underlying file descriptor
getpeername() -- return remote address [*]
getsockname() -- return local address
getsockopt(level, optname[, buflen]) -- get socket options
gettimeout() -- return timeout or None
listen([n]) -- start listening for incoming connections
recv(buflen[, flags]) -- receive data
recv_into(buffer[, nbytes[, flags]]) -- receive data (into a buffer)
recvfrom(buflen[, flags]) -- receive data and sender's address
recvfrom_into(buffer[, nbytes, [, flags])
-- receive data and sender's address (into a buffer)
sendall(data[, flags]) -- send all data
send(data[, flags]) -- send data, may not send all of it
sendto(data[, flags], addr) -- send data to a given address
setblocking(bool) -- set or clear the blocking I/O flag
getblocking() -- return True if socket is blocking, False if non-blocking
setsockopt(level, optname, value[, optlen]) -- set socket options
settimeout(None | float) -- set or clear the timeout
shutdown(how) -- shut down traffic in one or both directions
[*] not available on all platforms!
"""
@property
def family(self) -> int: ...
def family(self) -> int:
"""
the socket family
"""
@property
def type(self) -> int: ...
def type(self) -> int:
"""
the socket type
"""
@property
def proto(self) -> int: ...
def proto(self) -> int:
"""
the socket protocol
"""
# F811: "Redefinition of unused `timeout`"
@property
def timeout(self) -> float | None: ... # noqa: F811
def timeout(self) -> float | None: # noqa: F811
"""
the socket timeout
"""
if sys.platform == "win32":
def __init__(
self, family: int = ..., type: int = ..., proto: int = ..., fileno: SupportsIndex | bytes | None = ...
@@ -748,38 +815,244 @@
else:
def __init__(self, family: int = ..., type: int = ..., proto: int = ..., fileno: SupportsIndex | None = ...) -> None: ...
def bind(self, address: _Address, /) -> None: ...
def close(self) -> None: ...
def connect(self, address: _Address, /) -> None: ...
def connect_ex(self, address: _Address, /) -> int: ...
def detach(self) -> int: ...
def fileno(self) -> int: ...
def getpeername(self) -> _RetAddress: ...
def getsockname(self) -> _RetAddress: ...
def bind(self, address: _Address, /) -> None:
"""
bind(address)
Bind the socket to a local address. For IP sockets, the address is a
pair (host, port); the host must refer to the local host. For raw packet
sockets the address is a tuple (ifname, proto [,pkttype [,hatype [,addr]]])
"""
def close(self) -> None:
"""
close()
Close the socket. It cannot be used after this call.
"""
def connect(self, address: _Address, /) -> None:
"""
connect(address)
Connect the socket to a remote address. For IP sockets, the address
is a pair (host, port).
"""
def connect_ex(self, address: _Address, /) -> int:
"""
connect_ex(address) -> errno
This is like connect(address), but returns an error code (the errno value)
instead of raising an exception when an error occurs.
"""
def detach(self) -> int:
"""
detach()
Close the socket object without closing the underlying file descriptor.
The object cannot be used after this call, but the file descriptor
can be reused for other purposes. The file descriptor is returned.
"""
def fileno(self) -> int:
"""
fileno() -> integer
Return the integer file descriptor of the socket.
"""
def getpeername(self) -> _RetAddress:
"""
getpeername() -> address info
Return the address of the remote endpoint. For IP sockets, the address
info is a pair (hostaddr, port).
"""
def getsockname(self) -> _RetAddress:
"""
getsockname() -> address info
Return the address of the local endpoint. The format depends on the
address family. For IPv4 sockets, the address info is a pair
(hostaddr, port). For IPv6 sockets, the address info is a 4-tuple
(hostaddr, port, flowinfo, scope_id).
"""
@overload
def getsockopt(self, level: int, optname: int, /) -> int: ...
def getsockopt(self, level: int, optname: int, /) -> int:
"""
getsockopt(level, option[, buffersize]) -> value
Get a socket option. See the Unix manual for level and option.
If a nonzero buffersize argument is given, the return value is a
string of that length; otherwise it is an integer.
"""
@overload
def getsockopt(self, level: int, optname: int, buflen: int, /) -> bytes: ...
def getblocking(self) -> bool: ...
def gettimeout(self) -> float | None: ...
def getblocking(self) -> bool:
"""
getblocking()
Returns True if socket is in blocking mode, or False if it
is in non-blocking mode.
"""
def gettimeout(self) -> float | None:
"""
gettimeout() -> timeout
Returns the timeout in seconds (float) associated with socket
operations. A timeout of None indicates that timeouts on socket
operations are disabled.
"""
if sys.platform == "win32":
def ioctl(self, control: int, option: int | tuple[int, int, int] | bool, /) -> None: ...
def listen(self, backlog: int = ..., /) -> None: ...
def recv(self, bufsize: int, flags: int = ..., /) -> bytes: ...
def recvfrom(self, bufsize: int, flags: int = ..., /) -> tuple[bytes, _RetAddress]: ...
def listen(self, backlog: int = ..., /) -> None:
"""
listen([backlog])
Enable a server to accept connections. If backlog is specified, it must be
at least 0 (if it is lower, it is set to 0); it specifies the number of
unaccepted connections that the system will allow before refusing new
connections. If not specified, a default reasonable value is chosen.
"""
def recv(self, bufsize: int, flags: int = ..., /) -> bytes:
"""
recv(buffersize[, flags]) -> data
Receive up to buffersize bytes from the socket. For the optional flags
argument, see the Unix manual. When no data is available, block until
at least one byte is available or until the remote end is closed. When
the remote end is closed and all data is read, return the empty string.
"""
def recvfrom(self, bufsize: int, flags: int = ..., /) -> tuple[bytes, _RetAddress]:
"""
recvfrom(buffersize[, flags]) -> (data, address info)
Like recv(buffersize, flags) but also return the sender's address info.
"""
if sys.platform != "win32":
def recvmsg(self, bufsize: int, ancbufsize: int = ..., flags: int = ..., /) -> tuple[bytes, list[_CMSG], int, Any]: ...
def recvmsg(self, bufsize: int, ancbufsize: int = ..., flags: int = ..., /) -> tuple[bytes, list[_CMSG], int, Any]:
"""
recvmsg(bufsize[, ancbufsize[, flags]]) -> (data, ancdata, msg_flags, address)
Receive normal data (up to bufsize bytes) and ancillary data from the
socket. The ancbufsize argument sets the size in bytes of the
internal buffer used to receive the ancillary data; it defaults to 0,
meaning that no ancillary data will be received. Appropriate buffer
sizes for ancillary data can be calculated using CMSG_SPACE() or
CMSG_LEN(), and items which do not fit into the buffer might be
truncated or discarded. The flags argument defaults to 0 and has the
same meaning as for recv().
The return value is a 4-tuple: (data, ancdata, msg_flags, address).
The data item is a bytes object holding the non-ancillary data
received. The ancdata item is a list of zero or more tuples
(cmsg_level, cmsg_type, cmsg_data) representing the ancillary data
(control messages) received: cmsg_level and cmsg_type are integers
specifying the protocol level and protocol-specific type respectively,
and cmsg_data is a bytes object holding the associated data. The
msg_flags item is the bitwise OR of various flags indicating
conditions on the received message; see your system documentation for
details. If the receiving socket is unconnected, address is the
address of the sending socket, if available; otherwise, its value is
unspecified.
If recvmsg() raises an exception after the system call returns, it
will first attempt to close any file descriptors received via the
SCM_RIGHTS mechanism.
"""
def recvmsg_into(
self, buffers: Iterable[WriteableBuffer], ancbufsize: int = ..., flags: int = ..., /
) -> tuple[int, list[_CMSG], int, Any]: ...
) -> tuple[int, list[_CMSG], int, Any]:
"""
recvmsg_into(buffers[, ancbufsize[, flags]]) -> (nbytes, ancdata, msg_flags, address)
Receive normal data and ancillary data from the socket, scattering the
non-ancillary data into a series of buffers. The buffers argument
must be an iterable of objects that export writable buffers
(e.g. bytearray objects); these will be filled with successive chunks
of the non-ancillary data until it has all been written or there are
no more buffers. The ancbufsize argument sets the size in bytes of
the internal buffer used to receive the ancillary data; it defaults to
0, meaning that no ancillary data will be received. Appropriate
buffer sizes for ancillary data can be calculated using CMSG_SPACE()
or CMSG_LEN(), and items which do not fit into the buffer might be
truncated or discarded. The flags argument defaults to 0 and has the
same meaning as for recv().
The return value is a 4-tuple: (nbytes, ancdata, msg_flags, address).
The nbytes item is the total number of bytes of non-ancillary data
written into the buffers. The ancdata item is a list of zero or more
tuples (cmsg_level, cmsg_type, cmsg_data) representing the ancillary
data (control messages) received: cmsg_level and cmsg_type are
integers specifying the protocol level and protocol-specific type
respectively, and cmsg_data is a bytes object holding the associated
data. The msg_flags item is the bitwise OR of various flags
indicating conditions on the received message; see your system
documentation for details. If the receiving socket is unconnected,
address is the address of the sending socket, if available; otherwise,
its value is unspecified.
If recvmsg_into() raises an exception after the system call returns,
it will first attempt to close any file descriptors received via the
SCM_RIGHTS mechanism.
"""
def recvfrom_into(self, buffer: WriteableBuffer, nbytes: int = ..., flags: int = ...) -> tuple[int, _RetAddress]:
"""
recvfrom_into(buffer[, nbytes[, flags]]) -> (nbytes, address info)
Like recv_into(buffer[, nbytes[, flags]]) but also return the sender's address info.
"""
def recv_into(self, buffer: WriteableBuffer, nbytes: int = ..., flags: int = ...) -> int:
"""
recv_into(buffer, [nbytes[, flags]]) -> nbytes_read
A version of recv() that stores its data into a buffer rather than creating
a new string. Receive up to buffersize bytes from the socket. If buffersize
is not specified (or 0), receive up to the size available in the given buffer.
See recv() for documentation about the flags.
"""
def send(self, data: ReadableBuffer, flags: int = ..., /) -> int:
"""
send(data[, flags]) -> count
Send a data string to the socket. For the optional flags
argument, see the Unix manual. Return the number of bytes
sent; this may be less than len(data) if the network is busy.
"""
def sendall(self, data: ReadableBuffer, flags: int = ..., /) -> None:
"""
sendall(data[, flags])
Send a data string to the socket. For the optional flags
argument, see the Unix manual. This calls send() repeatedly
until all data is sent. If an error occurs, it's impossible
to tell how much data has been sent.
"""
def recvfrom_into(self, buffer: WriteableBuffer, nbytes: int = ..., flags: int = ...) -> tuple[int, _RetAddress]: ...
def recv_into(self, buffer: WriteableBuffer, nbytes: int = ..., flags: int = ...) -> int: ...
def send(self, data: ReadableBuffer, flags: int = ..., /) -> int: ...
def sendall(self, data: ReadableBuffer, flags: int = ..., /) -> None: ...
@overload
def sendto(self, data: ReadableBuffer, address: _Address, /) -> int: ...
def sendto(self, data: ReadableBuffer, address: _Address, /) -> int:
"""
sendto(data[, flags], address) -> count
Like send(data, flags) but allows specifying the destination address.
For IP sockets, the address is a pair (hostaddr, port).
"""
@overload
def sendto(self, data: ReadableBuffer, flags: int, address: _Address, /) -> int: ...
if sys.platform != "win32":
@@ -790,29 +1063,98 @@ class socket:
flags: int = ...,
address: _Address | None = ...,
/,
) -> int: ...
) -> int:
"""
sendmsg(buffers[, ancdata[, flags[, address]]]) -> count
Send normal and ancillary data to the socket, gathering the
non-ancillary data from a series of buffers and concatenating it into
a single message. The buffers argument specifies the non-ancillary
data as an iterable of bytes-like objects (e.g. bytes objects).
The ancdata argument specifies the ancillary data (control messages)
as an iterable of zero or more tuples (cmsg_level, cmsg_type,
cmsg_data), where cmsg_level and cmsg_type are integers specifying the
protocol level and protocol-specific type respectively, and cmsg_data
is a bytes-like object holding the associated data. The flags
argument defaults to 0 and has the same meaning as for send(). If
address is supplied and not None, it sets a destination address for
the message. The return value is the number of bytes of non-ancillary
data sent.
"""
if sys.platform == "linux":
def sendmsg_afalg(
self, msg: Iterable[ReadableBuffer] = ..., *, op: int, iv: Any = ..., assoclen: int = ..., flags: int = ...
) -> int: ...
) -> int:
"""
sendmsg_afalg([msg], *, op[, iv[, assoclen[, flags=MSG_MORE]]])
Set operation mode, IV and length of associated data for an AF_ALG
operation socket.
"""
def setblocking(self, flag: bool, /) -> None:
"""
setblocking(flag)
Set the socket to blocking (flag is true) or non-blocking (false).
setblocking(True) is equivalent to settimeout(None);
setblocking(False) is equivalent to settimeout(0.0).
"""
def settimeout(self, value: float | None, /) -> None:
"""
settimeout(timeout)
Set a timeout on socket operations. 'timeout' can be a float,
giving in seconds, or None. Setting a timeout of None disables
the timeout feature and is equivalent to setblocking(1).
Setting a timeout of zero is the same as setblocking(0).
"""
def setblocking(self, flag: bool, /) -> None: ...
def settimeout(self, value: float | None, /) -> None: ...
@overload
def setsockopt(self, level: int, optname: int, value: int | ReadableBuffer, /) -> None: ...
def setsockopt(self, level: int, optname: int, value: int | ReadableBuffer, /) -> None:
"""
setsockopt(level, option, value: int)
setsockopt(level, option, value: buffer)
setsockopt(level, option, None, optlen: int)
Set a socket option. See the Unix manual for level and option.
The value argument can either be an integer, a string buffer, or
None, optlen.
"""
@overload
def setsockopt(self, level: int, optname: int, value: None, optlen: int, /) -> None: ...
if sys.platform == "win32":
def share(self, process_id: int, /) -> bytes: ...
def shutdown(self, how: int, /) -> None: ...
def shutdown(self, how: int, /) -> None:
"""
shutdown(flag)
Shut down the reading side of the socket (flag == SHUT_RD), the writing side
of the socket (flag == SHUT_WR), or both ends (flag == SHUT_RDWR).
"""
SocketType = socket
# ===== Functions =====
def close(fd: SupportsIndex, /) -> None: ...
def dup(fd: SupportsIndex, /) -> int: ...
def close(fd: SupportsIndex, /) -> None:
"""
close(integer) -> None
Close an integer socket file descriptor. This is like os.close(), but for
sockets; on some platforms os.close() won't work for socket file descriptors.
"""
def dup(fd: SupportsIndex, /) -> int:
"""
dup(integer) -> integer
Duplicate an integer socket file descriptor. This is like os.dup(), but for
sockets; on some platforms os.dup() won't work for socket file descriptors.
"""
# the 5th tuple item is an address
def getaddrinfo(
@@ -822,41 +1164,207 @@
type: int = ...,
proto: int = ...,
flags: int = ...,
) -> list[tuple[int, int, int, str, tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes]]]: ...
def gethostbyname(hostname: str, /) -> str: ...
def gethostbyname_ex(hostname: str, /) -> tuple[str, list[str], list[str]]: ...
def gethostname() -> str: ...
def gethostbyaddr(ip_address: str, /) -> tuple[str, list[str], list[str]]: ...
def getnameinfo(sockaddr: tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes], flags: int, /) -> tuple[str, str]: ...
def getprotobyname(protocolname: str, /) -> int: ...
def getservbyname(servicename: str, protocolname: str = ..., /) -> int: ...
def getservbyport(port: int, protocolname: str = ..., /) -> str: ...
def ntohl(x: int, /) -> int: ... # param & ret val are 32-bit ints
def ntohs(x: int, /) -> int: ... # param & ret val are 16-bit ints
def htonl(x: int, /) -> int: ... # param & ret val are 32-bit ints
def htons(x: int, /) -> int: ... # param & ret val are 16-bit ints
def inet_aton(ip_addr: str, /) -> bytes: ... # ret val 4 bytes in length
def inet_ntoa(packed_ip: ReadableBuffer, /) -> str: ...
def inet_pton(address_family: int, ip_string: str, /) -> bytes: ...
def inet_ntop(address_family: int, packed_ip: ReadableBuffer, /) -> str: ...
def getdefaulttimeout() -> float | None: ...
) -> list[tuple[int, int, int, str, tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes]]]:
"""
getaddrinfo(host, port [, family, type, proto, flags])
-> list of (family, type, proto, canonname, sockaddr)
Resolve host and port into addrinfo struct.
"""
def gethostbyname(hostname: str, /) -> str:
"""
gethostbyname(host) -> address
Return the IP address (a string of the form '255.255.255.255') for a host.
"""
def gethostbyname_ex(hostname: str, /) -> tuple[str, list[str], list[str]]:
"""
gethostbyname_ex(host) -> (name, aliaslist, addresslist)
Return the true host name, a list of aliases, and a list of IP addresses,
for a host. The host argument is a string giving a host name or IP number.
"""
def gethostname() -> str:
"""
gethostname() -> string
Return the current host name.
"""
def gethostbyaddr(ip_address: str, /) -> tuple[str, list[str], list[str]]:
"""
gethostbyaddr(host) -> (name, aliaslist, addresslist)
Return the true host name, a list of aliases, and a list of IP addresses,
for a host. The host argument is a string giving a host name or IP number.
"""
def getnameinfo(sockaddr: tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes], flags: int, /) -> tuple[str, str]:
"""
getnameinfo(sockaddr, flags) --> (host, port)
Get host and port for a sockaddr.
"""
def getprotobyname(protocolname: str, /) -> int:
"""
getprotobyname(name) -> integer
Return the protocol number for the named protocol. (Rarely used.)
"""
def getservbyname(servicename: str, protocolname: str = ..., /) -> int:
"""
getservbyname(servicename[, protocolname]) -> integer
Return a port number from a service name and protocol name.
The optional protocol name, if given, should be 'tcp' or 'udp',
otherwise any protocol will match.
"""
def getservbyport(port: int, protocolname: str = ..., /) -> str:
"""
getservbyport(port[, protocolname]) -> string
Return the service name from a port number and protocol name.
The optional protocol name, if given, should be 'tcp' or 'udp',
otherwise any protocol will match.
"""
def ntohl(x: int, /) -> int: # param & ret val are 32-bit ints
"""
Convert a 32-bit unsigned integer from network to host byte order.
"""
def ntohs(x: int, /) -> int: # param & ret val are 16-bit ints
"""
Convert a 16-bit unsigned integer from network to host byte order.
"""
def htonl(x: int, /) -> int: # param & ret val are 32-bit ints
"""
Convert a 32-bit unsigned integer from host to network byte order.
"""
def htons(x: int, /) -> int: # param & ret val are 16-bit ints
"""
Convert a 16-bit unsigned integer from host to network byte order.
"""
def inet_aton(ip_addr: str, /) -> bytes: # ret val 4 bytes in length
"""
Convert an IP address in string format (123.45.67.89) to the 32-bit packed binary format used in low-level network functions.
"""
def inet_ntoa(packed_ip: ReadableBuffer, /) -> str:
"""
Convert an IP address from 32-bit packed binary format to string format.
"""
def inet_pton(address_family: int, ip_string: str, /) -> bytes:
"""
inet_pton(af, ip) -> packed IP address string
Convert an IP address from string format to a packed string suitable
for use with low-level network functions.
"""
def inet_ntop(address_family: int, packed_ip: ReadableBuffer, /) -> str:
"""
inet_ntop(af, packed_ip) -> string formatted IP address
Convert a packed IP address of the given family to string format.
"""
def getdefaulttimeout() -> float | None:
"""
getdefaulttimeout() -> timeout
Returns the default timeout in seconds (float) for new socket objects.
A value of None indicates that new socket objects have no timeout.
When the socket module is first imported, the default is None.
"""
# F811: "Redefinition of unused `timeout`"
def setdefaulttimeout(timeout: float | None, /) -> None: ... # noqa: F811
def setdefaulttimeout(timeout: float | None, /) -> None: # noqa: F811
"""
setdefaulttimeout(timeout)
Set the default timeout in seconds (float) for new socket objects.
A value of None indicates that new socket objects have no timeout.
When the socket module is first imported, the default is None.
"""
if sys.platform != "win32":
def sethostname(name: str, /) -> None: ...
def CMSG_LEN(length: int, /) -> int: ...
def CMSG_SPACE(length: int, /) -> int: ...
def socketpair(family: int = ..., type: int = ..., proto: int = ..., /) -> tuple[socket, socket]: ...
def sethostname(name: str, /) -> None:
"""
sethostname(name)
Sets the hostname to name.
"""
def if_nameindex() -> list[tuple[int, str]]: ...
def if_nametoindex(oname: str, /) -> int: ...
def CMSG_LEN(length: int, /) -> int:
"""
CMSG_LEN(length) -> control message length
Return the total length, without trailing padding, of an ancillary
data item with associated data of the given length. This value can
often be used as the buffer size for recvmsg() to receive a single
item of ancillary data, but RFC 3542 requires portable applications to
use CMSG_SPACE() and thus include space for padding, even when the
item will be the last in the buffer. Raises OverflowError if length
is outside the permissible range of values.
"""
def CMSG_SPACE(length: int, /) -> int:
"""
CMSG_SPACE(length) -> buffer size
Return the buffer size needed for recvmsg() to receive an ancillary
data item with associated data of the given length, along with any
trailing padding. The buffer space needed to receive multiple items
is the sum of the CMSG_SPACE() values for their associated data
lengths. Raises OverflowError if length is outside the permissible
range of values.
"""
def socketpair(family: int = ..., type: int = ..., proto: int = ..., /) -> tuple[socket, socket]:
"""
socketpair([family[, type [, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is
AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
"""
def if_nameindex() -> list[tuple[int, str]]:
"""
if_nameindex()
Returns a list of network interface information (index, name) tuples.
"""
def if_nametoindex(oname: str, /) -> int:
"""
Returns the interface index corresponding to the interface name if_name.
"""
if sys.version_info >= (3, 14):
def if_indextoname(if_index: int, /) -> str: ...
def if_indextoname(if_index: int, /) -> str:
"""
Returns the interface name corresponding to the interface index if_index.
"""
else:
def if_indextoname(index: int, /) -> str: ...
def if_indextoname(index: int, /) -> str:
"""
if_indextoname(if_index)
Returns the interface name corresponding to the interface index if_index.
"""
CAPI: CapsuleType


@@ -214,10 +214,17 @@ if sys.version_info >= (3, 11):
# Can take or return anything depending on what's in the registry.
@overload
def adapt(obj: Any, proto: Any, /) -> Any: ...
def adapt(obj: Any, proto: Any, /) -> Any:
"""
Adapt given object to given protocol.
"""
@overload
def adapt(obj: Any, proto: Any, alt: _T, /) -> Any | _T: ...
def complete_statement(statement: str) -> bool: ...
def complete_statement(statement: str) -> bool:
"""
Checks if a string contains a complete SQL statement.
"""
if sys.version_info >= (3, 12):
@overload
@@ -231,7 +238,19 @@ if sys.version_info >= (3, 12):
uri: bool = False,
*,
autocommit: bool = ...,
) -> Connection: ...
) -> Connection:
"""
Open a connection to the SQLite database file 'database'.
You can use ":memory:" to open a database connection to a database that
resides in RAM instead of on disk.
Note: Passing more than 1 positional argument to _sqlite3.connect() is
deprecated. Parameters 'timeout', 'detect_types', 'isolation_level',
'check_same_thread', 'factory', 'cached_statements' and 'uri' will
become keyword-only parameters in Python 3.15.
"""
@overload
def connect(
database: StrOrBytesPath,
@@ -269,7 +288,14 @@ else:
check_same_thread: bool = True,
cached_statements: int = 128,
uri: bool = False,
) -> Connection: ...
) -> Connection:
"""
Opens a connection to the SQLite database file database.
You can use ":memory:" to open a database connection to a database that resides
in RAM instead of on disk.
"""
@overload
def connect(
database: StrOrBytesPath,
@@ -294,19 +320,48 @@ else:
uri: bool = False,
) -> _ConnectionT: ...
def enable_callback_tracebacks(enable: bool, /) -> None: ...
def enable_callback_tracebacks(enable: bool, /) -> None:
"""
Enable or disable callback functions throwing errors to stderr.
"""
if sys.version_info < (3, 12):
# takes a pos-or-keyword argument because there is a C wrapper
def enable_shared_cache(do_enable: int) -> None: ...
def enable_shared_cache(do_enable: int) -> None:
"""
Enable or disable shared cache mode for the calling thread.
This method is deprecated and will be removed in Python 3.12.
Shared cache is strongly discouraged by the SQLite 3 documentation.
If shared cache must be used, open the database in URI mode using
the cache=shared query parameter.
"""
if sys.version_info >= (3, 10):
def register_adapter(type: type[_T], adapter: _Adapter[_T], /) -> None: ...
def register_converter(typename: str, converter: _Converter, /) -> None: ...
def register_adapter(type: type[_T], adapter: _Adapter[_T], /) -> None:
"""
Register a function to adapt Python objects to SQLite values.
"""
def register_converter(typename: str, converter: _Converter, /) -> None:
"""
Register a function to convert SQLite values to Python objects.
"""
else:
def register_adapter(type: type[_T], caster: _Adapter[_T], /) -> None: ...
def register_converter(name: str, converter: _Converter, /) -> None: ...
def register_adapter(type: type[_T], caster: _Adapter[_T], /) -> None:
"""
register_adapter(type, callable)
Registers an adapter with sqlite3's adapter registry.
"""
def register_converter(name: str, converter: _Converter, /) -> None:
"""
register_converter(typename, callable)
Registers a converter with sqlite3.
"""
if sys.version_info < (3, 10):
OptimizedUnicode = str


@@ -1,3 +1,8 @@
"""
Implementation module for SSL socket operations. See the socket module
for documentation.
"""
import sys
from _typeshed import ReadableBuffer, StrOrBytesPath
from collections.abc import Callable
@@ -45,25 +50,63 @@ class _CertInfo(TypedDict):
caIssuers: NotRequired[tuple[str, ...] | None]
crlDistributionPoints: NotRequired[tuple[str, ...] | None]
def RAND_add(string: str | ReadableBuffer, entropy: float, /) -> None: ...
def RAND_bytes(n: int, /) -> bytes: ...
def RAND_add(string: str | ReadableBuffer, entropy: float, /) -> None:
"""
Mix string into the OpenSSL PRNG state.
entropy (a float) is a lower bound on the entropy contained in
string. See RFC 4086.
"""
def RAND_bytes(n: int, /) -> bytes:
"""
Generate n cryptographically strong pseudo-random bytes.
"""
if sys.version_info < (3, 12):
def RAND_pseudo_bytes(n: int, /) -> tuple[bytes, bool]: ...
def RAND_pseudo_bytes(n: int, /) -> tuple[bytes, bool]:
"""
Generate n pseudo-random bytes.
Return a pair (bytes, is_cryptographic). is_cryptographic is True
if the bytes generated are cryptographically strong.
"""
if sys.version_info < (3, 10):
def RAND_egd(path: str) -> None: ...
def RAND_status() -> bool: ...
def get_default_verify_paths() -> tuple[str, str, str, str]: ...
def RAND_status() -> bool:
"""
Returns True if the OpenSSL PRNG has been seeded with enough data and False if not.
It is necessary to seed the PRNG with RAND_add() on some platforms before
using the ssl() function.
"""
def get_default_verify_paths() -> tuple[str, str, str, str]:
"""
Return search paths and environment vars that are used by SSLContext's set_default_verify_paths() to load default CAs.
The values are 'cert_file_env', 'cert_file', 'cert_dir_env', 'cert_dir'.
"""
if sys.platform == "win32":
_EnumRetType: TypeAlias = list[tuple[bytes, str, set[str] | bool]]
def enum_certificates(store_name: str) -> _EnumRetType: ...
def enum_crls(store_name: str) -> _EnumRetType: ...
def txt2obj(txt: str, name: bool = False) -> tuple[int, str, str, str]: ...
def nid2obj(nid: int, /) -> tuple[int, str, str, str]: ...
def txt2obj(txt: str, name: bool = False) -> tuple[int, str, str, str]:
"""
Lookup NID, short name, long name and OID of an ASN1_OBJECT.
By default objects are looked up by OID. With name=True short and
long name are also matched.
"""
def nid2obj(nid: int, /) -> tuple[int, str, str, str]:
"""
Lookup NID, short name, long name and OID of an ASN1_OBJECT by NID.
"""
class _SSLContext:
check_hostname: bool
@@ -80,9 +123,29 @@ class _SSLContext:
verify_flags: int
verify_mode: int
def __new__(cls, protocol: int, /) -> Self: ...
def cert_store_stats(self) -> dict[str, int]: ...
def cert_store_stats(self) -> dict[str, int]:
"""
Returns quantities of loaded X.509 certificates.
X.509 certificates with a CA extension and certificate revocation lists
inside the context's cert store.
NOTE: Certificates in a capath directory aren't loaded unless they have
been used at least once.
"""
@overload
def get_ca_certs(self, binary_form: Literal[False] = False) -> list[_PeerCertRetDictType]: ...
def get_ca_certs(self, binary_form: Literal[False] = False) -> list[_PeerCertRetDictType]:
"""
Returns a list of dicts with information of loaded CA certs.
If the optional argument is True, returns a DER-encoded copy of the CA
certificate.
NOTE: Certificates in a capath directory aren't loaded unless they have
been used at least once.
"""
@overload
def get_ca_certs(self, binary_form: Literal[True]) -> list[bytes]: ...
@overload
@@ -113,23 +176,62 @@ class MemoryBIO:
eof: bool
pending: int
def __new__(self) -> Self: ...
def read(self, size: int = -1, /) -> bytes: ...
def write(self, b: ReadableBuffer, /) -> int: ...
def write_eof(self) -> None: ...
def read(self, size: int = -1, /) -> bytes:
"""
Read up to size bytes from the memory BIO.
If size is not specified, read the entire buffer.
If the return value is an empty bytes instance, this means either
EOF or that no data is available. Use the "eof" property to
distinguish between the two.
"""
def write(self, b: ReadableBuffer, /) -> int:
"""
Writes the bytes b into the memory BIO.
Returns the number of bytes written.
"""
def write_eof(self) -> None:
"""
Write an EOF marker to the memory BIO.
When all data has been read, the "eof" property will be True.
"""
@final
class SSLSession:
__hash__: ClassVar[None] # type: ignore[assignment]
@property
def has_ticket(self) -> bool: ...
def has_ticket(self) -> bool:
"""
Does the session contain a ticket?
"""
@property
def id(self) -> bytes: ...
def id(self) -> bytes:
"""
Session ID.
"""
@property
def ticket_lifetime_hint(self) -> int: ...
def ticket_lifetime_hint(self) -> int:
"""
Ticket life time hint.
"""
@property
def time(self) -> int: ...
def time(self) -> int:
"""
Session creation time (seconds since epoch).
"""
@property
def timeout(self) -> int: ...
def timeout(self) -> int:
"""
Session timeout (delta in seconds).
"""
# _ssl.Certificate is weird: it can't be instantiated or subclassed.
# Instances can only be created via methods of the private _ssl._SSLSocket class,


@@ -1,3 +1,75 @@
"""
S_IFMT_: file type bits
S_IFDIR: directory
S_IFCHR: character device
S_IFBLK: block device
S_IFREG: regular file
S_IFIFO: fifo (named pipe)
S_IFLNK: symbolic link
S_IFSOCK: socket file
S_IFDOOR: door
S_IFPORT: event port
S_IFWHT: whiteout
S_ISUID: set UID bit
S_ISGID: set GID bit
S_ENFMT: file locking enforcement
S_ISVTX: sticky bit
S_IREAD: Unix V7 synonym for S_IRUSR
S_IWRITE: Unix V7 synonym for S_IWUSR
S_IEXEC: Unix V7 synonym for S_IXUSR
S_IRWXU: mask for owner permissions
S_IRUSR: read by owner
S_IWUSR: write by owner
S_IXUSR: execute by owner
S_IRWXG: mask for group permissions
S_IRGRP: read by group
S_IWGRP: write by group
S_IXGRP: execute by group
S_IRWXO: mask for others (not in group) permissions
S_IROTH: read by others
S_IWOTH: write by others
S_IXOTH: execute by others
UF_SETTABLE: mask of owner changeable flags
UF_NODUMP: do not dump file
UF_IMMUTABLE: file may not be changed
UF_APPEND: file may only be appended to
UF_OPAQUE: directory is opaque when viewed through a union stack
UF_NOUNLINK: file may not be renamed or deleted
UF_COMPRESSED: macOS: file is hfs-compressed
UF_TRACKED: used for dealing with document IDs
UF_DATAVAULT: entitlement required for reading and writing
UF_HIDDEN: macOS: file should not be displayed
SF_SETTABLE: mask of super user changeable flags
SF_ARCHIVED: file may be archived
SF_IMMUTABLE: file may not be changed
SF_APPEND: file may only be appended to
SF_RESTRICTED: entitlement required for writing
SF_NOUNLINK: file may not be renamed or deleted
SF_SNAPSHOT: file is a snapshot file
SF_FIRMLINK: file is a firmlink
SF_DATALESS: file is a dataless object
On macOS:
SF_SUPPORTED: mask of super user supported flags
SF_SYNTHETIC: mask of read-only synthetic flags
ST_MODE
ST_INO
ST_DEV
ST_NLINK
ST_UID
ST_GID
ST_SIZE
ST_ATIME
ST_MTIME
ST_CTIME
FILE_ATTRIBUTE_*: Windows file attribute constants
(only present on Windows)
"""
import sys
from typing import Final
@@ -64,19 +136,90 @@ UF_NODUMP: Final = 0x00000001
UF_NOUNLINK: Final = 0x00000010
UF_OPAQUE: Final = 0x00000008
def S_IMODE(mode: int, /) -> int: ...
def S_IFMT(mode: int, /) -> int: ...
def S_ISBLK(mode: int, /) -> bool: ...
def S_ISCHR(mode: int, /) -> bool: ...
def S_ISDIR(mode: int, /) -> bool: ...
def S_ISDOOR(mode: int, /) -> bool: ...
def S_ISFIFO(mode: int, /) -> bool: ...
def S_ISLNK(mode: int, /) -> bool: ...
def S_ISPORT(mode: int, /) -> bool: ...
def S_ISREG(mode: int, /) -> bool: ...
def S_ISSOCK(mode: int, /) -> bool: ...
def S_ISWHT(mode: int, /) -> bool: ...
def filemode(mode: int, /) -> str: ...
def S_IMODE(mode: int, /) -> int:
"""
Return the portion of the file's mode that can be set by os.chmod().
"""
def S_IFMT(mode: int, /) -> int:
"""
Return the portion of the file's mode that describes the file type.
"""
def S_ISBLK(mode: int, /) -> bool:
"""
S_ISBLK(mode) -> bool
Return True if mode is from a block special device file.
"""
def S_ISCHR(mode: int, /) -> bool:
"""
S_ISCHR(mode) -> bool
Return True if mode is from a character special device file.
"""
def S_ISDIR(mode: int, /) -> bool:
"""
S_ISDIR(mode) -> bool
Return True if mode is from a directory.
"""
def S_ISDOOR(mode: int, /) -> bool:
"""
S_ISDOOR(mode) -> bool
Return True if mode is from a door.
"""
def S_ISFIFO(mode: int, /) -> bool:
"""
S_ISFIFO(mode) -> bool
Return True if mode is from a FIFO (named pipe).
"""
def S_ISLNK(mode: int, /) -> bool:
"""
S_ISLNK(mode) -> bool
Return True if mode is from a symbolic link.
"""
def S_ISPORT(mode: int, /) -> bool:
"""
S_ISPORT(mode) -> bool
Return True if mode is from an event port.
"""
def S_ISREG(mode: int, /) -> bool:
"""
S_ISREG(mode) -> bool
Return True if mode is from a regular file.
"""
def S_ISSOCK(mode: int, /) -> bool:
"""
S_ISSOCK(mode) -> bool
Return True if mode is from a socket.
"""
def S_ISWHT(mode: int, /) -> bool:
"""
S_ISWHT(mode) -> bool
Return True if mode is from a whiteout.
"""
def filemode(mode: int, /) -> str:
"""
Convert a file's mode to a string of the form '-rwxrwxrwx'
"""
if sys.platform == "win32":
IO_REPARSE_TAG_SYMLINK: Final = 0xA000000C
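
For illustration, a minimal sketch of how the predicates stubbed above are used (the path "." is arbitrary):

import os
import stat

st = os.stat(".")
if stat.S_ISDIR(st.st_mode):
    # filemode() renders the bits in ls style, e.g. 'drwxr-xr-x'
    print("directory:", stat.filemode(st.st_mode))
print("type bits:", hex(stat.S_IFMT(st.st_mode)))
print("permission bits:", oct(stat.S_IMODE(st.st_mode)))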


@@ -1,22 +1,154 @@
"""
Functions to convert between Python values and C structs.
Python bytes objects are used to hold the data representing the C struct
and also as format strings (explained below) to describe the layout of data
in the C struct.
The optional first format char indicates byte order, size and alignment:
@: native order, size & alignment (default)
=: native order, std. size & alignment
<: little-endian, std. size & alignment
>: big-endian, std. size & alignment
!: same as >
The remaining chars indicate types of args and must match exactly;
these can be preceded by a decimal repeat count:
x: pad byte (no data); c:char; b:signed byte; B:unsigned byte;
?: _Bool (requires C99; if not available, char is used instead)
h:short; H:unsigned short; i:int; I:unsigned int;
l:long; L:unsigned long; f:float; d:double; e:half-float.
Special cases (preceding decimal count indicates length):
s:string (array of char); p: pascal string (with count byte).
Special cases (only available in native format):
n:ssize_t; N:size_t;
P:an integer type that is wide enough to hold a pointer.
Special case (not in native mode unless 'long long' in platform C):
q:long long; Q:unsigned long long
Whitespace between formats is ignored.
The variable struct.error is an exception raised on errors.
"""
from _typeshed import ReadableBuffer, WriteableBuffer
from collections.abc import Iterator
from typing import Any
def pack(fmt: str | bytes, /, *v: Any) -> bytes: ...
def pack_into(fmt: str | bytes, buffer: WriteableBuffer, offset: int, /, *v: Any) -> None: ...
def unpack(format: str | bytes, buffer: ReadableBuffer, /) -> tuple[Any, ...]: ...
def unpack_from(format: str | bytes, /, buffer: ReadableBuffer, offset: int = 0) -> tuple[Any, ...]: ...
def iter_unpack(format: str | bytes, buffer: ReadableBuffer, /) -> Iterator[tuple[Any, ...]]: ...
def calcsize(format: str | bytes, /) -> int: ...
def pack(fmt: str | bytes, /, *v: Any) -> bytes:
"""
pack(format, v1, v2, ...) -> bytes
Return a bytes object containing the values v1, v2, ... packed according
to the format string. See help(struct) for more on format strings.
"""
def pack_into(fmt: str | bytes, buffer: WriteableBuffer, offset: int, /, *v: Any) -> None:
"""
pack_into(format, buffer, offset, v1, v2, ...)
Pack the values v1, v2, ... according to the format string and write
the packed bytes into the writable buffer buf starting at offset. Note
that the offset is a required argument. See help(struct) for more
on format strings.
"""
def unpack(format: str | bytes, buffer: ReadableBuffer, /) -> tuple[Any, ...]:
"""
Return a tuple containing values unpacked according to the format string.
The buffer's size in bytes must be calcsize(format).
See help(struct) for more on format strings.
"""
def unpack_from(format: str | bytes, /, buffer: ReadableBuffer, offset: int = 0) -> tuple[Any, ...]:
"""
Return a tuple containing values unpacked according to the format string.
The buffer's size, minus offset, must be at least calcsize(format).
See help(struct) for more on format strings.
"""
def iter_unpack(format: str | bytes, buffer: ReadableBuffer, /) -> Iterator[tuple[Any, ...]]:
"""
Return an iterator yielding tuples unpacked from the given bytes.
The bytes are unpacked according to the format string, like
a repeated invocation of unpack_from().
Requires that the bytes length be a multiple of the format struct size.
"""
def calcsize(format: str | bytes, /) -> int:
"""
Return size in bytes of the struct described by the format string.
"""
class Struct:
"""
Struct(fmt) --> compiled struct object
"""
@property
def format(self) -> str: ...
def format(self) -> str:
"""
struct format string
"""
@property
def size(self) -> int: ...
def size(self) -> int:
"""
struct size in bytes
"""
def __init__(self, format: str | bytes) -> None: ...
def pack(self, *v: Any) -> bytes: ...
def pack_into(self, buffer: WriteableBuffer, offset: int, *v: Any) -> None: ...
def unpack(self, buffer: ReadableBuffer, /) -> tuple[Any, ...]: ...
def unpack_from(self, buffer: ReadableBuffer, offset: int = 0) -> tuple[Any, ...]: ...
def iter_unpack(self, buffer: ReadableBuffer, /) -> Iterator[tuple[Any, ...]]: ...
def pack(self, *v: Any) -> bytes:
"""
S.pack(v1, v2, ...) -> bytes
Return a bytes object containing values v1, v2, ... packed according
to the format string S.format. See help(struct) for more on format
strings.
"""
def pack_into(self, buffer: WriteableBuffer, offset: int, *v: Any) -> None:
"""
S.pack_into(buffer, offset, v1, v2, ...)
Pack the values v1, v2, ... according to the format string S.format
and write the packed bytes into the writable buffer buf starting at
offset. Note that the offset is a required argument. See
help(struct) for more on format strings.
"""
def unpack(self, buffer: ReadableBuffer, /) -> tuple[Any, ...]:
"""
Return a tuple containing unpacked values.
Unpack according to the format string Struct.format. The buffer's size
in bytes must be Struct.size.
See help(struct) for more on format strings.
"""
def unpack_from(self, buffer: ReadableBuffer, offset: int = 0) -> tuple[Any, ...]:
"""
Return a tuple containing unpacked values.
Values are unpacked according to the format string Struct.format.
The buffer's size in bytes, starting at position offset, must be
at least Struct.size.
See help(struct) for more on format strings.
"""
def iter_unpack(self, buffer: ReadableBuffer, /) -> Iterator[tuple[Any, ...]]:
"""
Return an iterator yielding tuples.
Tuples are unpacked from the given bytes source, like a repeated
invocation of unpack_from().
Requires that the bytes length be a multiple of the struct size.
"""


@@ -1,3 +1,8 @@
"""
This module provides primitive operations to write multi-threaded programs.
The 'threading' module provides a more convenient interface.
"""
import signal
import sys
from _typeshed import structseq
@@ -11,15 +16,58 @@ _Ts = TypeVarTuple("_Ts")
error = RuntimeError
def _count() -> int: ...
def _count() -> int:
"""
Return the number of currently running Python threads, excluding
the main thread. The returned number comprises all threads created
through `start_new_thread()` as well as `threading.Thread`, and not
yet finished.
This function is meant for internal and specialized purposes only.
In most applications `threading.enumerate()` should be used instead.
"""
@final
class RLock:
def acquire(self, blocking: bool = True, timeout: float = -1) -> bool: ...
def release(self) -> None: ...
def acquire(self, blocking: bool = True, timeout: float = -1) -> bool:
"""
Lock the lock. `blocking` indicates whether we should wait
for the lock to be available or not. If `blocking` is False
and another thread holds the lock, the method will return False
immediately. If `blocking` is True and another thread holds
the lock, the method will wait for the lock to be released,
take it and then return True.
(note: the blocking operation is interruptible.)
In all other cases, the method will return True immediately.
Precisely, if the current thread already holds the lock, its
internal counter is simply incremented. If nobody holds the lock,
the lock is taken and its internal counter initialized to 1.
"""
def release(self) -> None:
"""
Release the lock, allowing another thread that is blocked waiting for
the lock to acquire the lock. The lock must be in the locked state,
and must be locked by the same thread that unlocks it; otherwise a
`RuntimeError` is raised.
Do note that if the lock was acquire()d several times in a row by the
current thread, release() needs to be called as many times for the lock
to be available for other threads.
"""
__enter__ = acquire
def __exit__(self, t: type[BaseException] | None, v: BaseException | None, tb: TracebackType | None) -> None: ...
def __exit__(self, t: type[BaseException] | None, v: BaseException | None, tb: TracebackType | None) -> None:
"""
Release the lock.
"""
if sys.version_info >= (3, 14):
def locked(self) -> bool: ...
def locked(self) -> bool:
"""
locked()
Return a boolean indicating whether this object is locked right now.
"""
if sys.version_info >= (3, 13):
@final
@@ -32,85 +80,350 @@ if sys.version_info >= (3, 13):
def start_joinable_thread(
function: Callable[[], object], handle: _ThreadHandle | None = None, daemon: bool = True
) -> _ThreadHandle: ...
) -> _ThreadHandle:
"""
*For internal use only*: start a new thread.
Like start_new_thread(), this starts a new thread calling the given function.
Unlike start_new_thread(), this returns a handle object with methods to join
or detach the given thread.
This function is not for third-party code, please use the
`threading` module instead. During finalization the runtime will not wait for
the thread to exit if daemon is True. If handle is provided it must be a
newly created thread._ThreadHandle instance.
"""
@final
class lock:
def acquire(self, blocking: bool = True, timeout: float = -1) -> bool: ...
def release(self) -> None: ...
def locked(self) -> bool: ...
def acquire_lock(self, blocking: bool = True, timeout: float = -1) -> bool: ...
def release_lock(self) -> None: ...
def locked_lock(self) -> bool: ...
def __enter__(self) -> bool: ...
"""
A lock object is a synchronization primitive. To create a lock,
call threading.Lock(). Methods are:
acquire() -- lock the lock, possibly blocking until it can be obtained
release() -- unlock of the lock
locked() -- test whether the lock is currently locked
A lock is not owned by the thread that locked it; another thread may
unlock it. A thread attempting to lock a lock that it has already locked
will block until another thread unlocks it. Deadlocks may ensue.
"""
def acquire(self, blocking: bool = True, timeout: float = -1) -> bool:
"""
Lock the lock. Without argument, this blocks if the lock is already
locked (even by the same thread), waiting for another thread to release
the lock, and return True once the lock is acquired.
With an argument, this will only block if the argument is true,
and the return value reflects whether the lock is acquired.
The blocking operation is interruptible.
"""
def release(self) -> None:
"""
Release the lock, allowing another thread that is blocked waiting for
the lock to acquire the lock. The lock must be in the locked state,
but it needn't be locked by the same thread that unlocks it.
"""
def locked(self) -> bool:
"""
Return whether the lock is in the locked state.
"""
def acquire_lock(self, blocking: bool = True, timeout: float = -1) -> bool:
"""
An obsolete synonym of acquire().
"""
def release_lock(self) -> None:
"""
An obsolete synonym of release().
"""
def locked_lock(self) -> bool:
"""
An obsolete synonym of locked().
"""
def __enter__(self) -> bool:
"""
Lock the lock.
"""
def __exit__(
self, type: type[BaseException] | None, value: BaseException | None, traceback: TracebackType | None
) -> None: ...
) -> None:
"""
Release the lock.
"""
LockType = lock
else:
@final
class LockType:
def acquire(self, blocking: bool = True, timeout: float = -1) -> bool: ...
def release(self) -> None: ...
def locked(self) -> bool: ...
def acquire_lock(self, blocking: bool = True, timeout: float = -1) -> bool: ...
def release_lock(self) -> None: ...
def locked_lock(self) -> bool: ...
def __enter__(self) -> bool: ...
"""
A lock object is a synchronization primitive. To create a lock,
call threading.Lock(). Methods are:
acquire() -- lock the lock, possibly blocking until it can be obtained
release() -- unlock of the lock
locked() -- test whether the lock is currently locked
A lock is not owned by the thread that locked it; another thread may
unlock it. A thread attempting to lock a lock that it has already locked
will block until another thread unlocks it. Deadlocks may ensue.
"""
def acquire(self, blocking: bool = True, timeout: float = -1) -> bool:
"""
acquire(blocking=True, timeout=-1) -> bool
(acquire_lock() is an obsolete synonym)
Lock the lock. Without argument, this blocks if the lock is already
locked (even by the same thread), waiting for another thread to release
the lock, and return True once the lock is acquired.
With an argument, this will only block if the argument is true,
and the return value reflects whether the lock is acquired.
The blocking operation is interruptible.
"""
def release(self) -> None:
"""
release()
(release_lock() is an obsolete synonym)
Release the lock, allowing another thread that is blocked waiting for
the lock to acquire the lock. The lock must be in the locked state,
but it needn't be locked by the same thread that unlocks it.
"""
def locked(self) -> bool:
"""
locked() -> bool
(locked_lock() is an obsolete synonym)
Return whether the lock is in the locked state.
"""
def acquire_lock(self, blocking: bool = True, timeout: float = -1) -> bool:
"""
acquire(blocking=True, timeout=-1) -> bool
(acquire_lock() is an obsolete synonym)
Lock the lock. Without argument, this blocks if the lock is already
locked (even by the same thread), waiting for another thread to release
the lock, and return True once the lock is acquired.
With an argument, this will only block if the argument is true,
and the return value reflects whether the lock is acquired.
The blocking operation is interruptible.
"""
def release_lock(self) -> None:
"""
release()
(release_lock() is an obsolete synonym)
Release the lock, allowing another thread that is blocked waiting for
the lock to acquire the lock. The lock must be in the locked state,
but it needn't be locked by the same thread that unlocks it.
"""
def locked_lock(self) -> bool:
"""
locked() -> bool
(locked_lock() is an obsolete synonym)
Return whether the lock is in the locked state.
"""
def __enter__(self) -> bool:
"""
acquire(blocking=True, timeout=-1) -> bool
(acquire_lock() is an obsolete synonym)
Lock the lock. Without argument, this blocks if the lock is already
locked (even by the same thread), waiting for another thread to release
the lock, and return True once the lock is acquired.
With an argument, this will only block if the argument is true,
and the return value reflects whether the lock is acquired.
The blocking operation is interruptible.
"""
def __exit__(
self, type: type[BaseException] | None, value: BaseException | None, traceback: TracebackType | None
) -> None: ...
) -> None:
"""
release()
(release_lock() is an obsolete synonym)
Release the lock, allowing another thread that is blocked waiting for
the lock to acquire the lock. The lock must be in the locked state,
but it needn't be locked by the same thread that unlocks it.
"""
@overload
def start_new_thread(function: Callable[[Unpack[_Ts]], object], args: tuple[Unpack[_Ts]], /) -> int: ...
def start_new_thread(function: Callable[[Unpack[_Ts]], object], args: tuple[Unpack[_Ts]], /) -> int:
"""
Start a new thread and return its identifier.
The thread will call the function with positional arguments from the
tuple args and keyword arguments taken from the optional dictionary
kwargs. The thread exits when the function returns; the return value
is ignored. The thread will also exit when the function raises an
unhandled exception; a stack trace will be printed unless the exception
is SystemExit.
"""
@overload
def start_new_thread(function: Callable[..., object], args: tuple[Any, ...], kwargs: dict[str, Any], /) -> int: ...
# Obsolete synonym for start_new_thread()
@overload
def start_new(function: Callable[[Unpack[_Ts]], object], args: tuple[Unpack[_Ts]], /) -> int: ...
def start_new(function: Callable[[Unpack[_Ts]], object], args: tuple[Unpack[_Ts]], /) -> int:
"""
An obsolete synonym of start_new_thread().
"""
@overload
def start_new(function: Callable[..., object], args: tuple[Any, ...], kwargs: dict[str, Any], /) -> int: ...
if sys.version_info >= (3, 10):
def interrupt_main(signum: signal.Signals = ..., /) -> None: ...
def interrupt_main(signum: signal.Signals = ..., /) -> None:
"""
Simulate the arrival of the given signal in the main thread,
where the corresponding signal handler will be executed.
If *signum* is omitted, SIGINT is assumed.
A subthread can use this function to interrupt the main thread.
Note: the default signal handler for SIGINT raises ``KeyboardInterrupt``.
"""
else:
def interrupt_main() -> None: ...
def exit() -> NoReturn: ...
def exit_thread() -> NoReturn: ... # Obsolete synonym for exit()
def allocate_lock() -> LockType: ...
def allocate() -> LockType: ... # Obsolete synonym for allocate_lock()
def get_ident() -> int: ...
def stack_size(size: int = 0, /) -> int: ...
def interrupt_main() -> None:
"""
interrupt_main()
Raise a KeyboardInterrupt in the main thread.
A subthread can use this function to interrupt the main thread.
"""
def exit() -> NoReturn:
"""
This is synonymous to ``raise SystemExit''. It will cause the current
thread to exit silently unless the exception is caught.
"""
def exit_thread() -> NoReturn: # Obsolete synonym for exit()
"""
An obsolete synonym of exit().
"""
def allocate_lock() -> LockType:
"""
Create a new lock object. See help(type(threading.Lock())) for
information about locks.
"""
def allocate() -> LockType: # Obsolete synonym for allocate_lock()
"""
An obsolete synonym of allocate_lock().
"""
def get_ident() -> int:
"""
Return a non-zero integer that uniquely identifies the current thread
amongst other threads that exist simultaneously.
This may be used to identify per-thread resources.
Even though on some platforms threads identities may appear to be
allocated consecutive numbers starting at 1, this behavior should not
be relied upon, and the number should be seen purely as a magic cookie.
A thread's identity may be reused for another thread after it exits.
"""
def stack_size(size: int = 0, /) -> int:
"""
Return the thread stack size used when creating new threads. The
optional size argument specifies the stack size (in bytes) to be used
for subsequently created threads, and must be 0 (use platform or
configured default) or a positive integer value of at least 32,768 (32k).
If changing the thread stack size is unsupported, a ThreadError
exception is raised. If the specified size is invalid, a ValueError
exception is raised, and the stack size is unmodified. 32k bytes is
currently the minimum supported stack size value to guarantee
sufficient stack space for the interpreter itself.
Note that some platforms may have particular restrictions on values for
the stack size, such as requiring a minimum stack size larger than 32 KiB or
requiring allocation in multiples of the system memory page size
- platform documentation should be referred to for more information
(4 KiB pages are common; using multiples of 4096 for the stack size is
the suggested approach in the absence of more specific information).
"""
TIMEOUT_MAX: float
def get_native_id() -> int: ... # only available on some platforms
def get_native_id() -> int: # only available on some platforms
"""
Return a non-negative integer identifying the thread as reported
by the OS (kernel). This may be used to uniquely identify a
particular thread within a system.
"""
@final
class _ExceptHookArgs(structseq[Any], tuple[type[BaseException], BaseException | None, TracebackType | None, Thread | None]):
"""
ExceptHookArgs
Type used to pass arguments to threading.excepthook.
"""
if sys.version_info >= (3, 10):
__match_args__: Final = ("exc_type", "exc_value", "exc_traceback", "thread")
@property
def exc_type(self) -> type[BaseException]: ...
def exc_type(self) -> type[BaseException]:
"""
Exception type
"""
@property
def exc_value(self) -> BaseException | None: ...
def exc_value(self) -> BaseException | None:
"""
Exception value
"""
@property
def exc_traceback(self) -> TracebackType | None: ...
def exc_traceback(self) -> TracebackType | None:
"""
Exception traceback
"""
@property
def thread(self) -> Thread | None: ...
def thread(self) -> Thread | None:
"""
Thread
"""
_excepthook: Callable[[_ExceptHookArgs], Any]
if sys.version_info >= (3, 12):
def daemon_threads_allowed() -> bool: ...
def daemon_threads_allowed() -> bool:
"""
Return True if daemon threads are allowed in the current interpreter,
and False otherwise.
"""
if sys.version_info >= (3, 14):
def set_name(name: str) -> None: ...
def set_name(name: str) -> None:
"""
Set the name of the current thread.
"""
class _local:
"""
Thread-local data
"""
def __getattribute__(self, name: str, /) -> Any: ...
def __setattr__(self, name: str, value: Any, /) -> None: ...
def __delattr__(self, name: str, /) -> None: ...
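
Since _thread is internal, a minimal sketch of the lock semantics documented above, via the public threading wrappers built on these primitives:

import threading

lock = threading.Lock()      # backed by _thread.allocate_lock()
with lock:                   # __enter__ acquires, __exit__ releases
    pass

rlock = threading.RLock()    # re-entrant variant
with rlock:
    with rlock:              # same thread: internal counter bumped, no deadlock
        pass

t = threading.Thread(target=print, args=("worker",))
t.start()
t.join()
print(threading.get_ident())  # the "magic cookie" thread id described above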


@@ -1,3 +1,12 @@
"""
Thread-local objects.
(Note that this module provides a Python version of the threading.local
class. Depending on the version of Python you're using, there may be a
faster one available. You should always import the `local` class from
`threading`.)
"""
from threading import RLock
from typing import Any
from typing_extensions import Self, TypeAlias
@@ -7,13 +16,25 @@ __all__ = ["local"]
_LocalDict: TypeAlias = dict[Any, Any]
class _localimpl:
"""
A class managing thread-local dicts
"""
key: str
dicts: dict[int, tuple[ReferenceType[Any], _LocalDict]]
# Keep localargs in sync with the *args, **kwargs annotation on local.__new__
localargs: tuple[list[Any], dict[str, Any]]
locallock: RLock
def get_dict(self) -> _LocalDict: ...
def create_dict(self) -> _LocalDict: ...
def get_dict(self) -> _LocalDict:
"""
Return the dict for the current thread. Raises KeyError if none
defined.
"""
def create_dict(self) -> _LocalDict:
"""
Create a new dict for the current thread, and return it.
"""
class local:
def __new__(cls, /, *args: Any, **kw: Any) -> Self: ...
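
A short sketch of the behaviour _localimpl provides (each thread sees its own attribute namespace on the same local object):

import threading

ctx = threading.local()
ctx.value = "main"

def worker() -> None:
    ctx.value = "worker"     # stored in this thread's private dict
    assert ctx.value == "worker"

t = threading.Thread(target=worker)
t.start()
t.join()
assert ctx.value == "main"   # unchanged by the worker thread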


@@ -19,9 +19,16 @@ from typing_extensions import TypeAlias
@final
class Tcl_Obj:
@property
def string(self) -> str: ...
def string(self) -> str:
"""
the string representation of this object, either as str or bytes
"""
@property
def typename(self) -> str: ...
def typename(self) -> str:
"""
name of the Tcl type
"""
__hash__: ClassVar[None] # type: ignore[assignment]
def __eq__(self, value, /): ...
def __ge__(self, value, /): ...
@@ -91,8 +98,15 @@ class TkappType:
def wantobjects(self, *args, **kwargs): ...
def willdispatch(self): ...
if sys.version_info >= (3, 12):
def gettrace(self, /) -> _TkinterTraceFunc | None: ...
def settrace(self, func: _TkinterTraceFunc | None, /) -> None: ...
def gettrace(self, /) -> _TkinterTraceFunc | None:
"""
Get the tracing function.
"""
def settrace(self, func: _TkinterTraceFunc | None, /) -> None:
"""
Set the tracing function.
"""
# These should be kept in sync with tkinter.tix constants, except ALL_EVENTS which doesn't match TCL_ALL_EVENTS
ALL_EVENTS: Final = -3
@@ -124,7 +138,15 @@ if sys.version_info >= (3, 13):
sync: bool = False,
use: str | None = None,
/,
): ...
):
"""
wantTk
if false, then Tk_Init() doesn't get called
sync
if true, then pass -sync to wish
use
if not None, then pass -use to wish
"""
else:
def create(
@@ -137,7 +159,24 @@ else:
sync: bool = False,
use: str | None = None,
/,
): ...
):
"""
wantTk
if false, then Tk_Init() doesn't get called
sync
if true, then pass -sync to wish
use
if not None, then pass -use to wish
"""
def getbusywaitinterval(): ...
def setbusywaitinterval(new_val, /): ...
def getbusywaitinterval():
"""
Return the current busy-wait interval between successive calls to Tcl_DoOneEvent in a threaded Python interpreter.
"""
def setbusywaitinterval(new_val, /):
"""
Set the busy-wait interval in milliseconds between successive calls to Tcl_DoOneEvent in a threaded Python interpreter.
It should be set to a divisor of the maximum time between frames in an animation.
"""


@@ -1,13 +1,79 @@
"""
Debug module to trace memory blocks allocated by Python.
"""
from collections.abc import Sequence
from tracemalloc import _FrameTuple, _TraceTuple
def _get_object_traceback(obj: object, /) -> Sequence[_FrameTuple] | None: ...
def _get_traces() -> Sequence[_TraceTuple]: ...
def clear_traces() -> None: ...
def get_traceback_limit() -> int: ...
def get_traced_memory() -> tuple[int, int]: ...
def get_tracemalloc_memory() -> int: ...
def is_tracing() -> bool: ...
def reset_peak() -> None: ...
def start(nframe: int = 1, /) -> None: ...
def stop() -> None: ...
def _get_object_traceback(obj: object, /) -> Sequence[_FrameTuple] | None:
"""
Get the traceback where the Python object obj was allocated.
Return a tuple of (filename: str, lineno: int) tuples.
Return None if the tracemalloc module is disabled or did not
trace the allocation of the object.
"""
def _get_traces() -> Sequence[_TraceTuple]:
"""
Get traces of all memory blocks allocated by Python.
Return a list of (size: int, traceback: tuple) tuples.
traceback is a tuple of (filename: str, lineno: int) tuples.
Return an empty list if the tracemalloc module is disabled.
"""
def clear_traces() -> None:
"""
Clear traces of memory blocks allocated by Python.
"""
def get_traceback_limit() -> int:
"""
Get the maximum number of frames stored in the traceback of a trace.
By default, a trace of an allocated memory block only stores
the most recent frame: the limit is 1.
"""
def get_traced_memory() -> tuple[int, int]:
"""
Get the current size and peak size of memory blocks traced by tracemalloc.
Returns a tuple: (current: int, peak: int).
"""
def get_tracemalloc_memory() -> int:
"""
Get the memory usage in bytes of the tracemalloc module.
This memory is used internally to trace memory allocations.
"""
def is_tracing() -> bool:
"""
Return True if the tracemalloc module is tracing Python memory allocations.
"""
def reset_peak() -> None:
"""
Set the peak size of memory blocks traced by tracemalloc to the current size.
Do nothing if the tracemalloc module is not tracing memory allocations.
"""
def start(nframe: int = 1, /) -> None:
"""
Start tracing Python memory allocations.
Also set the maximum number of frames stored in the traceback of a
trace to nframe.
"""
def stop() -> None:
"""
Stop tracing Python memory allocations.
Also clear traces of memory blocks allocated by Python.
"""


@@ -1,3 +1,8 @@
"""
_warnings provides basic warning filtering support.
It is a helper module to speed up interpreter start-up.
"""
import sys
from typing import Any, overload
@@ -14,7 +19,24 @@ if sys.version_info >= (3, 12):
source: Any | None = None,
*,
skip_file_prefixes: tuple[str, ...] = (),
) -> None: ...
) -> None:
"""
Issue a warning, or maybe ignore it or raise an exception.
message
Text of the warning message.
category
The Warning category subclass. Defaults to UserWarning.
stacklevel
How far up the call stack to make this warning appear. A value of 2 for
example attributes the warning to the caller of the code calling warn().
source
If supplied, the destroyed object which emitted a ResourceWarning
skip_file_prefixes
An optional tuple of module filename prefixes indicating frames to skip
during stacklevel computations for stack frame attribution.
"""
@overload
def warn(
message: Warning,
@@ -27,7 +49,11 @@ if sys.version_info >= (3, 12):
else:
@overload
def warn(message: str, category: type[Warning] | None = None, stacklevel: int = 1, source: Any | None = None) -> None: ...
def warn(message: str, category: type[Warning] | None = None, stacklevel: int = 1, source: Any | None = None) -> None:
"""
Issue a warning, or maybe ignore it or raise an exception.
"""
@overload
def warn(message: Warning, category: Any = None, stacklevel: int = 1, source: Any | None = None) -> None: ...
@@ -41,7 +67,11 @@ def warn_explicit(
registry: dict[str | tuple[str, type[Warning], int], int] | None = ...,
module_globals: dict[str, Any] | None = ...,
source: Any | None = ...,
) -> None: ...
) -> None:
"""
Issue a warning, or maybe ignore it or raise an exception.
"""
@overload
def warn_explicit(
message: Warning,
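
The parameters documented above surface through the public warnings.warn(); a small sketch (old_api is an illustrative name):

import warnings

def old_api() -> None:
    # stacklevel=2 attributes the warning to old_api's caller
    warnings.warn("old_api() is deprecated", DeprecationWarning, stacklevel=2)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    old_api()
assert caught[0].category is DeprecationWarning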


@@ -1,3 +1,7 @@
"""
Weak-reference support module.
"""
from collections.abc import Callable
from typing import Any, TypeVar, overload
from weakref import CallableProxyType as CallableProxyType, ProxyType as ProxyType, ReferenceType as ReferenceType, ref as ref
@@ -5,11 +9,25 @@ from weakref import CallableProxyType as CallableProxyType, ProxyType as ProxyTy
_C = TypeVar("_C", bound=Callable[..., Any])
_T = TypeVar("_T")
def getweakrefcount(object: Any, /) -> int: ...
def getweakrefs(object: Any, /) -> list[Any]: ...
def getweakrefcount(object: Any, /) -> int:
"""
Return the number of weak references to 'object'.
"""
def getweakrefs(object: Any, /) -> list[Any]:
"""
Return a list of all weak reference objects pointing to 'object'.
"""
# Return CallableProxyType if object is callable, ProxyType otherwise
@overload
def proxy(object: _C, callback: Callable[[_C], Any] | None = None, /) -> CallableProxyType[_C]: ...
def proxy(object: _C, callback: Callable[[_C], Any] | None = None, /) -> CallableProxyType[_C]:
"""
Create a proxy object that weakly references 'object'.
'callback', if given, is called with a reference to the
proxy when 'object' is about to be finalized.
"""
@overload
def proxy(object: _T, callback: Callable[[_T], Any] | None = None, /) -> Any: ...
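
A sketch of the ref/proxy distinction documented above (Node is an illustrative class; immediate collection assumes CPython's reference counting):

import weakref

class Node:
    pass

n = Node()
r = weakref.ref(n)    # call r() to recover the object, or None once it is gone
p = weakref.proxy(n)  # behaves like the object itself
assert weakref.getweakrefcount(n) == 2
assert r() is n

del n                 # drop the last strong reference
assert r() is None    # collected immediately under CPython
# touching p now would raise ReferenceError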


@@ -45,4 +45,9 @@ class WeakSet(MutableSet[_T]):
def union(self, other: Iterable[_S]) -> WeakSet[_S | _T]: ...
def __or__(self, other: Iterable[_S]) -> WeakSet[_S | _T]: ...
def isdisjoint(self, other: Iterable[_T]) -> bool: ...
def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
def __class_getitem__(cls, item: Any, /) -> GenericAlias:
"""
Represent a PEP 585 generic type
E.g. for t = list[int], t.__origin__ is list and t.__args__ is (int,).
"""


@@ -1,3 +1,7 @@
"""
Implementation module for Zstandard compression.
"""
from _typeshed import ReadableBuffer
from collections.abc import Mapping
from compression.zstd import CompressionParameter, DecompressionParameter
@@ -42,6 +46,20 @@ _ZstdCompressorFlushFrame: TypeAlias = Literal[2]
@final
class ZstdCompressor:
"""
Create a compressor object for compressing data incrementally.
level
The compression level to use. Defaults to COMPRESSION_LEVEL_DEFAULT.
options
A dict object that contains advanced compression parameters.
zstd_dict
A ZstdDict object, a pre-trained Zstandard dictionary.
Thread-safe at method level. For one-shot compression, use the compress()
function instead.
"""
CONTINUE: Final = 0
FLUSH_BLOCK: Final = 1
FLUSH_FRAME: Final = 2
@@ -50,48 +68,276 @@ class ZstdCompressor:
) -> None: ...
def compress(
self, /, data: ReadableBuffer, mode: _ZstdCompressorContinue | _ZstdCompressorFlushBlock | _ZstdCompressorFlushFrame = 0
) -> bytes: ...
def flush(self, /, mode: _ZstdCompressorFlushBlock | _ZstdCompressorFlushFrame = 2) -> bytes: ...
def set_pledged_input_size(self, size: int | None, /) -> None: ...
) -> bytes:
"""
Provide data to the compressor object.
mode
Can be these 3 values ZstdCompressor.CONTINUE,
ZstdCompressor.FLUSH_BLOCK, ZstdCompressor.FLUSH_FRAME
Return a chunk of compressed data if possible, or b'' otherwise. When you have
finished providing data to the compressor, call the flush() method to finish
the compression process.
"""
def flush(self, /, mode: _ZstdCompressorFlushBlock | _ZstdCompressorFlushFrame = 2) -> bytes:
"""
Finish the compression process.
mode
Can be these 2 values ZstdCompressor.FLUSH_FRAME,
ZstdCompressor.FLUSH_BLOCK
Flush any remaining data left in internal buffers. Since Zstandard data
consists of one or more independent frames, the compressor object can still
be used after this method is called.
"""
def set_pledged_input_size(self, size: int | None, /) -> None:
"""
Set the uncompressed content size to be written into the frame header.
size
The size of the uncompressed data to be provided to the compressor.
This method can be used to ensure the header of the frame about to be written
includes the size of the data, unless the CompressionParameter.content_size_flag
is set to False. If last_mode != FLUSH_FRAME, then a RuntimeError is raised.
It is important to ensure that the pledged data size matches the actual data
size. If they do not match the compressed output data may be corrupted and the
final chunk written may be lost.
"""
@property
def last_mode(self) -> _ZstdCompressorContinue | _ZstdCompressorFlushBlock | _ZstdCompressorFlushFrame: ...
def last_mode(self) -> _ZstdCompressorContinue | _ZstdCompressorFlushBlock | _ZstdCompressorFlushFrame:
"""
The last mode used to this compressor object, its value can be .CONTINUE,
.FLUSH_BLOCK, .FLUSH_FRAME. Initialized to .FLUSH_FRAME.
It can be used to get the current state of a compressor, such as, data
flushed, or a frame ended.
"""
@final
class ZstdDecompressor:
"""
Create a decompressor object for decompressing data incrementally.
zstd_dict
A ZstdDict object, a pre-trained Zstandard dictionary.
options
A dict object that contains advanced decompression parameters.
Thread-safe at method level. For one-shot decompression, use the decompress()
function instead.
"""
def __init__(self, zstd_dict: ZstdDict | None = None, options: Mapping[int, int] | None = None) -> None: ...
def decompress(self, /, data: ReadableBuffer, max_length: int = -1) -> bytes: ...
def decompress(self, /, data: ReadableBuffer, max_length: int = -1) -> bytes:
"""
Decompress *data*, returning uncompressed bytes if possible, or b'' otherwise.
data
A bytes-like object, Zstandard data to be decompressed.
max_length
Maximum size of returned data. When it is negative, the size of
output buffer is unlimited. When it is nonnegative, returns at
most max_length bytes of decompressed data.
If *max_length* is nonnegative, returns at most *max_length* bytes of
decompressed data. If this limit is reached and further output can be
produced, *self.needs_input* will be set to ``False``. In this case, the next
call to *decompress()* may provide *data* as b'' to obtain more of the output.
If all of the input data was decompressed and returned (either because this
was less than *max_length* bytes, or because *max_length* was negative),
*self.needs_input* will be set to True.
Attempting to decompress data after the end of a frame is reached raises an
EOFError. Any data found after the end of the frame is ignored and saved in
the self.unused_data attribute.
"""
@property
def eof(self) -> bool: ...
def eof(self) -> bool:
"""
True means the end of the first frame has been reached. If decompress data
after that, an EOFError exception will be raised.
"""
@property
def needs_input(self) -> bool: ...
def needs_input(self) -> bool:
"""
If the max_length output limit in .decompress() method has been reached,
and the decompressor has (or may have) unconsumed input data, it will be set
to False. In this case, passing b'' to the .decompress() method may output
further data.
"""
@property
def unused_data(self) -> bytes: ...
def unused_data(self) -> bytes:
"""
A bytes object of un-consumed input data.
When the ZstdDecompressor object stops after a frame is
decompressed, this holds any unused input data after the frame. Otherwise this will be b''.
"""
@final
class ZstdDict:
def __init__(self, dict_content: bytes, /, *, is_raw: bool = False) -> None: ...
def __len__(self, /) -> int: ...
@property
def as_digested_dict(self) -> tuple[Self, int]: ...
@property
def as_prefix(self) -> tuple[Self, int]: ...
@property
def as_undigested_dict(self) -> tuple[Self, int]: ...
@property
def dict_content(self) -> bytes: ...
@property
def dict_id(self) -> int: ...
"""
Represents a Zstandard dictionary.
dict_content
The content of a Zstandard dictionary as a bytes-like object.
is_raw
If true, perform no checks on *dict_content*, useful for some
advanced cases. Otherwise, check that the content represents
a Zstandard dictionary created by the zstd library or CLI.
The dictionary can be used for compression or decompression, and can be shared
by multiple ZstdCompressor or ZstdDecompressor objects.
"""
def __init__(self, dict_content: bytes, /, *, is_raw: bool = False) -> None: ...
def __len__(self, /) -> int:
"""
Return len(self).
"""
@property
def as_digested_dict(self) -> tuple[Self, int]:
"""
Load as a digested dictionary to compressor.
Pass this attribute as zstd_dict argument:
compress(dat, zstd_dict=zd.as_digested_dict)
1. Some advanced compression parameters of compressor may be overridden
by parameters of digested dictionary.
2. ZstdDict has a digested dictionaries cache for each compression level.
It's faster when loading again a digested dictionary with the same
compression level.
3. No need to use this for decompression.
"""
@property
def as_prefix(self) -> tuple[Self, int]:
"""
Load as a prefix to compressor/decompressor.
Pass this attribute as zstd_dict argument:
compress(dat, zstd_dict=zd.as_prefix)
1. Prefix is compatible with long distance matching, while dictionary is not.
2. It only works for the first frame, then the compressor/decompressor will
return to no prefix state.
3. When decompressing, must use the same prefix as when compressing.
"""
@property
def as_undigested_dict(self) -> tuple[Self, int]:
"""
Load as an undigested dictionary to compressor.
Pass this attribute as zstd_dict argument:
compress(dat, zstd_dict=zd.as_undigested_dict)
1. The advanced compression parameters of compressor will not be overridden.
2. Loading an undigested dictionary is costly. If loading an undigested dictionary
multiple times, consider reusing a compressor object.
3. No need to use this for decompression.
"""
@property
def dict_content(self) -> bytes:
"""
The content of a Zstandard dictionary, as a bytes object.
"""
@property
def dict_id(self) -> int:
"""
The ID of the Zstandard dictionary, an int between 0 and 2**32.
A non-zero value represents an ordinary Zstandard dictionary, conforming to the standardised format.
The special value '0' means a 'raw content' dictionary, without any restrictions on format or content.
"""
class ZstdError(Exception): ...
class ZstdError(Exception):
"""
An error occurred in the zstd library.
"""
def finalize_dict(
custom_dict_bytes: bytes, samples_bytes: bytes, samples_sizes: tuple[int, ...], dict_size: int, compression_level: int, /
) -> bytes: ...
def get_frame_info(frame_buffer: ReadableBuffer) -> tuple[int, int]: ...
def get_frame_size(frame_buffer: ReadableBuffer) -> int: ...
def get_param_bounds(parameter: int, is_compress: bool) -> tuple[int, int]: ...
def set_parameter_types(c_parameter_type: type[CompressionParameter], d_parameter_type: type[DecompressionParameter]) -> None: ...
def train_dict(samples_bytes: bytes, samples_sizes: tuple[int, ...], dict_size: int, /) -> bytes: ...
) -> bytes:
"""
Finalize a Zstandard dictionary.
custom_dict_bytes
Custom dictionary content.
samples_bytes
Concatenation of samples.
samples_sizes
Tuple of samples' sizes.
dict_size
The size of the dictionary.
compression_level
Optimize for a specific Zstandard compression level, 0 means default.
"""
def get_frame_info(frame_buffer: ReadableBuffer) -> tuple[int, int]:
"""
Get Zstandard frame information from a frame header.
frame_buffer
A bytes-like object, containing the header of a Zstandard frame.
"""
def get_frame_size(frame_buffer: ReadableBuffer) -> int:
"""
Get the size of a Zstandard frame, including the header and optional checksum.
frame_buffer
A bytes-like object, it should start from the beginning of a frame,
and contains at least one complete frame.
"""
def get_param_bounds(parameter: int, is_compress: bool) -> tuple[int, int]:
"""
Get CompressionParameter/DecompressionParameter bounds.
parameter
The parameter to get bounds.
is_compress
True for CompressionParameter, False for DecompressionParameter.
"""
def set_parameter_types(c_parameter_type: type[CompressionParameter], d_parameter_type: type[DecompressionParameter]) -> None:
"""
Set CompressionParameter and DecompressionParameter types for validity check.
c_parameter_type
CompressionParameter IntEnum type object
d_parameter_type
DecompressionParameter IntEnum type object
"""
def train_dict(samples_bytes: bytes, samples_sizes: tuple[int, ...], dict_size: int, /) -> bytes:
"""
Train a Zstandard dictionary on sample data.
samples_bytes
Concatenation of samples.
samples_sizes
Tuple of samples' sizes.
dict_size
The size of the dictionary.
"""
zstd_version: Final[str]
zstd_version_number: Final[int]
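
These bindings are surfaced by compression.zstd, new in Python 3.14; a hedged sketch of the incremental API, assuming that interpreter version:

from compression.zstd import ZstdCompressor, ZstdDecompressor

c = ZstdCompressor()
out = c.compress(b"hello ", ZstdCompressor.CONTINUE)
out += c.compress(b"zstd", ZstdCompressor.CONTINUE)
out += c.flush(ZstdCompressor.FLUSH_FRAME)  # ends the frame

d = ZstdDecompressor()
assert d.decompress(out) == b"hello zstd"
assert d.eof                                # the first frame was fully read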


@@ -1,3 +1,7 @@
"""
Abstract Base Classes (ABCs) according to PEP 3119.
"""
import _typeshed
import sys
from _typeshed import SupportsWrite
@@ -12,6 +16,20 @@ _P = ParamSpec("_P")
# These definitions have special processing in mypy
class ABCMeta(type):
"""
Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
__abstractmethods__: frozenset[str]
if sys.version_info >= (3, 11):
def __new__(
@@ -22,30 +40,129 @@ class ABCMeta(type):
mcls: type[_typeshed.Self], name: str, bases: tuple[type, ...], namespace: dict[str, Any], **kwargs: Any
) -> _typeshed.Self: ...
def __instancecheck__(cls: ABCMeta, instance: Any) -> bool: ...
def __subclasscheck__(cls: ABCMeta, subclass: type) -> bool: ...
def _dump_registry(cls: ABCMeta, file: SupportsWrite[str] | None = None) -> None: ...
def register(cls: ABCMeta, subclass: type[_T]) -> type[_T]: ...
def __instancecheck__(cls: ABCMeta, instance: Any) -> bool:
"""
Override for isinstance(instance, cls).
"""
def __subclasscheck__(cls: ABCMeta, subclass: type) -> bool:
"""
Override for issubclass(subclass, cls).
"""
def _dump_registry(cls: ABCMeta, file: SupportsWrite[str] | None = None) -> None:
"""
Debug helper to print the ABC registry.
"""
def register(cls: ABCMeta, subclass: type[_T]) -> type[_T]:
"""
Register a virtual subclass of an ABC.
Returns the subclass, to allow usage as a class decorator.
"""
def abstractmethod(funcobj: _FuncT) -> _FuncT: ...
def abstractmethod(funcobj: _FuncT) -> _FuncT:
"""
A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms. abstractmethod() may be used to declare
abstract methods for properties and descriptors.
Usage:
class C(metaclass=ABCMeta):
@abstractmethod
def my_abstract_method(self, arg1, arg2, argN):
...
"""
@deprecated("Use 'classmethod' with 'abstractmethod' instead")
class abstractclassmethod(classmethod[_T, _P, _R_co]):
"""
A decorator indicating abstract classmethods.
Deprecated, use 'classmethod' with 'abstractmethod' instead:
class C(ABC):
@classmethod
@abstractmethod
def my_abstract_classmethod(cls, ...):
...
"""
__isabstractmethod__: Literal[True]
def __init__(self, callable: Callable[Concatenate[type[_T], _P], _R_co]) -> None: ...
@deprecated("Use 'staticmethod' with 'abstractmethod' instead")
class abstractstaticmethod(staticmethod[_P, _R_co]):
"""
A decorator indicating abstract staticmethods.
Deprecated, use 'staticmethod' with 'abstractmethod' instead:
class C(ABC):
@staticmethod
@abstractmethod
def my_abstract_staticmethod(...):
...
"""
__isabstractmethod__: Literal[True]
def __init__(self, callable: Callable[_P, _R_co]) -> None: ...
@deprecated("Use 'property' with 'abstractmethod' instead")
class abstractproperty(property):
"""
A decorator indicating abstract properties.
Deprecated, use 'property' with 'abstractmethod' instead:
class C(ABC):
@property
@abstractmethod
def my_abstract_property(self):
...
"""
__isabstractmethod__: Literal[True]
class ABC(metaclass=ABCMeta):
"""
Helper class that provides a standard way to create an ABC using
inheritance.
"""
__slots__ = ()
def get_cache_token() -> object: ...
def get_cache_token() -> object:
"""
Returns the current ABC cache token.
The token is an opaque object (supporting equality testing) identifying the
current version of the ABC cache for virtual subclasses. The token changes
with every call to register() on any ABC.
"""
if sys.version_info >= (3, 10):
def update_abstractmethods(cls: type[_T]) -> type[_T]: ...
def update_abstractmethods(cls: type[_T]) -> type[_T]:
"""
Recalculate the set of abstract methods of an abstract class.
If a class has had one of its abstract methods implemented after the
class was created, the method will not be considered implemented until
this function is called. Alternatively, if a new abstract method has been
added to the class, it will only be considered an abstract method of the
class after this function is called.
This function should be called before any use is made of the class,
usually in class decorators that add methods to the subject class.
Returns cls, to allow usage as a class decorator.
If cls is not an instance of ABCMeta, does nothing.
"""


@@ -1,3 +1,140 @@
"""
Stuff to parse AIFF-C and AIFF files.
Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.
An AIFF-C file has the following structure.
+-----------------+
| FORM |
+-----------------+
| <size> |
+----+------------+
| | AIFC |
| +------------+
| | <chunks> |
| | . |
| | . |
| | . |
+----+------------+
An AIFF file has the string "AIFF" instead of "AIFC".
A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data. The size field does not include
the size of the 8 byte header.
The following chunk types are recognized.
FVER
<version number of AIFF-C defining document> (AIFF-C only).
MARK
<# of markers> (2 bytes)
list of markers:
<marker ID> (2 bytes, must be > 0)
<position> (4 bytes)
<marker name> ("pstring")
COMM
<# of channels> (2 bytes)
<# of sound frames> (4 bytes)
<size of the samples> (2 bytes)
<sampling frequency> (10 bytes, IEEE 80-bit extended
floating point)
in AIFF-C files only:
<compression type> (4 bytes)
<human-readable version of compression type> ("pstring")
SSND
<offset> (4 bytes, not used by this program)
<blocksize> (4 bytes, not used by this program)
<sound data>
A pstring consists of 1 byte length, a string of characters, and 0 or 1
byte pad to make the total length even.
Usage.
Reading AIFF files:
f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for AIFF files)
getcompname() -- returns human-readable version of
compression type ('not compressed' for AIFF files)
getparams() -- returns a namedtuple consisting of all of the
above in the above order
getmarkers() -- get the list of marks in the audio file or None
if there are no marks
getmark(id) -- get mark with the specified id (raises an error
if the mark does not exist)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.
Writing AIFF files:
f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
aiff() -- create an AIFF file (AIFF-C default)
aifc() -- create an AIFF-C file
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
setmark(id, pos, name)
-- add specified mark to the list of marks
tell() -- return current position in output file (useful
in combination with setmark())
writeframesraw(data)
-- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, except possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes(b'') or
close() to patch up the sizes in the header.
Marks can be added anytime. If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.
When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written. This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
"""
from types import TracebackType
from typing import IO, Any, Literal, NamedTuple, overload
from typing_extensions import Self, TypeAlias
@@ -7,6 +144,10 @@ __all__ = ["Error", "open"]
class Error(Exception): ...
class _aifc_params(NamedTuple):
"""
_aifc_params(nchannels, sampwidth, framerate, nframes, comptype, compname)
"""
nchannels: int
sampwidth: int
framerate: int


@@ -1,3 +1,7 @@
"""
Helpers for introspecting and wrapping annotations.
"""
import sys
from typing import Literal
@@ -28,6 +32,18 @@ if sys.version_info >= (3, 14):
@final
class ForwardRef:
"""
Wrapper that holds a forward reference.
Constructor arguments:
* arg: a string representing the code to be evaluated.
* module: the module where the forward reference was created.
Must be a string, not a module object.
* owner: The owning object (module, class, or function).
* is_argument: Does nothing, retained for compatibility.
* is_class: True if the forward reference was created in class scope.
"""
__forward_is_argument__: bool
__forward_is_class__: bool
__forward_module__: str | None
@@ -43,7 +59,13 @@
type_params: tuple[TypeVar | ParamSpec | TypeVarTuple, ...] | None = None,
owner: object = None,
format: Literal[Format.STRING],
) -> str: ...
) -> str:
"""
Evaluate the forward reference and return the value.
If the forward reference cannot be evaluated, raise an exception.
"""
@overload
def evaluate(
self,
@@ -83,7 +105,13 @@
def __ror__(self, other: Any) -> types.UnionType: ...
@overload
def call_evaluate_function(evaluate: EvaluateFunc, format: Literal[Format.STRING], *, owner: object = None) -> str: ...
def call_evaluate_function(evaluate: EvaluateFunc, format: Literal[Format.STRING], *, owner: object = None) -> str:
"""
Call an evaluate function. Evaluate functions are normally generated for
the value of type aliases and the bounds, constraints, and defaults of
type parameter objects.
"""
@overload
def call_evaluate_function(
evaluate: EvaluateFunc, format: Literal[Format.FORWARDREF], *, owner: object = None
@@ -91,16 +119,41 @@
@overload
def call_evaluate_function(evaluate: EvaluateFunc, format: Format, *, owner: object = None) -> AnnotationForm: ...
@overload
def call_annotate_function(
annotate: AnnotateFunc, format: Literal[Format.STRING], *, owner: object = None
) -> dict[str, str]: ...
def call_annotate_function(annotate: AnnotateFunc, format: Literal[Format.STRING], *, owner: object = None) -> dict[str, str]:
"""
Call an __annotate__ function. __annotate__ functions are normally
generated by the compiler to defer the evaluation of annotations. They
can be called with any of the format arguments in the Format enum, but
compiler-generated __annotate__ functions only support the VALUE format.
This function provides additional functionality to call __annotate__
functions with the FORWARDREF and STRING formats.
*annotate* must be an __annotate__ function, which takes a single argument
and returns a dict of annotations.
*format* must be a member of the Format enum or one of the corresponding
integer values.
*owner* can be the object that owns the annotations (i.e., the module,
class, or function that the __annotate__ function derives from). With the
FORWARDREF format, it is used to provide better evaluation capabilities
on the generated ForwardRef objects.
"""
@overload
def call_annotate_function(
annotate: AnnotateFunc, format: Literal[Format.FORWARDREF], *, owner: object = None
) -> dict[str, AnnotationForm | ForwardRef]: ...
@overload
def call_annotate_function(annotate: AnnotateFunc, format: Format, *, owner: object = None) -> dict[str, AnnotationForm]: ...
def get_annotate_from_class_namespace(obj: Mapping[str, object]) -> AnnotateFunc | None: ...
def get_annotate_from_class_namespace(obj: Mapping[str, object]) -> AnnotateFunc | None:
"""
Retrieve the annotate function from a class namespace dictionary.
Return None if the namespace does not contain an annotate function.
This is useful in metaclass ``__new__`` methods to retrieve the annotate function.
"""
@overload
def get_annotations(
obj: Any, # any object with __annotations__ or __annotate__
@@ -109,7 +162,59 @@
locals: Mapping[str, object] | None = None,
eval_str: bool = False,
format: Literal[Format.STRING],
) -> dict[str, str]: ...
) -> dict[str, str]:
"""
Compute the annotations dict for an object.
obj may be a callable, class, module, or other object with
__annotate__ or __annotations__ attributes.
Passing any other object raises TypeError.
The *format* parameter controls the format in which annotations are returned,
and must be a member of the Format enum or its integer equivalent.
For the VALUE format, the __annotations__ is tried first; if it
does not exist, the __annotate__ function is called. The
FORWARDREF format uses __annotations__ if it exists and can be
evaluated, and otherwise falls back to calling the __annotate__ function.
The SOURCE format tries __annotate__ first, and falls back to
using __annotations__, stringified using annotations_to_string().
This function handles several details for you:
* If eval_str is true, values of type str will
be un-stringized using eval(). This is intended
for use with stringized annotations
("from __future__ import annotations").
* If obj doesn't have an annotations dict, returns an
empty dict. (Functions and methods always have an
annotations dict; classes, modules, and other types of
callables may not.)
* Ignores inherited annotations on classes. If a class
doesn't have its own annotations dict, returns an empty dict.
* All accesses to object members and dict values are done
using getattr() and dict.get() for safety.
* Always, always, always returns a freshly-created dict.
eval_str controls whether or not values of type str are replaced
with the result of calling eval() on those values:
* If eval_str is true, eval() is called on values of type str.
* If eval_str is false (the default), values of type str are unchanged.
globals and locals are passed in to eval(); see the documentation
for eval() for more information. If either globals or locals is
None, this function may replace that value with a context-specific
default, contingent on type(obj):
* If obj is a module, globals defaults to obj.__dict__.
* If obj is a class, globals defaults to
sys.modules[obj.__module__].__dict__ and locals
defaults to the obj class namespace.
* If obj is a callable, globals defaults to obj.__globals__,
although if obj is a wrapped function (using
functools.update_wrapper()) it is first unwrapped.
"""
@overload
def get_annotations(
obj: Any,
@@ -128,5 +233,18 @@
eval_str: bool = False,
format: Format = Format.VALUE, # noqa: Y011
) -> dict[str, AnnotationForm]: ...
def type_repr(value: object) -> str: ...
def annotations_to_string(annotations: SupportsItems[str, object]) -> dict[str, str]: ...
def type_repr(value: object) -> str:
"""
Convert a Python value to a format suitable for use with the STRING format.
This is intended as a helper for tools that support the STRING format but do
not have access to the code that originally produced the annotations. It uses
repr() for most objects.
"""
def annotations_to_string(annotations: SupportsItems[str, object]) -> dict[str, str]:
"""
Convert an annotation dict containing values to approximately the STRING format.
Always returns a fresh dictionary.
"""
@ -1,3 +1,67 @@
"""
Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
- handles both optional and positional arguments
- produces highly informative usage messages
- supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file::
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log',
help='the file where the sum should be written')
args = parser.parse_args()
with (open(args.log, 'w') if args.log is not None
else contextlib.nullcontext(sys.stdout)) as log:
log.write('%s' % sum(args.integers))
The module contains the following public classes:
- ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
- ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
- FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls. Deprecated since
Python 3.14.
- Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
ArgumentDefaultsHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default,
RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
not to change the formatting for help text, and
ArgumentDefaultsHelpFormatter adds information about argument defaults
to the help.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
import sys
from _typeshed import SupportsWrite, sentinel
from collections.abc import Callable, Generator, Iterable, Sequence
@ -41,12 +105,28 @@ ZERO_OR_MORE: Final = "*"
_UNRECOGNIZED_ARGS_ATTR: Final = "_unrecognized_args" # undocumented
class ArgumentError(Exception):
"""
An error from creating or using an argument (optional or positional).
The string value of this exception is the message, augmented with
information about the argument that caused it.
"""
argument_name: str | None
message: str
def __init__(self, argument: Action | None, message: str) -> None: ...
# undocumented
class _AttributeHolder:
"""
Abstract base class that provides __repr__.
The __repr__ method returns a string in the format::
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def _get_kwargs(self) -> list[tuple[str, Any]]: ...
def _get_args(self) -> list[Any]: ...
@ -90,7 +170,12 @@ class _ActionsContainer:
dest: str | None = ...,
version: str = ...,
**kwargs: Any,
) -> Action: ...
) -> Action:
"""
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
"""
def add_argument_group(
self,
title: str | None = None,
@ -116,6 +201,31 @@ class _FormatterClass(Protocol):
def __call__(self, *, prog: str) -> HelpFormatter: ...
class ArgumentParser(_AttributeHolder, _ActionsContainer):
"""
Object for parsing command line strings into Python objects.
Keyword Arguments:
- prog -- The name of the program (default:
``os.path.basename(sys.argv[0])``)
- usage -- A usage message (default: auto-generated from arguments)
- description -- A description of what the program does
- epilog -- Text following the argument descriptions
- parents -- Parsers whose arguments should be copied into this one
- formatter_class -- HelpFormatter class for printing help messages
- prefix_chars -- Characters that prefix optional arguments
- fromfile_prefix_chars -- Characters that prefix files containing
additional arguments
- argument_default -- The default value for all arguments
- conflict_handler -- String indicating how to handle conflicts
- add_help -- Add a -h/--help option
- allow_abbrev -- Allow long options to be abbreviated unambiguously
- exit_on_error -- Determines whether or not ArgumentParser exits with
error info when an error occurs
- suggest_on_error -- Enables suggestions for mistyped argument choices
and subparser names (default: ``False``)
- color -- Allow color output in help messages (default: ``False``)
"""
prog: str
usage: str | None
epilog: str | None
@ -220,7 +330,17 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer):
def parse_known_args(self, *, namespace: _N) -> tuple[_N, list[str]]: ...
def convert_arg_line_to_args(self, arg_line: str) -> list[str]: ...
def exit(self, status: int = 0, message: str | None = None) -> NoReturn: ...
def error(self, message: str) -> NoReturn: ...
def error(self, message: str) -> NoReturn:
"""
error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
@overload
def parse_intermixed_args(self, args: Sequence[str] | None = None, namespace: None = None) -> Namespace: ...
@overload
@ -258,6 +378,13 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer):
def _print_message(self, message: str, file: SupportsWrite[str] | None = None) -> None: ...
class HelpFormatter:
"""
Formatter for generating usage messages and argument help strings.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
# undocumented
_prog: str
_indent_increment: int
@ -318,12 +445,91 @@ class HelpFormatter:
def _get_default_metavar_for_optional(self, action: Action) -> str: ...
def _get_default_metavar_for_positional(self, action: Action) -> str: ...
class RawDescriptionHelpFormatter(HelpFormatter): ...
class RawTextHelpFormatter(RawDescriptionHelpFormatter): ...
class ArgumentDefaultsHelpFormatter(HelpFormatter): ...
class MetavarTypeHelpFormatter(HelpFormatter): ...
class RawDescriptionHelpFormatter(HelpFormatter):
"""
Help message formatter which retains any formatting in descriptions.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
"""
Help message formatter which retains formatting of all help text.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
class ArgumentDefaultsHelpFormatter(HelpFormatter):
"""
Help message formatter which adds default values to argument help.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
class MetavarTypeHelpFormatter(HelpFormatter):
"""
Help message formatter which uses the argument 'type' as the default
metavar value (instead of the argument 'dest')
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
class Action(_AttributeHolder):
"""
Information about how to convert command line strings to Python objects.
Action objects are used by an ArgumentParser to represent the information
needed to parse a single argument from one or more strings from the
command line. The keyword arguments to the Action constructor are also
all attributes of Action instances.
Keyword Arguments:
- option_strings -- A list of command-line option strings which
should be associated with this action.
- dest -- The name of the attribute to hold the created object(s)
- nargs -- The number of command-line arguments that should be
consumed. By default, one argument will be consumed and a single
value will be produced. Other values include:
- N (an integer) consumes N arguments (and produces a list)
- '?' consumes zero or one arguments
- '*' consumes zero or more arguments (and produces a list)
- '+' consumes one or more arguments (and produces a list)
Note that the difference between the default and nargs=1 is that
with the default, a single value will be produced, while with
nargs=1, a list containing a single value will be produced.
- const -- The value to be produced if the option is specified and the
option uses an action that takes no values.
- default -- The value to be produced if the option is not specified.
- type -- A callable that accepts a single string argument, and
returns the converted value. The standard Python types str, int,
float, and complex are useful examples of such callables. If None,
str is used.
- choices -- A container of values that should be allowed. If not None,
after a command-line argument has been converted to the appropriate
type, an exception will be raised if it is not a member of this
collection.
- required -- True if the action must always be specified at the
command line. This is only meaningful for optional command-line
arguments.
- help -- The help string describing the argument.
- metavar -- The name to be used for the option's argument with the
help string. If None, the 'dest' value will be used as the name.
"""
option_strings: Sequence[str]
dest: str
nargs: int | str | None
@ -459,6 +665,13 @@ else:
) -> None: ...
class Namespace(_AttributeHolder):
"""
Simple object for storing attributes.
Implements equality by attribute names and values, and provides a simple
string representation.
"""
def __init__(self, **kwargs: Any) -> None: ...
def __getattr__(self, name: str) -> Any: ...
def __setattr__(self, name: str, value: Any, /) -> None: ...
@ -469,6 +682,23 @@ class Namespace(_AttributeHolder):
if sys.version_info >= (3, 14):
@deprecated("Deprecated in Python 3.14; Simply open files after parsing arguments")
class FileType:
"""
Deprecated factory for creating file object types
Instances of FileType are typically passed as type= arguments to the
ArgumentParser add_argument() method.
Keyword Arguments:
- mode -- A string indicating how the file is to be opened. Accepts the
same values as the builtin open() function.
- bufsize -- The file's desired buffer size. Accepts the same values as
the builtin open() function.
- encoding -- The file's encoding. Accepts the same values as the
builtin open() function.
- errors -- A string indicating how encoding and decoding errors are to
be handled. Accepts the same value as the builtin open() function.
"""
# undocumented
_mode: str
_bufsize: int
@ -481,6 +711,23 @@ if sys.version_info >= (3, 14):
else:
class FileType:
"""
Factory for creating file object types
Instances of FileType are typically passed as type= arguments to the
ArgumentParser add_argument() method.
Keyword Arguments:
- mode -- A string indicating how the file is to be opened. Accepts the
same values as the builtin open() function.
- bufsize -- The file's desired buffer size. Accepts the same values as
the builtin open() function.
- encoding -- The file's encoding. Accepts the same values as the
builtin open() function.
- errors -- A string indicating how encoding and decoding errors are to
be handled. Accepts the same value as the builtin open() function.
"""
# undocumented
_mode: str
_bufsize: int
@ -796,7 +1043,10 @@ class _SubParsersAction(Action, Generic[_ArgumentParserT]):
def _get_subactions(self) -> list[Action]: ...
# undocumented
class ArgumentTypeError(Exception): ...
class ArgumentTypeError(Exception):
"""
An error from trying to convert a command line string to a type.
"""
# undocumented
def _get_action_name(argument: Action | None) -> str | None: ...
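A short usage sketch of the argparse API documented above; the program name and argument values are illustrative:

import argparse

parser = argparse.ArgumentParser(prog="summer", description="sum the integers at the command line")
parser.add_argument("integers", metavar="int", nargs="+", type=int, help="an integer to be summed")
parser.add_argument("--verbose", action="store_true")  # action selected by string, per the Action docstring above
args = parser.parse_args(["1", "2", "3", "--verbose"])
assert sum(args.integers) == 6 and args.verbose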
@ -1,3 +1,10 @@
"""
This module defines an object type which can efficiently represent
an array of basic values: characters, integers, floating-point
numbers. Arrays are sequence types and behave very much like lists,
except that the type of objects stored in them is constrained.
"""
import sys
from _typeshed import ReadableBuffer, SupportsRead, SupportsWrite
from collections.abc import Iterable, MutableSequence
@ -15,10 +22,77 @@ _T = TypeVar("_T", int, float, str)
typecodes: str
class array(MutableSequence[_T]):
"""
array(typecode [, initializer]) -> array
Return a new array whose items are restricted by typecode, and
initialized from the optional initializer value, which must be a list,
string or iterable over elements of the appropriate type.
Arrays represent basic values and behave very much like lists, except
the type of objects stored in them is constrained. The type is specified
at object creation time by using a type code, which is a single character.
The following type codes are defined:
Type code   C Type             Minimum size in bytes
'b'         signed integer     1
'B'         unsigned integer   1
'u'         Unicode character  2 (see note)
'h'         signed integer     2
'H'         unsigned integer   2
'i'         signed integer     2
'I'         unsigned integer   2
'l'         signed integer     4
'L'         unsigned integer   4
'q'         signed integer     8 (see note)
'Q'         unsigned integer   8 (see note)
'f'         floating-point     4
'd'         floating-point     8
NOTE: The 'u' typecode corresponds to Python's unicode character. On
narrow builds this is 2 bytes; on wide builds this is 4 bytes.
NOTE: The 'q' and 'Q' type codes are only available if the platform
C compiler used to build Python supports 'long long', or, on Windows,
'__int64'.
Methods:
append() -- append a new item to the end of the array
buffer_info() -- return information giving the current memory info
byteswap() -- byteswap all the items of the array
count() -- return number of occurrences of an object
extend() -- extend array by appending multiple elements from an iterable
fromfile() -- read items from a file object
fromlist() -- append items from the list
frombytes() -- append items from the string
index() -- return index of first occurrence of an object
insert() -- insert a new item into the array at a provided position
pop() -- remove and return item (default last)
remove() -- remove first occurrence of an object
reverse() -- reverse the order of the items in the array
tofile() -- write all items to a file object
tolist() -- return the array converted to an ordinary list
tobytes() -- return the array converted to a string
Attributes:
typecode -- the typecode character used to create the array
itemsize -- the length in bytes of one array item
"""
@property
def typecode(self) -> _TypeCode: ...
def typecode(self) -> _TypeCode:
"""
the typecode character used to create the array
"""
@property
def itemsize(self) -> int: ...
def itemsize(self) -> int:
"""
the size, in bytes, of one array item
"""
@overload
def __new__(
cls: type[array[int]], typecode: _IntTypeCode, initializer: bytes | bytearray | Iterable[int] = ..., /
@ -35,54 +109,195 @@ class array(MutableSequence[_T]):
def __new__(cls, typecode: str, initializer: Iterable[_T], /) -> Self: ...
@overload
def __new__(cls, typecode: str, initializer: bytes | bytearray = ..., /) -> Self: ...
def append(self, v: _T, /) -> None: ...
def buffer_info(self) -> tuple[int, int]: ...
def byteswap(self) -> None: ...
def count(self, v: _T, /) -> int: ...
def extend(self, bb: Iterable[_T], /) -> None: ...
def frombytes(self, buffer: ReadableBuffer, /) -> None: ...
def fromfile(self, f: SupportsRead[bytes], n: int, /) -> None: ...
def fromlist(self, list: list[_T], /) -> None: ...
def fromunicode(self, ustr: str, /) -> None: ...
def append(self, v: _T, /) -> None:
"""
Append new value v to the end of the array.
"""
def buffer_info(self) -> tuple[int, int]:
"""
Return a tuple (address, length) giving the current memory address and the length in items of the buffer used to hold array's contents.
The length should be multiplied by the itemsize attribute to calculate
the buffer length in bytes.
"""
def byteswap(self) -> None:
"""
Byteswap all items of the array.
If the items in the array are not 1, 2, 4, or 8 bytes in size, RuntimeError is
raised.
"""
def count(self, v: _T, /) -> int:
"""
Return number of occurrences of v in the array.
"""
def extend(self, bb: Iterable[_T], /) -> None:
"""
Append items to the end of the array.
"""
def frombytes(self, buffer: ReadableBuffer, /) -> None:
"""
Appends items from the string, interpreting it as an array of machine values, as if it had been read from a file using the fromfile() method.
"""
def fromfile(self, f: SupportsRead[bytes], n: int, /) -> None:
"""
Read n objects from the file object f and append them to the end of the array.
"""
def fromlist(self, list: list[_T], /) -> None:
"""
Append items to array from list.
"""
def fromunicode(self, ustr: str, /) -> None:
"""
Extends this array with data from the unicode string ustr.
The array must be a unicode type array; otherwise a ValueError is raised.
Use array.frombytes(ustr.encode(...)) to append Unicode data to an array of
some other type.
"""
if sys.version_info >= (3, 10):
def index(self, v: _T, start: int = 0, stop: int = sys.maxsize, /) -> int: ...
def index(self, v: _T, start: int = 0, stop: int = sys.maxsize, /) -> int:
"""
Return index of first occurrence of v in the array.
Raise ValueError if the value is not present.
"""
else:
def index(self, v: _T, /) -> int: ... # type: ignore[override]
def insert(self, i: int, v: _T, /) -> None: ...
def pop(self, i: int = -1, /) -> _T: ...
def remove(self, v: _T, /) -> None: ...
def tobytes(self) -> bytes: ...
def tofile(self, f: SupportsWrite[bytes], /) -> None: ...
def tolist(self) -> list[_T]: ...
def tounicode(self) -> str: ...
def index(self, v: _T, /) -> int: # type: ignore[override]
"""
Return index of first occurrence of v in the array.
Raise ValueError if the value is not present.
"""
def insert(self, i: int, v: _T, /) -> None:
"""
Insert a new item v into the array before position i.
"""
def pop(self, i: int = -1, /) -> _T:
"""
Return the i-th element and delete it from the array.
i defaults to -1.
"""
def remove(self, v: _T, /) -> None:
"""
Remove the first occurrence of v in the array.
"""
def tobytes(self) -> bytes:
"""
Convert the array to an array of machine values and return the bytes representation.
"""
def tofile(self, f: SupportsWrite[bytes], /) -> None:
"""
Write all items (as machine values) to the file object f.
"""
def tolist(self) -> list[_T]:
"""
Convert array to an ordinary list with the same items.
"""
def tounicode(self) -> str:
"""
Convert the array to a unicode string. The array must be a unicode type array;
otherwise a ValueError is raised. Use array.tobytes().decode() to obtain a
unicode string from an array of some other type.
"""
__hash__: ClassVar[None] # type: ignore[assignment]
def __len__(self) -> int: ...
def __len__(self) -> int:
"""
Return len(self).
"""
@overload
def __getitem__(self, key: SupportsIndex, /) -> _T: ...
def __getitem__(self, key: SupportsIndex, /) -> _T:
"""
Return self[key].
"""
@overload
def __getitem__(self, key: slice, /) -> array[_T]: ...
@overload # type: ignore[override]
def __setitem__(self, key: SupportsIndex, value: _T, /) -> None: ...
def __setitem__(self, key: SupportsIndex, value: _T, /) -> None:
"""
Set self[key] to value.
"""
@overload
def __setitem__(self, key: slice, value: array[_T], /) -> None: ...
def __delitem__(self, key: SupportsIndex | slice, /) -> None: ...
def __add__(self, value: array[_T], /) -> array[_T]: ...
def __delitem__(self, key: SupportsIndex | slice, /) -> None:
"""
Delete self[key].
"""
def __add__(self, value: array[_T], /) -> array[_T]:
"""
Return self+value.
"""
def __eq__(self, value: object, /) -> bool: ...
def __ge__(self, value: array[_T], /) -> bool: ...
def __gt__(self, value: array[_T], /) -> bool: ...
def __iadd__(self, value: array[_T], /) -> Self: ... # type: ignore[override]
def __imul__(self, value: int, /) -> Self: ...
def __iadd__(self, value: array[_T], /) -> Self: # type: ignore[override]
"""
Implement self+=value.
"""
def __imul__(self, value: int, /) -> Self:
"""
Implement self*=value.
"""
def __le__(self, value: array[_T], /) -> bool: ...
def __lt__(self, value: array[_T], /) -> bool: ...
def __mul__(self, value: int, /) -> array[_T]: ...
def __rmul__(self, value: int, /) -> array[_T]: ...
def __copy__(self) -> array[_T]: ...
def __deepcopy__(self, unused: Any, /) -> array[_T]: ...
def __buffer__(self, flags: int, /) -> memoryview: ...
def __release_buffer__(self, buffer: memoryview, /) -> None: ...
def __mul__(self, value: int, /) -> array[_T]:
"""
Return self*value.
"""
def __rmul__(self, value: int, /) -> array[_T]:
"""
Return value*self.
"""
def __copy__(self) -> array[_T]:
"""
Return a copy of the array.
"""
def __deepcopy__(self, unused: Any, /) -> array[_T]:
"""
Return a copy of the array.
"""
def __buffer__(self, flags: int, /) -> memoryview:
"""
Return a buffer object that exposes the underlying memory of the object.
"""
def __release_buffer__(self, buffer: memoryview, /) -> None:
"""
Release the buffer object that exposes the underlying memory of the object.
"""
if sys.version_info >= (3, 12):
def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
def __class_getitem__(cls, item: Any, /) -> GenericAlias:
"""
See PEP 585
"""
ArrayType = array
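A quick sketch exercising the array methods and attributes listed above:

from array import array

a = array("h", [1, 2, 3])          # 'h': signed integer, minimum 2 bytes
a.append(4)
assert a.tolist() == [1, 2, 3, 4]
assert a.itemsize == 2

raw = a.tobytes()                  # machine-value representation
b = array("h")
b.frombytes(raw)                   # round-trips through the bytes form
assert a == b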
File diff suppressed because it is too large
@ -1,3 +1,25 @@
"""
A class supporting chat-style (command/response) protocols.
This class adds support for 'chat' style protocols - where one side
sends a 'command', and the other sends a response (examples would be
the common internet protocols - smtp, nntp, ftp, etc..).
The handle_read() method looks at the input stream for the current
'terminator' (usually '\\r\\n' for single-line responses, '\\r\\n.\\r\\n'
for multi-line output), calling self.found_terminator() on its
receipt.
for example:
Say you build an async nntp client using this class. At the start
of the connection, you'll have self.terminator set to '\\r\\n', in
order to process the single-line greeting. Just before issuing a
'LIST' command you'll set it to '\\r\\n.\\r\\n'. The output of the LIST
command will be accumulated (using your own 'collect_incoming_data'
method) up to the terminator, and then control will be returned to
you - by calling your self.found_terminator() method.
"""
import asyncore
from abc import abstractmethod
@ -6,16 +28,31 @@ class simple_producer:
def more(self) -> bytes: ...
class async_chat(asyncore.dispatcher):
"""
This is an abstract class. You must derive from this class, and add
the two methods collect_incoming_data() and found_terminator()
"""
ac_in_buffer_size: int
ac_out_buffer_size: int
@abstractmethod
def collect_incoming_data(self, data: bytes) -> None: ...
@abstractmethod
def found_terminator(self) -> None: ...
def set_terminator(self, term: bytes | int | None) -> None: ...
def set_terminator(self, term: bytes | int | None) -> None:
"""
Set the input delimiter.
Can be a fixed string of any length, an integer, or None.
"""
def get_terminator(self) -> bytes | int | None: ...
def push(self, data: bytes) -> None: ...
def push_with_producer(self, producer: simple_producer) -> None: ...
def close_when_done(self) -> None: ...
def close_when_done(self) -> None:
"""
automatically close this channel once the outgoing queue is empty
"""
def initiate_send(self) -> None: ...
def discard_buffers(self) -> None: ...
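A minimal async_chat subclass along the lines of the docstring above; note that asynchat and asyncore were removed in Python 3.12, so this sketch only runs on older interpreters:

import asynchat

class LineChannel(asynchat.async_chat):
    def __init__(self, sock):
        super().__init__(sock)
        self.buffer = []
        self.set_terminator(b"\r\n")   # single-line responses

    def collect_incoming_data(self, data):
        self.buffer.append(data)

    def found_terminator(self):
        line = b"".join(self.buffer)
        self.buffer = []
        self.push(b"echo: " + line + b"\r\n")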
@ -1,3 +1,7 @@
"""
The asyncio package, tracking PEP 3156.
"""
# This condition is so big, it's clearer to keep to platform condition in two blocks
# Can't NOQA on a specific line: https://github.com/plinss/flake8-noqa/issues/22
import sys
@ -1,3 +1,19 @@
"""
Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import ssl
import sys
from _typeshed import FileDescriptorLike, ReadableBuffer, WriteableBuffer
@ -59,41 +75,172 @@ class Server(AbstractServer):
@property
def sockets(self) -> tuple[socket, ...]: ...
def close(self) -> None: ...
async def wait_closed(self) -> None: ...
async def wait_closed(self) -> None:
"""
Wait until server is closed and all connections are dropped.
- If the server is not closed, wait.
- If it is closed, but there are still active connections, wait.
Anyone waiting here will be unblocked once both conditions
(server is closed and all connections have been dropped)
have become true, in either order.
Historical note: In 3.11 and before, this was broken, returning
immediately if the server was already closed, even if there
were still active connections. An attempted fix in 3.12.0 was
still broken, returning immediately if the server was still
open and there were no active connections. Hopefully in 3.12.1
we have it right.
"""
class BaseEventLoop(AbstractEventLoop):
def run_forever(self) -> None: ...
def run_until_complete(self, future: _AwaitableLike[_T]) -> _T: ...
def stop(self) -> None: ...
def is_running(self) -> bool: ...
def is_closed(self) -> bool: ...
def close(self) -> None: ...
async def shutdown_asyncgens(self) -> None: ...
def run_forever(self) -> None:
"""
Run until stop() is called.
"""
def run_until_complete(self, future: _AwaitableLike[_T]) -> _T:
"""
Run until the Future is done.
If the argument is a coroutine, it is wrapped in a Task.
WARNING: It would be disastrous to call run_until_complete()
with the same coroutine twice -- it would wrap it in two
different Tasks and that can't be good.
Return the Future's result, or raise its exception.
"""
def stop(self) -> None:
"""
Stop running the event loop.
Every callback already scheduled will still run. This simply informs
run_forever to stop looping after a complete iteration.
"""
def is_running(self) -> bool:
"""
Returns True if the event loop is running.
"""
def is_closed(self) -> bool:
"""
Returns True if the event loop was closed.
"""
def close(self) -> None:
"""
Close the event loop.
This clears the queues and shuts down the executor,
but does not wait for the executor to finish.
The event loop must not be running.
"""
async def shutdown_asyncgens(self) -> None:
"""
Shutdown all active asynchronous generators.
"""
# Methods scheduling callbacks. All these return Handles.
def call_soon(
self, callback: Callable[[Unpack[_Ts]], object], *args: Unpack[_Ts], context: Context | None = None
) -> Handle: ...
def call_soon(self, callback: Callable[[Unpack[_Ts]], object], *args: Unpack[_Ts], context: Context | None = None) -> Handle:
"""
Arrange for a callback to be called as soon as possible.
This operates as a FIFO queue: callbacks are called in the
order in which they are registered. Each callback will be
called exactly once.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
def call_later(
self, delay: float, callback: Callable[[Unpack[_Ts]], object], *args: Unpack[_Ts], context: Context | None = None
) -> TimerHandle: ...
) -> TimerHandle:
"""
Arrange for a callback to be called at a given time.
Return a Handle: an opaque object with a cancel() method that
can be used to cancel the call.
The delay can be an int or float, expressed in seconds. It is
always relative to the current time.
Each callback will be called exactly once. If two callbacks
are scheduled for exactly the same time, it is undefined which
will be called first.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
def call_at(
self, when: float, callback: Callable[[Unpack[_Ts]], object], *args: Unpack[_Ts], context: Context | None = None
) -> TimerHandle: ...
) -> TimerHandle:
"""
Like call_later(), but uses an absolute time.
Absolute time corresponds to the event loop's time() method.
"""
def time(self) -> float: ...
def time(self) -> float:
"""
Return the time according to the event loop's clock.
This is a float expressed in seconds since an epoch, but the
epoch, precision, accuracy and drift are unspecified and may
differ per event loop.
"""
# Future methods
def create_future(self) -> Future[Any]: ...
def create_future(self) -> Future[Any]:
"""
Create a Future object attached to the loop.
"""
# Tasks methods
if sys.version_info >= (3, 11):
def create_task(self, coro: _CoroutineLike[_T], *, name: object = None, context: Context | None = None) -> Task[_T]: ...
else:
def create_task(self, coro: _CoroutineLike[_T], *, name: object = None) -> Task[_T]: ...
def set_task_factory(self, factory: _TaskFactory | None) -> None: ...
def get_task_factory(self) -> _TaskFactory | None: ...
def create_task(self, coro: _CoroutineLike[_T], *, name: object = None, context: Context | None = None) -> Task[_T]:
"""
Schedule or begin executing a coroutine object.
Return a task object.
"""
else:
def create_task(self, coro: _CoroutineLike[_T], *, name: object = None) -> Task[_T]:
"""
Schedule or begin executing a coroutine object.
Return a task object.
"""
def set_task_factory(self, factory: _TaskFactory | None) -> None:
"""
Set a task factory that will be used by loop.create_task().
If factory is None the default task factory will be set.
If factory is a callable, it should have a signature matching
'(loop, coro, **kwargs)', where 'loop' will be a reference to the active
event loop, 'coro' will be a coroutine object, and **kwargs will be
arbitrary keyword arguments that should be passed on to Task.
The callable must return a Task.
"""
def get_task_factory(self) -> _TaskFactory | None:
"""
Return a task factory, or None if the default one is in use.
"""
# Methods for interacting with threads
def call_soon_threadsafe(
self, callback: Callable[[Unpack[_Ts]], object], *args: Unpack[_Ts], context: Context | None = None
) -> Handle: ...
) -> Handle:
"""
Like call_soon(), but thread-safe.
"""
def run_in_executor(self, executor: Executor | None, func: Callable[[Unpack[_Ts]], _T], *args: Unpack[_Ts]) -> Future[_T]: ...
def set_default_executor(self, executor: ThreadPoolExecutor) -> None: ... # type: ignore[override]
# Network I/O methods returning Futures.
@ -128,7 +275,20 @@ class BaseEventLoop(AbstractEventLoop):
happy_eyeballs_delay: float | None = None,
interleave: int | None = None,
all_errors: bool = False,
) -> tuple[Transport, _ProtocolT]: ...
) -> tuple[Transport, _ProtocolT]:
"""
Connect to a TCP server.
Create a streaming transport connection to a given internet host and
port: socket family AF_INET or socket.AF_INET6 depending on host (or
family if specified), socket type SOCK_STREAM. protocol_factory must be
a callable returning a protocol instance.
This method is a coroutine which will try to establish the connection
in the background. When successful, the coroutine returns a
(transport, protocol) pair.
"""
@overload
async def create_connection(
self,
@ -168,7 +328,20 @@ class BaseEventLoop(AbstractEventLoop):
ssl_shutdown_timeout: float | None = None,
happy_eyeballs_delay: float | None = None,
interleave: int | None = None,
) -> tuple[Transport, _ProtocolT]: ...
) -> tuple[Transport, _ProtocolT]:
"""
Connect to a TCP server.
Create a streaming transport connection to a given internet host and
port: socket family AF_INET or socket.AF_INET6 depending on host (or
family if specified), socket type SOCK_STREAM. protocol_factory must be
a callable returning a protocol instance.
This method is a coroutine which will try to establish the connection
in the background. When successful, the coroutine returns a
(transport, protocol) pair.
"""
@overload
async def create_connection(
self,
@ -206,7 +379,20 @@ class BaseEventLoop(AbstractEventLoop):
ssl_handshake_timeout: float | None = None,
happy_eyeballs_delay: float | None = None,
interleave: int | None = None,
) -> tuple[Transport, _ProtocolT]: ...
) -> tuple[Transport, _ProtocolT]:
"""
Connect to a TCP server.
Create a streaming transport connection to a given internet host and
port: socket family AF_INET or socket.AF_INET6 depending on host (or
family if specified), socket type SOCK_STREAM. protocol_factory must be
a callable returning a protocol instance.
This method is a coroutine which will try to establish the connection
in the background. When successful, the coroutine returns a
(transport, protocol) pair.
"""
@overload
async def create_connection(
self,
@ -246,7 +432,24 @@ class BaseEventLoop(AbstractEventLoop):
ssl_handshake_timeout: float | None = None,
ssl_shutdown_timeout: float | None = None,
start_serving: bool = True,
) -> Server: ...
) -> Server:
"""
Create a TCP server.
The host parameter can be a string, in that case the TCP server is
bound to host and port.
The host parameter can also be a sequence of strings and in that case
the TCP server is bound to all hosts of the sequence. If a host
appears multiple times (possibly indirectly e.g. when hostnames
resolve to the same IP address), the server is only bound once to that
host.
Return a Server object which can be used to stop the service.
This method is a coroutine.
"""
@overload
async def create_server(
self,
@ -284,7 +487,24 @@ class BaseEventLoop(AbstractEventLoop):
ssl_handshake_timeout: float | None = None,
ssl_shutdown_timeout: float | None = None,
start_serving: bool = True,
) -> Server: ...
) -> Server:
"""
Create a TCP server.
The host parameter can be a string, in that case the TCP server is
bound to host and port.
The host parameter can also be a sequence of strings and in that case
the TCP server is bound to all hosts of the sequence. If a host
appears multiple times (possibly indirectly e.g. when hostnames
resolve to the same IP address), the server is only bound once to that
host.
Return a Server object which can be used to stop the service.
This method is a coroutine.
"""
@overload
async def create_server(
self,
@ -320,7 +540,24 @@ class BaseEventLoop(AbstractEventLoop):
reuse_port: bool | None = None,
ssl_handshake_timeout: float | None = None,
start_serving: bool = True,
) -> Server: ...
) -> Server:
"""
Create a TCP server.
The host parameter can be a string, in that case the TCP server is
bound to host and port.
The host parameter can also be a sequence of strings and in that case
the TCP server is bound to all hosts of the sequence. If a host
appears multiple times (possibly indirectly e.g. when hostnames
resolve to the same IP address), the server is only bound once to that
host.
Return a Server object which can be used to stop the service.
This method is a coroutine.
"""
@overload
async def create_server(
self,
@ -350,7 +587,14 @@ class BaseEventLoop(AbstractEventLoop):
server_hostname: str | None = None,
ssl_handshake_timeout: float | None = None,
ssl_shutdown_timeout: float | None = None,
) -> Transport | None: ...
) -> Transport | None:
"""
Upgrade transport to TLS.
Return a new transport that *protocol* should start using
immediately.
"""
async def connect_accepted_socket(
self,
protocol_factory: Callable[[], _ProtocolT],
@ -359,7 +603,16 @@ class BaseEventLoop(AbstractEventLoop):
ssl: _SSLContext = None,
ssl_handshake_timeout: float | None = None,
ssl_shutdown_timeout: float | None = None,
) -> tuple[Transport, _ProtocolT]: ...
) -> tuple[Transport, _ProtocolT]:
"""
Handle an accepted connection.
This is used by servers that accept connections outside of
asyncio but that use asyncio to handle connections.
This method is a coroutine. When completed, the coroutine
returns a (transport, protocol) pair.
"""
else:
async def start_tls(
self,
@ -370,7 +623,14 @@ class BaseEventLoop(AbstractEventLoop):
server_side: bool = False,
server_hostname: str | None = None,
ssl_handshake_timeout: float | None = None,
) -> Transport | None: ...
) -> Transport | None:
"""
Upgrade transport to TLS.
Return a new transport that *protocol* should start using
immediately.
"""
async def connect_accepted_socket(
self,
protocol_factory: Callable[[], _ProtocolT],
@ -378,14 +638,46 @@ class BaseEventLoop(AbstractEventLoop):
*,
ssl: _SSLContext = None,
ssl_handshake_timeout: float | None = None,
) -> tuple[Transport, _ProtocolT]: ...
) -> tuple[Transport, _ProtocolT]:
"""
Handle an accepted connection.
This is used by servers that accept connections outside of
asyncio but that use asyncio to handle connections.
This method is a coroutine. When completed, the coroutine
returns a (transport, protocol) pair.
"""
async def sock_sendfile(
self, sock: socket, file: IO[bytes], offset: int = 0, count: int | None = None, *, fallback: bool | None = True
) -> int: ...
async def sendfile(
self, transport: WriteTransport, file: IO[bytes], offset: int = 0, count: int | None = None, *, fallback: bool = True
) -> int: ...
) -> int:
"""
Send a file to transport.
Return the total number of bytes which were sent.
The method uses high-performance os.sendfile if available.
file must be a regular file object opened in binary mode.
offset tells from where to start reading the file. If specified,
count is the total number of bytes to transmit as opposed to
sending the file until EOF is reached. File position is updated on
return or also in case of error in which case file.tell()
can be used to figure out the number of bytes
which were sent.
fallback set to True makes asyncio manually read and send
the file when the platform does not support the sendfile syscall
(e.g. Windows or SSL socket on Unix).
Raise SendfileNotAvailableError if the system does not support
sendfile syscall and fallback is False.
"""
if sys.version_info >= (3, 11):
async def create_datagram_endpoint( # type: ignore[override]
self,
@ -399,7 +691,10 @@ class BaseEventLoop(AbstractEventLoop):
reuse_port: bool | None = None,
allow_broadcast: bool | None = None,
sock: socket | None = None,
) -> tuple[DatagramTransport, _ProtocolT]: ...
) -> tuple[DatagramTransport, _ProtocolT]:
"""
Create datagram connection.
"""
else:
async def create_datagram_endpoint(
self,
@ -414,7 +709,10 @@ class BaseEventLoop(AbstractEventLoop):
reuse_port: bool | None = None,
allow_broadcast: bool | None = None,
sock: socket | None = None,
) -> tuple[DatagramTransport, _ProtocolT]: ...
) -> tuple[DatagramTransport, _ProtocolT]:
"""
Create datagram connection.
"""
# Pipes and subprocesses.
async def connect_read_pipe(
self, protocol_factory: Callable[[], _ProtocolT], pipe: Any
@ -473,16 +771,87 @@ class BaseEventLoop(AbstractEventLoop):
def add_signal_handler(self, sig: int, callback: Callable[[Unpack[_Ts]], Any], *args: Unpack[_Ts]) -> None: ...
def remove_signal_handler(self, sig: int) -> bool: ...
# Error handlers.
def set_exception_handler(self, handler: _ExceptionHandler | None) -> None: ...
def get_exception_handler(self) -> _ExceptionHandler | None: ...
def default_exception_handler(self, context: _Context) -> None: ...
def call_exception_handler(self, context: _Context) -> None: ...
def set_exception_handler(self, handler: _ExceptionHandler | None) -> None:
"""
Set handler as the new event loop exception handler.
If handler is None, the default exception handler will
be set.
If handler is a callable object, it should have a
signature matching '(loop, context)', where 'loop'
will be a reference to the active event loop, 'context'
will be a dict object (see `call_exception_handler()`
documentation for details about context).
"""
def get_exception_handler(self) -> _ExceptionHandler | None:
"""
Return an exception handler, or None if the default one is in use.
"""
def default_exception_handler(self, context: _Context) -> None:
"""
Default exception handler.
This is called when an exception occurs and no exception
handler is set, and can be called by a custom exception
handler that wants to defer to the default behavior.
This default handler logs the error message and other
context-dependent information. In debug mode, a truncated
stack trace is also appended showing where the given object
(e.g. a handle or future or task) was created, if any.
The context parameter has the same meaning as in
`call_exception_handler()`.
"""
def call_exception_handler(self, context: _Context) -> None:
"""
Call the current event loop's exception handler.
The context argument is a dict containing the following keys:
- 'message': Error message;
- 'exception' (optional): Exception object;
- 'future' (optional): Future instance;
- 'task' (optional): Task instance;
- 'handle' (optional): Handle instance;
- 'protocol' (optional): Protocol instance;
- 'transport' (optional): Transport instance;
- 'socket' (optional): Socket instance;
- 'source_traceback' (optional): Traceback of the source;
- 'handle_traceback' (optional): Traceback of the handle;
- 'asyncgen' (optional): Asynchronous generator that caused
the exception.
New keys may be introduced in the future.
Note: do not overload this method in an event loop subclass.
For custom exception handling, use the
`set_exception_handler()` method.
"""
# Debug flag management.
def get_debug(self) -> bool: ...
def set_debug(self, enabled: bool) -> None: ...
if sys.version_info >= (3, 12):
async def shutdown_default_executor(self, timeout: float | None = None) -> None: ...
async def shutdown_default_executor(self, timeout: float | None = None) -> None:
"""
Schedule the shutdown of the default executor.
The timeout parameter specifies the amount of time the executor will
be given to finish joining. The default value is None, which means
that the executor will be given an unlimited amount of time.
"""
else:
async def shutdown_default_executor(self) -> None: ...
async def shutdown_default_executor(self) -> None:
"""
Schedule the shutdown of the default executor.
The timeout parameter specifies the amount of time the executor will
be given to finish joining. The default value is None, which means
that the executor will be given an unlimited amount of time.
"""
def __del__(self) -> None: ...
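A small sketch of the scheduling methods documented above, using only public asyncio calls:

import asyncio

loop = asyncio.new_event_loop()
results = []
loop.call_soon(results.append, "first")   # FIFO: runs on the next loop iteration
loop.call_later(0.01, loop.stop)          # delay is relative, in seconds
loop.run_forever()
loop.close()
assert results == ["first"]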
@ -15,5 +15,12 @@ _PENDING: Final = "PENDING" # undocumented
_CANCELLED: Final = "CANCELLED" # undocumented
_FINISHED: Final = "FINISHED" # undocumented
def _format_callbacks(cb: Sequence[tuple[Callable[[futures.Future[Any]], None], Context]]) -> str: ... # undocumented
def _future_repr_info(future: futures.Future[Any]) -> list[str]: ... # undocumented
def _format_callbacks(cb: Sequence[tuple[Callable[[futures.Future[Any]], None], Context]]) -> str: # undocumented
"""
helper function for Future.__repr__
"""
def _future_repr_info(future: futures.Future[Any]) -> list[str]: # undocumented
"""
helper function for Future.__repr__
"""
@ -52,7 +52,13 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
def _pipe_connection_lost(self, fd: int, exc: BaseException | None) -> None: ... # undocumented
def _pipe_data_received(self, fd: int, data: bytes) -> None: ... # undocumented
def _process_exited(self, returncode: int) -> None: ... # undocumented
async def _wait(self) -> int: ... # undocumented
async def _wait(self) -> int: # undocumented
"""
Wait until the process exit and return the process return code.
This method is a coroutine.
"""
def _try_finish(self) -> None: ... # undocumented
def _call_connection_lost(self, exc: BaseException | None) -> None: ... # undocumented
def __del__(self) -> None: ...
@ -15,6 +15,10 @@ if sys.version_info >= (3, 12):
THREAD_JOIN_TIMEOUT: Final = 300
class _SendfileMode(enum.Enum):
"""
An enumeration.
"""
UNSUPPORTED = 1
TRY_NATIVE = 2
FALLBACK = 3
@ -14,14 +14,27 @@ _FunctionT = TypeVar("_FunctionT", bound=Callable[..., Any])
_P = ParamSpec("_P")
if sys.version_info < (3, 11):
def coroutine(func: _FunctionT) -> _FunctionT: ...
def coroutine(func: _FunctionT) -> _FunctionT:
"""
Decorator to mark coroutines.
If the coroutine is not yielded from before it is destroyed,
an error message is logged.
"""
@overload
def iscoroutinefunction(func: Callable[..., Coroutine[Any, Any, Any]]) -> bool: ...
def iscoroutinefunction(func: Callable[..., Coroutine[Any, Any, Any]]) -> bool:
"""
Return True if func is a decorated coroutine function.
"""
@overload
def iscoroutinefunction(func: Callable[_P, Awaitable[_T]]) -> TypeGuard[Callable[_P, Coroutine[Any, Any, _T]]]: ...
@overload
def iscoroutinefunction(func: Callable[_P, object]) -> TypeGuard[Callable[_P, Coroutine[Any, Any, Any]]]: ...
@overload
def iscoroutinefunction(func: object) -> TypeGuard[Callable[..., Coroutine[Any, Any, Any]]]: ...
def iscoroutine(obj: object) -> TypeIs[Coroutine[Any, Any, Any]]: ...
def iscoroutine(obj: object) -> TypeIs[Coroutine[Any, Any, Any]]:
"""
Return True if obj is a coroutine object.
"""
@ -1,3 +1,7 @@
"""
Event loop and event loop policy.
"""
import ssl
import sys
from _asyncio import (
@ -75,6 +79,10 @@ class _TaskFactory(Protocol):
def __call__(self, loop: AbstractEventLoop, factory: _CoroutineLike[_T], /) -> Future[_T]: ...
class Handle:
"""
Object returned by callback registration methods.
"""
_cancelled: bool
_args: Sequence[Any]
def __init__(
@ -87,6 +95,10 @@ class Handle:
def get_context(self) -> Context: ...
class TimerHandle(Handle):
"""
Object returned by timed callback registration methods.
"""
def __init__(
self,
when: float,
@ -96,7 +108,14 @@ class TimerHandle(Handle):
context: Context | None = None,
) -> None: ...
def __hash__(self) -> int: ...
def when(self) -> float: ...
def when(self) -> float:
"""
Return a scheduled callback time.
The time is an absolute timestamp, using the same time
reference as loop.time().
"""
def __lt__(self, other: TimerHandle) -> bool: ...
def __le__(self, other: TimerHandle) -> bool: ...
def __gt__(self, other: TimerHandle) -> bool: ...
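A sketch of TimerHandle.when() relative to the loop's clock, as described above:

import asyncio

loop = asyncio.new_event_loop()
handle = loop.call_later(5.0, print, "later")
assert handle.when() >= loop.time()   # absolute timestamp on the loop's time() reference
handle.cancel()
loop.close()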
@ -104,43 +123,123 @@ class TimerHandle(Handle):
def __eq__(self, other: object) -> bool: ...
class AbstractServer:
"""
Abstract server returned by create_server().
"""
@abstractmethod
def close(self) -> None: ...
def close(self) -> None:
"""
Stop serving. This leaves existing connections open.
"""
if sys.version_info >= (3, 13):
@abstractmethod
def close_clients(self) -> None: ...
def close_clients(self) -> None:
"""
Close all active connections.
"""
@abstractmethod
def abort_clients(self) -> None: ...
def abort_clients(self) -> None:
"""
Close all active connections immediately.
"""
async def __aenter__(self) -> Self: ...
async def __aexit__(self, *exc: Unused) -> None: ...
@abstractmethod
def get_loop(self) -> AbstractEventLoop: ...
def get_loop(self) -> AbstractEventLoop:
"""
Get the event loop the Server object is attached to.
"""
@abstractmethod
def is_serving(self) -> bool: ...
def is_serving(self) -> bool:
"""
Return True if the server is accepting connections.
"""
@abstractmethod
async def start_serving(self) -> None: ...
async def start_serving(self) -> None:
"""
Start accepting connections.
This method is idempotent, so it can be called when
the server is already serving.
"""
@abstractmethod
async def serve_forever(self) -> None: ...
async def serve_forever(self) -> None:
"""
Start accepting connections until the coroutine is cancelled.
The server is closed when the coroutine is cancelled.
"""
@abstractmethod
async def wait_closed(self) -> None: ...
async def wait_closed(self) -> None:
"""
Coroutine to wait until service is closed.
"""
class AbstractEventLoop:
"""
Abstract event loop.
"""
slow_callback_duration: float
@abstractmethod
def run_forever(self) -> None: ...
def run_forever(self) -> None:
"""
Run the event loop until stop() is called.
"""
@abstractmethod
def run_until_complete(self, future: _AwaitableLike[_T]) -> _T: ...
def run_until_complete(self, future: _AwaitableLike[_T]) -> _T:
"""
Run the event loop until a Future is done.
Return the Future's result, or raise its exception.
"""
@abstractmethod
def stop(self) -> None: ...
def stop(self) -> None:
"""
Stop the event loop as soon as reasonable.
Exactly how soon that is may depend on the implementation, but
no more I/O callbacks should be scheduled.
"""
@abstractmethod
def is_running(self) -> bool: ...
def is_running(self) -> bool:
"""
Return whether the event loop is currently running.
"""
@abstractmethod
def is_closed(self) -> bool: ...
def is_closed(self) -> bool:
"""
Returns True if the event loop was closed.
"""
@abstractmethod
def close(self) -> None: ...
def close(self) -> None:
"""
Close the loop.
The loop should not be running.
This is idempotent and irreversible.
No other methods should be called after this one.
"""
@abstractmethod
async def shutdown_asyncgens(self) -> None: ...
async def shutdown_asyncgens(self) -> None:
"""
Shutdown all active asynchronous generators.
"""
# Methods scheduling callbacks. All these return Handles.
# "context" added in 3.9.10/3.10.2 for call_*
@abstractmethod
@ -300,7 +399,60 @@ class AbstractEventLoop:
ssl_handshake_timeout: float | None = None,
ssl_shutdown_timeout: float | None = None,
start_serving: bool = True,
) -> Server: ...
) -> Server:
"""
A coroutine which creates a TCP server bound to host and port.
The return value is a Server object which can be used to stop
the service.
If host is an empty string or None all interfaces are assumed
and a list of multiple sockets will be returned (most likely
one for IPv4 and another one for IPv6). The host parameter can also be
a sequence (e.g. list) of hosts to bind to.
family can be set to either AF_INET or AF_INET6 to force the
socket to use IPv4 or IPv6. If not set it will be determined
from host (defaults to AF_UNSPEC).
flags is a bitmask for getaddrinfo().
sock can optionally be specified in order to use a preexisting
socket object.
backlog is the maximum number of queued connections passed to
listen() (defaults to 100).
ssl can be set to an SSLContext to enable SSL over the
accepted connections.
reuse_address tells the kernel to reuse a local socket in
TIME_WAIT state, without waiting for its natural timeout to
expire. If not specified will automatically be set to True on
UNIX.
reuse_port tells the kernel to allow this endpoint to be bound to
the same port as other existing endpoints are bound to, so long as
they all set this flag when being created. This option is not
supported on Windows.
keep_alive set to True keeps connections active by enabling the
periodic transmission of messages.
ssl_handshake_timeout is the time in seconds that an SSL server
will wait for completion of the SSL handshake before aborting the
connection. Default is 60s.
ssl_shutdown_timeout is the time in seconds that an SSL server
will wait for completion of the SSL shutdown procedure
before aborting the connection. Default is 30s.
start_serving set to True (default) causes the created server
to start accepting connections immediately. When set to False,
the user should await Server.start_serving() or Server.serve_forever()
to make the server start accepting connections.
"""
@overload
@abstractmethod
async def create_server(
@ -340,7 +492,60 @@ class AbstractEventLoop:
ssl_handshake_timeout: float | None = None,
ssl_shutdown_timeout: float | None = None,
start_serving: bool = True,
) -> Server: ...
) -> Server:
"""
A coroutine which creates a TCP server bound to host and port.
The return value is a Server object which can be used to stop
the service.
If host is an empty string or None all interfaces are assumed
and a list of multiple sockets will be returned (most likely
one for IPv4 and another one for IPv6). The host parameter can also be
a sequence (e.g. list) of hosts to bind to.
family can be set to either AF_INET or AF_INET6 to force the
socket to use IPv4 or IPv6. If not set it will be determined
from host (defaults to AF_UNSPEC).
flags is a bitmask for getaddrinfo().
sock can optionally be specified in order to use a preexisting
socket object.
backlog is the maximum number of queued connections passed to
listen() (defaults to 100).
ssl can be set to an SSLContext to enable SSL over the
accepted connections.
reuse_address tells the kernel to reuse a local socket in
TIME_WAIT state, without waiting for its natural timeout to
expire. If not specified will automatically be set to True on
UNIX.
reuse_port tells the kernel to allow this endpoint to be bound to
the same port as other existing endpoints are bound to, so long as
they all set this flag when being created. This option is not
supported on Windows.
keep_alive set to True keeps connections active by enabling the
periodic transmission of messages.
ssl_handshake_timeout is the time in seconds that an SSL server
will wait for completion of the SSL handshake before aborting the
connection. Default is 60s.
ssl_shutdown_timeout is the time in seconds that an SSL server
will wait for completion of the SSL shutdown procedure
before aborting the connection. Default is 30s.
start_serving set to True (default) causes the created server
to start accepting connections immediately. When set to False,
the user should await Server.start_serving() or Server.serve_forever()
to make the server start accepting connections.
"""
@overload
@abstractmethod
async def create_server(
@ -378,7 +583,60 @@ class AbstractEventLoop:
reuse_port: bool | None = None,
ssl_handshake_timeout: float | None = None,
start_serving: bool = True,
) -> Server: ...
) -> Server:
"""
A coroutine which creates a TCP server bound to host and port.
The return value is a Server object which can be used to stop
the service.
If host is an empty string or None all interfaces are assumed
and a list of multiple sockets will be returned (most likely
one for IPv4 and another one for IPv6). The host parameter can also be
a sequence (e.g. list) of hosts to bind to.
family can be set to either AF_INET or AF_INET6 to force the
socket to use IPv4 or IPv6. If not set it will be determined
from host (defaults to AF_UNSPEC).
flags is a bitmask for getaddrinfo().
sock can optionally be specified in order to use a preexisting
socket object.
backlog is the maximum number of queued connections passed to
listen() (defaults to 100).
ssl can be set to an SSLContext to enable SSL over the
accepted connections.
reuse_address tells the kernel to reuse a local socket in
TIME_WAIT state, without waiting for its natural timeout to
expire. If not specified will automatically be set to True on
UNIX.
reuse_port tells the kernel to allow this endpoint to be bound to
the same port as other existing endpoints are bound to, so long as
they all set this flag when being created. This option is not
supported on Windows.
keep_alive set to True keeps connections active by enabling the
periodic transmission of messages.
ssl_handshake_timeout is the time in seconds that an SSL server
will wait for completion of the SSL handshake before aborting the
connection. Default is 60s.
ssl_shutdown_timeout is the time in seconds that an SSL server
will wait for completion of the SSL shutdown procedure
before aborting the connection. Default is 30s.
start_serving set to True (default) causes the created server
to start accepting connections immediately. When set to False,
the user should await Server.start_serving() or Server.serve_forever()
to make the server start accepting connections.
"""
@overload
@abstractmethod
async def create_server(
@ -410,7 +668,14 @@ class AbstractEventLoop:
server_hostname: str | None = None,
ssl_handshake_timeout: float | None = None,
ssl_shutdown_timeout: float | None = None,
) -> Transport | None: ...
) -> Transport | None:
"""
Upgrade a transport to TLS.
Return a new transport that *protocol* should start using
immediately.
"""
async def create_unix_server(
self,
protocol_factory: _ProtocolFactory,
@@ -422,7 +687,36 @@ class AbstractEventLoop:
ssl_handshake_timeout: float | None = None,
ssl_shutdown_timeout: float | None = None,
start_serving: bool = True,
) -> Server: ...
) -> Server:
"""
A coroutine which creates a UNIX Domain Socket server.
The return value is a Server object, which can be used to stop
the service.
path is a str, representing a file system path to bind the
server socket to.
sock can optionally be specified in order to use a preexisting
socket object.
backlog is the maximum number of queued connections passed to
listen() (defaults to 100).
ssl can be set to an SSLContext to enable SSL over the
accepted connections.
ssl_handshake_timeout is the time in seconds that an SSL server
will wait for the SSL handshake to complete (defaults to 60s).
ssl_shutdown_timeout is the time in seconds that an SSL server
will wait for the SSL shutdown to finish (defaults to 30s).
start_serving set to True (default) causes the created server
to start accepting connections immediately. When set to False,
the user should await Server.start_serving() or Server.serve_forever()
to make the server start accepting connections.
"""
else:
@abstractmethod
async def start_tls(
@@ -434,7 +728,14 @@ class AbstractEventLoop:
server_side: bool = False,
server_hostname: str | None = None,
ssl_handshake_timeout: float | None = None,
) -> Transport | None: ...
) -> Transport | None:
"""
Upgrade a transport to TLS.
Return a new transport that *protocol* should start using
immediately.
"""
async def create_unix_server(
self,
protocol_factory: _ProtocolFactory,
@@ -445,8 +746,36 @@ class AbstractEventLoop:
ssl: _SSLContext = None,
ssl_handshake_timeout: float | None = None,
start_serving: bool = True,
) -> Server: ...
) -> Server:
"""
A coroutine which creates a UNIX Domain Socket server.
The return value is a Server object, which can be used to stop
the service.
path is a str, representing a file system path to bind the
server socket to.
sock can optionally be specified in order to use a preexisting
socket object.
backlog is the maximum number of queued connections passed to
listen() (defaults to 100).
ssl can be set to an SSLContext to enable SSL over the
accepted connections.
ssl_handshake_timeout is the time in seconds that an SSL server
will wait for the SSL handshake to complete (defaults to 60s).
ssl_shutdown_timeout is the time in seconds that an SSL server
will wait for the SSL shutdown to finish (defaults to 30s).
start_serving set to True (default) causes the created server
to start accepting connections immediately. When set to False,
the user should await Server.start_serving() or Server.serve_forever()
to make the server start accepting connections.
"""
if sys.version_info >= (3, 11):
async def connect_accepted_socket(
self,
@@ -456,7 +785,16 @@ class AbstractEventLoop:
ssl: _SSLContext = None,
ssl_handshake_timeout: float | None = None,
ssl_shutdown_timeout: float | None = None,
) -> tuple[Transport, _ProtocolT]: ...
) -> tuple[Transport, _ProtocolT]:
"""
Handle an accepted connection.
This is used by servers that accept connections outside of
asyncio, but use asyncio to handle connections.
This method is a coroutine. When completed, the coroutine
returns a (transport, protocol) pair.
"""
elif sys.version_info >= (3, 10):
async def connect_accepted_socket(
self,
@@ -465,7 +803,16 @@ class AbstractEventLoop:
*,
ssl: _SSLContext = None,
ssl_handshake_timeout: float | None = None,
) -> tuple[Transport, _ProtocolT]: ...
) -> tuple[Transport, _ProtocolT]:
"""
Handle an accepted connection.
This is used by servers that accept connections outside of
asyncio, but use asyncio to handle connections.
This method is a coroutine. When completed, the coroutine
returns a (transport, protocol) pair.
"""
if sys.version_info >= (3, 11):
async def create_unix_connection(
self,
@@ -497,7 +844,13 @@ class AbstractEventLoop:
@abstractmethod
async def sendfile(
self, transport: WriteTransport, file: IO[bytes], offset: int = 0, count: int | None = None, *, fallback: bool = True
) -> int: ...
) -> int:
"""
Send a file through a transport.
Return an amount of sent bytes.
"""
@abstractmethod
async def create_datagram_endpoint(
self,
@@ -512,16 +865,61 @@ class AbstractEventLoop:
reuse_port: bool | None = None,
allow_broadcast: bool | None = None,
sock: socket | None = None,
) -> tuple[DatagramTransport, _ProtocolT]: ...
) -> tuple[DatagramTransport, _ProtocolT]:
"""
A coroutine which creates a datagram endpoint.
This method will try to establish the endpoint in the background.
When successful, the coroutine returns a (transport, protocol) pair.
protocol_factory must be a callable returning a protocol instance.
The socket family will be AF_INET, AF_INET6 or AF_UNIX, depending on
host (or family if specified); the socket type will be SOCK_DGRAM.
reuse_address tells the kernel to reuse a local socket in
TIME_WAIT state, without waiting for its natural timeout to
expire. If not specified, it will automatically be set to True on
UNIX.
reuse_port tells the kernel to allow this endpoint to be bound to
the same port as other existing endpoints are bound to, so long as
they all set this flag when being created. This option is not
supported on Windows and some UNIXes. If the
:py:data:`~socket.SO_REUSEPORT` constant is not defined then this
capability is unsupported.
allow_broadcast tells the kernel to allow this endpoint to send
messages to the broadcast address.
sock can optionally be specified in order to use a preexisting
socket object.
"""
# Pipes and subprocesses.
@abstractmethod
async def connect_read_pipe(
self, protocol_factory: Callable[[], _ProtocolT], pipe: Any
) -> tuple[ReadTransport, _ProtocolT]: ...
async def connect_read_pipe(self, protocol_factory: Callable[[], _ProtocolT], pipe: Any) -> tuple[ReadTransport, _ProtocolT]:
"""
Register read pipe in event loop. Set the pipe to non-blocking mode.
protocol_factory should instantiate an object implementing the Protocol interface.
pipe is a file-like object.
Return a pair (transport, protocol), where transport supports the
ReadTransport interface.
"""
@abstractmethod
async def connect_write_pipe(
self, protocol_factory: Callable[[], _ProtocolT], pipe: Any
) -> tuple[WriteTransport, _ProtocolT]: ...
) -> tuple[WriteTransport, _ProtocolT]:
"""
Register write pipe in event loop.
protocol_factory should instantiate an object implementing the BaseProtocol interface.
pipe is a file-like object already switched to non-blocking mode.
Return a pair (transport, protocol), where transport supports the
WriteTransport interface.
"""
@abstractmethod
async def subprocess_shell(
self,
@@ -600,15 +998,41 @@ class AbstractEventLoop:
@abstractmethod
def set_debug(self, enabled: bool) -> None: ...
@abstractmethod
async def shutdown_default_executor(self) -> None: ...
async def shutdown_default_executor(self) -> None:
"""
Schedule the shutdown of the default executor.
"""
class _AbstractEventLoopPolicy:
"""
Abstract policy for accessing the event loop.
"""
@abstractmethod
def get_event_loop(self) -> AbstractEventLoop: ...
def get_event_loop(self) -> AbstractEventLoop:
"""
Get the event loop for the current context.
Returns an event loop object implementing the AbstractEventLoop interface,
or raises an exception in case no event loop has been set for the
current context and the current policy does not specify to create one.
It should never return None.
"""
@abstractmethod
def set_event_loop(self, loop: AbstractEventLoop | None) -> None: ...
def set_event_loop(self, loop: AbstractEventLoop | None) -> None:
"""
Set the event loop for the current context to loop.
"""
@abstractmethod
def new_event_loop(self) -> AbstractEventLoop: ...
def new_event_loop(self) -> AbstractEventLoop:
"""
Create and return a new event loop object according to this
policy's rules. If there's a need to set this loop as the event loop for
the current context, set_event_loop must be called explicitly.
"""
# Child processes handling (Unix only).
if sys.version_info < (3, 14):
if sys.version_info >= (3, 12):
@@ -629,38 +1053,137 @@ if sys.version_info < (3, 14):
if sys.version_info >= (3, 14):
class _BaseDefaultEventLoopPolicy(_AbstractEventLoopPolicy, metaclass=ABCMeta):
def get_event_loop(self) -> AbstractEventLoop: ...
def set_event_loop(self, loop: AbstractEventLoop | None) -> None: ...
def new_event_loop(self) -> AbstractEventLoop: ...
"""
Default policy implementation for accessing the event loop.
In this policy, each thread has its own event loop. However, we
only automatically create an event loop by default for the main
thread; other threads by default have no event loop.
Other policies may have different rules (e.g. a single global
event loop, or automatically creating an event loop per thread, or
using some other notion of context to which an event loop is
associated).
"""
def get_event_loop(self) -> AbstractEventLoop:
"""
Get the event loop for the current context.
Returns an instance of EventLoop or raises an exception.
"""
def set_event_loop(self, loop: AbstractEventLoop | None) -> None:
"""
Set the event loop.
"""
def new_event_loop(self) -> AbstractEventLoop:
"""
Create a new event loop.
You must call set_event_loop() to make this the current event
loop.
"""
else:
class BaseDefaultEventLoopPolicy(_AbstractEventLoopPolicy, metaclass=ABCMeta):
def get_event_loop(self) -> AbstractEventLoop: ...
def set_event_loop(self, loop: AbstractEventLoop | None) -> None: ...
def new_event_loop(self) -> AbstractEventLoop: ...
"""
Default policy implementation for accessing the event loop.
In this policy, each thread has its own event loop. However, we
only automatically create an event loop by default for the main
thread; other threads by default have no event loop.
Other policies may have different rules (e.g. a single global
event loop, or automatically creating an event loop per thread, or
using some other notion of context to which an event loop is
associated).
"""
def get_event_loop(self) -> AbstractEventLoop:
"""
Get the event loop for the current context.
Returns an instance of EventLoop or raises an exception.
"""
def set_event_loop(self, loop: AbstractEventLoop | None) -> None:
"""
Set the event loop.
"""
def new_event_loop(self) -> AbstractEventLoop:
"""
Create a new event loop.
You must call set_event_loop() to make this the current event
loop.
"""
if sys.version_info >= (3, 14):
def _get_event_loop_policy() -> _AbstractEventLoopPolicy: ...
def _set_event_loop_policy(policy: _AbstractEventLoopPolicy | None) -> None: ...
def _get_event_loop_policy() -> _AbstractEventLoopPolicy:
"""
Get the current event loop policy.
"""
def _set_event_loop_policy(policy: _AbstractEventLoopPolicy | None) -> None:
"""
Set the current event loop policy.
If policy is None, the default policy is restored.
"""
@deprecated("Deprecated as of Python 3.14; will be removed in Python 3.16")
def get_event_loop_policy() -> _AbstractEventLoopPolicy: ...
@deprecated("Deprecated as of Python 3.14; will be removed in Python 3.16")
def set_event_loop_policy(policy: _AbstractEventLoopPolicy | None) -> None: ...
else:
def get_event_loop_policy() -> _AbstractEventLoopPolicy: ...
def set_event_loop_policy(policy: _AbstractEventLoopPolicy | None) -> None: ...
def get_event_loop_policy() -> _AbstractEventLoopPolicy:
"""
Get the current event loop policy.
"""
def set_event_loop(loop: AbstractEventLoop | None) -> None: ...
def new_event_loop() -> AbstractEventLoop: ...
def set_event_loop_policy(policy: _AbstractEventLoopPolicy | None) -> None:
"""
Set the current event loop policy.
If policy is None, the default policy is restored.
"""
def set_event_loop(loop: AbstractEventLoop | None) -> None:
"""
Equivalent to calling get_event_loop_policy().set_event_loop(loop).
"""
def new_event_loop() -> AbstractEventLoop:
"""
Equivalent to calling get_event_loop_policy().new_event_loop().
"""
if sys.version_info < (3, 14):
if sys.version_info >= (3, 12):
@deprecated("Deprecated as of Python 3.12; will be removed in Python 3.14")
def get_child_watcher() -> AbstractChildWatcher: ...
@deprecated("Deprecated as of Python 3.12; will be removed in Python 3.14")
def set_child_watcher(watcher: AbstractChildWatcher) -> None: ...
def get_child_watcher() -> AbstractChildWatcher:
"""
Equivalent to calling get_event_loop_policy().get_child_watcher().
"""
@deprecated("Deprecated as of Python 3.12; will be removed in Python 3.14")
def set_child_watcher(watcher: AbstractChildWatcher) -> None:
"""
Equivalent to calling
get_event_loop_policy().set_child_watcher(watcher).
"""
else:
def get_child_watcher() -> AbstractChildWatcher: ...
def set_child_watcher(watcher: AbstractChildWatcher) -> None: ...
def get_child_watcher() -> AbstractChildWatcher:
"""
Equivalent to calling get_event_loop_policy().get_child_watcher().
"""
def set_child_watcher(watcher: AbstractChildWatcher) -> None:
"""
Equivalent to calling
get_event_loop_policy().set_child_watcher(watcher).
"""

@@ -1,3 +1,7 @@
"""
asyncio exceptions.
"""
import sys
# Keep asyncio.__all__ updated with any changes to __all__ here
@@ -21,24 +25,57 @@ else:
"SendfileNotAvailableError",
)
class CancelledError(BaseException): ...
class CancelledError(BaseException):
"""
The Future or Task was cancelled.
"""
if sys.version_info >= (3, 11):
from builtins import TimeoutError as TimeoutError
else:
class TimeoutError(Exception): ...
class TimeoutError(Exception):
"""
The operation exceeded the given deadline.
"""
class InvalidStateError(Exception): ...
class SendfileNotAvailableError(RuntimeError): ...
class InvalidStateError(Exception):
"""
The operation is not allowed in this state.
"""
class SendfileNotAvailableError(RuntimeError):
"""
Sendfile syscall is not available.
Raised if the OS does not support the sendfile syscall for the given socket or
file type.
"""
class IncompleteReadError(EOFError):
"""
Incomplete read error. Attributes:
- partial: the bytes read before the end of stream was reached
- expected: total number of expected bytes (or None if unknown)
"""
expected: int | None
partial: bytes
def __init__(self, partial: bytes, expected: int | None) -> None: ...
class LimitOverrunError(Exception):
"""
Reached the buffer limit while looking for a separator.
Attributes:
- consumed: total number of bytes to be consumed.
"""
consumed: int
def __init__(self, message: str, consumed: int) -> None: ...
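The attributes documented above are what callers typically inspect when a stream ends early; a sketch assuming an existing StreamReader named reader, with one plausible LimitOverrunError recovery:
import asyncio
async def read_frame(reader: asyncio.StreamReader) -> bytes:
    try:
        return await reader.readuntil(b"\n")
    except asyncio.IncompleteReadError as exc:
        return exc.partial  # whatever bytes arrived before EOF
    except asyncio.LimitOverrunError as exc:
        await reader.read(exc.consumed)  # drain the oversized chunk, then give up
        raise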
if sys.version_info >= (3, 11):
class BrokenBarrierError(RuntimeError): ...
class BrokenBarrierError(RuntimeError):
"""
Barrier is broken by barrier.abort() call.
"""

@@ -18,14 +18,34 @@ def _get_function_source(func: object) -> tuple[str, int] | None: ...
if sys.version_info >= (3, 13):
def _format_callback_source(func: object, args: Iterable[Any], *, debug: bool = False) -> str: ...
def _format_args_and_kwargs(args: Iterable[Any], kwargs: dict[str, Any], *, debug: bool = False) -> str: ...
def _format_args_and_kwargs(args: Iterable[Any], kwargs: dict[str, Any], *, debug: bool = False) -> str:
"""
Format function arguments and keyword arguments.
Special case for a single parameter: ('hello',) is formatted as ('hello').
Note that this function only returns argument details when
debug=True is specified, as arguments may contain sensitive
information.
"""
def _format_callback(
func: object, args: Iterable[Any], kwargs: dict[str, Any], *, debug: bool = False, suffix: str = ""
) -> str: ...
else:
def _format_callback_source(func: object, args: Iterable[Any]) -> str: ...
def _format_args_and_kwargs(args: Iterable[Any], kwargs: dict[str, Any]) -> str: ...
def _format_args_and_kwargs(args: Iterable[Any], kwargs: dict[str, Any]) -> str:
"""
Format function arguments and keyword arguments.
Special case for a single parameter: ('hello',) is formatted as ('hello').
"""
def _format_callback(func: object, args: Iterable[Any], kwargs: dict[str, Any], suffix: str = "") -> str: ...
def extract_stack(f: FrameType | None = None, limit: int | None = None) -> traceback.StackSummary: ...
def extract_stack(f: FrameType | None = None, limit: int | None = None) -> traceback.StackSummary:
"""
Replacement for traceback.extract_stack() that only does the
necessary work for asyncio debug mode.
"""

@@ -1,3 +1,7 @@
"""
A Future class similar to the one in PEP 3148.
"""
import sys
from _asyncio import Future as Future
from concurrent.futures._base import Future as _ConcurrentFuture
@@ -19,5 +23,16 @@ _T = TypeVar("_T")
# asyncio defines 'isfuture()' in base_futures.py and re-imports it in futures.py
# but it leads to circular import error in pytype tool.
# That's why the import order is reversed.
def isfuture(obj: object) -> TypeIs[Future[Any]]: ...
def wrap_future(future: _ConcurrentFuture[_T] | Future[_T], *, loop: AbstractEventLoop | None = None) -> Future[_T]: ...
def isfuture(obj: object) -> TypeIs[Future[Any]]:
"""
Check for a Future.
This returns True when obj is a Future instance or is advertising
itself as duck-type compatible by setting _asyncio_future_blocking.
See comment in Future for more details.
"""
def wrap_future(future: _ConcurrentFuture[_T] | Future[_T], *, loop: AbstractEventLoop | None = None) -> Future[_T]:
"""
Wrap concurrent.futures.Future object.
"""

@@ -1,3 +1,7 @@
"""
Introspection utils for tasks call graphs.
"""
from _typeshed import SupportsWrite
from asyncio import Future
from dataclasses import dataclass
@@ -8,19 +12,71 @@ __all__ = ("capture_call_graph", "format_call_graph", "print_call_graph", "Frame
@dataclass(frozen=True)
class FrameCallGraphEntry:
"""
FrameCallGraphEntry(frame: frame)
"""
frame: FrameType
@dataclass(frozen=True)
class FutureCallGraph:
"""
FutureCallGraph(future: _asyncio.Future, call_stack: tuple['FrameCallGraphEntry', ...], awaited_by: tuple['FutureCallGraph', ...])
"""
future: Future[Any]
call_stack: tuple[FrameCallGraphEntry, ...]
awaited_by: tuple[FutureCallGraph, ...]
@overload
def capture_call_graph(future: None = None, /, *, depth: int = 1, limit: int | None = None) -> FutureCallGraph | None: ...
def capture_call_graph(future: None = None, /, *, depth: int = 1, limit: int | None = None) -> FutureCallGraph | None:
"""
Capture the async call graph for the current task or the provided Future.
The graph is represented with three data structures:
* FutureCallGraph(future, call_stack, awaited_by)
Where 'future' is an instance of asyncio.Future or asyncio.Task.
'call_stack' is a tuple of FrameCallGraphEntry objects.
'awaited_by' is a tuple of FutureCallGraph objects.
* FrameCallGraphEntry(frame)
Where 'frame' is a frame object of a regular Python function
in the call stack.
Receives an optional 'future' argument. If not passed,
the current task will be used. If there's no current task, the function
returns None.
If "capture_call_graph()" is introspecting *the current task*, the
optional keyword-only 'depth' argument can be used to skip the specified
number of frames from top of the stack.
If the optional keyword-only 'limit' argument is provided, each call stack
in the resulting graph is truncated to include at most ``abs(limit)``
entries. If 'limit' is positive, the entries left are the closest to
the invocation point. If 'limit' is negative, the topmost entries are
left. If 'limit' is omitted or None, all entries are present.
If 'limit' is 0, the call stack is not captured at all, only
"awaited by" information is present.
"""
@overload
def capture_call_graph(future: Future[Any], /, *, depth: int = 1, limit: int | None = None) -> FutureCallGraph | None: ...
def format_call_graph(future: Future[Any] | None = None, /, *, depth: int = 1, limit: int | None = None) -> str: ...
def format_call_graph(future: Future[Any] | None = None, /, *, depth: int = 1, limit: int | None = None) -> str:
"""
Return the async call graph as a string for `future`.
If `future` is not provided, format the call graph for the current task.
"""
def print_call_graph(
future: Future[Any] | None = None, /, *, file: SupportsWrite[str] | None = None, depth: int = 1, limit: int | None = None
) -> None: ...
) -> None:
"""
Print the async call graph for the current task or the provided Future.
"""

@@ -1,3 +1,7 @@
"""
Synchronization primitives.
"""
import enum
import sys
from _typeshed import Unused
@@ -30,29 +34,144 @@ class _ContextManagerMixin:
) -> None: ...
class Lock(_ContextManagerMixin, _LoopBoundMixin):
"""
Primitive lock objects.
A primitive lock is a synchronization primitive that is not owned
by a particular task when locked. A primitive lock is in one
of two states, 'locked' or 'unlocked'.
It is created in the unlocked state. It has two basic methods,
acquire() and release(). When the state is unlocked, acquire()
changes the state to locked and returns immediately. When the
state is locked, acquire() blocks until a call to release() in
another task changes it to unlocked, then the acquire() call
resets it to locked and returns. The release() method should only
be called in the locked state; it changes the state to unlocked
and returns immediately. If an attempt is made to release an
unlocked lock, a RuntimeError will be raised.
When more than one task is blocked in acquire() waiting for
the state to turn to unlocked, only one task proceeds when a
release() call resets the state to unlocked; successive release()
calls will unblock tasks in FIFO order.
Locks also support the asynchronous context management protocol.
'async with lock' statement should be used.
Usage:
lock = Lock()
...
await lock.acquire()
try:
...
finally:
lock.release()
Context manager usage:
lock = Lock()
...
async with lock:
...
Lock objects can be tested for locking state:
if not lock.locked():
await lock.acquire()
else:
# lock is acquired
...
"""
_waiters: deque[Future[Any]] | None
if sys.version_info >= (3, 10):
def __init__(self) -> None: ...
else:
def __init__(self, *, loop: AbstractEventLoop | None = None) -> None: ...
def locked(self) -> bool: ...
async def acquire(self) -> Literal[True]: ...
def release(self) -> None: ...
def locked(self) -> bool:
"""
Return True if lock is acquired.
"""
async def acquire(self) -> Literal[True]:
"""
Acquire a lock.
This method blocks until the lock is unlocked, then sets it to
locked and returns True.
"""
def release(self) -> None:
"""
Release a lock.
When the lock is locked, reset it to unlocked, and return.
If any other tasks are blocked waiting for the lock to become
unlocked, allow exactly one of them to proceed.
When invoked on an unlocked lock, a RuntimeError is raised.
There is no return value.
"""
class Event(_LoopBoundMixin):
"""
Asynchronous equivalent to threading.Event.
Class implementing event objects. An event manages a flag that can be set
to true with the set() method and reset to false with the clear() method.
The wait() method blocks until the flag is true. The flag is initially
false.
"""
_waiters: deque[Future[Any]]
if sys.version_info >= (3, 10):
def __init__(self) -> None: ...
else:
def __init__(self, *, loop: AbstractEventLoop | None = None) -> None: ...
def is_set(self) -> bool: ...
def set(self) -> None: ...
def clear(self) -> None: ...
async def wait(self) -> Literal[True]: ...
def is_set(self) -> bool:
"""
Return True if and only if the internal flag is true.
"""
def set(self) -> None:
"""
Set the internal flag to true. All tasks waiting for it to
become true are awakened. Tasks that call wait() once the flag is
true will not block at all.
"""
def clear(self) -> None:
"""
Reset the internal flag to false. Subsequently, tasks calling
wait() will block until set() is called to set the internal flag
to true again.
"""
async def wait(self) -> Literal[True]:
"""
Block until the internal flag is true.
If the internal flag is true on entry, return True
immediately. Otherwise, block until another task calls
set() to set the flag to true, then return True.
"""
class Condition(_ContextManagerMixin, _LoopBoundMixin):
"""
Asynchronous equivalent to threading.Condition.
This class implements condition variable objects. A condition variable
allows one or more tasks to wait until they are notified by another
task.
A new Lock object is created and used as the underlying lock.
"""
_waiters: deque[Future[Any]]
if sys.version_info >= (3, 10):
def __init__(self, lock: Lock | None = None) -> None: ...
@@ -62,12 +181,71 @@ class Condition(_ContextManagerMixin, _LoopBoundMixin):
def locked(self) -> bool: ...
async def acquire(self) -> Literal[True]: ...
def release(self) -> None: ...
async def wait(self) -> Literal[True]: ...
async def wait_for(self, predicate: Callable[[], _T]) -> _T: ...
def notify(self, n: int = 1) -> None: ...
def notify_all(self) -> None: ...
async def wait(self) -> Literal[True]:
"""
Wait until notified.
If the calling task has not acquired the lock when this
method is called, a RuntimeError is raised.
This method releases the underlying lock, and then blocks
until it is awakened by a notify() or notify_all() call for
the same condition variable in another task. Once
awakened, it re-acquires the lock and returns True.
This method may return spuriously,
which is why the caller should always
re-check the state and be prepared to wait() again.
"""
async def wait_for(self, predicate: Callable[[], _T]) -> _T:
"""
Wait until a predicate becomes true.
The predicate should be a callable whose result will be
interpreted as a boolean value. The method will repeatedly
wait() until it evaluates to true. The final predicate value is
the return value.
"""
def notify(self, n: int = 1) -> None:
"""
By default, wake up one task waiting on this condition, if any.
If the calling task has not acquired the lock when this method
is called, a RuntimeError is raised.
This method wakes up n of the tasks waiting for the condition
variable; if fewer than n are waiting, they are all awoken.
Note: an awakened task does not actually return from its
wait() call until it can reacquire the lock. Since notify() does
not release the lock, its caller should.
"""
def notify_all(self) -> None:
"""
Wake up all tasks waiting on this condition. This method acts
like notify(), but wakes up all waiting tasks instead of one. If the
calling task has not acquired the lock when this method is called,
a RuntimeError is raised.
"""
class Semaphore(_ContextManagerMixin, _LoopBoundMixin):
"""
A Semaphore implementation.
A semaphore manages an internal counter which is decremented by each
acquire() call and incremented by each release() call. The counter
can never go below zero; when acquire() finds that it is zero, it blocks,
waiting until some other task calls release().
Semaphores also support the context management protocol.
The optional argument gives the initial value for the internal
counter; it defaults to 1. If the value given is less than 0,
ValueError is raised.
"""
_value: int
_waiters: deque[Future[Any]] | None
if sys.version_info >= (3, 10):
@@ -75,12 +253,42 @@ class Semaphore(_ContextManagerMixin, _LoopBoundMixin):
else:
def __init__(self, value: int = 1, *, loop: AbstractEventLoop | None = None) -> None: ...
def locked(self) -> bool: ...
async def acquire(self) -> Literal[True]: ...
def release(self) -> None: ...
def _wake_up_next(self) -> None: ...
def locked(self) -> bool:
"""
Returns True if semaphore cannot be acquired immediately.
"""
class BoundedSemaphore(Semaphore): ...
async def acquire(self) -> Literal[True]:
"""
Acquire a semaphore.
If the internal counter is larger than zero on entry,
decrement it by one and return True immediately. If it is
zero on entry, block, waiting until some other task has
called release() to make it larger than 0, and then return
True.
"""
def release(self) -> None:
"""
Release a semaphore, incrementing the internal counter by one.
When it was zero on entry and another task is waiting for it to
become larger than zero again, wake up that task.
"""
def _wake_up_next(self) -> None:
"""
Wake up the first waiter that isn't done.
"""
class BoundedSemaphore(Semaphore):
"""
A bounded semaphore implementation.
This raises ValueError in release() if it would increase the value
above the initial value.
"""
if sys.version_info >= (3, 11):
class _BarrierState(enum.Enum): # undocumented
@@ -90,15 +298,61 @@ if sys.version_info >= (3, 11):
BROKEN = "broken"
class Barrier(_LoopBoundMixin):
def __init__(self, parties: int) -> None: ...
"""
Asyncio equivalent to threading.Barrier.
Implements a Barrier primitive.
Useful for synchronizing a fixed number of tasks at known synchronization
points. Tasks block on 'wait()' and are simultaneously awoken once they
have all made their call.
"""
def __init__(self, parties: int) -> None:
"""
Create a barrier, initialised to 'parties' tasks.
"""
async def __aenter__(self) -> Self: ...
async def __aexit__(self, *args: Unused) -> None: ...
async def wait(self) -> int: ...
async def abort(self) -> None: ...
async def reset(self) -> None: ...
async def wait(self) -> int:
"""
Wait for the barrier.
When the specified number of tasks have started waiting, they are all
simultaneously awoken.
Returns a unique and individual index number from 0 to 'parties-1'.
"""
async def abort(self) -> None:
"""
Place the barrier into a 'broken' state.
Useful in case of error. Any currently waiting tasks and tasks
attempting to 'wait()' will have BrokenBarrierError raised.
"""
async def reset(self) -> None:
"""
Reset the barrier to the initial state.
Any tasks currently waiting will get the BrokenBarrier exception
raised.
"""
@property
def parties(self) -> int: ...
def parties(self) -> int:
"""
Return the number of tasks required to trip the barrier.
"""
@property
def n_waiting(self) -> int: ...
def n_waiting(self) -> int:
"""
Return the number of tasks currently waiting at the barrier.
"""
@property
def broken(self) -> bool: ...
def broken(self) -> bool:
"""
Return True if the barrier is in a broken state.
"""

@@ -1,3 +1,7 @@
"""
Logging configuration.
"""
import logging
logger: logging.Logger

@@ -1,3 +1,7 @@
"""
Event loop mixins.
"""
import sys
import threading
from typing_extensions import Never

@@ -1,3 +1,10 @@
"""
Event loop using a proactor and related classes.
A proactor is a "notify-on-completion" multiplexer. Currently a
proactor is only implemented on Windows with IOCP.
"""
import sys
from collections.abc import Mapping
from socket import socket
@@ -8,6 +15,10 @@ from . import base_events, constants, events, futures, streams, transports
__all__ = ("BaseProactorEventLoop",)
class _ProactorBasePipeTransport(transports._FlowControlMixin, transports.BaseTransport):
"""
Base class for pipe and socket transports.
"""
def __init__(
self,
loop: events.AbstractEventLoop,
@@ -20,6 +31,10 @@ class _ProactorBasePipeTransport(transports._FlowControlMixin, transports.BaseTr
def __del__(self) -> None: ...
class _ProactorReadPipeTransport(_ProactorBasePipeTransport, transports.ReadTransport):
"""
Transport for read pipes.
"""
if sys.version_info >= (3, 10):
def __init__(
self,
@@ -42,11 +57,23 @@ class _ProactorReadPipeTransport(_ProactorBasePipeTransport, transports.ReadTran
server: events.AbstractServer | None = None,
) -> None: ...
class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport, transports.WriteTransport): ...
class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport, transports.WriteTransport):
"""
Transport for write pipes.
"""
class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport): ...
class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport, _ProactorBaseWritePipeTransport, transports.Transport): ...
class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport, _ProactorBaseWritePipeTransport, transports.Transport):
"""
Transport for duplex pipes.
"""
class _ProactorSocketTransport(_ProactorReadPipeTransport, _ProactorBaseWritePipeTransport, transports.Transport):
"""
Transport for connected sockets.
"""
_sendfile_compatible: ClassVar[constants._SendfileMode]
def __init__(
self,

@@ -1,3 +1,7 @@
"""
Abstract Protocol base classes.
"""
from _typeshed import ReadableBuffer
from asyncio import transports
from typing import Any
@@ -6,30 +10,212 @@ from typing import Any
__all__ = ("BaseProtocol", "Protocol", "DatagramProtocol", "SubprocessProtocol", "BufferedProtocol")
class BaseProtocol:
def connection_made(self, transport: transports.BaseTransport) -> None: ...
def connection_lost(self, exc: Exception | None) -> None: ...
def pause_writing(self) -> None: ...
def resume_writing(self) -> None: ...
"""
Common base class for protocol interfaces.
Usually a user implements protocols derived from BaseProtocol,
like Protocol or ProcessProtocol.
The only case when BaseProtocol should be implemented directly is a
write-only transport, like a write pipe.
"""
def connection_made(self, transport: transports.BaseTransport) -> None:
"""
Called when a connection is made.
The argument is the transport representing the pipe connection.
To receive data, wait for data_received() calls.
When the connection is closed, connection_lost() is called.
"""
def connection_lost(self, exc: Exception | None) -> None:
"""
Called when the connection is lost or closed.
The argument is an exception object or None (the latter
meaning a regular EOF is received or the connection was
aborted or closed).
"""
def pause_writing(self) -> None:
"""
Called when the transport's buffer goes over the high-water mark.
Pause and resume calls are paired -- pause_writing() is called
once when the buffer goes strictly over the high-water mark
(even if subsequent writes increase the buffer size even
more), and eventually resume_writing() is called once when the
buffer size reaches the low-water mark.
Note that if the buffer size equals the high-water mark,
pause_writing() is not called -- it must go strictly over.
Conversely, resume_writing() is called when the buffer size is
equal or lower than the low-water mark. These end conditions
are important to ensure that things go as expected when either
mark is zero.
NOTE: This is the only Protocol callback that is not called
through EventLoop.call_soon() -- if it were, it would have no
effect when it's most needed (when the app keeps writing
without yielding until pause_writing() is called).
"""
def resume_writing(self) -> None:
"""
Called when the transport's buffer drains below the low-water mark.
See pause_writing() for details.
"""
class Protocol(BaseProtocol):
def data_received(self, data: bytes) -> None: ...
def eof_received(self) -> bool | None: ...
"""
Interface for stream protocol.
The user should implement this interface. They can inherit from
this class but don't need to. The implementations here do
nothing (they don't raise exceptions).
When the user wants to request a transport, they pass a protocol
factory to a utility function (e.g., EventLoop.create_connection()).
When the connection is made successfully, connection_made() is
called with a suitable transport object. Then data_received()
will be called 0 or more times with data (bytes) received from the
transport; finally, connection_lost() will be called exactly once
with either an exception object or None as an argument.
State machine of calls:
start -> CM [-> DR*] [-> ER?] -> CL -> end
* CM: connection_made()
* DR: data_received()
* ER: eof_received()
* CL: connection_lost()
"""
def data_received(self, data: bytes) -> None:
"""
Called when some data is received.
The argument is a bytes object.
"""
def eof_received(self) -> bool | None:
"""
Called when the other end calls write_eof() or equivalent.
If this returns a false value (including None), the transport
will close itself. If it returns a true value, closing the
transport is up to the protocol.
"""
class BufferedProtocol(BaseProtocol):
def get_buffer(self, sizehint: int) -> ReadableBuffer: ...
def buffer_updated(self, nbytes: int) -> None: ...
def eof_received(self) -> bool | None: ...
"""
Interface for stream protocol with manual buffer control.
Event methods, such as `create_server` and `create_connection`,
accept factories that return protocols that implement this interface.
The idea of BufferedProtocol is that it allows manually allocating
and controlling the receive buffer. Event loops can then use the buffer
provided by the protocol to avoid unnecessary data copies. This
can result in a noticeable performance improvement for protocols that
receive large amounts of data. Sophisticated protocols can allocate
the buffer only once at creation time.
State machine of calls:
start -> CM [-> GB [-> BU?]]* [-> ER?] -> CL -> end
* CM: connection_made()
* GB: get_buffer()
* BU: buffer_updated()
* ER: eof_received()
* CL: connection_lost()
"""
def get_buffer(self, sizehint: int) -> ReadableBuffer:
"""
Called to allocate a new receive buffer.
*sizehint* is a recommended minimal size for the returned
buffer. When set to -1, the buffer size can be arbitrary.
Must return an object that implements the
:ref:`buffer protocol <bufferobjects>`.
It is an error to return a zero-sized buffer.
"""
def buffer_updated(self, nbytes: int) -> None:
"""
Called when the buffer was updated with the received data.
*nbytes* is the total number of bytes that were written to
the buffer.
"""
def eof_received(self) -> bool | None:
"""
Called when the other end calls write_eof() or equivalent.
If this returns a false value (including None), the transport
will close itself. If it returns a true value, closing the
transport is up to the protocol.
"""
class DatagramProtocol(BaseProtocol):
def connection_made(self, transport: transports.DatagramTransport) -> None: ... # type: ignore[override]
"""
Interface for datagram protocol.
"""
def connection_made(self, transport: transports.DatagramTransport) -> None: # type: ignore[override]
"""
Called when a connection is made.
The argument is the transport representing the pipe connection.
To receive data, wait for data_received() calls.
When the connection is closed, connection_lost() is called.
"""
# addr can be a tuple[int, int] for some unusual protocols like socket.AF_NETLINK.
# Use tuple[str | Any, int] to not cause typechecking issues on most usual cases.
# This could be improved by using tuple[AnyOf[str, int], int] if the AnyOf feature is accepted.
# See https://github.com/python/typing/issues/566
def datagram_received(self, data: bytes, addr: tuple[str | Any, int]) -> None: ...
def error_received(self, exc: Exception) -> None: ...
def datagram_received(self, data: bytes, addr: tuple[str | Any, int]) -> None:
"""
Called when some datagram is received.
"""
def error_received(self, exc: Exception) -> None:
"""
Called when a send or receive operation raises an OSError.
(Other than BlockingIOError or InterruptedError.)
"""
class SubprocessProtocol(BaseProtocol):
def pipe_data_received(self, fd: int, data: bytes) -> None: ...
def pipe_connection_lost(self, fd: int, exc: Exception | None) -> None: ...
def process_exited(self) -> None: ...
"""
Interface for protocol for subprocess calls.
"""
def pipe_data_received(self, fd: int, data: bytes) -> None:
"""
Called when the subprocess writes data into stdout/stderr pipe.
fd is the int file descriptor.
data is a bytes object.
"""
def pipe_connection_lost(self, fd: int, exc: Exception | None) -> None:
"""
Called when a file descriptor associated with the child process is
closed.
fd is the int file descriptor that was closed.
"""
def process_exited(self) -> None:
"""
Called when subprocess has exited.
"""

@@ -8,8 +8,15 @@ if sys.version_info >= (3, 10):
else:
_LoopBoundMixin = object
class QueueEmpty(Exception): ...
class QueueFull(Exception): ...
class QueueEmpty(Exception):
"""
Raised when Queue.get_nowait() is called on an empty Queue.
"""
class QueueFull(Exception):
"""
Raised when the Queue.put_nowait() method is called on a full Queue.
"""
# Keep asyncio.__all__ updated with any changes to __all__ here
if sys.version_info >= (3, 13):
@@ -21,11 +28,26 @@ else:
_T = TypeVar("_T")
if sys.version_info >= (3, 13):
class QueueShutDown(Exception): ...
class QueueShutDown(Exception):
"""
Raised when putting on to or getting from a shut-down Queue.
"""
# If Generic[_T] is last and _LoopBoundMixin is object, pyright is unhappy.
# We can remove the noqa pragma when dropping 3.9 support.
class Queue(Generic[_T], _LoopBoundMixin): # noqa: Y059
"""
A queue, useful for coordinating producer and consumer coroutines.
If maxsize is less than or equal to zero, the queue size is infinite. If it
is an integer greater than 0, then "await put()" will block when the
queue reaches maxsize, until an item is removed by get().
Unlike the standard library Queue, you can reliably know this Queue's size
with qsize(), since your single-threaded asyncio application won't be
interrupted between calling qsize() and doing an operation on the Queue.
"""
if sys.version_info >= (3, 10):
def __init__(self, maxsize: int = 0) -> None: ...
else:
@@ -35,20 +57,125 @@ class Queue(Generic[_T], _LoopBoundMixin): # noqa: Y059
def _get(self) -> _T: ...
def _put(self, item: _T) -> None: ...
def _format(self) -> str: ...
def qsize(self) -> int: ...
@property
def maxsize(self) -> int: ...
def empty(self) -> bool: ...
def full(self) -> bool: ...
async def put(self, item: _T) -> None: ...
def put_nowait(self, item: _T) -> None: ...
async def get(self) -> _T: ...
def get_nowait(self) -> _T: ...
async def join(self) -> None: ...
def task_done(self) -> None: ...
def __class_getitem__(cls, type: Any, /) -> GenericAlias: ...
if sys.version_info >= (3, 13):
def shutdown(self, immediate: bool = False) -> None: ...
def qsize(self) -> int:
"""
Number of items in the queue.
"""
class PriorityQueue(Queue[_T]): ...
class LifoQueue(Queue[_T]): ...
@property
def maxsize(self) -> int:
"""
Number of items allowed in the queue.
"""
def empty(self) -> bool:
"""
Return True if the queue is empty, False otherwise.
"""
def full(self) -> bool:
"""
Return True if there are maxsize items in the queue.
Note: if the Queue was initialized with maxsize=0 (the default),
then full() is never True.
"""
async def put(self, item: _T) -> None:
"""
Put an item into the queue. If the queue is full, wait until a free
slot is available before adding the item.
Raises QueueShutDown if the queue has been shut down.
"""
def put_nowait(self, item: _T) -> None:
"""
Put an item into the queue without blocking.
If no free slot is immediately available, raise QueueFull.
Raises QueueShutDown if the queue has been shut down.
"""
async def get(self) -> _T:
"""
Remove and return an item from the queue.
If queue is empty, wait until an item is available.
Raises QueueShutDown if the queue has been shut down and is empty, or
if the queue has been shut down immediately.
"""
def get_nowait(self) -> _T:
"""
Remove and return an item from the queue.
Return an item if one is immediately available, else raise QueueEmpty.
Raises QueueShutDown if the queue has been shut down and is empty, or
if the queue has been shut down immediately.
"""
async def join(self) -> None:
"""
Block until all items in the queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer calls task_done() to
indicate that the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
def task_done(self) -> None:
"""
Indicate that a formerly enqueued task is complete.
Used by queue consumers. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items have
been processed (meaning that a task_done() call was received for every
item that had been put() into the queue).
shutdown(immediate=True) calls task_done() for each remaining item in
the queue.
Raises ValueError if called more times than there were items placed in
the queue.
"""
def __class_getitem__(cls, type: Any, /) -> GenericAlias:
"""
Represent a PEP 585 generic type.
E.g. for t = list[int], t.__origin__ is list and t.__args__ is (int,).
"""
if sys.version_info >= (3, 13):
def shutdown(self, immediate: bool = False) -> None:
"""
Shut down the queue, making queue gets and puts raise QueueShutDown.
By default, gets will only raise once the queue is empty. Set
'immediate' to True to make gets raise immediately instead.
All blocked callers of put() and get() will be unblocked. If
'immediate', a task is marked as done for each item remaining in
the queue, which may unblock callers of join().
"""
class PriorityQueue(Queue[_T]):
"""
A subclass of Queue; retrieves entries in priority order (lowest first).
Entries are typically tuples of the form: (priority number, data).
"""
class LifoQueue(Queue[_T]):
"""
A subclass of Queue that retrieves most recently added entries first.
"""

@@ -17,17 +17,106 @@ _T = TypeVar("_T")
if sys.version_info >= (3, 11):
@final
class Runner:
"""
A context manager that controls event loop life cycle.
The context manager always creates a new event loop,
allows running async functions inside it,
and properly finalizes the loop at the context manager exit.
If debug is True, the event loop will be run in debug mode.
If loop_factory is passed, it is used for new event loop creation.
asyncio.run(main(), debug=True)
is a shortcut for
with asyncio.Runner(debug=True) as runner:
runner.run(main())
The run() method can be called multiple times within the runner's context.
This can be useful for an interactive console (e.g. IPython),
unittest runners, console tools -- everywhere async code
is called from an existing sync framework and where the preferred single
asyncio.run() call doesn't work.
"""
def __init__(self, *, debug: bool | None = None, loop_factory: Callable[[], AbstractEventLoop] | None = None) -> None: ...
def __enter__(self) -> Self: ...
def __exit__(self, exc_type: Unused, exc_val: Unused, exc_tb: Unused) -> None: ...
def close(self) -> None: ...
def get_loop(self) -> AbstractEventLoop: ...
def run(self, coro: Coroutine[Any, Any, _T], *, context: Context | None = None) -> _T: ...
def close(self) -> None:
"""
Shutdown and close event loop.
"""
def get_loop(self) -> AbstractEventLoop:
"""
Return embedded event loop.
"""
def run(self, coro: Coroutine[Any, Any, _T], *, context: Context | None = None) -> _T:
"""
Run code in the embedded event loop.
"""
if sys.version_info >= (3, 12):
def run(
main: Coroutine[Any, Any, _T], *, debug: bool | None = ..., loop_factory: Callable[[], AbstractEventLoop] | None = ...
) -> _T: ...
) -> _T:
"""
Execute the coroutine and return the result.
This function runs the passed coroutine, taking care of
managing the asyncio event loop, finalizing asynchronous
generators and closing the default executor.
This function cannot be called when another asyncio event loop is
running in the same thread.
If debug is True, the event loop will be run in debug mode.
If loop_factory is passed, it is used for new event loop creation.
This function always creates a new event loop and closes it at the end.
It should be used as a main entry point for asyncio programs, and should
ideally only be called once.
The executor is given a timeout duration of 5 minutes to shutdown.
If the executor hasn't finished within that duration, a warning is
emitted and the executor is closed.
Example:
async def main():
await asyncio.sleep(1)
print('hello')
asyncio.run(main())
"""
else:
def run(main: Coroutine[Any, Any, _T], *, debug: bool | None = None) -> _T: ...
def run(main: Coroutine[Any, Any, _T], *, debug: bool | None = None) -> _T:
"""
Execute the coroutine and return the result.
This function runs the passed coroutine, taking care of
managing the asyncio event loop and finalizing asynchronous
generators.
This function cannot be called when another asyncio event loop is
running in the same thread.
If debug is True, the event loop will be run in debug mode.
This function always creates a new event loop and closes it at the end.
It should be used as a main entry point for asyncio programs, and should
ideally only be called once.
Example:
async def main():
await asyncio.sleep(1)
print('hello')
asyncio.run(main())
"""

@@ -1,3 +1,10 @@
"""
Event loop using a selector and related classes.
A selector is a "notify-when-ready" multiplexer. For a subclass which
also includes support for signal handling, see the unix_events sub-module.
"""
import selectors
from socket import socket
@@ -6,5 +13,18 @@ from . import base_events
__all__ = ("BaseSelectorEventLoop",)
class BaseSelectorEventLoop(base_events.BaseEventLoop):
"""
Selector event loop.
See events.EventLoop for API specification.
"""
def __init__(self, selector: selectors.BaseSelector | None = None) -> None: ...
async def sock_recv(self, sock: socket, n: int) -> bytes: ...
async def sock_recv(self, sock: socket, n: int) -> bytes:
"""
Receive data from the socket.
The return value is a bytes object representing the data received.
The maximum amount of data to be received at once is specified by n.
"""

@@ -36,6 +36,24 @@ else:
if sys.version_info < (3, 11):
class _SSLPipe:
"""
An SSL "Pipe".
An SSL pipe allows you to communicate with an SSL/TLS protocol instance
through memory buffers. It can be used to implement a security layer for an
existing connection where you don't have access to the connection's file
descriptor, or for some reason you don't want to use it.
An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode,
data is passed through untransformed. In wrapped mode, application level
data is encrypted to SSL record level data and vice versa. The SSL record
level is the lowest level in the SSL protocol suite and is what travels
as-is over the wire.
An SSL pipe is initially in "unwrapped" mode. To start SSL, call
do_handshake(). To shutdown SSL again, call unwrap().
"""
max_size: ClassVar[int]
_context: ssl.SSLContext
@@ -48,20 +66,110 @@ if sys.version_info < (3, 11):
_need_ssldata: bool
_handshake_cb: Callable[[BaseException | None], None] | None
_shutdown_cb: Callable[[], None] | None
def __init__(self, context: ssl.SSLContext, server_side: bool, server_hostname: str | None = None) -> None: ...
def __init__(self, context: ssl.SSLContext, server_side: bool, server_hostname: str | None = None) -> None:
"""
The *context* argument specifies the ssl.SSLContext to use.
The *server_side* argument indicates whether this is a server side or
client side transport.
The optional *server_hostname* argument can be used to specify the
hostname you are connecting to. You may only specify this parameter if
the _ssl module supports Server Name Indication (SNI).
"""
@property
def context(self) -> ssl.SSLContext: ...
def context(self) -> ssl.SSLContext:
"""
The SSL context passed to the constructor.
"""
@property
def ssl_object(self) -> ssl.SSLObject | None: ...
def ssl_object(self) -> ssl.SSLObject | None:
"""
The internal ssl.SSLObject instance.
Return None if the pipe is not wrapped.
"""
@property
def need_ssldata(self) -> bool: ...
def need_ssldata(self) -> bool:
"""
Whether more record level data is needed to complete a handshake
that is currently in progress.
"""
@property
def wrapped(self) -> bool: ...
def do_handshake(self, callback: Callable[[BaseException | None], object] | None = None) -> list[bytes]: ...
def shutdown(self, callback: Callable[[], object] | None = None) -> list[bytes]: ...
def feed_eof(self) -> None: ...
def feed_ssldata(self, data: bytes, only_handshake: bool = False) -> tuple[list[bytes], list[bytes]]: ...
def feed_appdata(self, data: bytes, offset: int = 0) -> tuple[list[bytes], int]: ...
def wrapped(self) -> bool:
"""
Whether a security layer is currently in effect.
Return False during handshake.
"""
def do_handshake(self, callback: Callable[[BaseException | None], object] | None = None) -> list[bytes]:
"""
Start the SSL handshake.
Return a list of ssldata. An ssldata element is a list of buffers.
The optional *callback* argument can be used to install a callback that
will be called when the handshake is complete. The callback will be
called with None if successful, else an exception instance.
"""
def shutdown(self, callback: Callable[[], object] | None = None) -> list[bytes]:
"""
Start the SSL shutdown sequence.
Return a list of ssldata. An ssldata element is a list of buffers.
The optional *callback* argument can be used to install a callback that
will be called when the shutdown is complete. The callback will be
called without arguments.
"""
def feed_eof(self) -> None:
"""
Send a potentially "ragged" EOF.
This method will raise an SSL_ERROR_EOF exception if the EOF is
unexpected.
"""
def feed_ssldata(self, data: bytes, only_handshake: bool = False) -> tuple[list[bytes], list[bytes]]:
"""
Feed SSL record level data into the pipe.
The data must be a bytes instance. It is OK to send an empty bytes
instance. This can be used to get ssldata for a handshake initiated by
this endpoint.
Return a (ssldata, appdata) tuple. The ssldata element is a list of
buffers containing SSL data that needs to be sent to the remote SSL.
The appdata element is a list of buffers containing plaintext data that
needs to be forwarded to the application. The appdata list may contain
an empty buffer indicating an SSL "close_notify" alert. This alert must
be acknowledged by calling shutdown().
"""
def feed_appdata(self, data: bytes, offset: int = 0) -> tuple[list[bytes], int]:
"""
Feed plaintext data into the pipe.
Return an (ssldata, offset) tuple. The ssldata element is a list of
buffers containing record level data that needs to be sent to the
remote SSL instance. The offset is the number of plaintext bytes that
were processed, which may be less than the length of data.
NOTE: In case of short writes, this call MUST be retried with the SAME
buffer passed into the *data* argument (i.e. the id() must be the
same). This is an OpenSSL requirement. A further particularity is that
a short write will always have offset == 0, because the _ssl module
does not enable partial writes. And even though the offset is zero,
there will still be encrypted data in ssldata.
"""
class _SSLProtocolTransport(transports._FlowControlMixin, transports.Transport):
_sendfile_compatible: ClassVar[constants._SendfileMode]
@@ -73,16 +181,59 @@ class _SSLProtocolTransport(transports._FlowControlMixin, transports.Transport):
_ssl_protocol: SSLProtocol
_closed: bool
def __init__(self, loop: events.AbstractEventLoop, ssl_protocol: SSLProtocol) -> None: ...
def get_extra_info(self, name: str, default: Any | None = None) -> dict[str, Any]: ...
def get_extra_info(self, name: str, default: Any | None = None) -> dict[str, Any]:
"""
Get optional transport information.
"""
@property
def _protocol_paused(self) -> bool: ...
def write(self, data: bytes | bytearray | memoryview[Any]) -> None: ... # any memoryview format or shape
def can_write_eof(self) -> Literal[False]: ...
def write(self, data: bytes | bytearray | memoryview[Any]) -> None: # any memoryview format or shape
"""
Write some data bytes to the transport.
This does not block; it buffers the data and arranges for it
to be sent out asynchronously.
"""
def can_write_eof(self) -> Literal[False]:
"""
Return True if this transport supports write_eof(), False if not.
"""
if sys.version_info >= (3, 11):
def get_write_buffer_limits(self) -> tuple[int, int]: ...
def get_write_buffer_limits(self) -> tuple[int, int]:
"""
Get the high and low watermarks for write flow control.
Return a tuple (low, high) where low and high are
positive numbers of bytes.
"""
def get_read_buffer_limits(self) -> tuple[int, int]: ...
def set_read_buffer_limits(self, high: int | None = None, low: int | None = None) -> None: ...
def get_read_buffer_size(self) -> int: ...
def set_read_buffer_limits(self, high: int | None = None, low: int | None = None) -> None:
"""
Set the high- and low-water limits for read flow control.
These two values control when to call the upstream transport's
pause_reading() and resume_reading() methods. If specified,
the low-water limit must be less than or equal to the
high-water limit. Neither value can be negative.
The defaults are implementation-specific. If only the
high-water limit is given, the low-water limit defaults to an
implementation-specific value less than or equal to the
high-water limit. Setting high to zero forces low to zero as
well, and causes pause_reading() to be called whenever the
buffer becomes non-empty. Setting low to zero causes
resume_reading() to be called only once the buffer is empty.
Use of zero for either limit is generally sub-optimal as it
reduces opportunities for doing I/O and computation
concurrently.
"""
def get_read_buffer_size(self) -> int:
"""
Return the current size of the read buffer.
"""
def __del__(self) -> None: ...
@@ -92,6 +243,13 @@ else:
_SSLProtocolBase: TypeAlias = protocols.Protocol
class SSLProtocol(_SSLProtocolBase):
"""
SSL protocol.
Implementation of SSL on top of a socket using incoming and outgoing
buffers which are ssl.MemoryBIO objects.
"""
_server_side: bool
_server_hostname: str | None
_sslcontext: ssl.SSLContext
@@ -143,8 +301,25 @@ class SSLProtocol(_SSLProtocolBase):
def _set_app_protocol(self, app_protocol: protocols.BaseProtocol) -> None: ...
def _wakeup_waiter(self, exc: BaseException | None = None) -> None: ...
def connection_lost(self, exc: BaseException | None) -> None: ...
def eof_received(self) -> None: ...
def connection_lost(self, exc: BaseException | None) -> None:
"""
Called when the low-level connection is lost or closed.
The argument is an exception object or None (the latter
meaning a regular EOF is received or the connection was
aborted or closed).
"""
def eof_received(self) -> None:
"""
Called when the other end of the low-level stream
is half-closed.
If this returns a false value (including None), the transport
will close itself. If it returns a true value, closing the
transport is up to the protocol.
"""
def _get_extra_info(self, name: str, default: Any | None = None) -> Any: ...
def _start_shutdown(self) -> None: ...
if sys.version_info >= (3, 11):


@@ -1,3 +1,7 @@
"""
Support for running coroutines in parallel with staggered start times.
"""
from collections.abc import Awaitable, Callable, Iterable
from typing import Any
@@ -7,4 +11,54 @@ __all__ = ("staggered_race",)
async def staggered_race(
coro_fns: Iterable[Callable[[], Awaitable[Any]]], delay: float | None, *, loop: events.AbstractEventLoop | None = None
) -> tuple[Any, int | None, list[Exception | None]]: ...
) -> tuple[Any, int | None, list[Exception | None]]:
"""
Run coroutines with staggered start times and take the first to finish.
This method takes an iterable of coroutine functions. The first one is
started immediately. From then on, whenever the immediately preceding one
fails (raises an exception), or when *delay* seconds has passed, the next
coroutine is started. This continues until one of the coroutines complete
successfully, in which case all others are cancelled, or until all
coroutines fail.
The coroutines provided should be well-behaved in the following way:
* They should only ``return`` if completed successfully.
* They should always raise an exception if they did not complete
successfully. In particular, if they handle cancellation, they should
probably reraise, like this::
try:
# do work
except asyncio.CancelledError:
# undo partially completed work
raise
Args:
coro_fns: an iterable of coroutine functions, i.e. callables that
return a coroutine object when called. Use ``functools.partial`` or
lambdas to pass arguments.
delay: amount of time, in seconds, between starting coroutines. If
``None``, the coroutines will run sequentially.
loop: the event loop to use.
Returns:
tuple *(winner_result, winner_index, exceptions)* where
- *winner_result*: the result of the winning coroutine, or ``None``
if no coroutines won.
- *winner_index*: the index of the winning coroutine in
``coro_fns``, or ``None`` if no coroutines won. If the winning
coroutine may return None on success, *winner_index* can be used
to definitively determine whether any coroutine won.
- *exceptions*: list of exceptions returned by the coroutines.
``len(exceptions)`` is equal to the number of coroutines actually
started, and the order is the same as in ``coro_fns``. The winning
coroutine's entry is ``None``.
"""


@@ -35,7 +35,26 @@ if sys.version_info >= (3, 10):
limit: int = 65536,
ssl_handshake_timeout: float | None = ...,
**kwds: Any,
) -> tuple[StreamReader, StreamWriter]: ...
) -> tuple[StreamReader, StreamWriter]:
"""
A wrapper for create_connection() returning a (reader, writer) pair.
The reader returned is a StreamReader instance; the writer is a
StreamWriter instance.
The arguments are all the usual arguments to create_connection()
except protocol_factory; most common are positional host and port,
with various optional keyword arguments following.
Additional optional keyword arguments are loop (to set the event loop
instance to use) and limit (to set the buffer limit passed to the
StreamReader).
(If you want to customize the StreamReader and/or
StreamReaderProtocol classes, just copy the code -- there's
really nothing special here except some convenience.)
"""
async def start_server(
client_connected_cb: _ClientConnectedCallback,
host: str | Sequence[str] | None = None,
@@ -44,7 +63,28 @@ if sys.version_info >= (3, 10):
limit: int = 65536,
ssl_handshake_timeout: float | None = ...,
**kwds: Any,
) -> Server: ...
) -> Server:
"""
Start a socket server, call back for each client connected.
The first parameter, `client_connected_cb`, takes two parameters:
client_reader, client_writer. client_reader is a StreamReader
object, while client_writer is a StreamWriter object. This
parameter can either be a plain callback function or a coroutine;
if it is a coroutine, it will be automatically converted into a
Task.
The rest of the arguments are all the usual arguments to
loop.create_server() except protocol_factory; most common are
positional host and port, with various optional keyword arguments
following. The return value is the same as loop.create_server().
Additional optional keyword argument is limit (to set the buffer
limit passed to the StreamReader).
The return value is the same as loop.create_server(), i.e. a
Server object which can be used to stop the service.
"""
else:
async def open_connection(
@@ -55,7 +95,26 @@ else:
limit: int = 65536,
ssl_handshake_timeout: float | None = ...,
**kwds: Any,
) -> tuple[StreamReader, StreamWriter]: ...
) -> tuple[StreamReader, StreamWriter]:
"""
A wrapper for create_connection() returning a (reader, writer) pair.
The reader returned is a StreamReader instance; the writer is a
StreamWriter instance.
The arguments are all the usual arguments to create_connection()
except protocol_factory; most common are positional host and port,
with various optional keyword arguments following.
Additional optional keyword arguments are loop (to set the event loop
instance to use) and limit (to set the buffer limit passed to the
StreamReader).
(If you want to customize the StreamReader and/or
StreamReaderProtocol classes, just copy the code -- there's
really nothing special here except some convenience.)
"""
async def start_server(
client_connected_cb: _ClientConnectedCallback,
host: str | None = None,
@@ -65,20 +124,53 @@ else:
limit: int = 65536,
ssl_handshake_timeout: float | None = ...,
**kwds: Any,
) -> Server: ...
) -> Server:
"""
Start a socket server, call back for each client connected.
The first parameter, `client_connected_cb`, takes two parameters:
client_reader, client_writer. client_reader is a StreamReader
object, while client_writer is a StreamWriter object. This
parameter can either be a plain callback function or a coroutine;
if it is a coroutine, it will be automatically converted into a
Task.
The rest of the arguments are all the usual arguments to
loop.create_server() except protocol_factory; most common are
positional host and port, with various optional keyword arguments
following. The return value is the same as loop.create_server().
Additional optional keyword arguments are loop (to set the event loop
instance to use) and limit (to set the buffer limit passed to the
StreamReader).
The return value is the same as loop.create_server(), i.e. a
Server object which can be used to stop the service.
"""
if sys.platform != "win32":
if sys.version_info >= (3, 10):
async def open_unix_connection(
path: StrPath | None = None, *, limit: int = 65536, **kwds: Any
) -> tuple[StreamReader, StreamWriter]: ...
) -> tuple[StreamReader, StreamWriter]:
"""
Similar to `open_connection` but works with UNIX Domain Sockets.
"""
async def start_unix_server(
client_connected_cb: _ClientConnectedCallback, path: StrPath | None = None, *, limit: int = 65536, **kwds: Any
) -> Server: ...
) -> Server:
"""
Similar to `start_server` but works with UNIX Domain Sockets.
"""
else:
async def open_unix_connection(
path: StrPath | None = None, *, loop: events.AbstractEventLoop | None = None, limit: int = 65536, **kwds: Any
) -> tuple[StreamReader, StreamWriter]: ...
) -> tuple[StreamReader, StreamWriter]:
"""
Similar to `open_connection` but works with UNIX Domain Sockets.
"""
async def start_unix_server(
client_connected_cb: _ClientConnectedCallback,
path: StrPath | None = None,
@@ -86,12 +178,34 @@ if sys.platform != "win32":
loop: events.AbstractEventLoop | None = None,
limit: int = 65536,
**kwds: Any,
) -> Server: ...
) -> Server:
"""
Similar to `start_server` but works with UNIX Domain Sockets.
"""
class FlowControlMixin(protocols.Protocol):
"""
Reusable flow control logic for StreamWriter.drain().
This implements the protocol methods pause_writing(),
resume_writing() and connection_lost(). If the subclass overrides
these it must call the super methods.
StreamWriter.drain() must wait for _drain_helper() coroutine.
"""
def __init__(self, loop: events.AbstractEventLoop | None = None) -> None: ...
class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
"""
Helper class to adapt between Protocol and StreamReader.
(This is a helper class instead of making StreamReader itself a
Protocol subclass, because the StreamReader has other potential
uses, and to prevent the user of the StreamReader from accidentally
calling inappropriate methods of the protocol.)
"""
def __init__(
self,
stream_reader: StreamReader,
@@ -101,6 +215,16 @@ class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
def __del__(self) -> None: ...
class StreamWriter:
"""
Wraps a Transport.
This exposes write(), writelines(), [can_]write_eof(),
get_extra_info() and close(). It adds drain() which returns an
optional Future on which you can wait for flow control. It also
adds a transport property which references the Transport
directly.
"""
def __init__(
self,
transport: transports.WriteTransport,
@@ -118,7 +242,15 @@ class StreamWriter:
def is_closing(self) -> bool: ...
async def wait_closed(self) -> None: ...
def get_extra_info(self, name: str, default: Any = None) -> Any: ...
async def drain(self) -> None: ...
async def drain(self) -> None:
"""
Flush the write buffer.
The intended use is to write
w.write(data)
await w.drain()
"""
if sys.version_info >= (3, 12):
async def start_tls(
self,
@@ -127,12 +259,17 @@ class StreamWriter:
server_hostname: str | None = None,
ssl_handshake_timeout: float | None = None,
ssl_shutdown_timeout: float | None = None,
) -> None: ...
) -> None:
"""
Upgrade an existing stream-based connection to TLS.
"""
elif sys.version_info >= (3, 11):
async def start_tls(
self, sslcontext: ssl.SSLContext, *, server_hostname: str | None = None, ssl_handshake_timeout: float | None = None
) -> None: ...
) -> None:
"""
Upgrade an existing stream-based connection to TLS.
"""
if sys.version_info >= (3, 13):
def __del__(self, warnings: ModuleType = ...) -> None: ...
elif sys.version_info >= (3, 11):
@@ -144,15 +281,126 @@ class StreamReader:
def set_exception(self, exc: Exception) -> None: ...
def set_transport(self, transport: transports.BaseTransport) -> None: ...
def feed_eof(self) -> None: ...
def at_eof(self) -> bool: ...
def feed_data(self, data: Iterable[SupportsIndex]) -> None: ...
async def readline(self) -> bytes: ...
if sys.version_info >= (3, 13):
async def readuntil(self, separator: _ReaduntilBuffer | tuple[_ReaduntilBuffer, ...] = b"\n") -> bytes: ...
else:
async def readuntil(self, separator: _ReaduntilBuffer = b"\n") -> bytes: ...
def at_eof(self) -> bool:
"""
Return True if the buffer is empty and 'feed_eof' was called.
"""
def feed_data(self, data: Iterable[SupportsIndex]) -> None: ...
async def readline(self) -> bytes:
"""
Read chunk of data from the stream until newline (b'\n') is found.
On success, return chunk that ends with newline. If only partial
line can be read due to EOF, return incomplete line without
terminating newline. When EOF was reached while no bytes read, empty
bytes object is returned.
If limit is reached, ValueError will be raised. In that case, if
newline was found, complete line including newline will be removed
from internal buffer. Else, internal buffer will be cleared. Limit is
compared against part of the line without newline.
If stream was paused, this function will automatically resume it if
needed.
"""
if sys.version_info >= (3, 13):
async def readuntil(self, separator: _ReaduntilBuffer | tuple[_ReaduntilBuffer, ...] = b"\n") -> bytes:
"""
Read data from the stream until ``separator`` is found.
On success, the data and separator will be removed from the
internal buffer (consumed). Returned data will include the
separator at the end.
Configured stream limit is used to check result. Limit sets the
maximal length of data that can be returned, not counting the
separator.
If an EOF occurs and the complete separator is still not found,
an IncompleteReadError exception will be raised, and the internal
buffer will be reset. The IncompleteReadError.partial attribute
may contain the separator partially.
If the data cannot be read because of over limit, a
LimitOverrunError exception will be raised, and the data
will be left in the internal buffer, so it can be read again.
The ``separator`` may also be a tuple of separators. In this
case the return value will be the shortest possible that has any
separator as the suffix. For the purposes of LimitOverrunError,
the shortest possible separator is considered to be the one that
matched.
"""
else:
async def readuntil(self, separator: _ReaduntilBuffer = b"\n") -> bytes:
"""
Read data from the stream until ``separator`` is found.
On success, the data and separator will be removed from the
internal buffer (consumed). Returned data will include the
separator at the end.
Configured stream limit is used to check result. Limit sets the
maximal length of data that can be returned, not counting the
separator.
If an EOF occurs and the complete separator is still not found,
an IncompleteReadError exception will be raised, and the internal
buffer will be reset. The IncompleteReadError.partial attribute
may contain the separator partially.
If the data cannot be read because of over limit, a
LimitOverrunError exception will be raised, and the data
will be left in the internal buffer, so it can be read again.
The ``separator`` may also be a tuple of separators. In this
case the return value will be the shortest possible that has any
separator as the suffix. For the purposes of LimitOverrunError,
the shortest possible separator is considered to be the one that
matched.
"""
async def read(self, n: int = -1) -> bytes: ...
async def readexactly(self, n: int) -> bytes: ...
async def read(self, n: int = -1) -> bytes:
"""
Read up to `n` bytes from the stream.
If `n` is not provided or set to -1,
read until EOF, then return all read bytes.
If EOF was received and the internal buffer is empty,
return an empty bytes object.
If `n` is 0, return an empty bytes object immediately.
If `n` is positive, return at most `n` available bytes
as soon as at least 1 byte is available in the internal buffer.
If EOF is received before any byte is read, return an empty
bytes object.
Returned value is not limited with limit, configured at stream
creation.
If stream was paused, this function will automatically resume it if
needed.
"""
async def readexactly(self, n: int) -> bytes:
"""
Read exactly `n` bytes.
Raise an IncompleteReadError if EOF is reached before `n` bytes can be
read. The IncompleteReadError.partial attribute of the exception will
contain the partial read bytes.
If `n` is zero, return an empty bytes object.
Returned value is not limited with limit, configured at stream
creation.
If stream was paused, this function will automatically resume it if
needed.
"""
def __aiter__(self) -> Self: ...
async def __anext__(self) -> bytes: ...
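The read-method semantics above compose naturally; a hypothetical length-prefixed framing helper (nothing here comes from the stubs beyond the StreamReader API itself) could be sketched as:

import asyncio

async def read_frame(reader: asyncio.StreamReader) -> bytes:
    # 4 ASCII digits give the payload length; readexactly() raises
    # IncompleteReadError if EOF arrives before enough bytes are read.
    header = await reader.readexactly(4)
    return await reader.readexactly(int(header))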


@@ -13,6 +13,10 @@ STDOUT: int
DEVNULL: int
class SubprocessStreamProtocol(streams.FlowControlMixin, protocols.SubprocessProtocol):
"""
Like StreamReaderProtocol, but for a subprocess.
"""
stdin: streams.StreamWriter | None
stdout: streams.StreamReader | None
stderr: streams.StreamReader | None
@@ -29,7 +33,11 @@ class Process:
) -> None: ...
@property
def returncode(self) -> int | None: ...
async def wait(self) -> int: ...
async def wait(self) -> int:
"""
Wait until the process exit and return the process return code.
"""
def send_signal(self, signal: int) -> None: ...
def terminate(self) -> None: ...
def kill(self) -> None: ...
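For instance (a sketch assuming a POSIX `sleep` binary is on PATH):

import asyncio

async def main() -> None:
    proc = await asyncio.create_subprocess_exec("sleep", "0.1")
    rc = await proc.wait()  # returns the child's exit code
    print(rc)  # 0

asyncio.run(main())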


@@ -17,10 +17,33 @@ else:
_T = TypeVar("_T")
class TaskGroup:
"""
Asynchronous context manager for managing groups of tasks.
Example use:
async with asyncio.TaskGroup() as group:
task1 = group.create_task(some_coroutine(...))
task2 = group.create_task(other_coroutine(...))
print("Both tasks have completed now.")
All tasks are awaited when the context manager exits.
Any exceptions other than `asyncio.CancelledError` raised within
a task will cancel all remaining tasks and wait for them to exit.
The exceptions are then combined and raised as an `ExceptionGroup`.
"""
_loop: AbstractEventLoop | None
_tasks: set[Task[Any]]
async def __aenter__(self) -> Self: ...
async def __aexit__(self, et: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None) -> None: ...
def create_task(self, coro: _CoroutineLike[_T], *, name: str | None = None, context: Context | None = None) -> Task[_T]: ...
def create_task(self, coro: _CoroutineLike[_T], *, name: str | None = None, context: Context | None = None) -> Task[_T]:
"""
Create a new task in this group and return it.
Similar to `asyncio.create_task`.
"""
def _on_task_done(self, task: Task[object]) -> None: ...
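A sketch of the failure behaviour described in the class docstring, using `except*` (Python 3.11+); the coroutines are invented for illustration:

import asyncio

async def boom() -> None:
    raise ValueError("boom")

async def main() -> None:
    try:
        async with asyncio.TaskGroup() as tg:
            tg.create_task(boom())
            tg.create_task(asyncio.sleep(10))  # cancelled when boom() raises
    except* ValueError as eg:
        print(eg.exceptions)  # (ValueError('boom'),)

asyncio.run(main())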


@@ -1,3 +1,7 @@
"""
Support for tasks, coroutines and the scheduler.
"""
import concurrent.futures
import sys
from _asyncio import (
@@ -89,18 +93,106 @@ ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
if sys.version_info >= (3, 13):
class _SyncAndAsyncIterator(Iterator[_T_co], AsyncIterator[_T_co], Protocol[_T_co]): ...
def as_completed(fs: Iterable[_FutureLike[_T]], *, timeout: float | None = None) -> _SyncAndAsyncIterator[Future[_T]]: ...
def as_completed(fs: Iterable[_FutureLike[_T]], *, timeout: float | None = None) -> _SyncAndAsyncIterator[Future[_T]]:
"""
Create an iterator of awaitables or their results in completion order.
Run the supplied awaitables concurrently. The returned object can be
iterated to obtain the results of the awaitables as they finish.
The object returned can be iterated as an asynchronous iterator or a plain
iterator. When asynchronous iteration is used, the originally-supplied
awaitables are yielded if they are tasks or futures. This makes it easy to
correlate previously-scheduled tasks with their results:
ipv4_connect = create_task(open_connection("127.0.0.1", 80))
ipv6_connect = create_task(open_connection("::1", 80))
tasks = [ipv4_connect, ipv6_connect]
async for earliest_connect in as_completed(tasks):
# earliest_connect is done. The result can be obtained by
# awaiting it or calling earliest_connect.result()
reader, writer = await earliest_connect
if earliest_connect is ipv6_connect:
print("IPv6 connection established.")
else:
print("IPv4 connection established.")
During asynchronous iteration, implicitly-created tasks will be yielded for
supplied awaitables that aren't tasks or futures.
When used as a plain iterator, each iteration yields a new coroutine that
returns the result or raises the exception of the next completed awaitable.
This pattern is compatible with Python versions older than 3.13:
ipv4_connect = create_task(open_connection("127.0.0.1", 80))
ipv6_connect = create_task(open_connection("::1", 80))
tasks = [ipv4_connect, ipv6_connect]
for next_connect in as_completed(tasks):
# next_connect is not one of the original task objects. It must be
# awaited to obtain the result value or raise the exception of the
# awaitable that finishes next.
reader, writer = await next_connect
A TimeoutError is raised if the timeout occurs before all awaitables are
done. This is raised by the async for loop during asynchronous iteration or
by the coroutines yielded during plain iteration.
"""
elif sys.version_info >= (3, 10):
def as_completed(fs: Iterable[_FutureLike[_T]], *, timeout: float | None = None) -> Iterator[Future[_T]]: ...
def as_completed(fs: Iterable[_FutureLike[_T]], *, timeout: float | None = None) -> Iterator[Future[_T]]:
"""
Return an iterator whose values are coroutines.
When waiting for the yielded coroutines you'll get the results (or
exceptions!) of the original Futures (or coroutines), in the order
in which and as soon as they complete.
This differs from PEP 3148; the proper way to use this is:
for f in as_completed(fs):
result = await f # The 'await' may raise.
# Use result.
If a timeout is specified, the 'await' will raise
TimeoutError when the timeout occurs before all Futures are done.
Note: The futures 'f' are not necessarily members of fs.
"""
else:
def as_completed(
fs: Iterable[_FutureLike[_T]], *, loop: AbstractEventLoop | None = None, timeout: float | None = None
) -> Iterator[Future[_T]]: ...
) -> Iterator[Future[_T]]:
"""
Return an iterator whose values are coroutines.
When waiting for the yielded coroutines you'll get the results (or
exceptions!) of the original Futures (or coroutines), in the order
in which and as soon as they complete.
This differs from PEP 3148; the proper way to use this is:
for f in as_completed(fs):
result = await f # The 'await' may raise.
# Use result.
If a timeout is specified, the 'await' will raise
TimeoutError when the timeout occurs before all Futures are done.
Note: The futures 'f' are not necessarily members of fs.
"""
@overload
def ensure_future(coro_or_future: _FT, *, loop: AbstractEventLoop | None = None) -> _FT: ... # type: ignore[overload-overlap]
def ensure_future(coro_or_future: _FT, *, loop: AbstractEventLoop | None = None) -> _FT: # type: ignore[overload-overlap]
"""
Wrap a coroutine or an awaitable in a future.
If the argument is a Future, it is returned directly.
"""
@overload
def ensure_future(coro_or_future: Awaitable[_T], *, loop: AbstractEventLoop | None = None) -> Task[_T]: ...
@@ -112,7 +204,38 @@ def ensure_future(coro_or_future: Awaitable[_T], *, loop: AbstractEventLoop | No
# N.B. Having overlapping overloads is the only way to get acceptable type inference in all edge cases.
if sys.version_info >= (3, 10):
@overload
def gather(coro_or_future1: _FutureLike[_T1], /, *, return_exceptions: Literal[False] = False) -> Future[tuple[_T1]]: ... # type: ignore[overload-overlap]
def gather(coro_or_future1: _FutureLike[_T1], /, *, return_exceptions: Literal[False] = False) -> Future[tuple[_T1]]: # type: ignore[overload-overlap]
"""
Return a future aggregating results from the given coroutines/futures.
Coroutines will be wrapped in a future and scheduled in the event
loop. They will not necessarily be scheduled in the same order as
passed in.
All futures must share the same event loop. If all the tasks are
done successfully, the returned future's result is the list of
results (in the order of the original sequence, not necessarily
the order of results arrival). If *return_exceptions* is True,
exceptions in the tasks are treated the same as successful
results, and gathered in the result list; otherwise, the first
raised exception will be immediately propagated to the returned
future.
Cancellation: if the outer Future is cancelled, all children (that
have not completed yet) are also cancelled. If any child is
cancelled, this is treated as if it raised CancelledError --
the outer Future is *not* cancelled in this case. (This is to
prevent the cancellation of one child to cause other children to
be cancelled.)
If *return_exceptions* is False, cancelling gather() after it
has been marked done won't cancel any submitted awaitables.
For instance, gather can be marked done after propagating an
exception to the caller, therefore, calling ``gather.cancel()``
after catching an exception (raised by one of the awaitables) from
gather won't cancel any other awaitables.
"""
@overload
def gather( # type: ignore[overload-overlap]
coro_or_future1: _FutureLike[_T1], coro_or_future2: _FutureLike[_T2], /, *, return_exceptions: Literal[False] = False
@@ -227,7 +350,38 @@ else:
@overload
def gather( # type: ignore[overload-overlap]
coro_or_future1: _FutureLike[_T1], /, *, loop: AbstractEventLoop | None = None, return_exceptions: Literal[False] = False
) -> Future[tuple[_T1]]: ...
) -> Future[tuple[_T1]]:
"""
Return a future aggregating results from the given coroutines/futures.
Coroutines will be wrapped in a future and scheduled in the event
loop. They will not necessarily be scheduled in the same order as
passed in.
All futures must share the same event loop. If all the tasks are
done successfully, the returned future's result is the list of
results (in the order of the original sequence, not necessarily
the order of results arrival). If *return_exceptions* is True,
exceptions in the tasks are treated the same as successful
results, and gathered in the result list; otherwise, the first
raised exception will be immediately propagated to the returned
future.
Cancellation: if the outer Future is cancelled, all children (that
have not completed yet) are also cancelled. If any child is
cancelled, this is treated as if it raised CancelledError --
the outer Future is *not* cancelled in this case. (This is to
prevent the cancellation of one child to cause other children to
be cancelled.)
If *return_exceptions* is False, cancelling gather() after it
has been marked done won't cancel any submitted awaitables.
For instance, gather can be marked done after propagating an
exception to the caller, therefore, calling ``gather.cancel()``
after catching an exception (raised by one of the awaitables) from
gather won't cancel any other awaitables.
"""
@overload
def gather( # type: ignore[overload-overlap]
coro_or_future1: _FutureLike[_T1],
@@ -349,29 +503,147 @@ else:
) -> Future[list[_T | BaseException]]: ...
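Illustrating the `return_exceptions` behaviour that the gather() docstrings above spell out (a minimal, assumed example):

import asyncio

async def fail() -> None:
    raise RuntimeError("nope")

async def main() -> None:
    # With return_exceptions=True the exception is collected into the
    # result list instead of propagating out of gather().
    results = await asyncio.gather(asyncio.sleep(0, result=1), fail(),
                                   return_exceptions=True)
    print(results)  # [1, RuntimeError('nope')]

asyncio.run(main())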
# unlike some asyncio APIs, this does strict runtime checking of actually being a coroutine, not of any future-like.
def run_coroutine_threadsafe(coro: Coroutine[Any, Any, _T], loop: AbstractEventLoop) -> concurrent.futures.Future[_T]: ...
def run_coroutine_threadsafe(coro: Coroutine[Any, Any, _T], loop: AbstractEventLoop) -> concurrent.futures.Future[_T]:
"""
Submit a coroutine object to a given event loop.
Return a concurrent.futures.Future to access the result.
"""
if sys.version_info >= (3, 10):
def shield(arg: _FutureLike[_T]) -> Future[_T]: ...
def shield(arg: _FutureLike[_T]) -> Future[_T]:
"""
Wait for a future, shielding it from cancellation.
The statement
task = asyncio.create_task(something())
res = await shield(task)
is exactly equivalent to the statement
res = await something()
*except* that if the coroutine containing it is cancelled, the
task running in something() is not cancelled. From the POV of
something(), the cancellation did not happen. But its caller is
still cancelled, so the yield-from expression still raises
CancelledError. Note: If something() is cancelled by other means
this will still cancel shield().
If you want to completely ignore cancellation (not recommended)
you can combine shield() with a try/except clause, as follows:
task = asyncio.create_task(something())
try:
res = await shield(task)
except CancelledError:
res = None
Save a reference to tasks passed to this function, to avoid
a task disappearing mid-execution. The event loop only keeps
weak references to tasks. A task that isn't referenced elsewhere
may get garbage collected at any time, even before it's done.
"""
@overload
async def sleep(delay: float) -> None: ...
async def sleep(delay: float) -> None:
"""
Coroutine that completes after a given time (in seconds).
"""
@overload
async def sleep(delay: float, result: _T) -> _T: ...
async def wait_for(fut: _FutureLike[_T], timeout: float | None) -> _T: ...
async def wait_for(fut: _FutureLike[_T], timeout: float | None) -> _T:
"""
Wait for the single Future or coroutine to complete, with timeout.
Coroutine will be wrapped in Task.
Returns result of the Future or coroutine. When a timeout occurs,
it cancels the task and raises TimeoutError. To avoid the task
cancellation, wrap it in shield().
If the wait is cancelled, the task is also cancelled.
If the task suppresses the cancellation and returns a value instead,
that value is returned.
This function is a coroutine.
"""
else:
def shield(arg: _FutureLike[_T], *, loop: AbstractEventLoop | None = None) -> Future[_T]: ...
def shield(arg: _FutureLike[_T], *, loop: AbstractEventLoop | None = None) -> Future[_T]:
"""
Wait for a future, shielding it from cancellation.
The statement
res = await shield(something())
is exactly equivalent to the statement
res = await something()
*except* that if the coroutine containing it is cancelled, the
task running in something() is not cancelled. From the POV of
something(), the cancellation did not happen. But its caller is
still cancelled, so the yield-from expression still raises
CancelledError. Note: If something() is cancelled by other means
this will still cancel shield().
If you want to completely ignore cancellation (not recommended)
you can combine shield() with a try/except clause, as follows:
try:
res = await shield(something())
except CancelledError:
res = None
"""
@overload
async def sleep(delay: float, *, loop: AbstractEventLoop | None = None) -> None: ...
async def sleep(delay: float, *, loop: AbstractEventLoop | None = None) -> None:
"""
Coroutine that completes after a given time (in seconds).
"""
@overload
async def sleep(delay: float, result: _T, *, loop: AbstractEventLoop | None = None) -> _T: ...
async def wait_for(fut: _FutureLike[_T], timeout: float | None, *, loop: AbstractEventLoop | None = None) -> _T: ...
async def wait_for(fut: _FutureLike[_T], timeout: float | None, *, loop: AbstractEventLoop | None = None) -> _T:
"""
Wait for the single Future or coroutine to complete, with timeout.
Coroutine will be wrapped in Task.
Returns result of the Future or coroutine. When a timeout occurs,
it cancels the task and raises TimeoutError. To avoid the task
cancellation, wrap it in shield().
If the wait is cancelled, the task is also cancelled.
This function is a coroutine.
"""
if sys.version_info >= (3, 11):
@overload
async def wait(
fs: Iterable[_FT], *, timeout: float | None = None, return_when: str = "ALL_COMPLETED"
) -> tuple[set[_FT], set[_FT]]: ...
) -> tuple[set[_FT], set[_FT]]:
"""
Wait for the Futures or Tasks given by fs to complete.
The fs iterable must not be empty.
Returns two sets of Future: (done, pending).
Usage:
done, pending = await asyncio.wait(fs)
Note: This does not raise TimeoutError! Futures that aren't done
when the timeout occurs are returned in the second set.
"""
@overload
async def wait(
fs: Iterable[Task[_T]], *, timeout: float | None = None, return_when: str = "ALL_COMPLETED"
@@ -381,7 +653,24 @@ elif sys.version_info >= (3, 10):
@overload
async def wait( # type: ignore[overload-overlap]
fs: Iterable[_FT], *, timeout: float | None = None, return_when: str = "ALL_COMPLETED"
) -> tuple[set[_FT], set[_FT]]: ...
) -> tuple[set[_FT], set[_FT]]:
"""
Wait for the Futures and coroutines given by fs to complete.
The fs iterable must not be empty.
Coroutines will be wrapped in Tasks.
Returns two sets of Future: (done, pending).
Usage:
done, pending = await asyncio.wait(fs)
Note: This does not raise TimeoutError! Futures that aren't done
when the timeout occurs are returned in the second set.
"""
@overload
async def wait(
fs: Iterable[Awaitable[_T]], *, timeout: float | None = None, return_when: str = "ALL_COMPLETED"
@@ -395,7 +684,24 @@ else:
loop: AbstractEventLoop | None = None,
timeout: float | None = None,
return_when: str = "ALL_COMPLETED",
) -> tuple[set[_FT], set[_FT]]: ...
) -> tuple[set[_FT], set[_FT]]:
"""
Wait for the Futures and coroutines given by fs to complete.
The fs iterable must not be empty.
Coroutines will be wrapped in Tasks.
Returns two sets of Future: (done, pending).
Usage:
done, pending = await asyncio.wait(fs)
Note: This does not raise TimeoutError! Futures that aren't done
when the timeout occurs are returned in the second set.
"""
@overload
async def wait(
fs: Iterable[Awaitable[_T]],
@@ -410,18 +716,34 @@ if sys.version_info >= (3, 12):
else:
_TaskCompatibleCoro: TypeAlias = Generator[_TaskYieldType, None, _T_co] | Coroutine[Any, Any, _T_co]
def all_tasks(loop: AbstractEventLoop | None = None) -> set[Task[Any]]: ...
def all_tasks(loop: AbstractEventLoop | None = None) -> set[Task[Any]]:
"""
Return a set of all tasks for the loop.
"""
if sys.version_info >= (3, 11):
def create_task(coro: _CoroutineLike[_T], *, name: str | None = None, context: Context | None = None) -> Task[_T]: ...
def create_task(coro: _CoroutineLike[_T], *, name: str | None = None, context: Context | None = None) -> Task[_T]:
"""
Schedule the execution of a coroutine object in a spawned task.
Return a Task object.
"""
else:
def create_task(coro: _CoroutineLike[_T], *, name: str | None = None) -> Task[_T]: ...
def create_task(coro: _CoroutineLike[_T], *, name: str | None = None) -> Task[_T]:
"""
Schedule the execution of a coroutine object in a spawned task.
Return a Task object.
"""
if sys.version_info >= (3, 12):
from _asyncio import current_task as current_task
else:
def current_task(loop: AbstractEventLoop | None = None) -> Task[Any] | None: ...
def current_task(loop: AbstractEventLoop | None = None) -> Task[Any] | None:
"""
Return a currently executed task.
"""
if sys.version_info >= (3, 14):
def eager_task_factory(
@@ -467,6 +789,22 @@ if sys.version_info >= (3, 12):
context: Context | None = None,
) -> _TaskT_co: ...
def create_eager_task_factory(
custom_task_constructor: _CustomTaskConstructor[_TaskT_co],
) -> _EagerTaskFactoryType[_TaskT_co]: ...
def create_eager_task_factory(custom_task_constructor: _CustomTaskConstructor[_TaskT_co]) -> _EagerTaskFactoryType[_TaskT_co]:
"""
Create a function suitable for use as a task factory on an event-loop.
Example usage:
loop.set_task_factory(
asyncio.create_eager_task_factory(my_task_constructor))
Now, tasks created will be started immediately (rather than being first
scheduled to an event loop). The constructor argument can be any callable
that returns a Task-compatible object and has a signature compatible
with `Task.__init__`; it must have the `eager_start` keyword argument.
Most applications will use `Task` for `custom_task_constructor` and in
this case there's no need to call `create_eager_task_factory()`
directly. Instead the global `eager_task_factory` instance can be
used. E.g. `loop.set_task_factory(asyncio.eager_task_factory)`.
"""


@@ -1,3 +1,7 @@
"""
High-level support for working with threads in asyncio
"""
from collections.abc import Callable
from typing import TypeVar
from typing_extensions import ParamSpec
@@ -7,4 +11,14 @@ __all__ = ("to_thread",)
_P = ParamSpec("_P")
_R = TypeVar("_R")
async def to_thread(func: Callable[_P, _R], /, *args: _P.args, **kwargs: _P.kwargs) -> _R: ...
async def to_thread(func: Callable[_P, _R], /, *args: _P.args, **kwargs: _P.kwargs) -> _R:
"""
Asynchronously run function *func* in a separate thread.
Any *args and **kwargs supplied for this function are directly passed
to *func*. Also, the current :class:`contextvars.Context` is propagated,
allowing context variables from the main thread to be accessed in the
separate thread.
Return a coroutine that can be awaited to get the eventual result of *func*.
"""


@@ -7,14 +7,76 @@ __all__ = ("Timeout", "timeout", "timeout_at")
@final
class Timeout:
def __init__(self, when: float | None) -> None: ...
def when(self) -> float | None: ...
def reschedule(self, when: float | None) -> None: ...
def expired(self) -> bool: ...
"""
Asynchronous context manager for cancelling overdue coroutines.
Use `timeout()` or `timeout_at()` rather than instantiating this class directly.
"""
def __init__(self, when: float | None) -> None:
"""
Schedule a timeout that will trigger at a given loop time.
- If `when` is `None`, the timeout will never trigger.
- If `when < loop.time()`, the timeout will trigger on the next
iteration of the event loop.
"""
def when(self) -> float | None:
"""
Return the current deadline.
"""
def reschedule(self, when: float | None) -> None:
"""
Reschedule the timeout.
"""
def expired(self) -> bool:
"""
Is timeout expired during execution?
"""
async def __aenter__(self) -> Self: ...
async def __aexit__(
self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
) -> None: ...
def timeout(delay: float | None) -> Timeout: ...
def timeout_at(when: float | None) -> Timeout: ...
def timeout(delay: float | None) -> Timeout:
"""
Timeout async context manager.
Useful in cases when you want to apply timeout logic around block
of code or in cases when asyncio.wait_for is not suitable. For example:
>>> async with asyncio.timeout(10): # 10 seconds timeout
... await long_running_task()
delay - value in seconds or None to disable timeout logic
long_running_task() is interrupted by raising asyncio.CancelledError,
the top-most affected timeout() context manager converts CancelledError
into TimeoutError.
"""
def timeout_at(when: float | None) -> Timeout:
"""
Schedule the timeout at absolute time.
Like timeout() but argument gives absolute time in the same clock system
as loop.time().
Please note: it is not POSIX time but a time with
undefined starting base, e.g. the time of the system power on.
>>> async with asyncio.timeout_at(loop.time() + 10):
... await long_running_task()
when - a deadline when timeout occurs or None to disable timeout logic
long_running_task() is interrupted by raising asyncio.CancelledError,
the top-most affected timeout() context manager converts CancelledError
into TimeoutError.
"""


@@ -1,3 +1,7 @@
"""
Tools to analyze tasks running in asyncio programs.
"""
from collections.abc import Iterable
from enum import Enum
from typing import NamedTuple, SupportsIndex, type_check_only
@@ -30,12 +34,30 @@ class NodeType(Enum):
TASK = 2
class CycleFoundException(Exception):
"""
Raised when there is a cycle when drawing the call tree.
"""
cycles: list[list[int]]
id2name: dict[int, str]
def __init__(self, cycles: list[list[int]], id2name: dict[int, str]) -> None: ...
def get_all_awaited_by(pid: SupportsIndex) -> list[_AwaitedInfo]: ...
def build_async_tree(result: Iterable[_AwaitedInfo], task_emoji: str = "(T)", cor_emoji: str = "") -> list[list[str]]: ...
def build_async_tree(result: Iterable[_AwaitedInfo], task_emoji: str = "(T)", cor_emoji: str = "") -> list[list[str]]:
"""
Build a list of strings for pretty-printing an async call tree.
The call tree is produced by `get_all_async_stacks()`, prefixing tasks
with `task_emoji` and coroutine frames with `cor_emoji`.
"""
def build_task_table(result: Iterable[_AwaitedInfo]) -> list[list[int | str]]: ...
def display_awaited_by_tasks_table(pid: SupportsIndex) -> None: ...
def display_awaited_by_tasks_tree(pid: SupportsIndex) -> None: ...
def display_awaited_by_tasks_table(pid: SupportsIndex) -> None:
"""
Build and print a table of all pending tasks under `pid`.
"""
def display_awaited_by_tasks_tree(pid: SupportsIndex) -> None:
"""
Build and print a tree of all pending tasks under `pid`.
"""


@@ -1,3 +1,7 @@
"""
Abstract Transport class.
"""
from asyncio.events import AbstractEventLoop
from asyncio.protocols import BaseProtocol
from collections.abc import Iterable, Mapping
@@ -8,43 +12,260 @@ from typing import Any
__all__ = ("BaseTransport", "ReadTransport", "WriteTransport", "Transport", "DatagramTransport", "SubprocessTransport")
class BaseTransport:
"""
Base class for transports.
"""
def __init__(self, extra: Mapping[str, Any] | None = None) -> None: ...
def get_extra_info(self, name: str, default: Any = None) -> Any: ...
def is_closing(self) -> bool: ...
def close(self) -> None: ...
def set_protocol(self, protocol: BaseProtocol) -> None: ...
def get_protocol(self) -> BaseProtocol: ...
def get_extra_info(self, name: str, default: Any = None) -> Any:
"""
Get optional transport information.
"""
def is_closing(self) -> bool:
"""
Return True if the transport is closing or closed.
"""
def close(self) -> None:
"""
Close the transport.
Buffered data will be flushed asynchronously. No more data
will be received. After all buffered data is flushed, the
protocol's connection_lost() method will (eventually) be
called with None as its argument.
"""
def set_protocol(self, protocol: BaseProtocol) -> None:
"""
Set a new protocol.
"""
def get_protocol(self) -> BaseProtocol:
"""
Return the current protocol.
"""
class ReadTransport(BaseTransport):
def is_reading(self) -> bool: ...
def pause_reading(self) -> None: ...
def resume_reading(self) -> None: ...
"""
Interface for read-only transports.
"""
def is_reading(self) -> bool:
"""
Return True if the transport is receiving.
"""
def pause_reading(self) -> None:
"""
Pause the receiving end.
No data will be passed to the protocol's data_received()
method until resume_reading() is called.
"""
def resume_reading(self) -> None:
"""
Resume the receiving end.
Data received will once again be passed to the protocol's
data_received() method.
"""
class WriteTransport(BaseTransport):
def set_write_buffer_limits(self, high: int | None = None, low: int | None = None) -> None: ...
def get_write_buffer_size(self) -> int: ...
def get_write_buffer_limits(self) -> tuple[int, int]: ...
def write(self, data: bytes | bytearray | memoryview[Any]) -> None: ... # any memoryview format or shape
def writelines(
self, list_of_data: Iterable[bytes | bytearray | memoryview[Any]]
) -> None: ... # any memoryview format or shape
def write_eof(self) -> None: ...
def can_write_eof(self) -> bool: ...
def abort(self) -> None: ...
"""
Interface for write-only transports.
"""
def set_write_buffer_limits(self, high: int | None = None, low: int | None = None) -> None:
"""
Set the high- and low-water limits for write flow control.
These two values control when to call the protocol's
pause_writing() and resume_writing() methods. If specified,
the low-water limit must be less than or equal to the
high-water limit. Neither value can be negative.
The defaults are implementation-specific. If only the
high-water limit is given, the low-water limit defaults to an
implementation-specific value less than or equal to the
high-water limit. Setting high to zero forces low to zero as
well, and causes pause_writing() to be called whenever the
buffer becomes non-empty. Setting low to zero causes
resume_writing() to be called only once the buffer is empty.
Use of zero for either limit is generally sub-optimal as it
reduces opportunities for doing I/O and computation
concurrently.
"""
def get_write_buffer_size(self) -> int:
"""
Return the current size of the write buffer.
"""
def get_write_buffer_limits(self) -> tuple[int, int]:
"""
Get the high and low watermarks for write flow control.
Return a tuple (low, high) where low and high are
positive number of bytes.
"""
def write(self, data: bytes | bytearray | memoryview[Any]) -> None: # any memoryview format or shape
"""
Write some data bytes to the transport.
This does not block; it buffers the data and arranges for it
to be sent out asynchronously.
"""
def writelines(self, list_of_data: Iterable[bytes | bytearray | memoryview[Any]]) -> None: # any memoryview format or shape
"""
Write a list (or any iterable) of data bytes to the transport.
The default implementation concatenates the arguments and
calls write() on the result.
"""
def write_eof(self) -> None:
"""
Close the write end after flushing buffered data.
(This is like typing ^D into a UNIX program reading from stdin.)
Data may still be received.
"""
def can_write_eof(self) -> bool:
"""
Return True if this transport supports write_eof(), False if not.
"""
def abort(self) -> None:
"""
Close the transport immediately.
Buffered data will be lost. No more data will be received.
The protocol's connection_lost() method will (eventually) be
called with None as its argument.
"""
class Transport(ReadTransport, WriteTransport): ...
class Transport(ReadTransport, WriteTransport):
"""
Interface representing a bidirectional transport.
There may be several implementations, but typically, the user does
not implement new transports; rather, the platform provides some
useful transports that are implemented using the platform's best
practices.
The user never instantiates a transport directly; they call a
utility function, passing it a protocol factory and other
information necessary to create the transport and protocol. (E.g.
EventLoop.create_connection() or EventLoop.create_server().)
The utility function will asynchronously create a transport and a
protocol and hook them up by calling the protocol's
connection_made() method, passing it the transport.
The implementation here raises NotImplemented for every method
except writelines(), which calls write() in a loop.
"""
class DatagramTransport(BaseTransport):
def sendto(self, data: bytes | bytearray | memoryview, addr: _Address | None = None) -> None: ...
def abort(self) -> None: ...
"""
Interface for datagram (UDP) transports.
"""
def sendto(self, data: bytes | bytearray | memoryview, addr: _Address | None = None) -> None:
"""
Send data to the transport.
This does not block; it buffers the data and arranges for it
to be sent out asynchronously.
addr is target socket address.
If addr is None use target address pointed on transport creation.
If data is an empty bytes object a zero-length datagram will be
sent.
"""
def abort(self) -> None:
"""
Close the transport immediately.
Buffered data will be lost. No more data will be received.
The protocol's connection_lost() method will (eventually) be
called with None as its argument.
"""
class SubprocessTransport(BaseTransport):
def get_pid(self) -> int: ...
def get_returncode(self) -> int | None: ...
def get_pipe_transport(self, fd: int) -> BaseTransport | None: ...
def send_signal(self, signal: int) -> None: ...
def terminate(self) -> None: ...
def kill(self) -> None: ...
def get_pid(self) -> int:
"""
Get subprocess id.
"""
def get_returncode(self) -> int | None:
"""
Get subprocess returncode.
See also
http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
"""
def get_pipe_transport(self, fd: int) -> BaseTransport | None:
"""
Get transport for pipe with number fd.
"""
def send_signal(self, signal: int) -> None:
"""
Send signal to subprocess.
See also:
docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
"""
def terminate(self) -> None:
"""
Stop the subprocess.
Alias for close() method.
On Posix OSs the method sends SIGTERM to the subprocess.
On Windows the Win32 API function TerminateProcess()
is called to stop the subprocess.
See also:
http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
"""
def kill(self) -> None:
"""
Kill the subprocess.
On Posix OSs the function sends SIGKILL to the subprocess.
On Windows kill() is an alias for terminate().
See also:
http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
"""
class _FlowControlMixin(Transport):
"""
All the logic for (write) flow control in a mix-in base class.
The subclass must implement get_write_buffer_size(). It must call
_maybe_pause_protocol() whenever the write buffer size increases,
and _maybe_resume_protocol() whenever it decreases. It may also
override set_write_buffer_limits() (e.g. to specify different
defaults).
The subclass constructor must call super().__init__(extra). This
will call set_write_buffer_limits().
The user may call set_write_buffer_limits() and
get_write_buffer_size(), and their protocol's pause_writing() and
resume_writing() may be called.
"""
def __init__(self, extra: Mapping[str, Any] | None = None, loop: AbstractEventLoop | None = None) -> None: ...
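A sketch of how a protocol might cooperate with the write flow control described above; the class and the buffer limits are invented for illustration:

import asyncio

class Sender(asyncio.Protocol):
    def connection_made(self, transport: asyncio.Transport) -> None:
        transport.set_write_buffer_limits(high=64 * 1024, low=16 * 1024)
        self.transport = transport
        self.can_send = True

    def pause_writing(self) -> None:
        # Buffer rose above the high-water mark; stop producing data.
        self.can_send = False

    def resume_writing(self) -> None:
        # Buffer drained below the low-water mark; produce again.
        self.can_send = True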


@@ -14,6 +14,14 @@ _WriteBuffer: TypeAlias = bytearray | memoryview
_CMSG: TypeAlias = tuple[int, int, bytes]
class TransportSocket:
"""
A socket-like wrapper for exposing real transport sockets.
These objects can be safely returned by APIs like
`transport.get_extra_info('socket')`. All potentially disruptive
operations (like "socket.close()") are banned.
"""
def __init__(self, sock: socket.socket) -> None: ...
@property
def family(self) -> int: ...


@@ -1,3 +1,7 @@
"""
Selector event loop for Unix with signal handling.
"""
import sys
import types
from _typeshed import StrPath
@@ -50,45 +54,187 @@ if sys.version_info < (3, 14):
if sys.version_info >= (3, 12):
@deprecated("Deprecated as of Python 3.12; will be removed in Python 3.14")
class AbstractChildWatcher:
"""
Abstract base class for monitoring child processes.
Objects derived from this class monitor a collection of subprocesses and
report their termination or interruption by a signal.
New callbacks are registered with .add_child_handler(). Starting a new
process must be done within a 'with' block to allow the watcher to suspend
its activity until the new process is fully registered (this is needed to
prevent a race condition in some implementations).
Example:
with watcher:
proc = subprocess.Popen("sleep 1")
watcher.add_child_handler(proc.pid, callback)
Notes:
Implementations of this class must be thread-safe.
Since child watcher objects may catch the SIGCHLD signal and call
waitpid(-1), there should be only one active object per process.
"""
@abstractmethod
def add_child_handler(
self, pid: int, callback: Callable[[int, int, Unpack[_Ts]], object], *args: Unpack[_Ts]
) -> None: ...
) -> None:
"""
Register a new child handler.
Arrange for callback(pid, returncode, *args) to be called when
process 'pid' terminates. Specifying another callback for the same
process replaces the previous handler.
Note: callback() must be thread-safe.
"""
@abstractmethod
def remove_child_handler(self, pid: int) -> bool: ...
def remove_child_handler(self, pid: int) -> bool:
"""
Removes the handler for process 'pid'.
The function returns True if the handler was successfully removed,
False if there was nothing to remove.
"""
@abstractmethod
def attach_loop(self, loop: events.AbstractEventLoop | None) -> None: ...
def attach_loop(self, loop: events.AbstractEventLoop | None) -> None:
"""
Attach the watcher to an event loop.
If the watcher was previously attached to an event loop, then it is
first detached before attaching to the new loop.
Note: loop may be None.
"""
@abstractmethod
def close(self) -> None: ...
def close(self) -> None:
"""
Close the watcher.
This must be called to make sure that any underlying resource is freed.
"""
@abstractmethod
def __enter__(self) -> Self: ...
def __enter__(self) -> Self:
"""
Enter the watcher's context and allow starting new processes
This function must return self
"""
@abstractmethod
def __exit__(
self, typ: type[BaseException] | None, exc: BaseException | None, tb: types.TracebackType | None
) -> None: ...
) -> None:
"""
Exit the watcher's context
"""
@abstractmethod
def is_active(self) -> bool: ...
def is_active(self) -> bool:
"""
Return ``True`` if the watcher is active and is used by the event loop.
Return True if the watcher is installed and ready to handle process exit
notifications.
"""
else:
class AbstractChildWatcher:
"""
Abstract base class for monitoring child processes.
Objects derived from this class monitor a collection of subprocesses and
report their termination or interruption by a signal.
New callbacks are registered with .add_child_handler(). Starting a new
process must be done within a 'with' block to allow the watcher to suspend
its activity until the new process is fully registered (this is needed to
prevent a race condition in some implementations).
Example:
with watcher:
proc = subprocess.Popen("sleep 1")
watcher.add_child_handler(proc.pid, callback)
Notes:
Implementations of this class must be thread-safe.
Since child watcher objects may catch the SIGCHLD signal and call
waitpid(-1), there should be only one active object per process.
"""
@abstractmethod
def add_child_handler(
self, pid: int, callback: Callable[[int, int, Unpack[_Ts]], object], *args: Unpack[_Ts]
) -> None: ...
) -> None:
"""
Register a new child handler.
Arrange for callback(pid, returncode, *args) to be called when
process 'pid' terminates. Specifying another callback for the same
process replaces the previous handler.
Note: callback() must be thread-safe.
"""
@abstractmethod
def remove_child_handler(self, pid: int) -> bool: ...
def remove_child_handler(self, pid: int) -> bool:
"""
Removes the handler for process 'pid'.
The function returns True if the handler was successfully removed,
False if there was nothing to remove.
"""
@abstractmethod
def attach_loop(self, loop: events.AbstractEventLoop | None) -> None: ...
def attach_loop(self, loop: events.AbstractEventLoop | None) -> None:
"""
Attach the watcher to an event loop.
If the watcher was previously attached to an event loop, then it is
first detached before attaching to the new loop.
Note: loop may be None.
"""
@abstractmethod
def close(self) -> None: ...
def close(self) -> None:
"""
Close the watcher.
This must be called to make sure that any underlying resource is freed.
"""
@abstractmethod
def __enter__(self) -> Self: ...
def __enter__(self) -> Self:
"""
Enter the watcher's context and allow starting new processes
This function must return self
"""
@abstractmethod
def __exit__(
self, typ: type[BaseException] | None, exc: BaseException | None, tb: types.TracebackType | None
) -> None: ...
) -> None:
"""
Exit the watcher's context
"""
@abstractmethod
def is_active(self) -> bool: ...
def is_active(self) -> bool:
"""
Return ``True`` if the watcher is active and is used by the event loop.
Return True if the watcher is installed and ready to handle process exit
notifications.
"""
if sys.platform != "win32":
if sys.version_info < (3, 14):
@@ -102,6 +248,17 @@ if sys.platform != "win32":
@deprecated("Deprecated as of Python 3.12; will be removed in Python 3.14")
class SafeChildWatcher(BaseChildWatcher):
"""
'Safe' child watcher implementation.
This implementation avoids disrupting other code spawning processes by
explicitly polling each process in the SIGCHLD handler instead of calling
os.waitpid(-1).
This is a safe solution but it has a significant overhead when handling a
big number of children (O(n) each time SIGCHLD is raised)
"""
def __enter__(self) -> Self: ...
def __exit__(
self, a: type[BaseException] | None, b: BaseException | None, c: types.TracebackType | None
@@ -113,6 +270,17 @@ if sys.platform != "win32":
@deprecated("Deprecated as of Python 3.12; will be removed in Python 3.14")
class FastChildWatcher(BaseChildWatcher):
"""
'Fast' child watcher implementation.
This implementation reaps every terminated process by calling
os.waitpid(-1) directly, possibly breaking other code spawning processes
and waiting for their termination.
There is no noticeable overhead when handling a big number of children
(O(1) each time a child terminates).
"""
def __enter__(self) -> Self: ...
def __exit__(
self, a: type[BaseException] | None, b: BaseException | None, c: types.TracebackType | None
@@ -131,6 +299,17 @@ if sys.platform != "win32":
def attach_loop(self, loop: events.AbstractEventLoop | None) -> None: ...
class SafeChildWatcher(BaseChildWatcher):
"""
'Safe' child watcher implementation.
This implementation avoids disrupting other code spawning processes by
explicitly polling each process in the SIGCHLD handler instead of calling
os.waitpid(-1).
This is a safe solution but it has a significant overhead when handling a
big number of children (O(n) each time SIGCHLD is raised)
"""
def __enter__(self) -> Self: ...
def __exit__(
self, a: type[BaseException] | None, b: BaseException | None, c: types.TracebackType | None
@@ -141,6 +320,17 @@ if sys.platform != "win32":
def remove_child_handler(self, pid: int) -> bool: ...
class FastChildWatcher(BaseChildWatcher):
"""
'Fast' child watcher implementation.
This implementation reaps every terminated process by calling
os.waitpid(-1) directly, possibly breaking other code spawning processes
and waiting for their termination.
There is no noticeable overhead when handling a big number of children
(O(1) each time a child terminates).
"""
def __enter__(self) -> Self: ...
def __exit__(
self, a: type[BaseException] | None, b: BaseException | None, c: types.TracebackType | None
@@ -151,6 +341,12 @@ if sys.platform != "win32":
def remove_child_handler(self, pid: int) -> bool: ...
class _UnixSelectorEventLoop(BaseSelectorEventLoop):
"""
Unix event loop.
Adds signal handling and UNIX Domain Socket support to SelectorEventLoop.
"""
if sys.version_info >= (3, 13):
async def create_unix_server(
self,
@@ -167,17 +363,43 @@ if sys.platform != "win32":
) -> Server: ...
if sys.version_info >= (3, 14):
class _UnixDefaultEventLoopPolicy(events._BaseDefaultEventLoopPolicy): ...
class _UnixDefaultEventLoopPolicy(events._BaseDefaultEventLoopPolicy):
"""
UNIX event loop policy
"""
else:
class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
"""
UNIX event loop policy with a watcher for child processes.
"""
if sys.version_info >= (3, 12):
@deprecated("Deprecated as of Python 3.12; will be removed in Python 3.14")
def get_child_watcher(self) -> AbstractChildWatcher: ...
def get_child_watcher(self) -> AbstractChildWatcher:
"""
Get the watcher for child processes.
If not yet set, a ThreadedChildWatcher object is automatically created.
"""
@deprecated("Deprecated as of Python 3.12; will be removed in Python 3.14")
def set_child_watcher(self, watcher: AbstractChildWatcher | None) -> None: ...
def set_child_watcher(self, watcher: AbstractChildWatcher | None) -> None:
"""
Set the watcher for child processes.
"""
else:
def get_child_watcher(self) -> AbstractChildWatcher: ...
def set_child_watcher(self, watcher: AbstractChildWatcher | None) -> None: ...
def get_child_watcher(self) -> AbstractChildWatcher:
"""
Get the watcher for child processes.
If not yet set, a ThreadedChildWatcher object is automatically created.
"""
def set_child_watcher(self, watcher: AbstractChildWatcher | None) -> None:
"""
Set the watcher for child processes.
"""
SelectorEventLoop = _UnixSelectorEventLoop
@ -193,6 +415,18 @@ if sys.platform != "win32":
if sys.version_info >= (3, 12):
@deprecated("Deprecated as of Python 3.12; will be removed in Python 3.14")
class MultiLoopChildWatcher(AbstractChildWatcher):
"""
A watcher that doesn't require a running loop in the main thread.

This implementation registers a SIGCHLD signal handler on
instantiation (which may conflict with other code that
installs its own handler for this signal).

The solution is safe but it has a significant overhead when
handling a large number of processes (*O(n)* each time a
SIGCHLD is received).
"""
def is_active(self) -> bool: ...
def close(self) -> None: ...
def __enter__(self) -> Self: ...
@ -207,6 +441,18 @@ if sys.platform != "win32":
else:
class MultiLoopChildWatcher(AbstractChildWatcher):
"""
A watcher that doesn't require a running loop in the main thread.

This implementation registers a SIGCHLD signal handler on
instantiation (which may conflict with other code that
installs its own handler for this signal).

The solution is safe but it has a significant overhead when
handling a large number of processes (*O(n)* each time a
SIGCHLD is received).
"""
def is_active(self) -> bool: ...
def close(self) -> None: ...
def __enter__(self) -> Self: ...
@ -221,6 +467,19 @@ if sys.platform != "win32":
if sys.version_info < (3, 14):
class ThreadedChildWatcher(AbstractChildWatcher):
"""
Threaded child watcher implementation.

The watcher uses a thread per process
to wait for the process to finish.

It doesn't require a subscription to the POSIX signal,
but thread creation is not free.

The watcher has O(1) complexity; its performance doesn't depend
on the number of spawned processes.
"""
def is_active(self) -> Literal[True]: ...
def close(self) -> None: ...
def __enter__(self) -> Self: ...
@ -235,6 +494,18 @@ if sys.platform != "win32":
def attach_loop(self, loop: events.AbstractEventLoop | None) -> None: ...
class PidfdChildWatcher(AbstractChildWatcher):
"""
Child watcher implementation using Linux's pid file descriptors.
This child watcher polls process file descriptors (pidfds) to await child
process termination. In some respects, PidfdChildWatcher is a "Goldilocks"
child watcher implementation. It doesn't require signals or threads, doesn't
interfere with any processes launched outside the event loop, and scales
linearly with the number of subprocesses launched by the event loop. The
main disadvantage is that pidfds are specific to Linux, and only work on
recent (5.3+) kernels.
"""
def __enter__(self) -> Self: ...
def __exit__(
self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: types.TracebackType | None
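For context on these watcher stubs, here is a minimal, hedged sketch of how one of them is installed at runtime; it assumes a POSIX platform and Python 3.8-3.11 (the whole child-watcher API is deprecated from 3.12 and removed in 3.14), and is not part of this diff:

# Hedged sketch: install ThreadedChildWatcher via the (now-deprecated)
# policy API; assumes POSIX and Python 3.8-3.11.
import asyncio
import sys

async def run_child() -> int:
    proc = await asyncio.create_subprocess_exec(sys.executable, "-c", "print('hi')")
    return await proc.wait()

if sys.platform != "win32":
    # ThreadedChildWatcher: one waiter thread per child, O(1) per exit.
    asyncio.get_event_loop_policy().set_child_watcher(asyncio.ThreadedChildWatcher())
    print(asyncio.run(run_child()))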


@ -1,3 +1,25 @@
"""
Basic infrastructure for asynchronous socket service clients and servers.
There are only two ways to have a program on a single processor do "more
than one thing at a time". Multi-threaded programming is the simplest and
most popular way to do it, but there is another very different technique,
that lets you have nearly all the advantages of multi-threading, without
actually using multiple threads. It's really only practical if your program
is largely I/O bound. If your program is CPU bound, then pre-emptive
scheduled threads are probably what you really need. Network servers are
rarely CPU-bound, however.

If your operating system supports the select() system call in its I/O
library (and nearly all do), then you can use it to juggle multiple
communication channels at once; doing other work while your I/O is taking
place in the "background." Although this strategy can seem strange and
complex, especially at first, it is in many ways easier to understand and
control than multi-threaded programming. The module documented here solves
many of the difficult problems for you, making the task of building
sophisticated high-performance network servers and clients a snap.
"""
import sys
from _typeshed import FileDescriptorLike, ReadableBuffer
from socket import socket
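The docstring above describes the select()-based model in prose; as a hedged sketch of the canonical echo-server shape (asyncore was removed in Python 3.12, so this only runs on 3.11 and earlier; host and port are arbitrary examples):

# Minimal asyncore echo server: one dispatcher per channel, a single
# select() loop juggling all of them.
import asyncore
import socket

class EchoHandler(asyncore.dispatcher_with_send):
    def handle_read(self):
        self.send(self.recv(8192))  # echo whatever arrived

class EchoServer(asyncore.dispatcher):
    def __init__(self, host, port):
        super().__init__()
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((host, port))
        self.listen(5)

    def handle_accepted(self, sock, addr):
        EchoHandler(sock)

EchoServer("127.0.0.1", 8071)
asyncore.loop()  # blocks, dispatching I/O events via select()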


@ -1,3 +1,10 @@
"""
Allow the programmer to define multiple exit functions to be executed
upon normal program termination.

Two public functions, register and unregister, are defined.
"""
from collections.abc import Callable
from typing import TypeVar
from typing_extensions import ParamSpec
@ -5,8 +12,38 @@ from typing_extensions import ParamSpec
_T = TypeVar("_T")
_P = ParamSpec("_P")
def _clear() -> None: ...
def _ncallbacks() -> int: ...
def _run_exitfuncs() -> None: ...
def register(func: Callable[_P, _T], /, *args: _P.args, **kwargs: _P.kwargs) -> Callable[_P, _T]: ...
def unregister(func: Callable[..., object], /) -> None: ...
def _clear() -> None:
"""
Clear the list of previously registered exit functions.
"""
def _ncallbacks() -> int:
"""
Return the number of registered exit functions.
"""
def _run_exitfuncs() -> None:
"""
Run all registered exit functions.
If a callback raises an exception, it is logged with sys.unraisablehook.
"""
def register(func: Callable[_P, _T], /, *args: _P.args, **kwargs: _P.kwargs) -> Callable[_P, _T]:
"""
Register a function to be executed upon normal program termination
func - function to be called at exit
args - optional arguments to pass to func
kwargs - optional keyword arguments to pass to func
func is returned to facilitate usage as a decorator.
"""
def unregister(func: Callable[..., object], /) -> None:
"""
Unregister an exit function which was previously registered using
atexit.register
func - function to be unregistered
"""


@ -5,26 +5,106 @@ _RatecvState: TypeAlias = tuple[int, tuple[tuple[int, int], ...]]
class error(Exception): ...
def add(fragment1: Buffer, fragment2: Buffer, width: int, /) -> bytes: ...
def adpcm2lin(fragment: Buffer, width: int, state: _AdpcmState | None, /) -> tuple[bytes, _AdpcmState]: ...
def alaw2lin(fragment: Buffer, width: int, /) -> bytes: ...
def avg(fragment: Buffer, width: int, /) -> int: ...
def avgpp(fragment: Buffer, width: int, /) -> int: ...
def bias(fragment: Buffer, width: int, bias: int, /) -> bytes: ...
def byteswap(fragment: Buffer, width: int, /) -> bytes: ...
def cross(fragment: Buffer, width: int, /) -> int: ...
def findfactor(fragment: Buffer, reference: Buffer, /) -> float: ...
def findfit(fragment: Buffer, reference: Buffer, /) -> tuple[int, float]: ...
def findmax(fragment: Buffer, length: int, /) -> int: ...
def getsample(fragment: Buffer, width: int, index: int, /) -> int: ...
def lin2adpcm(fragment: Buffer, width: int, state: _AdpcmState | None, /) -> tuple[bytes, _AdpcmState]: ...
def lin2alaw(fragment: Buffer, width: int, /) -> bytes: ...
def lin2lin(fragment: Buffer, width: int, newwidth: int, /) -> bytes: ...
def lin2ulaw(fragment: Buffer, width: int, /) -> bytes: ...
def max(fragment: Buffer, width: int, /) -> int: ...
def maxpp(fragment: Buffer, width: int, /) -> int: ...
def minmax(fragment: Buffer, width: int, /) -> tuple[int, int]: ...
def mul(fragment: Buffer, width: int, factor: float, /) -> bytes: ...
def add(fragment1: Buffer, fragment2: Buffer, width: int, /) -> bytes:
"""
Return a fragment which is the addition of the two samples passed as parameters.
"""
def adpcm2lin(fragment: Buffer, width: int, state: _AdpcmState | None, /) -> tuple[bytes, _AdpcmState]:
"""
Decode an Intel/DVI ADPCM coded fragment to a linear fragment.
"""
def alaw2lin(fragment: Buffer, width: int, /) -> bytes:
"""
Convert sound fragments in a-LAW encoding to linearly encoded sound fragments.
"""
def avg(fragment: Buffer, width: int, /) -> int:
"""
Return the average over all samples in the fragment.
"""
def avgpp(fragment: Buffer, width: int, /) -> int:
"""
Return the average peak-peak value over all samples in the fragment.
"""
def bias(fragment: Buffer, width: int, bias: int, /) -> bytes:
"""
Return a fragment that is the original fragment with a bias added to each sample.
"""
def byteswap(fragment: Buffer, width: int, /) -> bytes:
"""
Convert big-endian samples to little-endian and vice versa.
"""
def cross(fragment: Buffer, width: int, /) -> int:
"""
Return the number of zero crossings in the fragment passed as an argument.
"""
def findfactor(fragment: Buffer, reference: Buffer, /) -> float:
"""
Return a factor F such that rms(add(fragment, mul(reference, -F))) is minimal.
"""
def findfit(fragment: Buffer, reference: Buffer, /) -> tuple[int, float]:
"""
Try to match reference as well as possible to a portion of fragment.
"""
def findmax(fragment: Buffer, length: int, /) -> int:
"""
Search fragment for a slice of specified number of samples with maximum energy.
"""
def getsample(fragment: Buffer, width: int, index: int, /) -> int:
"""
Return the value of sample index from the fragment.
"""
def lin2adpcm(fragment: Buffer, width: int, state: _AdpcmState | None, /) -> tuple[bytes, _AdpcmState]:
"""
Convert samples to 4 bit Intel/DVI ADPCM encoding.
"""
def lin2alaw(fragment: Buffer, width: int, /) -> bytes:
"""
Convert samples in the audio fragment to a-LAW encoding.
"""
def lin2lin(fragment: Buffer, width: int, newwidth: int, /) -> bytes:
"""
Convert samples between 1-, 2-, 3- and 4-byte formats.
"""
def lin2ulaw(fragment: Buffer, width: int, /) -> bytes:
"""
Convert samples in the audio fragment to u-LAW encoding.
"""
def max(fragment: Buffer, width: int, /) -> int:
"""
Return the maximum of the absolute value of all samples in a fragment.
"""
def maxpp(fragment: Buffer, width: int, /) -> int:
"""
Return the maximum peak-peak value in the sound fragment.
"""
def minmax(fragment: Buffer, width: int, /) -> tuple[int, int]:
"""
Return the minimum and maximum values of all samples in the sound fragment.
"""
def mul(fragment: Buffer, width: int, factor: float, /) -> bytes:
"""
Return a fragment that has all samples in the original fragment multiplied by the floating-point value factor.
"""
def ratecv(
fragment: Buffer,
width: int,
@ -35,9 +115,32 @@ def ratecv(
weightA: int = 1,
weightB: int = 0,
/,
) -> tuple[bytes, _RatecvState]: ...
def reverse(fragment: Buffer, width: int, /) -> bytes: ...
def rms(fragment: Buffer, width: int, /) -> int: ...
def tomono(fragment: Buffer, width: int, lfactor: float, rfactor: float, /) -> bytes: ...
def tostereo(fragment: Buffer, width: int, lfactor: float, rfactor: float, /) -> bytes: ...
def ulaw2lin(fragment: Buffer, width: int, /) -> bytes: ...
) -> tuple[bytes, _RatecvState]:
"""
Convert the frame rate of the input fragment.
"""
def reverse(fragment: Buffer, width: int, /) -> bytes:
"""
Reverse the samples in a fragment and returns the modified fragment.
"""
def rms(fragment: Buffer, width: int, /) -> int:
"""
Return the root-mean-square of the fragment, i.e. sqrt(sum(S_i^2)/n).
"""
def tomono(fragment: Buffer, width: int, lfactor: float, rfactor: float, /) -> bytes:
"""
Convert a stereo fragment to a mono fragment.
"""
def tostereo(fragment: Buffer, width: int, lfactor: float, rfactor: float, /) -> bytes:
"""
Generate a stereo fragment from a mono fragment.
"""
def ulaw2lin(fragment: Buffer, width: int, /) -> bytes:
"""
Convert sound fragments in u-LAW encoding to linearly encoded sound fragments.
"""


@ -1,3 +1,7 @@
"""
Base16, Base32, Base64 (RFC 3548), Base85 and Ascii85 data encodings
"""
import sys
from _typeshed import ReadableBuffer
from typing import IO
@ -28,34 +32,218 @@ if sys.version_info >= (3, 10):
if sys.version_info >= (3, 13):
__all__ += ["z85decode", "z85encode"]
def b64encode(s: ReadableBuffer, altchars: ReadableBuffer | None = None) -> bytes: ...
def b64decode(s: str | ReadableBuffer, altchars: str | ReadableBuffer | None = None, validate: bool = False) -> bytes: ...
def standard_b64encode(s: ReadableBuffer) -> bytes: ...
def standard_b64decode(s: str | ReadableBuffer) -> bytes: ...
def urlsafe_b64encode(s: ReadableBuffer) -> bytes: ...
def urlsafe_b64decode(s: str | ReadableBuffer) -> bytes: ...
def b32encode(s: ReadableBuffer) -> bytes: ...
def b32decode(s: str | ReadableBuffer, casefold: bool = False, map01: str | ReadableBuffer | None = None) -> bytes: ...
def b16encode(s: ReadableBuffer) -> bytes: ...
def b16decode(s: str | ReadableBuffer, casefold: bool = False) -> bytes: ...
def b64encode(s: ReadableBuffer, altchars: ReadableBuffer | None = None) -> bytes:
"""
Encode the bytes-like object s using Base64 and return a bytes object.
Optional altchars should be a byte string of length 2 which specifies an
alternative alphabet for the '+' and '/' characters. This allows an
application to e.g. generate url or filesystem safe Base64 strings.
"""
def b64decode(s: str | ReadableBuffer, altchars: str | ReadableBuffer | None = None, validate: bool = False) -> bytes:
"""
Decode the Base64 encoded bytes-like object or ASCII string s.
Optional altchars must be a bytes-like object or ASCII string of length 2
which specifies the alternative alphabet used instead of the '+' and '/'
characters.
The result is returned as a bytes object. A binascii.Error is raised if
s is incorrectly padded.
If validate is False (the default), characters that are neither in the
normal base-64 alphabet nor the alternative alphabet are discarded prior
to the padding check. If validate is True, these non-alphabet characters
in the input result in a binascii.Error.
For more information about the strict base64 check, see:
https://docs.python.org/3.11/library/binascii.html#binascii.a2b_base64
"""
def standard_b64encode(s: ReadableBuffer) -> bytes:
"""
Encode bytes-like object s using the standard Base64 alphabet.
The result is returned as a bytes object.
"""
def standard_b64decode(s: str | ReadableBuffer) -> bytes:
"""
Decode bytes encoded with the standard Base64 alphabet.
Argument s is a bytes-like object or ASCII string to decode. The result
is returned as a bytes object. A binascii.Error is raised if the input
is incorrectly padded. Characters that are not in the standard alphabet
are discarded prior to the padding check.
"""
def urlsafe_b64encode(s: ReadableBuffer) -> bytes:
"""
Encode bytes using the URL- and filesystem-safe Base64 alphabet.
Argument s is a bytes-like object to encode. The result is returned as a
bytes object. The alphabet uses '-' instead of '+' and '_' instead of
'/'.
"""
def urlsafe_b64decode(s: str | ReadableBuffer) -> bytes:
"""
Decode bytes using the URL- and filesystem-safe Base64 alphabet.
Argument s is a bytes-like object or ASCII string to decode. The result
is returned as a bytes object. A binascii.Error is raised if the input
is incorrectly padded. Characters that are not in the URL-safe base-64
alphabet, and are not a plus '+' or slash '/', are discarded prior to the
padding check.
The alphabet uses '-' instead of '+' and '_' instead of '/'.
"""
def b32encode(s: ReadableBuffer) -> bytes:
"""
Encode the bytes-like object s using base32 and return a bytes object.
"""
def b32decode(s: str | ReadableBuffer, casefold: bool = False, map01: str | ReadableBuffer | None = None) -> bytes:
"""
Decode the base32 encoded bytes-like object or ASCII string s.
Optional casefold is a flag specifying whether a lowercase alphabet is
acceptable as input. For security purposes, the default is False.
RFC 3548 allows for optional mapping of the digit 0 (zero) to the
letter O (oh), and for optional mapping of the digit 1 (one) to
either the letter I (eye) or letter L (el). The optional argument
map01 when not None, specifies which letter the digit 1 should be
mapped to (when map01 is not None, the digit 0 is always mapped to
the letter O). For security purposes the default is None, so that
0 and 1 are not allowed in the input.
The result is returned as a bytes object. A binascii.Error is raised if
the input is incorrectly padded or if there are non-alphabet
characters present in the input.
"""
def b16encode(s: ReadableBuffer) -> bytes:
"""
Encode the bytes-like object s using Base16 and return a bytes object.
"""
def b16decode(s: str | ReadableBuffer, casefold: bool = False) -> bytes:
"""
Decode the Base16 encoded bytes-like object or ASCII string s.
Optional casefold is a flag specifying whether a lowercase alphabet is
acceptable as input. For security purposes, the default is False.
The result is returned as a bytes object. A binascii.Error is raised if
s is incorrectly padded or if there are non-alphabet characters present
in the input.
"""
if sys.version_info >= (3, 10):
def b32hexencode(s: ReadableBuffer) -> bytes: ...
def b32hexdecode(s: str | ReadableBuffer, casefold: bool = False) -> bytes: ...
def b32hexencode(s: ReadableBuffer) -> bytes:
"""
Encode the bytes-like object s using base32hex and return a bytes object.
"""
def b32hexdecode(s: str | ReadableBuffer, casefold: bool = False) -> bytes:
"""
Decode the base32hex encoded bytes-like object or ASCII string s.
Optional casefold is a flag specifying whether a lowercase alphabet is
acceptable as input. For security purposes, the default is False.
The result is returned as a bytes object. A binascii.Error is raised if
the input is incorrectly padded or if there are non-alphabet
characters present in the input.
"""
def a85encode(b: ReadableBuffer, *, foldspaces: bool = False, wrapcol: int = 0, pad: bool = False, adobe: bool = False) -> bytes:
"""
Encode bytes-like object b using Ascii85 and return a bytes object.
foldspaces is an optional flag that uses the special short sequence 'y'
instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This
feature is not supported by the "standard" Adobe encoding.
wrapcol controls whether the output should have newline (b'\\n') characters
added to it. If this is non-zero, each output line will be at most this
many characters long, excluding the trailing newline.
pad controls whether the input is padded to a multiple of 4 before
encoding. Note that the btoa implementation always pads.
adobe controls whether the encoded byte sequence is framed with <~ and ~>,
which is used by the Adobe implementation.
"""
def a85encode(
b: ReadableBuffer, *, foldspaces: bool = False, wrapcol: int = 0, pad: bool = False, adobe: bool = False
) -> bytes: ...
def a85decode(
b: str | ReadableBuffer, *, foldspaces: bool = False, adobe: bool = False, ignorechars: bytearray | bytes = b" \t\n\r\x0b"
) -> bytes: ...
def b85encode(b: ReadableBuffer, pad: bool = False) -> bytes: ...
def b85decode(b: str | ReadableBuffer) -> bytes: ...
def decode(input: IO[bytes], output: IO[bytes]) -> None: ...
def encode(input: IO[bytes], output: IO[bytes]) -> None: ...
def encodebytes(s: ReadableBuffer) -> bytes: ...
def decodebytes(s: ReadableBuffer) -> bytes: ...
) -> bytes:
"""
Decode the Ascii85 encoded bytes-like object or ASCII string b.
foldspaces is a flag that specifies whether the 'y' short sequence should be
accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is
not supported by the "standard" Adobe encoding.
adobe controls whether the input sequence is in Adobe Ascii85 format (i.e.
is framed with <~ and ~>).
ignorechars should be a byte string containing characters to ignore from the
input. This should only contain whitespace characters, and by default
contains all whitespace characters in ASCII.
The result is returned as a bytes object.
"""
def b85encode(b: ReadableBuffer, pad: bool = False) -> bytes:
"""
Encode bytes-like object b in base85 format and return a bytes object.
If pad is true, the input is padded with b'\\0' so its length is a multiple of
4 bytes before encoding.
"""
def b85decode(b: str | ReadableBuffer) -> bytes:
"""
Decode the base85-encoded bytes-like object or ASCII string b.
The result is returned as a bytes object.
"""
def decode(input: IO[bytes], output: IO[bytes]) -> None:
"""
Decode a file; input and output are binary files.
"""
def encode(input: IO[bytes], output: IO[bytes]) -> None:
"""
Encode a file; input and output are binary files.
"""
def encodebytes(s: ReadableBuffer) -> bytes:
"""
Encode a bytestring into a bytes object containing multiple lines
of base-64 data.
"""
def decodebytes(s: ReadableBuffer) -> bytes:
"""
Decode a bytestring of base-64 data into a bytes object.
"""
if sys.version_info >= (3, 13):
def z85encode(s: ReadableBuffer) -> bytes: ...
def z85decode(s: str | ReadableBuffer) -> bytes: ...
def z85encode(s: ReadableBuffer) -> bytes:
"""
Encode the bytes-like object s in z85 format and return a bytes object.
"""
def z85decode(s: str | ReadableBuffer) -> bytes:
"""
Decode the z85-encoded bytes-like object or ASCII string s.

The result is returned as a bytes object.
"""


@ -1,3 +1,7 @@
"""
Debugger basics
"""
import sys
from _typeshed import ExcInfo, TraceFunction, Unused
from collections.abc import Callable, Iterable, Iterator, Mapping
@ -17,9 +21,26 @@ _Backend: TypeAlias = Literal["settrace", "monitoring"]
# so we don't include the value of this constant in the stubs.
GENERATOR_AND_COROUTINE_FLAGS: Final[int]
class BdbQuit(Exception): ...
class BdbQuit(Exception):
"""
Exception to give up completely.
"""
class Bdb:
"""
Generic Python debugger base class.
This class takes care of details of the trace facility;
a derived class should implement user interaction.
The standard debugger class (pdb.Pdb) is an example.
The optional skip argument must be an iterable of glob-style
module name patterns. The debugger will not step into frames
that originate in a module that matches one of these patterns.
Whether a frame is considered to originate in a certain module
is determined by the __name__ in the frame globals.
"""
skip: set[str] | None
breaks: dict[str, list[int]]
fncache: dict[str, str]
@ -35,69 +56,345 @@ class Bdb:
else:
def __init__(self, skip: Iterable[str] | None = None) -> None: ...
def canonic(self, filename: str) -> str: ...
def reset(self) -> None: ...
def canonic(self, filename: str) -> str:
"""
Return canonical form of filename.
For real filenames, the canonical form is a case-normalized (on
case insensitive filesystems) absolute path. 'Filenames' with
angle brackets, such as "<stdin>", generated in interactive
mode, are returned unchanged.
"""
def reset(self) -> None:
"""
Set values of attributes as ready to start debugging.
"""
if sys.version_info >= (3, 12):
@contextmanager
def set_enterframe(self, frame: FrameType) -> Iterator[None]: ...
def trace_dispatch(self, frame: FrameType, event: str, arg: Any) -> TraceFunction: ...
def dispatch_line(self, frame: FrameType) -> TraceFunction: ...
def dispatch_call(self, frame: FrameType, arg: None) -> TraceFunction: ...
def dispatch_return(self, frame: FrameType, arg: Any) -> TraceFunction: ...
def dispatch_exception(self, frame: FrameType, arg: ExcInfo) -> TraceFunction: ...
if sys.version_info >= (3, 13):
def dispatch_opcode(self, frame: FrameType, arg: Unused) -> Callable[[FrameType, str, Any], TraceFunction]: ...
def is_skipped_module(self, module_name: str) -> bool: ...
def stop_here(self, frame: FrameType) -> bool: ...
def break_here(self, frame: FrameType) -> bool: ...
def do_clear(self, arg: Any) -> bool | None: ...
def break_anywhere(self, frame: FrameType) -> bool: ...
def user_call(self, frame: FrameType, argument_list: None) -> None: ...
def user_line(self, frame: FrameType) -> None: ...
def user_return(self, frame: FrameType, return_value: Any) -> None: ...
def user_exception(self, frame: FrameType, exc_info: ExcInfo) -> None: ...
def set_until(self, frame: FrameType, lineno: int | None = None) -> None: ...
if sys.version_info >= (3, 13):
def user_opcode(self, frame: FrameType) -> None: ... # undocumented
def set_step(self) -> None: ...
if sys.version_info >= (3, 13):
def set_stepinstr(self) -> None: ... # undocumented
def trace_dispatch(self, frame: FrameType, event: str, arg: Any) -> TraceFunction:
"""
Dispatch a trace function for debugged frames based on the event.

This function is installed as the trace function for debugged
frames. Its return value is the new trace function, which is
usually itself. The default implementation decides how to
dispatch a frame, depending on the type of event (passed in as a
string) that is about to be executed.

The event can be one of the following:
line: A new line of code is going to be executed.
call: A function is about to be called or another code block
is entered.
return: A function or other code block is about to return.
exception: An exception has occurred.
c_call: A C function is about to be called.
c_return: A C function has returned.
c_exception: A C function has raised an exception.

For the Python events, specialized functions (see the dispatch_*()
methods) are called. For the C events, no action is taken.

The arg parameter depends on the previous event.
"""
def dispatch_line(self, frame: FrameType) -> TraceFunction:
"""
Invoke user function and return trace function for line event.
If the debugger stops on the current line, invoke
self.user_line(). Raise BdbQuit if self.quitting is set.
Return self.trace_dispatch to continue tracing in this scope.
"""
def dispatch_call(self, frame: FrameType, arg: None) -> TraceFunction:
"""
Invoke user function and return trace function for call event.
If the debugger stops on this function call, invoke
self.user_call(). Raise BdbQuit if self.quitting is set.
Return self.trace_dispatch to continue tracing in this scope.
"""
def dispatch_return(self, frame: FrameType, arg: Any) -> TraceFunction:
"""
Invoke user function and return trace function for return event.
If the debugger stops on this function return, invoke
self.user_return(). Raise BdbQuit if self.quitting is set.
Return self.trace_dispatch to continue tracing in this scope.
"""
def dispatch_exception(self, frame: FrameType, arg: ExcInfo) -> TraceFunction:
"""
Invoke user function and return trace function for exception event.
If the debugger stops on this exception, invoke
self.user_exception(). Raise BdbQuit if self.quitting is set.
Return self.trace_dispatch to continue tracing in this scope.
"""
if sys.version_info >= (3, 13):
def dispatch_opcode(self, frame: FrameType, arg: Unused) -> Callable[[FrameType, str, Any], TraceFunction]:
"""
Invoke user function and return trace function for opcode event.
If the debugger stops on the current opcode, invoke
self.user_opcode(). Raise BdbQuit if self.quitting is set.
Return self.trace_dispatch to continue tracing in this scope.
Opcode event will always trigger the user callback. For now the only
opcode event is from an inline set_trace() and we want to stop there
unconditionally.
"""
def is_skipped_module(self, module_name: str) -> bool:
"""
Return True if module_name matches any skip pattern.
"""
def stop_here(self, frame: FrameType) -> bool:
"""
Return True if frame is below the starting frame in the stack.
"""
def break_here(self, frame: FrameType) -> bool:
"""
Return True if there is an effective breakpoint for this line.
Check for line or function breakpoint and if in effect.
Delete temporary breakpoints if effective() says to.
"""
def do_clear(self, arg: Any) -> bool | None:
"""
Remove temporary breakpoint.
Must implement in derived classes or get NotImplementedError.
"""
def break_anywhere(self, frame: FrameType) -> bool:
"""
Return True if there is any breakpoint in that frame.
"""
def user_call(self, frame: FrameType, argument_list: None) -> None:
"""
Called if we might stop in a function.
"""
def user_line(self, frame: FrameType) -> None:
"""
Called when we stop or break at a line.
"""
def user_return(self, frame: FrameType, return_value: Any) -> None:
"""
Called when a return trap is set here.
"""
def user_exception(self, frame: FrameType, exc_info: ExcInfo) -> None:
"""
Called when we stop on an exception.
"""
def set_until(self, frame: FrameType, lineno: int | None = None) -> None:
"""
Stop when a line with a lineno greater than the current one is
reached, or when returning from the current frame.
"""
if sys.version_info >= (3, 13):
def user_opcode(self, frame: FrameType) -> None: # undocumented
"""
Called when we are about to execute an opcode.
"""
def set_step(self) -> None:
"""
Stop after one line of code.
"""
if sys.version_info >= (3, 13):
def set_stepinstr(self) -> None: # undocumented
"""
Stop before the next instruction.
"""
def set_next(self, frame: FrameType) -> None:
"""
Stop on the next line in or below the given frame.
"""
def set_return(self, frame: FrameType) -> None:
"""
Stop when returning from the given frame.
"""
def set_trace(self, frame: FrameType | None = None) -> None:
"""
Start debugging from frame.
If frame is not specified, debugging starts from caller's frame.
"""
def set_continue(self) -> None:
"""
Stop only at breakpoints or when finished.
If there are no breakpoints, set the system trace function to None.
"""
def set_quit(self) -> None:
"""
Set quitting attribute to True.
Raises BdbQuit exception in the next call to a dispatch_*() method.
"""
def set_next(self, frame: FrameType) -> None: ...
def set_return(self, frame: FrameType) -> None: ...
def set_trace(self, frame: FrameType | None = None) -> None: ...
def set_continue(self) -> None: ...
def set_quit(self) -> None: ...
def set_break(
self, filename: str, lineno: int, temporary: bool = False, cond: str | None = None, funcname: str | None = None
) -> str | None: ...
def clear_break(self, filename: str, lineno: int) -> str | None: ...
def clear_bpbynumber(self, arg: SupportsInt) -> str | None: ...
def clear_all_file_breaks(self, filename: str) -> str | None: ...
def clear_all_breaks(self) -> str | None: ...
def get_bpbynumber(self, arg: SupportsInt) -> Breakpoint: ...
def get_break(self, filename: str, lineno: int) -> bool: ...
def get_breaks(self, filename: str, lineno: int) -> list[Breakpoint]: ...
def get_file_breaks(self, filename: str) -> list[Breakpoint]: ...
def get_all_breaks(self) -> list[Breakpoint]: ...
def get_stack(self, f: FrameType | None, t: TracebackType | None) -> tuple[list[tuple[FrameType, int]], int]: ...
def format_stack_entry(self, frame_lineno: tuple[FrameType, int], lprefix: str = ": ") -> str: ...
def run(
self, cmd: str | CodeType, globals: dict[str, Any] | None = None, locals: Mapping[str, Any] | None = None
) -> None: ...
def runeval(self, expr: str, globals: dict[str, Any] | None = None, locals: Mapping[str, Any] | None = None) -> None: ...
def runctx(self, cmd: str | CodeType, globals: dict[str, Any] | None, locals: Mapping[str, Any] | None) -> None: ...
def runcall(self, func: Callable[_P, _T], /, *args: _P.args, **kwds: _P.kwargs) -> _T | None: ...
) -> str | None:
"""
Set a new breakpoint for filename:lineno.
If lineno doesn't exist for the filename, return an error message.
The filename should be in canonical form.
"""
def clear_break(self, filename: str, lineno: int) -> str | None:
"""
Delete breakpoints for filename:lineno.
If no breakpoints were set, return an error message.
"""
def clear_bpbynumber(self, arg: SupportsInt) -> str | None:
"""
Delete a breakpoint by its index in Breakpoint.bpbynumber.
If arg is invalid, return an error message.
"""
def clear_all_file_breaks(self, filename: str) -> str | None:
"""
Delete all breakpoints in filename.
If none were set, return an error message.
"""
def clear_all_breaks(self) -> str | None:
"""
Delete all existing breakpoints.
If none were set, return an error message.
"""
def get_bpbynumber(self, arg: SupportsInt) -> Breakpoint:
"""
Return a breakpoint by its index in Breakpoint.bpbynumber.
For invalid arg values or if the breakpoint doesn't exist,
raise a ValueError.
"""
def get_break(self, filename: str, lineno: int) -> bool:
"""
Return True if there is a breakpoint for filename:lineno.
"""
def get_breaks(self, filename: str, lineno: int) -> list[Breakpoint]:
"""
Return all breakpoints for filename:lineno.
If no breakpoints are set, return an empty list.
"""
def get_file_breaks(self, filename: str) -> list[Breakpoint]:
"""
Return all lines with breakpoints for filename.
If no breakpoints are set, return an empty list.
"""
def get_all_breaks(self) -> list[Breakpoint]:
"""
Return all breakpoints that are set.
"""
def get_stack(self, f: FrameType | None, t: TracebackType | None) -> tuple[list[tuple[FrameType, int]], int]:
"""
Return a list of (frame, lineno) in a stack trace and a size.
List starts with original calling frame, if there is one.
Size may be number of frames above or below f.
"""
def format_stack_entry(self, frame_lineno: tuple[FrameType, int], lprefix: str = ": ") -> str:
"""
Return a string with information about a stack entry.
The stack entry frame_lineno is a (frame, lineno) tuple. The
return string contains the canonical filename, the function name
or '<lambda>', the input arguments, the return value, and the
line of code (if it exists).
"""
def run(self, cmd: str | CodeType, globals: dict[str, Any] | None = None, locals: Mapping[str, Any] | None = None) -> None:
"""
Debug a statement executed via the exec() function.
globals defaults to __main__.dict; locals defaults to globals.
"""
def runeval(self, expr: str, globals: dict[str, Any] | None = None, locals: Mapping[str, Any] | None = None) -> None:
"""
Debug an expression executed via the eval() function.
globals defaults to __main__.dict; locals defaults to globals.
"""
def runctx(self, cmd: str | CodeType, globals: dict[str, Any] | None, locals: Mapping[str, Any] | None) -> None:
"""
For backwards-compatibility. Defers to run().
"""
def runcall(self, func: Callable[_P, _T], /, *args: _P.args, **kwds: _P.kwargs) -> _T | None:
"""
Debug a single function call.
Return the result of the function call.
"""
if sys.version_info >= (3, 14):
def start_trace(self) -> None: ...
def stop_trace(self) -> None: ...
def disable_current_event(self) -> None: ...
def restart_events(self) -> None: ...
def disable_current_event(self) -> None:
"""
Disable the current event.
"""
def restart_events(self) -> None:
"""
Restart all events.
"""
class Breakpoint:
"""
Breakpoint class.
Implements temporary breakpoints, ignore counts, disabling and
(re)-enabling, and conditionals.
Breakpoints are indexed by number through bpbynumber and by
the (file, line) tuple using bplist. The former points to a
single instance of class Breakpoint. The latter points to a
list of such instances since there may be more than one
breakpoint per line.
When creating a breakpoint, its associated filename should be
in canonical form. If funcname is defined, a breakpoint hit will be
counted when the first line of that function is executed. A
conditional breakpoint always counts a hit.
"""
next: int
bplist: dict[tuple[str, int], list[Breakpoint]]
bpbynumber: list[Breakpoint | None]
@ -119,12 +416,67 @@ class Breakpoint:
@staticmethod
def clearBreakpoints() -> None: ...
def deleteMe(self) -> None: ...
def enable(self) -> None: ...
def disable(self) -> None: ...
def bpprint(self, out: IO[str] | None = None) -> None: ...
def bpformat(self) -> str: ...
def checkfuncname(b: Breakpoint, frame: FrameType) -> bool: ...
def effective(file: str, line: int, frame: FrameType) -> tuple[Breakpoint, bool] | tuple[None, None]: ...
def set_trace() -> None: ...
def deleteMe(self) -> None:
"""
Delete the breakpoint from the list associated to a file:line.

If it is the last breakpoint in that position, it also deletes
the entry for the file:line.
"""
def enable(self) -> None:
"""
Mark the breakpoint as enabled.
"""
def disable(self) -> None:
"""
Mark the breakpoint as disabled.
"""
def bpprint(self, out: IO[str] | None = None) -> None:
"""
Print the output of bpformat().
The optional out argument directs where the output is sent
and defaults to standard output.
"""
def bpformat(self) -> str:
"""
Return a string with information about the breakpoint.
The information includes the breakpoint number, temporary
status, file:line position, break condition, number of times to
ignore, and number of times hit.
"""
def checkfuncname(b: Breakpoint, frame: FrameType) -> bool:
"""
Return True if break should happen here.
Whether a break should happen depends on the way that b (the breakpoint)
was set. If it was set via line number, check if b.line is the same as
the one in the frame. If it was set via function name, check if this is
the right function and if it is on the first executable line.
"""
def effective(file: str, line: int, frame: FrameType) -> tuple[Breakpoint, bool] | tuple[None, None]:
"""
Return (active breakpoint, delete temporary flag) or (None, None) as
breakpoint to act upon.
The "active breakpoint" is the first entry in bplist[line, file] (which
must exist) that is enabled, for which checkfuncname is True, and that
has neither a False condition nor a positive ignore count. The flag,
meaning that a temporary breakpoint should be deleted, is False only
when the condition cannot be evaluated (in which case, ignore count is
ignored).
If no such entry exists, then (None, None) is returned.
"""
def set_trace() -> None:
"""
Start debugging with a Bdb instance from the caller's frame.
"""


@ -1,3 +1,7 @@
"""
Conversion between binary data and ASCII
"""
import sys
from _typeshed import ReadableBuffer
from typing_extensions import TypeAlias
@ -6,31 +10,132 @@ from typing_extensions import TypeAlias
# or ASCII-only strings.
_AsciiBuffer: TypeAlias = str | ReadableBuffer
def a2b_uu(data: _AsciiBuffer, /) -> bytes: ...
def b2a_uu(data: ReadableBuffer, /, *, backtick: bool = False) -> bytes: ...
def a2b_uu(data: _AsciiBuffer, /) -> bytes:
"""
Decode a line of uuencoded data.
"""
def b2a_uu(data: ReadableBuffer, /, *, backtick: bool = False) -> bytes:
"""
Uuencode line of data.
"""
if sys.version_info >= (3, 11):
def a2b_base64(data: _AsciiBuffer, /, *, strict_mode: bool = False) -> bytes: ...
def a2b_base64(data: _AsciiBuffer, /, *, strict_mode: bool = False) -> bytes:
"""
Decode a line of base64 data.
strict_mode
When set to True, bytes that are not part of the base64 standard are not allowed.
The same applies to excess data after padding (= / ==).
"""
else:
def a2b_base64(data: _AsciiBuffer, /) -> bytes: ...
def a2b_base64(data: _AsciiBuffer, /) -> bytes:
"""
Decode a line of base64 data.
"""
def b2a_base64(data: ReadableBuffer, /, *, newline: bool = True) -> bytes: ...
def a2b_qp(data: _AsciiBuffer, header: bool = False) -> bytes: ...
def b2a_qp(data: ReadableBuffer, quotetabs: bool = False, istext: bool = True, header: bool = False) -> bytes: ...
def b2a_base64(data: ReadableBuffer, /, *, newline: bool = True) -> bytes:
"""
Base64-code line of data.
"""
def a2b_qp(data: _AsciiBuffer, header: bool = False) -> bytes:
"""
Decode a string of qp-encoded data.
"""
def b2a_qp(data: ReadableBuffer, quotetabs: bool = False, istext: bool = True, header: bool = False) -> bytes:
"""
Encode a string using quoted-printable encoding.
On encoding, when istext is set, newlines are not encoded, and white
space at end of lines is. When istext is not set, \\r and \\n (CR/LF)
are both encoded. When quotetabs is set, space and tabs are encoded.
"""
if sys.version_info < (3, 11):
def a2b_hqx(data: _AsciiBuffer, /) -> bytes: ...
def rledecode_hqx(data: ReadableBuffer, /) -> bytes: ...
def rlecode_hqx(data: ReadableBuffer, /) -> bytes: ...
def b2a_hqx(data: ReadableBuffer, /) -> bytes: ...
def a2b_hqx(data: _AsciiBuffer, /) -> bytes:
"""
Decode .hqx coding.
"""
def crc_hqx(data: ReadableBuffer, crc: int, /) -> int: ...
def crc32(data: ReadableBuffer, crc: int = 0, /) -> int: ...
def b2a_hex(data: ReadableBuffer, sep: str | bytes = ..., bytes_per_sep: int = ...) -> bytes: ...
def hexlify(data: ReadableBuffer, sep: str | bytes = ..., bytes_per_sep: int = ...) -> bytes: ...
def a2b_hex(hexstr: _AsciiBuffer, /) -> bytes: ...
def unhexlify(hexstr: _AsciiBuffer, /) -> bytes: ...
def rledecode_hqx(data: ReadableBuffer, /) -> bytes:
"""
Decode hexbin RLE-coded string.
"""
def rlecode_hqx(data: ReadableBuffer, /) -> bytes:
"""
Binhex RLE-code binary data.
"""
def b2a_hqx(data: ReadableBuffer, /) -> bytes:
"""
Encode .hqx data.
"""
def crc_hqx(data: ReadableBuffer, crc: int, /) -> int:
"""
Compute CRC-CCITT incrementally.
"""
def crc32(data: ReadableBuffer, crc: int = 0, /) -> int:
"""
Compute CRC-32 incrementally.
"""
def b2a_hex(data: ReadableBuffer, sep: str | bytes = ..., bytes_per_sep: int = ...) -> bytes:
"""
Hexadecimal representation of binary data.
sep
An optional single character or byte to separate hex bytes.
bytes_per_sep
How many bytes between separators. Positive values count from the
right, negative values count from the left.
The return value is a bytes object. This function is also
available as "hexlify()".
Example:
>>> binascii.b2a_hex(b'\\xb9\\x01\\xef')
b'b901ef'
>>> binascii.hexlify(b'\\xb9\\x01\\xef', ':')
b'b9:01:ef'
>>> binascii.b2a_hex(b'\\xb9\\x01\\xef', b'_', 2)
b'b9_01ef'
"""
def hexlify(data: ReadableBuffer, sep: str | bytes = ..., bytes_per_sep: int = ...) -> bytes:
"""
Hexadecimal representation of binary data.
sep
An optional single character or byte to separate hex bytes.
bytes_per_sep
How many bytes between separators. Positive values count from the
right, negative values count from the left.
The return value is a bytes object. This function is also
available as "b2a_hex()".
"""
def a2b_hex(hexstr: _AsciiBuffer, /) -> bytes:
"""
Binary data of hexadecimal representation.
hexstr must contain an even number of hex digits (upper or lower case).
This function is also available as "unhexlify()".
"""
def unhexlify(hexstr: _AsciiBuffer, /) -> bytes:
"""
Binary data of hexadecimal representation.
hexstr must contain an even number of hex digits (upper or lower case).
"""
class Error(ValueError): ...
class Incomplete(Exception): ...
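The hexlify family above round-trips as follows (the separator form matches the docstring's own example):

import binascii

blob = b"\xb9\x01\xef"
hexed = binascii.hexlify(blob, ":")  # b'b9:01:ef'
assert binascii.unhexlify(hexed.replace(b":", b"")) == blob  # needs bare hex digits
print(binascii.crc32(blob))  # same CRC-32 as zlib.crc32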


@ -1,3 +1,11 @@
"""
Macintosh binhex compression/decompression.
easy interface:
binhex(inputfilename, outputfilename)
hexbin(inputfilename, outputfilename)
"""
from _typeshed import SizedBuffer
from typing import IO, Any, Final
from typing_extensions import TypeAlias
@ -33,7 +41,10 @@ class BinHex:
def write_rsrc(self, data: SizedBuffer) -> None: ...
def close(self) -> None: ...
def binhex(inp: str, out: str) -> None: ...
def binhex(inp: str, out: str) -> None:
"""
binhex(infilename, outfilename): create binhex-encoded copy of a file
"""
class HexBin:
def __init__(self, ifp: _FileHandleUnion) -> None: ...
@ -42,4 +53,7 @@ class HexBin:
def read_rsrc(self, *n: int) -> bytes: ...
def close(self) -> None: ...
def hexbin(inp: str, out: str) -> None: ...
def hexbin(inp: str, out: str) -> None:
"""
hexbin(infilename, outfilename) - Decode binhexed file
"""

Some files were not shown because too many files have changed in this diff.