# ===== sslproto.py =====
import collections
import warnings
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import base_events
from . import constants
from . import protocols
from . import transports
from .log import logger
def _create_transport_context(server_side, server_hostname):
if server_side:
raise ValueError('Server side SSL needs a valid SSLContext')
# Client side may pass ssl=True to use a default
# context; in that case the sslcontext passed is None.
# The default is secure for client connections.
# Python 3.4+: use up-to-date strong settings.
sslcontext = ssl.create_default_context()
if not server_hostname:
sslcontext.check_hostname = False
return sslcontext
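# Illustrative sketch (not part of the stdlib module): what the helper above
# produces for a client. 'example.com' is a placeholder hostname.
def _example_default_client_context():
    # With a hostname, hostname checking stays enabled on the default context.
    ctx = _create_transport_context(server_side=False,
                                    server_hostname='example.com')
    assert ctx.check_hostname
    # Without a hostname, check_hostname is disabled so that wrap_bio() will
    # not refuse to handshake; certificate verification itself still applies.
    ctx = _create_transport_context(server_side=False, server_hostname=None)
    assert not ctx.check_hostname
    return ctx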
# States of an _SSLPipe.
_UNWRAPPED = "UNWRAPPED"
_DO_HANDSHAKE = "DO_HANDSHAKE"
_WRAPPED = "WRAPPED"
_SHUTDOWN = "SHUTDOWN"
class _SSLPipe(object):
"""An SSL "Pipe".
An SSL pipe allows you to communicate with an SSL/TLS protocol instance
through memory buffers. It can be used to implement a security layer for an
existing connection where you don't have access to the connection's file
descriptor, or for some reason you don't want to use it.
An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode,
data is passed through untransformed. In wrapped mode, application level
data is encrypted to SSL record level data and vice versa. The SSL record
level is the lowest level in the SSL protocol suite and is what travels
as-is over the wire.
    An _SSLPipe initially is in "unwrapped" mode. To start SSL, call
do_handshake(). To shutdown SSL again, call unwrap().
"""
max_size = 256 * 1024 # Buffer size passed to read()
def __init__(self, context, server_side, server_hostname=None):
"""
The *context* argument specifies the ssl.SSLContext to use.
The *server_side* argument indicates whether this is a server side or
client side transport.
The optional *server_hostname* argument can be used to specify the
hostname you are connecting to. You may only specify this parameter if
the _ssl module supports Server Name Indication (SNI).
"""
self._context = context
self._server_side = server_side
self._server_hostname = server_hostname
self._state = _UNWRAPPED
self._incoming = ssl.MemoryBIO()
self._outgoing = ssl.MemoryBIO()
self._sslobj = None
self._need_ssldata = False
self._handshake_cb = None
self._shutdown_cb = None
@property
def context(self):
"""The SSL context passed to the constructor."""
return self._context
@property
def ssl_object(self):
"""The internal ssl.SSLObject instance.
Return None if the pipe is not wrapped.
"""
return self._sslobj
@property
def need_ssldata(self):
"""Whether more record level data is needed to complete a handshake
that is currently in progress."""
return self._need_ssldata
@property
def wrapped(self):
"""
Whether a security layer is currently in effect.
Return False during handshake.
"""
return self._state == _WRAPPED
def do_handshake(self, callback=None):
"""Start the SSL handshake.
        Return a list of ssldata; an ssldata element is a list of buffers.
The optional *callback* argument can be used to install a callback that
will be called when the handshake is complete. The callback will be
called with None if successful, else an exception instance.
"""
if self._state != _UNWRAPPED:
raise RuntimeError('handshake in progress or completed')
self._sslobj = self._context.wrap_bio(
self._incoming, self._outgoing,
server_side=self._server_side,
server_hostname=self._server_hostname)
self._state = _DO_HANDSHAKE
self._handshake_cb = callback
ssldata, appdata = self.feed_ssldata(b'', only_handshake=True)
assert len(appdata) == 0
return ssldata
def shutdown(self, callback=None):
"""Start the SSL shutdown sequence.
        Return a list of ssldata; an ssldata element is a list of buffers.
The optional *callback* argument can be used to install a callback that
will be called when the shutdown is complete. The callback will be
called without arguments.
"""
if self._state == _UNWRAPPED:
raise RuntimeError('no security layer present')
if self._state == _SHUTDOWN:
raise RuntimeError('shutdown in progress')
assert self._state in (_WRAPPED, _DO_HANDSHAKE)
self._state = _SHUTDOWN
self._shutdown_cb = callback
ssldata, appdata = self.feed_ssldata(b'')
assert appdata == [] or appdata == [b'']
return ssldata
def feed_eof(self):
"""Send a potentially "ragged" EOF.
This method will raise an SSL_ERROR_EOF exception if the EOF is
unexpected.
"""
self._incoming.write_eof()
ssldata, appdata = self.feed_ssldata(b'')
assert appdata == [] or appdata == [b'']
def feed_ssldata(self, data, only_handshake=False):
"""Feed SSL record level data into the pipe.
The data must be a bytes instance. It is OK to send an empty bytes
instance. This can be used to get ssldata for a handshake initiated by
this endpoint.
Return a (ssldata, appdata) tuple. The ssldata element is a list of
buffers containing SSL data that needs to be sent to the remote SSL.
The appdata element is a list of buffers containing plaintext data that
needs to be forwarded to the application. The appdata list may contain
an empty buffer indicating an SSL "close_notify" alert. This alert must
be acknowledged by calling shutdown().
"""
if self._state == _UNWRAPPED:
# If unwrapped, pass plaintext data straight through.
if data:
appdata = [data]
else:
appdata = []
return ([], appdata)
self._need_ssldata = False
if data:
self._incoming.write(data)
ssldata = []
appdata = []
try:
if self._state == _DO_HANDSHAKE:
# Call do_handshake() until it doesn't raise anymore.
self._sslobj.do_handshake()
self._state = _WRAPPED
if self._handshake_cb:
self._handshake_cb(None)
if only_handshake:
return (ssldata, appdata)
# Handshake done: execute the wrapped block
if self._state == _WRAPPED:
# Main state: read data from SSL until close_notify
while True:
chunk = self._sslobj.read(self.max_size)
appdata.append(chunk)
if not chunk: # close_notify
break
elif self._state == _SHUTDOWN:
# Call shutdown() until it doesn't raise anymore.
self._sslobj.unwrap()
self._sslobj = None
self._state = _UNWRAPPED
if self._shutdown_cb:
self._shutdown_cb()
elif self._state == _UNWRAPPED:
# Drain possible plaintext data after close_notify.
appdata.append(self._incoming.read())
except (ssl.SSLError, ssl.CertificateError) as exc:
exc_errno = getattr(exc, 'errno', None)
if exc_errno not in (
ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE,
ssl.SSL_ERROR_SYSCALL):
if self._state == _DO_HANDSHAKE and self._handshake_cb:
self._handshake_cb(exc)
raise
self._need_ssldata = (exc_errno == ssl.SSL_ERROR_WANT_READ)
# Check for record level data that needs to be sent back.
# Happens for the initial handshake and renegotiations.
if self._outgoing.pending:
ssldata.append(self._outgoing.read())
return (ssldata, appdata)
def feed_appdata(self, data, offset=0):
"""Feed plaintext data into the pipe.
Return an (ssldata, offset) tuple. The ssldata element is a list of
buffers containing record level data that needs to be sent to the
remote SSL instance. The offset is the number of plaintext bytes that
were processed, which may be less than the length of data.
NOTE: In case of short writes, this call MUST be retried with the SAME
buffer passed into the *data* argument (i.e. the id() must be the
same). This is an OpenSSL requirement. A further particularity is that
a short write will always have offset == 0, because the _ssl module
does not enable partial writes. And even though the offset is zero,
there will still be encrypted data in ssldata.
"""
assert 0 <= offset <= len(data)
if self._state == _UNWRAPPED:
# pass through data in unwrapped mode
if offset < len(data):
ssldata = [data[offset:]]
else:
ssldata = []
return (ssldata, len(data))
ssldata = []
view = memoryview(data)
while True:
self._need_ssldata = False
try:
if offset < len(view):
offset += self._sslobj.write(view[offset:])
except ssl.SSLError as exc:
# It is not allowed to call write() after unwrap() until the
# close_notify is acknowledged. We return the condition to the
# caller as a short write.
exc_errno = getattr(exc, 'errno', None)
if exc.reason == 'PROTOCOL_IS_SHUTDOWN':
exc_errno = exc.errno = ssl.SSL_ERROR_WANT_READ
if exc_errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE,
ssl.SSL_ERROR_SYSCALL):
raise
self._need_ssldata = (exc_errno == ssl.SSL_ERROR_WANT_READ)
# See if there's any record level data back for us.
if self._outgoing.pending:
ssldata.append(self._outgoing.read())
if offset == len(view) or self._need_ssldata:
break
return (ssldata, offset)
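# Illustrative sketch (not part of the stdlib module): completing a TLS
# handshake between two _SSLPipe instances entirely in memory. 'cert.pem' and
# 'key.pem' are hypothetical files; asyncio itself drives the pipe from
# SSLProtocol rather than with a hand-written loop like this one.
def _example_in_memory_handshake():
    server_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    server_ctx.load_cert_chain('cert.pem', 'key.pem')
    client_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    client_ctx.check_hostname = False
    client_ctx.verify_mode = ssl.CERT_NONE
    client = _SSLPipe(client_ctx, server_side=False)
    server = _SSLPipe(server_ctx, server_side=True)
    # Both endpoints enter the handshake state; the client's first flight
    # comes back from do_handshake() as record-level data for the server.
    to_server = client.do_handshake()
    to_client = server.do_handshake()  # usually empty: waits for ClientHello
    while not (client.wrapped and server.wrapped):
        for buf in to_server:
            ssldata, _ = server.feed_ssldata(buf)
            to_client.extend(ssldata)
        to_server = []
        for buf in to_client:
            ssldata, _ = client.feed_ssldata(buf)
            to_server.extend(ssldata)
        to_client = []
    return client, server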
class _SSLProtocolTransport(transports._FlowControlMixin,
transports.Transport):
_sendfile_compatible = constants._SendfileMode.FALLBACK
def __init__(self, loop, ssl_protocol):
self._loop = loop
# SSLProtocol instance
self._ssl_protocol = ssl_protocol
self._closed = False
def get_extra_info(self, name, default=None):
"""Get optional transport information."""
return self._ssl_protocol._get_extra_info(name, default)
def set_protocol(self, protocol):
self._ssl_protocol._set_app_protocol(protocol)
def get_protocol(self):
return self._ssl_protocol._app_protocol
def is_closing(self):
return self._closed
def close(self):
"""Close the transport.
Buffered data will be flushed asynchronously. No more data
will be received. After all buffered data is flushed, the
        protocol's connection_lost() method will (eventually) be called
with None as its argument.
"""
self._closed = True
self._ssl_protocol._start_shutdown()
def __del__(self, _warn=warnings.warn):
if not self._closed:
_warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
self.close()
def is_reading(self):
tr = self._ssl_protocol._transport
if tr is None:
raise RuntimeError('SSL transport has not been initialized yet')
return tr.is_reading()
def pause_reading(self):
"""Pause the receiving end.
No data will be passed to the protocol's data_received()
method until resume_reading() is called.
"""
self._ssl_protocol._transport.pause_reading()
def resume_reading(self):
"""Resume the receiving end.
Data received will once again be passed to the protocol's
data_received() method.
"""
self._ssl_protocol._transport.resume_reading()
def set_write_buffer_limits(self, high=None, low=None):
"""Set the high- and low-water limits for write flow control.
These two values control when to call the protocol's
pause_writing() and resume_writing() methods. If specified,
the low-water limit must be less than or equal to the
high-water limit. Neither value can be negative.
The defaults are implementation-specific. If only the
high-water limit is given, the low-water limit defaults to an
implementation-specific value less than or equal to the
high-water limit. Setting high to zero forces low to zero as
well, and causes pause_writing() to be called whenever the
buffer becomes non-empty. Setting low to zero causes
resume_writing() to be called only once the buffer is empty.
Use of zero for either limit is generally sub-optimal as it
reduces opportunities for doing I/O and computation
concurrently.
"""
self._ssl_protocol._transport.set_write_buffer_limits(high, low)
def get_write_buffer_size(self):
"""Return the current size of the write buffer."""
return self._ssl_protocol._transport.get_write_buffer_size()
@property
def _protocol_paused(self):
# Required for sendfile fallback pause_writing/resume_writing logic
return self._ssl_protocol._transport._protocol_paused
def write(self, data):
"""Write some data bytes to the transport.
This does not block; it buffers the data and arranges for it
to be sent out asynchronously.
"""
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError(f"data: expecting a bytes-like instance, "
f"got {type(data).__name__}")
if not data:
return
self._ssl_protocol._write_appdata(data)
def can_write_eof(self):
"""Return True if this transport supports write_eof(), False if not."""
return False
def abort(self):
"""Close the transport immediately.
Buffered data will be lost. No more data will be received.
The protocol's connection_lost() method will (eventually) be
called with None as its argument.
"""
self._ssl_protocol._abort()
self._closed = True
class SSLProtocol(protocols.Protocol):
"""SSL protocol.
Implementation of SSL on top of a socket using incoming and outgoing
buffers which are ssl.MemoryBIO objects.
"""
def __init__(self, loop, app_protocol, sslcontext, waiter,
server_side=False, server_hostname=None,
call_connection_made=True,
ssl_handshake_timeout=None):
if ssl is None:
raise RuntimeError('stdlib ssl module not available')
if ssl_handshake_timeout is None:
ssl_handshake_timeout = constants.SSL_HANDSHAKE_TIMEOUT
elif ssl_handshake_timeout <= 0:
raise ValueError(
f"ssl_handshake_timeout should be a positive number, "
f"got {ssl_handshake_timeout}")
if not sslcontext:
sslcontext = _create_transport_context(
server_side, server_hostname)
self._server_side = server_side
if server_hostname and not server_side:
self._server_hostname = server_hostname
else:
self._server_hostname = None
self._sslcontext = sslcontext
        # SSL-specific extra info. More info is set when the handshake
        # completes.
self._extra = dict(sslcontext=sslcontext)
# App data write buffering
self._write_backlog = collections.deque()
self._write_buffer_size = 0
self._waiter = waiter
self._loop = loop
self._set_app_protocol(app_protocol)
self._app_transport = _SSLProtocolTransport(self._loop, self)
# _SSLPipe instance (None until the connection is made)
self._sslpipe = None
self._session_established = False
self._in_handshake = False
self._in_shutdown = False
# transport, ex: SelectorSocketTransport
self._transport = None
self._call_connection_made = call_connection_made
self._ssl_handshake_timeout = ssl_handshake_timeout
def _set_app_protocol(self, app_protocol):
self._app_protocol = app_protocol
self._app_protocol_is_buffer = \
isinstance(app_protocol, protocols.BufferedProtocol)
def _wakeup_waiter(self, exc=None):
if self._waiter is None:
return
if not self._waiter.cancelled():
if exc is not None:
self._waiter.set_exception(exc)
else:
self._waiter.set_result(None)
self._waiter = None
def connection_made(self, transport):
"""Called when the low-level connection is made.
Start the SSL handshake.
"""
self._transport = transport
self._sslpipe = _SSLPipe(self._sslcontext,
self._server_side,
self._server_hostname)
self._start_handshake()
def connection_lost(self, exc):
"""Called when the low-level connection is lost or closed.
The argument is an exception object or None (the latter
meaning a regular EOF is received or the connection was
aborted or closed).
"""
if self._session_established:
self._session_established = False
self._loop.call_soon(self._app_protocol.connection_lost, exc)
else:
# Most likely an exception occurred while in SSL handshake.
# Just mark the app transport as closed so that its __del__
# doesn't complain.
if self._app_transport is not None:
self._app_transport._closed = True
self._transport = None
self._app_transport = None
if getattr(self, '_handshake_timeout_handle', None):
self._handshake_timeout_handle.cancel()
self._wakeup_waiter(exc)
self._app_protocol = None
self._sslpipe = None
def pause_writing(self):
"""Called when the low-level transport's buffer goes over
the high-water mark.
"""
self._app_protocol.pause_writing()
def resume_writing(self):
"""Called when the low-level transport's buffer drains below
the low-water mark.
"""
self._app_protocol.resume_writing()
def data_received(self, data):
"""Called when some SSL data is received.
The argument is a bytes object.
"""
if self._sslpipe is None:
# transport closing, sslpipe is destroyed
return
try:
ssldata, appdata = self._sslpipe.feed_ssldata(data)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as e:
self._fatal_error(e, 'SSL error in data received')
return
for chunk in ssldata:
self._transport.write(chunk)
for chunk in appdata:
if chunk:
try:
if self._app_protocol_is_buffer:
protocols._feed_data_to_buffered_proto(
self._app_protocol, chunk)
else:
self._app_protocol.data_received(chunk)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as ex:
self._fatal_error(
ex, 'application protocol failed to receive SSL data')
return
else:
self._start_shutdown()
break
def eof_received(self):
"""Called when the other end of the low-level stream
is half-closed.
If this returns a false value (including None), the transport
will close itself. If it returns a true value, closing the
transport is up to the protocol.
"""
try:
if self._loop.get_debug():
logger.debug("%r received EOF", self)
self._wakeup_waiter(ConnectionResetError)
if not self._in_handshake:
keep_open = self._app_protocol.eof_received()
if keep_open:
logger.warning('returning true from eof_received() '
'has no effect when using ssl')
finally:
self._transport.close()
def _get_extra_info(self, name, default=None):
if name in self._extra:
return self._extra[name]
elif self._transport is not None:
return self._transport.get_extra_info(name, default)
else:
return default
def _start_shutdown(self):
if self._in_shutdown:
return
if self._in_handshake:
self._abort()
else:
self._in_shutdown = True
self._write_appdata(b'')
def _write_appdata(self, data):
self._write_backlog.append((data, 0))
self._write_buffer_size += len(data)
self._process_write_backlog()
def _start_handshake(self):
if self._loop.get_debug():
logger.debug("%r starts SSL handshake", self)
self._handshake_start_time = self._loop.time()
else:
self._handshake_start_time = None
self._in_handshake = True
# (b'', 1) is a special value in _process_write_backlog() to do
# the SSL handshake
self._write_backlog.append((b'', 1))
self._handshake_timeout_handle = \
self._loop.call_later(self._ssl_handshake_timeout,
self._check_handshake_timeout)
self._process_write_backlog()
def _check_handshake_timeout(self):
if self._in_handshake is True:
msg = (
f"SSL handshake is taking longer than "
f"{self._ssl_handshake_timeout} seconds: "
f"aborting the connection"
)
self._fatal_error(ConnectionAbortedError(msg))
def _on_handshake_complete(self, handshake_exc):
self._in_handshake = False
self._handshake_timeout_handle.cancel()
sslobj = self._sslpipe.ssl_object
try:
if handshake_exc is not None:
raise handshake_exc
peercert = sslobj.getpeercert()
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
if isinstance(exc, ssl.CertificateError):
msg = 'SSL handshake failed on verifying the certificate'
else:
msg = 'SSL handshake failed'
self._fatal_error(exc, msg)
return
if self._loop.get_debug():
dt = self._loop.time() - self._handshake_start_time
logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)
# Add extra info that becomes available after handshake.
self._extra.update(peercert=peercert,
cipher=sslobj.cipher(),
compression=sslobj.compression(),
ssl_object=sslobj,
)
if self._call_connection_made:
self._app_protocol.connection_made(self._app_transport)
self._wakeup_waiter()
self._session_established = True
        # In case transport.write() was already called, don't call
        # _process_write_backlog() immediately, but schedule it:
# _on_handshake_complete() can be called indirectly from
# _process_write_backlog(), and _process_write_backlog() is not
# reentrant.
self._loop.call_soon(self._process_write_backlog)
def _process_write_backlog(self):
# Try to make progress on the write backlog.
if self._transport is None or self._sslpipe is None:
return
try:
for i in range(len(self._write_backlog)):
data, offset = self._write_backlog[0]
if data:
ssldata, offset = self._sslpipe.feed_appdata(data, offset)
elif offset:
ssldata = self._sslpipe.do_handshake(
self._on_handshake_complete)
offset = 1
else:
ssldata = self._sslpipe.shutdown(self._finalize)
offset = 1
for chunk in ssldata:
self._transport.write(chunk)
if offset < len(data):
self._write_backlog[0] = (data, offset)
# A short write means that a write is blocked on a read
# We need to enable reading if it is paused!
assert self._sslpipe.need_ssldata
if self._transport._paused:
self._transport.resume_reading()
break
# An entire chunk from the backlog was processed. We can
# delete it and reduce the outstanding buffer size.
del self._write_backlog[0]
self._write_buffer_size -= len(data)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
if self._in_handshake:
# Exceptions will be re-raised in _on_handshake_complete.
self._on_handshake_complete(exc)
else:
self._fatal_error(exc, 'Fatal error on SSL transport')
def _fatal_error(self, exc, message='Fatal error on transport'):
if isinstance(exc, OSError):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self._transport,
'protocol': self,
})
if self._transport:
self._transport._force_close(exc)
def _finalize(self):
self._sslpipe = None
if self._transport is not None:
self._transport.close()
def _abort(self):
try:
if self._transport is not None:
self._transport.abort()
finally:
self._finalize()
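# Usage note (illustrative, not part of the module): applications do not
# construct SSLProtocol directly; the event loop builds it when given an SSL
# context. A minimal sketch of the public path, assuming a reachable TLS
# server at the placeholder address 'example.com':
def _example_client_connection():
    import asyncio
    import ssl as _ssl
    async def main():
        loop = asyncio.get_running_loop()
        transport, protocol = await loop.create_connection(
            asyncio.Protocol, 'example.com', 443,
            ssl=_ssl.create_default_context())
        transport.close()
    asyncio.run(main())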
# ===== __init__.py =====
"""The asyncio package, tracking PEP 3156."""
# flake8: noqa
import sys
# This relies on each of the submodules having an __all__ variable.
from .base_events import *
from .coroutines import *
from .events import *
from .exceptions import *
from .futures import *
from .locks import *
from .protocols import *
from .runners import *
from .queues import *
from .streams import *
from .subprocess import *
from .tasks import *
from .transports import *
# Exposed for _asynciomodule.c to implement now deprecated
# Task.all_tasks() method. This function will be removed in 3.9.
from .tasks import _all_tasks_compat # NoQA
__all__ = (base_events.__all__ +
coroutines.__all__ +
events.__all__ +
exceptions.__all__ +
futures.__all__ +
locks.__all__ +
protocols.__all__ +
runners.__all__ +
queues.__all__ +
streams.__all__ +
subprocess.__all__ +
tasks.__all__ +
transports.__all__)
if sys.platform == 'win32': # pragma: no cover
from .windows_events import *
__all__ += windows_events.__all__
else:
from .unix_events import * # pragma: no cover
__all__ += unix_events.__all__
# ===== transports.py =====
"""Abstract Transport class."""
__all__ = (
'BaseTransport', 'ReadTransport', 'WriteTransport',
'Transport', 'DatagramTransport', 'SubprocessTransport',
)
class BaseTransport:
"""Base class for transports."""
__slots__ = ('_extra',)
def __init__(self, extra=None):
if extra is None:
extra = {}
self._extra = extra
def get_extra_info(self, name, default=None):
"""Get optional transport information."""
return self._extra.get(name, default)
def is_closing(self):
"""Return True if the transport is closing or closed."""
raise NotImplementedError
def close(self):
"""Close the transport.
Buffered data will be flushed asynchronously. No more data
will be received. After all buffered data is flushed, the
protocol's connection_lost() method will (eventually) be
called with None as its argument.
"""
raise NotImplementedError
def set_protocol(self, protocol):
"""Set a new protocol."""
raise NotImplementedError
def get_protocol(self):
"""Return the current protocol."""
raise NotImplementedError
class ReadTransport(BaseTransport):
"""Interface for read-only transports."""
__slots__ = ()
def is_reading(self):
"""Return True if the transport is receiving."""
raise NotImplementedError
def pause_reading(self):
"""Pause the receiving end.
No data will be passed to the protocol's data_received()
method until resume_reading() is called.
"""
raise NotImplementedError
def resume_reading(self):
"""Resume the receiving end.
Data received will once again be passed to the protocol's
data_received() method.
"""
raise NotImplementedError
class WriteTransport(BaseTransport):
"""Interface for write-only transports."""
__slots__ = ()
def set_write_buffer_limits(self, high=None, low=None):
"""Set the high- and low-water limits for write flow control.
These two values control when to call the protocol's
pause_writing() and resume_writing() methods. If specified,
the low-water limit must be less than or equal to the
high-water limit. Neither value can be negative.
The defaults are implementation-specific. If only the
high-water limit is given, the low-water limit defaults to an
implementation-specific value less than or equal to the
high-water limit. Setting high to zero forces low to zero as
well, and causes pause_writing() to be called whenever the
buffer becomes non-empty. Setting low to zero causes
resume_writing() to be called only once the buffer is empty.
Use of zero for either limit is generally sub-optimal as it
reduces opportunities for doing I/O and computation
concurrently.
"""
raise NotImplementedError
def get_write_buffer_size(self):
"""Return the current size of the write buffer."""
raise NotImplementedError
def write(self, data):
"""Write some data bytes to the transport.
This does not block; it buffers the data and arranges for it
to be sent out asynchronously.
"""
raise NotImplementedError
def writelines(self, list_of_data):
"""Write a list (or any iterable) of data bytes to the transport.
The default implementation concatenates the arguments and
calls write() on the result.
"""
data = b''.join(list_of_data)
self.write(data)
def write_eof(self):
"""Close the write end after flushing buffered data.
(This is like typing ^D into a UNIX program reading from stdin.)
Data may still be received.
"""
raise NotImplementedError
def can_write_eof(self):
"""Return True if this transport supports write_eof(), False if not."""
raise NotImplementedError
def abort(self):
"""Close the transport immediately.
Buffered data will be lost. No more data will be received.
The protocol's connection_lost() method will (eventually) be
called with None as its argument.
"""
raise NotImplementedError
class Transport(ReadTransport, WriteTransport):
"""Interface representing a bidirectional transport.
There may be several implementations, but typically, the user does
not implement new transports; rather, the platform provides some
useful transports that are implemented using the platform's best
practices.
The user never instantiates a transport directly; they call a
utility function, passing it a protocol factory and other
information necessary to create the transport and protocol. (E.g.
EventLoop.create_connection() or EventLoop.create_server().)
The utility function will asynchronously create a transport and a
protocol and hook them up by calling the protocol's
connection_made() method, passing it the transport.
    The implementation here raises NotImplementedError for every method
except writelines(), which calls write() in a loop.
"""
__slots__ = ()
class DatagramTransport(BaseTransport):
"""Interface for datagram (UDP) transports."""
__slots__ = ()
def sendto(self, data, addr=None):
"""Send data to the transport.
This does not block; it buffers the data and arranges for it
to be sent out asynchronously.
        addr is the target socket address.
        If addr is None, use the target address given on transport creation.
"""
raise NotImplementedError
def abort(self):
"""Close the transport immediately.
Buffered data will be lost. No more data will be received.
The protocol's connection_lost() method will (eventually) be
called with None as its argument.
"""
raise NotImplementedError
class SubprocessTransport(BaseTransport):
__slots__ = ()
def get_pid(self):
"""Get subprocess id."""
raise NotImplementedError
def get_returncode(self):
"""Get subprocess returncode.
See also
http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
"""
raise NotImplementedError
def get_pipe_transport(self, fd):
"""Get transport for pipe with number fd."""
raise NotImplementedError
def send_signal(self, signal):
"""Send signal to subprocess.
See also:
docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
"""
raise NotImplementedError
def terminate(self):
"""Stop the subprocess.
Alias for close() method.
On Posix OSs the method sends SIGTERM to the subprocess.
On Windows the Win32 API function TerminateProcess()
is called to stop the subprocess.
See also:
http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
"""
raise NotImplementedError
def kill(self):
"""Kill the subprocess.
On Posix OSs the function sends SIGKILL to the subprocess.
On Windows kill() is an alias for terminate().
See also:
http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
"""
raise NotImplementedError
class _FlowControlMixin(Transport):
"""All the logic for (write) flow control in a mix-in base class.
The subclass must implement get_write_buffer_size(). It must call
_maybe_pause_protocol() whenever the write buffer size increases,
and _maybe_resume_protocol() whenever it decreases. It may also
override set_write_buffer_limits() (e.g. to specify different
defaults).
The subclass constructor must call super().__init__(extra). This
will call set_write_buffer_limits().
The user may call set_write_buffer_limits() and
get_write_buffer_size(), and their protocol's pause_writing() and
resume_writing() may be called.
"""
__slots__ = ('_loop', '_protocol_paused', '_high_water', '_low_water')
def __init__(self, extra=None, loop=None):
super().__init__(extra)
assert loop is not None
self._loop = loop
self._protocol_paused = False
self._set_write_buffer_limits()
def _maybe_pause_protocol(self):
size = self.get_write_buffer_size()
if size <= self._high_water:
return
if not self._protocol_paused:
self._protocol_paused = True
try:
self._protocol.pause_writing()
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
self._loop.call_exception_handler({
'message': 'protocol.pause_writing() failed',
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
def _maybe_resume_protocol(self):
if (self._protocol_paused and
self.get_write_buffer_size() <= self._low_water):
self._protocol_paused = False
try:
self._protocol.resume_writing()
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
self._loop.call_exception_handler({
'message': 'protocol.resume_writing() failed',
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
def get_write_buffer_limits(self):
return (self._low_water, self._high_water)
def _set_write_buffer_limits(self, high=None, low=None):
if high is None:
if low is None:
high = 64 * 1024
else:
high = 4 * low
if low is None:
low = high // 4
if not high >= low >= 0:
raise ValueError(
f'high ({high!r}) must be >= low ({low!r}) must be >= 0')
self._high_water = high
self._low_water = low
def set_write_buffer_limits(self, high=None, low=None):
self._set_write_buffer_limits(high=high, low=low)
self._maybe_pause_protocol()
def get_write_buffer_size(self):
raise NotImplementedError
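# Illustrative sketch (not part of the module): the minimal obligations of a
# _FlowControlMixin subclass. The buffer here is a plain bytearray; a real
# transport writes to a socket and drains the buffer from the event loop.
class _ExampleBufferedTransport(_FlowControlMixin):
    def __init__(self, loop, protocol):
        super().__init__(extra=None, loop=loop)
        self._protocol = protocol
        self._buffer = bytearray()
    def get_write_buffer_size(self):
        return len(self._buffer)
    def write(self, data):
        self._buffer.extend(data)
        # The buffer grew: maybe ask the protocol to pause_writing().
        self._maybe_pause_protocol()
    def _drain(self, nbytes):
        del self._buffer[:nbytes]
        # The buffer shrank: maybe ask the protocol to resume_writing().
        self._maybe_resume_protocol()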
# ===== compat.py =====
"""Compatibility helpers for the different Python versions."""
import sys
PY34 = sys.version_info >= (3, 4)
PY35 = sys.version_info >= (3, 5)
PY352 = sys.version_info >= (3, 5, 2)
def flatten_list_bytes(list_of_data):
"""Concatenate a sequence of bytes-like objects."""
if not PY34:
# On Python 3.3 and older, bytes.join() doesn't handle
# memoryview.
list_of_data = (
bytes(data) if isinstance(data, memoryview) else data
for data in list_of_data)
return b''.join(list_of_data)
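# Example (illustrative): on Python 3.4+ this is simply b''.join(); the
# generator path only matters for memoryview items on Python 3.3.
def _example_flatten():
    data = [b'ab', memoryview(b'cd'), bytearray(b'ef')]
    assert flatten_list_bytes(data) == b'abcdef'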
# ===== base_tasks.py =====
import linecache
import traceback
from . import base_futures
from . import coroutines
def _task_repr_info(task):
info = base_futures._future_repr_info(task)
if task._must_cancel:
# replace status
info[0] = 'cancelling'
info.insert(1, 'name=%r' % task.get_name())
coro = coroutines._format_coroutine(task._coro)
info.insert(2, f'coro=<{coro}>')
if task._fut_waiter is not None:
info.insert(3, f'wait_for={task._fut_waiter!r}')
return info
def _task_get_stack(task, limit):
frames = []
if hasattr(task._coro, 'cr_frame'):
# case 1: 'async def' coroutines
f = task._coro.cr_frame
elif hasattr(task._coro, 'gi_frame'):
# case 2: legacy coroutines
f = task._coro.gi_frame
elif hasattr(task._coro, 'ag_frame'):
# case 3: async generators
f = task._coro.ag_frame
else:
# case 4: unknown objects
f = None
if f is not None:
while f is not None:
if limit is not None:
if limit <= 0:
break
limit -= 1
frames.append(f)
f = f.f_back
frames.reverse()
elif task._exception is not None:
tb = task._exception.__traceback__
while tb is not None:
if limit is not None:
if limit <= 0:
break
limit -= 1
frames.append(tb.tb_frame)
tb = tb.tb_next
return frames
def _task_print_stack(task, limit, file):
extracted_list = []
checked = set()
for f in task.get_stack(limit=limit):
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
if filename not in checked:
checked.add(filename)
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
extracted_list.append((filename, lineno, name, line))
exc = task._exception
if not extracted_list:
print(f'No stack for {task!r}', file=file)
elif exc is not None:
print(f'Traceback for {task!r} (most recent call last):', file=file)
else:
print(f'Stack for {task!r} (most recent call last):', file=file)
traceback.print_list(extracted_list, file=file)
if exc is not None:
for line in traceback.format_exception_only(exc.__class__, exc):
print(line, file=file, end='')
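# Usage note (illustrative): these helpers back Task.get_stack() and
# Task.print_stack(); application code calls the Task methods instead, e.g.:
#
#     task = asyncio.ensure_future(coro())
#     task.print_stack(limit=10)  # delegates to _task_print_stack(task, 10, None)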
# ===== events.py =====
"""Event loop and event loop policy."""
__all__ = (
'AbstractEventLoopPolicy',
'AbstractEventLoop', 'AbstractServer',
'Handle', 'TimerHandle',
'get_event_loop_policy', 'set_event_loop_policy',
'get_event_loop', 'set_event_loop', 'new_event_loop',
'get_child_watcher', 'set_child_watcher',
'_set_running_loop', 'get_running_loop',
'_get_running_loop',
)
import contextvars
import os
import socket
import subprocess
import sys
import threading
from . import format_helpers
from . import exceptions
class Handle:
"""Object returned by callback registration methods."""
__slots__ = ('_callback', '_args', '_cancelled', '_loop',
'_source_traceback', '_repr', '__weakref__',
'_context')
def __init__(self, callback, args, loop, context=None):
if context is None:
context = contextvars.copy_context()
self._context = context
self._loop = loop
self._callback = callback
self._args = args
self._cancelled = False
self._repr = None
if self._loop.get_debug():
self._source_traceback = format_helpers.extract_stack(
sys._getframe(1))
else:
self._source_traceback = None
def _repr_info(self):
info = [self.__class__.__name__]
if self._cancelled:
info.append('cancelled')
if self._callback is not None:
info.append(format_helpers._format_callback_source(
self._callback, self._args))
if self._source_traceback:
frame = self._source_traceback[-1]
info.append(f'created at {frame[0]}:{frame[1]}')
return info
def __repr__(self):
if self._repr is not None:
return self._repr
info = self._repr_info()
return '<{}>'.format(' '.join(info))
def cancel(self):
if not self._cancelled:
self._cancelled = True
if self._loop.get_debug():
# Keep a representation in debug mode to keep callback and
# parameters. For example, to log the warning
# "Executing <Handle...> took 2.5 second"
self._repr = repr(self)
self._callback = None
self._args = None
def cancelled(self):
return self._cancelled
def _run(self):
try:
self._context.run(self._callback, *self._args)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
cb = format_helpers._format_callback_source(
self._callback, self._args)
msg = f'Exception in callback {cb}'
context = {
'message': msg,
'exception': exc,
'handle': self,
}
if self._source_traceback:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
self = None # Needed to break cycles when an exception occurs.
class TimerHandle(Handle):
"""Object returned by timed callback registration methods."""
__slots__ = ['_scheduled', '_when']
def __init__(self, when, callback, args, loop, context=None):
assert when is not None
super().__init__(callback, args, loop, context)
if self._source_traceback:
del self._source_traceback[-1]
self._when = when
self._scheduled = False
def _repr_info(self):
info = super()._repr_info()
pos = 2 if self._cancelled else 1
info.insert(pos, f'when={self._when}')
return info
def __hash__(self):
return hash(self._when)
def __lt__(self, other):
return self._when < other._when
def __le__(self, other):
if self._when < other._when:
return True
return self.__eq__(other)
def __gt__(self, other):
return self._when > other._when
def __ge__(self, other):
if self._when > other._when:
return True
return self.__eq__(other)
def __eq__(self, other):
if isinstance(other, TimerHandle):
return (self._when == other._when and
self._callback == other._callback and
self._args == other._args and
self._cancelled == other._cancelled)
return NotImplemented
def __ne__(self, other):
equal = self.__eq__(other)
return NotImplemented if equal is NotImplemented else not equal
def cancel(self):
if not self._cancelled:
self._loop._timer_handle_cancelled(self)
super().cancel()
def when(self):
"""Return a scheduled callback time.
The time is an absolute timestamp, using the same time
reference as loop.time().
"""
return self._when
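# Usage note (illustrative): TimerHandle instances are returned by the timed
# callback registration methods; ordering by _when is what lets an event loop
# keep them in a heap. A sketch with placeholder names:
#
#     handle = loop.call_later(5.0, callback)
#     handle.when()    # absolute deadline, on the loop.time() reference
#     handle.cancel()  # also notifies the loop via _timer_handle_cancelled()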
class AbstractServer:
"""Abstract server returned by create_server()."""
def close(self):
"""Stop serving. This leaves existing connections open."""
raise NotImplementedError
def get_loop(self):
"""Get the event loop the Server object is attached to."""
raise NotImplementedError
def is_serving(self):
"""Return True if the server is accepting connections."""
raise NotImplementedError
async def start_serving(self):
"""Start accepting connections.
This method is idempotent, so it can be called when
        the server is already serving.
"""
raise NotImplementedError
async def serve_forever(self):
"""Start accepting connections until the coroutine is cancelled.
The server is closed when the coroutine is cancelled.
"""
raise NotImplementedError
async def wait_closed(self):
"""Coroutine to wait until service is closed."""
raise NotImplementedError
async def __aenter__(self):
return self
async def __aexit__(self, *exc):
self.close()
await self.wait_closed()
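# Usage sketch (illustrative, not part of the module): AbstractServer supports
# "async with", so a concrete Server from loop.create_server() can be used as
# below. EchoProtocol and the address are placeholders.
def _example_serve():
    import asyncio
    class EchoProtocol(asyncio.Protocol):
        def connection_made(self, transport):
            self.transport = transport
        def data_received(self, data):
            self.transport.write(data)
    async def main():
        loop = asyncio.get_running_loop()
        server = await loop.create_server(EchoProtocol, '127.0.0.1', 8888)
        # On exiting the "async with" block, close() is called and then
        # wait_closed() is awaited (see __aexit__ above).
        async with server:
            await server.serve_forever()
    asyncio.run(main())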
class AbstractEventLoop:
"""Abstract event loop."""
# Running and stopping the event loop.
def run_forever(self):
"""Run the event loop until stop() is called."""
raise NotImplementedError
def run_until_complete(self, future):
"""Run the event loop until a Future is done.
Return the Future's result, or raise its exception.
"""
raise NotImplementedError
def stop(self):
"""Stop the event loop as soon as reasonable.
Exactly how soon that is may depend on the implementation, but
no more I/O callbacks should be scheduled.
"""
raise NotImplementedError
def is_running(self):
"""Return whether the event loop is currently running."""
raise NotImplementedError
def is_closed(self):
"""Returns True if the event loop was closed."""
raise NotImplementedError
def close(self):
"""Close the loop.
The loop should not be running.
This is idempotent and irreversible.
No other methods should be called after this one.
"""
raise NotImplementedError
async def shutdown_asyncgens(self):
"""Shutdown all active asynchronous generators."""
raise NotImplementedError
# Methods scheduling callbacks. All these return Handles.
def _timer_handle_cancelled(self, handle):
"""Notification that a TimerHandle has been cancelled."""
raise NotImplementedError
def call_soon(self, callback, *args):
return self.call_later(0, callback, *args)
def call_later(self, delay, callback, *args):
raise NotImplementedError
def call_at(self, when, callback, *args):
raise NotImplementedError
def time(self):
raise NotImplementedError
def create_future(self):
raise NotImplementedError
# Method scheduling a coroutine object: create a task.
def create_task(self, coro, *, name=None):
raise NotImplementedError
# Methods for interacting with threads.
def call_soon_threadsafe(self, callback, *args):
raise NotImplementedError
def run_in_executor(self, executor, func, *args):
raise NotImplementedError
def set_default_executor(self, executor):
raise NotImplementedError
# Network I/O methods returning Futures.
async def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
raise NotImplementedError
async def getnameinfo(self, sockaddr, flags=0):
raise NotImplementedError
async def create_connection(
self, protocol_factory, host=None, port=None,
*, ssl=None, family=0, proto=0,
flags=0, sock=None, local_addr=None,
server_hostname=None,
ssl_handshake_timeout=None,
happy_eyeballs_delay=None, interleave=None):
raise NotImplementedError
async def create_server(
self, protocol_factory, host=None, port=None,
*, family=socket.AF_UNSPEC,
flags=socket.AI_PASSIVE, sock=None, backlog=100,
ssl=None, reuse_address=None, reuse_port=None,
ssl_handshake_timeout=None,
start_serving=True):
"""A coroutine which creates a TCP server bound to host and port.
The return value is a Server object which can be used to stop
the service.
If host is an empty string or None all interfaces are assumed
and a list of multiple sockets will be returned (most likely
one for IPv4 and another one for IPv6). The host parameter can also be
a sequence (e.g. list) of hosts to bind to.
family can be set to either AF_INET or AF_INET6 to force the
socket to use IPv4 or IPv6. If not set it will be determined
from host (defaults to AF_UNSPEC).
flags is a bitmask for getaddrinfo().
sock can optionally be specified in order to use a preexisting
socket object.
backlog is the maximum number of queued connections passed to
listen() (defaults to 100).
ssl can be set to an SSLContext to enable SSL over the
accepted connections.
reuse_address tells the kernel to reuse a local socket in
TIME_WAIT state, without waiting for its natural timeout to
        expire. If not specified, it will automatically be set to True on
UNIX.
reuse_port tells the kernel to allow this endpoint to be bound to
the same port as other existing endpoints are bound to, so long as
they all set this flag when being created. This option is not
supported on Windows.
ssl_handshake_timeout is the time in seconds that an SSL server
will wait for completion of the SSL handshake before aborting the
connection. Default is 60s.
start_serving set to True (default) causes the created server
to start accepting connections immediately. When set to False,
the user should await Server.start_serving() or Server.serve_forever()
        to make the server start accepting connections.
"""
raise NotImplementedError
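    # Typical call (illustrative; names and values are placeholders):
    #
    #     server = await loop.create_server(
    #         EchoProtocol, '0.0.0.0', 8443,
    #         ssl=server_ctx,          # enable TLS on accepted connections
    #         start_serving=False)     # bind now, accept later
    #     await server.start_serving()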
async def sendfile(self, transport, file, offset=0, count=None,
*, fallback=True):
"""Send a file through a transport.
Return an amount of sent bytes.
"""
raise NotImplementedError
async def start_tls(self, transport, protocol, sslcontext, *,
server_side=False,
server_hostname=None,
ssl_handshake_timeout=None):
"""Upgrade a transport to TLS.
Return a new transport that *protocol* should start using
immediately.
"""
raise NotImplementedError
async def create_unix_connection(
self, protocol_factory, path=None, *,
ssl=None, sock=None,
server_hostname=None,
ssl_handshake_timeout=None):
raise NotImplementedError
async def create_unix_server(
self, protocol_factory, path=None, *,
sock=None, backlog=100, ssl=None,
ssl_handshake_timeout=None,
start_serving=True):
"""A coroutine which creates a UNIX Domain Socket server.
The return value is a Server object, which can be used to stop
the service.
        path is a str, representing a file system path to bind the
server socket to.
sock can optionally be specified in order to use a preexisting
socket object.
backlog is the maximum number of queued connections passed to
listen() (defaults to 100).
ssl can be set to an SSLContext to enable SSL over the
accepted connections.
ssl_handshake_timeout is the time in seconds that an SSL server
will wait for the SSL handshake to complete (defaults to 60s).
start_serving set to True (default) causes the created server
to start accepting connections immediately. When set to False,
the user should await Server.start_serving() or Server.serve_forever()
        to make the server start accepting connections.
"""
raise NotImplementedError
async def create_datagram_endpoint(self, protocol_factory,
local_addr=None, remote_addr=None, *,
family=0, proto=0, flags=0,
reuse_address=None, reuse_port=None,
allow_broadcast=None, sock=None):
"""A coroutine which creates a datagram endpoint.
This method will try to establish the endpoint in the background.
When successful, the coroutine returns a (transport, protocol) pair.
protocol_factory must be a callable returning a protocol instance.
        The socket family will be AF_INET, AF_INET6 or AF_UNIX, depending on
        the host (or the family argument, if provided); the socket type will
        be SOCK_DGRAM.
reuse_address tells the kernel to reuse a local socket in
TIME_WAIT state, without waiting for its natural timeout to
expire. If not specified it will automatically be set to True on
UNIX.
reuse_port tells the kernel to allow this endpoint to be bound to
the same port as other existing endpoints are bound to, so long as
they all set this flag when being created. This option is not
        supported on Windows and some UNIXes. If the
:py:data:`~socket.SO_REUSEPORT` constant is not defined then this
capability is unsupported.
allow_broadcast tells the kernel to allow this endpoint to send
messages to the broadcast address.
sock can optionally be specified in order to use a preexisting
socket object.
"""
raise NotImplementedError
# Pipes and subprocesses.
async def connect_read_pipe(self, protocol_factory, pipe):
"""Register read pipe in event loop. Set the pipe to non-blocking mode.
        protocol_factory should instantiate an object with the Protocol
        interface.
pipe is a file-like object.
Return pair (transport, protocol), where transport supports the
ReadTransport interface."""
        # The reason to accept a file-like object instead of just a file
        # descriptor is that we need to own the pipe and close it when the
        # transport is finished. We could get complicated errors if we passed
        # f.fileno(), closed the fd in the pipe transport, and then closed f
        # (or vice versa).
raise NotImplementedError
async def connect_write_pipe(self, protocol_factory, pipe):
"""Register write pipe in event loop.
        protocol_factory should instantiate an object with the BaseProtocol
        interface.
        pipe is a file-like object already switched to non-blocking mode.
        Return pair (transport, protocol), where transport supports the
        WriteTransport interface."""
        # The reason to accept a file-like object instead of just a file
        # descriptor is that we need to own the pipe and close it when the
        # transport is finished. We could get complicated errors if we passed
        # f.fileno(), closed the fd in the pipe transport, and then closed f
        # (or vice versa).
raise NotImplementedError
async def subprocess_shell(self, protocol_factory, cmd, *,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs):
raise NotImplementedError
async def subprocess_exec(self, protocol_factory, *args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs):
raise NotImplementedError
# Ready-based callback registration methods.
# The add_*() methods return None.
# The remove_*() methods return True if something was removed,
# False if there was nothing to delete.
def add_reader(self, fd, callback, *args):
raise NotImplementedError
def remove_reader(self, fd):
raise NotImplementedError
def add_writer(self, fd, callback, *args):
raise NotImplementedError
def remove_writer(self, fd):
raise NotImplementedError
# Completion based I/O methods returning Futures.
async def sock_recv(self, sock, nbytes):
raise NotImplementedError
async def sock_recv_into(self, sock, buf):
raise NotImplementedError
async def sock_sendall(self, sock, data):
raise NotImplementedError
async def sock_connect(self, sock, address):
raise NotImplementedError
async def sock_accept(self, sock):
raise NotImplementedError
async def sock_sendfile(self, sock, file, offset=0, count=None,
*, fallback=None):
raise NotImplementedError
# Signal handling.
def add_signal_handler(self, sig, callback, *args):
raise NotImplementedError
def remove_signal_handler(self, sig):
raise NotImplementedError
# Task factory.
def set_task_factory(self, factory):
raise NotImplementedError
def get_task_factory(self):
raise NotImplementedError
# Error handlers.
def get_exception_handler(self):
raise NotImplementedError
def set_exception_handler(self, handler):
raise NotImplementedError
def default_exception_handler(self, context):
raise NotImplementedError
def call_exception_handler(self, context):
raise NotImplementedError
# Debug flag management.
def get_debug(self):
raise NotImplementedError
def set_debug(self, enabled):
raise NotImplementedError
class AbstractEventLoopPolicy:
"""Abstract policy for accessing the event loop."""
def get_event_loop(self):
"""Get the event loop for the current context.
Returns an event loop object implementing the BaseEventLoop interface,
or raises an exception in case no event loop has been set for the
current context and the current policy does not specify to create one.
It should never return None."""
raise NotImplementedError
def set_event_loop(self, loop):
"""Set the event loop for the current context to loop."""
raise NotImplementedError
def new_event_loop(self):
"""Create and return a new event loop object according to this
policy's rules. If there's need to set this loop as the event loop for
the current context, set_event_loop must be called explicitly."""
raise NotImplementedError
# Child processes handling (Unix only).
def get_child_watcher(self):
"Get the watcher for child processes."
raise NotImplementedError
def set_child_watcher(self, watcher):
"""Set the watcher for child processes."""
raise NotImplementedError
class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
"""Default policy implementation for accessing the event loop.
In this policy, each thread has its own event loop. However, we
only automatically create an event loop by default for the main
thread; other threads by default have no event loop.
Other policies may have different rules (e.g. a single global
event loop, or automatically creating an event loop per thread, or
using some other notion of context to which an event loop is
associated).
"""
_loop_factory = None
class _Local(threading.local):
_loop = None
_set_called = False
def __init__(self):
self._local = self._Local()
def get_event_loop(self):
"""Get the event loop for the current context.
Returns an instance of EventLoop or raises an exception.
"""
if (self._local._loop is None and
not self._local._set_called and
isinstance(threading.current_thread(), threading._MainThread)):
self.set_event_loop(self.new_event_loop())
if self._local._loop is None:
raise RuntimeError('There is no current event loop in thread %r.'
% threading.current_thread().name)
return self._local._loop
def set_event_loop(self, loop):
"""Set the event loop."""
self._local._set_called = True
assert loop is None or isinstance(loop, AbstractEventLoop)
self._local._loop = loop
def new_event_loop(self):
"""Create a new event loop.
You must call set_event_loop() to make this the current event
loop.
"""
return self._loop_factory()
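# Illustrative sketch (not part of the module): under the default policy only
# the main thread gets a loop automatically; other threads must create and
# install their own.
def _example_per_thread_loops():
    import asyncio
    import threading
    def worker():
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            loop.run_until_complete(asyncio.sleep(0))
        finally:
            loop.close()
    t = threading.Thread(target=worker)
    t.start()
    t.join()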
# Event loop policy. The policy itself is always global, even if the
# policy's rules say that there is an event loop per thread (or other
# notion of context). The default policy is installed by the first
# call to get_event_loop_policy().
_event_loop_policy = None
# Lock for protecting the on-the-fly creation of the event loop policy.
_lock = threading.Lock()
# A TLS for the running event loop, used by _get_running_loop.
class _RunningLoop(threading.local):
loop_pid = (None, None)
_running_loop = _RunningLoop()
def get_running_loop():
"""Return the running event loop. Raise a RuntimeError if there is none.
This function is thread-specific.
"""
# NOTE: this function is implemented in C (see _asynciomodule.c)
loop = _get_running_loop()
if loop is None:
raise RuntimeError('no running event loop')
return loop
def _get_running_loop():
"""Return the running event loop or None.
This is a low-level function intended to be used by event loops.
This function is thread-specific.
"""
# NOTE: this function is implemented in C (see _asynciomodule.c)
running_loop, pid = _running_loop.loop_pid
if running_loop is not None and pid == os.getpid():
return running_loop
def _set_running_loop(loop):
"""Set the running event loop.
This is a low-level function intended to be used by event loops.
This function is thread-specific.
"""
# NOTE: this function is implemented in C (see _asynciomodule.c)
_running_loop.loop_pid = (loop, os.getpid())
def _init_event_loop_policy():
global _event_loop_policy
with _lock:
if _event_loop_policy is None: # pragma: no branch
from . import DefaultEventLoopPolicy
_event_loop_policy = DefaultEventLoopPolicy()
def get_event_loop_policy():
"""Get the current event loop policy."""
if _event_loop_policy is None:
_init_event_loop_policy()
return _event_loop_policy
def set_event_loop_policy(policy):
"""Set the current event loop policy.
If policy is None, the default policy is restored."""
global _event_loop_policy
assert policy is None or isinstance(policy, AbstractEventLoopPolicy)
_event_loop_policy = policy
def get_event_loop():
"""Return an asyncio event loop.
When called from a coroutine or a callback (e.g. scheduled with call_soon
or similar API), this function will always return the running event loop.
If there is no running event loop set, the function will return
the result of `get_event_loop_policy().get_event_loop()` call.
"""
# NOTE: this function is implemented in C (see _asynciomodule.c)
current_loop = _get_running_loop()
if current_loop is not None:
return current_loop
return get_event_loop_policy().get_event_loop()
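# Example (illustrative): inside a coroutine both lookups yield the running
# loop, but get_running_loop() is preferred since it never creates a loop and
# raises RuntimeError when nothing is running.
def _example_loop_lookup():
    import asyncio
    async def inside():
        assert asyncio.get_running_loop() is asyncio.get_event_loop()
    asyncio.run(inside())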
def set_event_loop(loop):
"""Equivalent to calling get_event_loop_policy().set_event_loop(loop)."""
get_event_loop_policy().set_event_loop(loop)
def new_event_loop():
"""Equivalent to calling get_event_loop_policy().new_event_loop()."""
return get_event_loop_policy().new_event_loop()
def get_child_watcher():
"""Equivalent to calling get_event_loop_policy().get_child_watcher()."""
return get_event_loop_policy().get_child_watcher()
def set_child_watcher(watcher):
"""Equivalent to calling
get_event_loop_policy().set_child_watcher(watcher)."""
return get_event_loop_policy().set_child_watcher(watcher)
# Alias pure-Python implementations for testing purposes.
_py__get_running_loop = _get_running_loop
_py__set_running_loop = _set_running_loop
_py_get_running_loop = get_running_loop
_py_get_event_loop = get_event_loop
try:
# get_event_loop() is one of the most frequently called
# functions in asyncio. Pure Python implementation is
# about 4 times slower than C-accelerated.
from _asyncio import (_get_running_loop, _set_running_loop,
get_running_loop, get_event_loop)
except ImportError:
pass
else:
# Alias C implementations for testing purposes.
_c__get_running_loop = _get_running_loop
_c__set_running_loop = _set_running_loop
_c_get_running_loop = get_running_loop
_c_get_event_loop = get_event_loop
# ===== subprocess.py =====
__all__ = 'create_subprocess_exec', 'create_subprocess_shell'
import subprocess
import warnings
from . import events
from . import protocols
from . import streams
from . import tasks
from .log import logger
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
DEVNULL = subprocess.DEVNULL
class SubprocessStreamProtocol(streams.FlowControlMixin,
protocols.SubprocessProtocol):
"""Like StreamReaderProtocol, but for a subprocess."""
def __init__(self, limit, loop):
super().__init__(loop=loop)
self._limit = limit
self.stdin = self.stdout = self.stderr = None
self._transport = None
self._process_exited = False
self._pipe_fds = []
self._stdin_closed = self._loop.create_future()
def __repr__(self):
info = [self.__class__.__name__]
if self.stdin is not None:
info.append(f'stdin={self.stdin!r}')
if self.stdout is not None:
info.append(f'stdout={self.stdout!r}')
if self.stderr is not None:
info.append(f'stderr={self.stderr!r}')
return '<{}>'.format(' '.join(info))
def connection_made(self, transport):
self._transport = transport
stdout_transport = transport.get_pipe_transport(1)
if stdout_transport is not None:
self.stdout = streams.StreamReader(limit=self._limit,
loop=self._loop)
self.stdout.set_transport(stdout_transport)
self._pipe_fds.append(1)
stderr_transport = transport.get_pipe_transport(2)
if stderr_transport is not None:
self.stderr = streams.StreamReader(limit=self._limit,
loop=self._loop)
self.stderr.set_transport(stderr_transport)
self._pipe_fds.append(2)
stdin_transport = transport.get_pipe_transport(0)
if stdin_transport is not None:
self.stdin = streams.StreamWriter(stdin_transport,
protocol=self,
reader=None,
loop=self._loop)
def pipe_data_received(self, fd, data):
if fd == 1:
reader = self.stdout
elif fd == 2:
reader = self.stderr
else:
reader = None
if reader is not None:
reader.feed_data(data)
def pipe_connection_lost(self, fd, exc):
if fd == 0:
pipe = self.stdin
if pipe is not None:
pipe.close()
self.connection_lost(exc)
if exc is None:
self._stdin_closed.set_result(None)
else:
self._stdin_closed.set_exception(exc)
return
if fd == 1:
reader = self.stdout
elif fd == 2:
reader = self.stderr
else:
reader = None
if reader is not None:
if exc is None:
reader.feed_eof()
else:
reader.set_exception(exc)
if fd in self._pipe_fds:
self._pipe_fds.remove(fd)
self._maybe_close_transport()
def process_exited(self):
self._process_exited = True
self._maybe_close_transport()
def _maybe_close_transport(self):
if len(self._pipe_fds) == 0 and self._process_exited:
self._transport.close()
self._transport = None
def _get_close_waiter(self, stream):
if stream is self.stdin:
return self._stdin_closed
class Process:
def __init__(self, transport, protocol, loop):
self._transport = transport
self._protocol = protocol
self._loop = loop
self.stdin = protocol.stdin
self.stdout = protocol.stdout
self.stderr = protocol.stderr
self.pid = transport.get_pid()
def __repr__(self):
return f'<{self.__class__.__name__} {self.pid}>'
@property
def returncode(self):
return self._transport.get_returncode()
async def wait(self):
"""Wait until the process exit and return the process return code."""
return await self._transport._wait()
def send_signal(self, signal):
self._transport.send_signal(signal)
def terminate(self):
self._transport.terminate()
def kill(self):
self._transport.kill()
async def _feed_stdin(self, input):
debug = self._loop.get_debug()
self.stdin.write(input)
if debug:
logger.debug(
'%r communicate: feed stdin (%s bytes)', self, len(input))
try:
await self.stdin.drain()
except (BrokenPipeError, ConnectionResetError) as exc:
# communicate() ignores BrokenPipeError and ConnectionResetError
if debug:
logger.debug('%r communicate: stdin got %r', self, exc)
if debug:
logger.debug('%r communicate: close stdin', self)
self.stdin.close()
async def _noop(self):
return None
async def _read_stream(self, fd):
transport = self._transport.get_pipe_transport(fd)
if fd == 2:
stream = self.stderr
else:
assert fd == 1
stream = self.stdout
if self._loop.get_debug():
name = 'stdout' if fd == 1 else 'stderr'
logger.debug('%r communicate: read %s', self, name)
output = await stream.read()
if self._loop.get_debug():
name = 'stdout' if fd == 1 else 'stderr'
logger.debug('%r communicate: close %s', self, name)
transport.close()
return output
async def communicate(self, input=None):
if input is not None:
stdin = self._feed_stdin(input)
else:
stdin = self._noop()
if self.stdout is not None:
stdout = self._read_stream(1)
else:
stdout = self._noop()
if self.stderr is not None:
stderr = self._read_stream(2)
else:
stderr = self._noop()
stdin, stdout, stderr = await tasks.gather(stdin, stdout, stderr,
loop=self._loop)
await self.wait()
return (stdout, stderr)
async def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
loop=None, limit=streams._DEFAULT_LIMIT,
**kwds):
if loop is None:
loop = events.get_event_loop()
else:
warnings.warn("The loop argument is deprecated since Python 3.8 "
"and scheduled for removal in Python 3.10.",
DeprecationWarning,
stacklevel=2
)
protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
loop=loop)
transport, protocol = await loop.subprocess_shell(
protocol_factory,
cmd, stdin=stdin, stdout=stdout,
stderr=stderr, **kwds)
return Process(transport, protocol, loop)
async def create_subprocess_exec(program, *args, stdin=None, stdout=None,
stderr=None, loop=None,
limit=streams._DEFAULT_LIMIT, **kwds):
if loop is None:
loop = events.get_event_loop()
else:
warnings.warn("The loop argument is deprecated since Python 3.8 "
"and scheduled for removal in Python 3.10.",
DeprecationWarning,
stacklevel=2
)
protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
loop=loop)
transport, protocol = await loop.subprocess_exec(
protocol_factory,
program, *args,
stdin=stdin, stdout=stdout,
stderr=stderr, **kwds)
return Process(transport, protocol, loop)
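# Usage sketch (illustrative, not part of the module): driving the helpers
# above from application code; sys.executable keeps the child process
# portable across platforms.
#
#     import asyncio
#     import sys
#
#     async def main():
#         proc = await asyncio.create_subprocess_exec(
#             sys.executable, '-c', 'print("hello")',
#             stdout=asyncio.subprocess.PIPE)
#         stdout, _ = await proc.communicate()
#         print(stdout.decode().strip(), proc.returncode)
#
#     asyncio.run(main())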
base_futures.py
__all__ = ()
import reprlib
from _thread import get_ident
from . import format_helpers
# States for Future.
_PENDING = 'PENDING'
_CANCELLED = 'CANCELLED'
_FINISHED = 'FINISHED'
def isfuture(obj):
"""Check for a Future.
This returns True when obj is a Future instance or is advertising
itself as duck-type compatible by setting _asyncio_future_blocking.
See comment in Future for more details.
"""
return (hasattr(obj.__class__, '_asyncio_future_blocking') and
obj._asyncio_future_blocking is not None)
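# Illustrative sketch (hypothetical class, not from the source): any object
# whose class exposes a non-None _asyncio_future_blocking attribute is
# accepted, which is how third-party futures advertise duck-type
# compatibility:
#
#     class DuckFuture:
#         _asyncio_future_blocking = False
#
#     assert isfuture(DuckFuture())
#     assert not isfuture(object())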
def _format_callbacks(cb):
"""helper function for Future.__repr__"""
size = len(cb)
if not size:
cb = ''
def format_cb(callback):
return format_helpers._format_callback_source(callback, ())
if size == 1:
cb = format_cb(cb[0][0])
elif size == 2:
cb = '{}, {}'.format(format_cb(cb[0][0]), format_cb(cb[1][0]))
elif size > 2:
cb = '{}, <{} more>, {}'.format(format_cb(cb[0][0]),
size - 2,
format_cb(cb[-1][0]))
return f'cb=[{cb}]'
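# Example of the abbreviated form (illustrative names): with five callbacks
# registered, only the first and last are rendered:
#
#     cb=[foo() at app.py:10, <3 more>, bar() at app.py:42]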
# bpo-42183: _repr_running is needed for repr protection
# when a Future or Task result contains itself directly or indirectly.
# The logic is borrowed from @reprlib.recursive_repr decorator.
# Unfortunately, using the decorator directly is impossible because it
# raises "AttributeError: '_asyncio.Task' object has no attribute
# '__module__'".
#
# Once that is fixed, we can return to the decorator-based approach.
_repr_running = set()
def _future_repr_info(future):
# (Future) -> str
"""helper function for Future.__repr__"""
info = [future._state.lower()]
if future._state == _FINISHED:
if future._exception is not None:
info.append(f'exception={future._exception!r}')
else:
key = id(future), get_ident()
if key in _repr_running:
result = '...'
else:
_repr_running.add(key)
try:
# use reprlib to limit the length of the output, especially
# for very long strings
result = reprlib.repr(future._result)
finally:
_repr_running.discard(key)
info.append(f'result={result}')
if future._callbacks:
info.append(_format_callbacks(future._callbacks))
if future._source_traceback:
frame = future._source_traceback[-1]
info.append(f'created at {frame[0]}:{frame[1]}')
return info
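# Illustrative sketch (assumes a fresh event loop): the _repr_running guard
# above collapses a self-referential result to '...' instead of recursing:
#
#     loop = asyncio.new_event_loop()
#     fut = loop.create_future()
#     box = [fut]
#     fut.set_result(box)          # fut's result now contains fut itself
#     repr(fut)   # '<Future finished result=[<Future finished result=...>]>'
#     loop.close()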
proactor_events.py
"""Event loop using a proactor and related classes.
A proactor is a "notify-on-completion" multiplexer. Currently a
proactor is only implemented on Windows with IOCP.
"""
__all__ = 'BaseProactorEventLoop',
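# Context sketch (an assumption about usage, not code from this module): the
# concrete subclass is windows_events.ProactorEventLoop, the default loop on
# Windows since Python 3.8, so plain asyncio.run() is already IOCP-backed
# there:
#
#     import asyncio
#
#     async def main():
#         await asyncio.sleep(0)
#
#     asyncio.run(main())   # runs on a proactor (IOCP) loop on Windows 3.8+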
import io
import os
import socket
import warnings
import signal
import threading
import collections
from . import base_events
from . import constants
from . import futures
from . import exceptions
from . import protocols
from . import sslproto
from . import transports
from . import trsock
from .log import logger
def _set_socket_extra(transport, sock):
transport._extra['socket'] = trsock.TransportSocket(sock)
try:
transport._extra['sockname'] = sock.getsockname()
except socket.error:
if transport._loop.get_debug():
logger.warning(
"getsockname() failed on %r", sock, exc_info=True)
if 'peername' not in transport._extra:
try:
transport._extra['peername'] = sock.getpeername()
except socket.error:
# UDP sockets may not have a peer name
transport._extra['peername'] = None
class _ProactorBasePipeTransport(transports._FlowControlMixin,
transports.BaseTransport):
"""Base class for pipe and socket transports."""
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
super().__init__(extra, loop)
self._set_extra(sock)
self._sock = sock
self.set_protocol(protocol)
self._server = server
self._buffer = None # None or bytearray.
self._read_fut = None
self._write_fut = None
self._pending_write = 0
self._conn_lost = 0
self._closing = False # Set when close() called.
self._eof_written = False
if self._server is not None:
self._server._attach()
self._loop.call_soon(self._protocol.connection_made, self)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(futures._set_result_unless_cancelled,
waiter, None)
def __repr__(self):
info = [self.__class__.__name__]
if self._sock is None:
info.append('closed')
elif self._closing:
info.append('closing')
if self._sock is not None:
info.append(f'fd={self._sock.fileno()}')
if self._read_fut is not None:
info.append(f'read={self._read_fut!r}')
if self._write_fut is not None:
info.append(f'write={self._write_fut!r}')
if self._buffer:
info.append(f'write_bufsize={len(self._buffer)}')
if self._eof_written:
info.append('EOF written')
return '<{}>'.format(' '.join(info))
def _set_extra(self, sock):
self._extra['pipe'] = sock
def set_protocol(self, protocol):
self._protocol = protocol
def get_protocol(self):
return self._protocol
def is_closing(self):
return self._closing
def close(self):
if self._closing:
return
self._closing = True
self._conn_lost += 1
if not self._buffer and self._write_fut is None:
self._loop.call_soon(self._call_connection_lost, None)
if self._read_fut is not None:
self._read_fut.cancel()
self._read_fut = None
def __del__(self, _warn=warnings.warn):
if self._sock is not None:
_warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
self.close()
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
try:
if isinstance(exc, OSError):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
finally:
self._force_close(exc)
def _force_close(self, exc):
if self._empty_waiter is not None and not self._empty_waiter.done():
if exc is None:
self._empty_waiter.set_result(None)
else:
self._empty_waiter.set_exception(exc)
if self._closing:
return
self._closing = True
self._conn_lost += 1
if self._write_fut:
self._write_fut.cancel()
self._write_fut = None
if self._read_fut:
self._read_fut.cancel()
self._read_fut = None
self._pending_write = 0
self._buffer = None
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
finally:
# XXX If there is a pending overlapped read on the other
# end then it may fail with ERROR_NETNAME_DELETED if we
# just close our end. First calling shutdown() seems to
# cure it, but maybe using DisconnectEx() would be better.
if hasattr(self._sock, 'shutdown'):
self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
self._sock = None
server = self._server
if server is not None:
server._detach()
self._server = None
def get_write_buffer_size(self):
size = self._pending_write
if self._buffer is not None:
size += len(self._buffer)
return size
class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
transports.ReadTransport):
"""Transport for read pipes."""
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
self._pending_data = None
self._paused = True
super().__init__(loop, sock, protocol, waiter, extra, server)
self._loop.call_soon(self._loop_reading)
self._paused = False
def is_reading(self):
return not self._paused and not self._closing
def pause_reading(self):
if self._closing or self._paused:
return
self._paused = True
        # bpo-33694: Don't cancel self._read_fut, because cancelling an
        # overlapped WSASend() silently loses data with the current
        # proactor implementation.
        #
        # If CancelIoEx() fails with ERROR_NOT_FOUND, it means that WSASend()
        # completed (even if HasOverlappedIoCompleted() returns 0), but
        # Overlapped.cancel() currently silently ignores the ERROR_NOT_FOUND
        # error. Once the overlapped operation is ignored, the IOCP loop
        # ignores the completion I/O event and so never reads the result of
        # the overlapped WSARecv().
if self._loop.get_debug():
logger.debug("%r pauses reading", self)
def resume_reading(self):
if self._closing or not self._paused:
return
self._paused = False
if self._read_fut is None:
self._loop.call_soon(self._loop_reading, None)
data = self._pending_data
self._pending_data = None
if data is not None:
            # Call the protocol method after calling _loop_reading(),
            # since the protocol can decide to pause reading again.
self._loop.call_soon(self._data_received, data)
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
def _eof_received(self):
if self._loop.get_debug():
logger.debug("%r received EOF", self)
try:
keep_open = self._protocol.eof_received()
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
self._fatal_error(
exc, 'Fatal error: protocol.eof_received() call failed.')
return
if not keep_open:
self.close()
def _data_received(self, data):
if self._paused:
# Don't call any protocol method while reading is paused.
# The protocol will be called on resume_reading().
assert self._pending_data is None
self._pending_data = data
return
if not data:
self._eof_received()
return
if isinstance(self._protocol, protocols.BufferedProtocol):
try:
protocols._feed_data_to_buffered_proto(self._protocol, data)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
self._fatal_error(exc,
'Fatal error: protocol.buffer_updated() '
'call failed.')
return
else:
self._protocol.data_received(data)
def _loop_reading(self, fut=None):
data = None
try:
if fut is not None:
assert self._read_fut is fut or (self._read_fut is None and
self._closing)
self._read_fut = None
if fut.done():
# deliver data later in "finally" clause
data = fut.result()
else:
                    # the future will be replaced by the next
                    # proactor.recv() call
fut.cancel()
if self._closing:
# since close() has been called we ignore any read data
data = None
return
if data == b'':
# we got end-of-file so no need to reschedule a new read
return
            # bpo-33694: buffer_updated() currently has no fast path because
            # of a data loss issue caused by overlapped WSASend() cancellation.
if not self._paused:
# reschedule a new read
self._read_fut = self._loop._proactor.recv(self._sock, 32768)
except ConnectionAbortedError as exc:
if not self._closing:
self._fatal_error(exc, 'Fatal read error on pipe transport')
elif self._loop.get_debug():
logger.debug("Read error on pipe transport while closing",
exc_info=True)
except ConnectionResetError as exc:
self._force_close(exc)
except OSError as exc:
self._fatal_error(exc, 'Fatal read error on pipe transport')
except exceptions.CancelledError:
if not self._closing:
raise
else:
if not self._paused:
self._read_fut.add_done_callback(self._loop_reading)
finally:
if data is not None:
self._data_received(data)
class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
transports.WriteTransport):
"""Transport for write pipes."""
_start_tls_compatible = True
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self._empty_waiter = None
def write(self, data):
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError(
f"data argument must be a bytes-like object, "
f"not {type(data).__name__}")
if self._eof_written:
raise RuntimeError('write_eof() already called')
if self._empty_waiter is not None:
raise RuntimeError('unable to write; sendfile is in progress')
if not data:
return
if self._conn_lost:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.send() raised exception.')
self._conn_lost += 1
return
# Observable states:
# 1. IDLE: _write_fut and _buffer both None
# 2. WRITING: _write_fut set; _buffer None
# 3. BACKED UP: _write_fut set; _buffer a bytearray
# We always copy the data, so the caller can't modify it
# while we're still waiting for the I/O to happen.
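        # Worked example (illustrative): write(b'a') while IDLE starts an
        # overlapped send (WRITING); a second write(b'b') arriving before the
        # send completes creates the bytearray buffer (BACKED UP); the
        # completion callback then flushes the buffer, returning to WRITING
        # and eventually IDLE.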
if self._write_fut is None: # IDLE -> WRITING
assert self._buffer is None
# Pass a copy, except if it's already immutable.
self._loop_writing(data=bytes(data))
elif not self._buffer: # WRITING -> BACKED UP
# Make a mutable copy which we can extend.
self._buffer = bytearray(data)
self._maybe_pause_protocol()
else: # BACKED UP
# Append to buffer (also copies).
self._buffer.extend(data)
self._maybe_pause_protocol()
def _loop_writing(self, f=None, data=None):
try:
if f is not None and self._write_fut is None and self._closing:
# XXX most likely self._force_close() has been called, and
# it has set self._write_fut to None.
return
assert f is self._write_fut
self._write_fut = None
self._pending_write = 0
if f:
f.result()
if data is None:
data = self._buffer
self._buffer = None
if not data:
if self._closing:
self._loop.call_soon(self._call_connection_lost, None)
if self._eof_written:
self._sock.shutdown(socket.SHUT_WR)
# Now that we've reduced the buffer size, tell the
# protocol to resume writing if it was paused. Note that
# we do this last since the callback is called immediately
# and it may add more data to the buffer (even causing the
# protocol to be paused again).
self._maybe_resume_protocol()
else:
self._write_fut = self._loop._proactor.send(self._sock, data)
if not self._write_fut.done():
assert self._pending_write == 0
self._pending_write = len(data)
self._write_fut.add_done_callback(self._loop_writing)
self._maybe_pause_protocol()
else:
self._write_fut.add_done_callback(self._loop_writing)
if self._empty_waiter is not None and self._write_fut is None:
self._empty_waiter.set_result(None)
except ConnectionResetError as exc:
self._force_close(exc)
except OSError as exc:
self._fatal_error(exc, 'Fatal write error on pipe transport')
def can_write_eof(self):
return True
def write_eof(self):
self.close()
def abort(self):
self._force_close(None)
def _make_empty_waiter(self):
if self._empty_waiter is not None:
raise RuntimeError("Empty waiter is already set")
self._empty_waiter = self._loop.create_future()
if self._write_fut is None:
self._empty_waiter.set_result(None)
return self._empty_waiter
def _reset_empty_waiter(self):
self._empty_waiter = None
class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport):
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self._read_fut = self._loop._proactor.recv(self._sock, 16)
self._read_fut.add_done_callback(self._pipe_closed)
def _pipe_closed(self, fut):
if fut.cancelled():
# the transport has been closed
return
assert fut.result() == b''
if self._closing:
assert self._read_fut is None
return
assert fut is self._read_fut, (fut, self._read_fut)
self._read_fut = None
if self._write_fut is not None:
self._force_close(BrokenPipeError())
else:
self.close()
class _ProactorDatagramTransport(_ProactorBasePipeTransport):
max_size = 256 * 1024
def __init__(self, loop, sock, protocol, address=None,
waiter=None, extra=None):
self._address = address
self._empty_waiter = None
# We don't need to call _protocol.connection_made() since our base
# constructor does it for us.
super().__init__(loop, sock, protocol, waiter=waiter, extra=extra)
# The base constructor sets _buffer = None, so we set it here
self._buffer = collections.deque()
self._loop.call_soon(self._loop_reading)
def _set_extra(self, sock):
_set_socket_extra(self, sock)
def get_write_buffer_size(self):
return sum(len(data) for data, _ in self._buffer)
def abort(self):
self._force_close(None)
def sendto(self, data, addr=None):
if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError(
                f'data argument must be a bytes-like object, '
                f'not {type(data).__name__!r}')
if not data:
return
if self._address is not None and addr not in (None, self._address):
raise ValueError(
f'Invalid address: must be None or {self._address}')
if self._conn_lost and self._address:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.sendto() raised exception.')
self._conn_lost += 1
return
# Ensure that what we buffer is immutable.
self._buffer.append((bytes(data), addr))
if self._write_fut is None:
# No current write operations are active, kick one off
self._loop_writing()
# else: A write operation is already kicked off
self._maybe_pause_protocol()
def _loop_writing(self, fut=None):
try:
if self._conn_lost:
return
assert fut is self._write_fut
self._write_fut = None
if fut:
# We are in a _loop_writing() done callback, get the result
fut.result()
if not self._buffer or (self._conn_lost and self._address):
# The connection has been closed
if self._closing:
self._loop.call_soon(self._call_connection_lost, None)
return
data, addr = self._buffer.popleft()
if self._address is not None:
self._write_fut = self._loop._proactor.send(self._sock,
data)
else:
self._write_fut = self._loop._proactor.sendto(self._sock,
data,
addr=addr)
except OSError as exc:
self._protocol.error_received(exc)
except Exception as exc:
self._fatal_error(exc, 'Fatal write error on datagram transport')
else:
self._write_fut.add_done_callback(self._loop_writing)
self._maybe_resume_protocol()
def _loop_reading(self, fut=None):
data = None
try:
if self._conn_lost:
return
assert self._read_fut is fut or (self._read_fut is None and
self._closing)
self._read_fut = None
if fut is not None:
res = fut.result()
if self._closing:
# since close() has been called we ignore any read data
data = None
return
if self._address is not None:
data, addr = res, self._address
else:
data, addr = res
if self._conn_lost:
return
if self._address is not None:
self._read_fut = self._loop._proactor.recv(self._sock,
self.max_size)
else:
self._read_fut = self._loop._proactor.recvfrom(self._sock,
self.max_size)
except OSError as exc:
self._protocol.error_received(exc)
except exceptions.CancelledError:
if not self._closing:
raise
else:
if self._read_fut is not None:
self._read_fut.add_done_callback(self._loop_reading)
finally:
if data:
self._protocol.datagram_received(data, addr)
class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport,
_ProactorBaseWritePipeTransport,
transports.Transport):
"""Transport for duplex pipes."""
def can_write_eof(self):
return False
def write_eof(self):
raise NotImplementedError
class _ProactorSocketTransport(_ProactorReadPipeTransport,
_ProactorBaseWritePipeTransport,
transports.Transport):
"""Transport for connected sockets."""
_sendfile_compatible = constants._SendfileMode.TRY_NATIVE
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
super().__init__(loop, sock, protocol, waiter, extra, server)
base_events._set_nodelay(sock)
def _set_extra(self, sock):
_set_socket_extra(self, sock)
def can_write_eof(self):
return True
def write_eof(self):
if self._closing or self._eof_written:
return
self._eof_written = True
if self._write_fut is None:
self._sock.shutdown(socket.SHUT_WR)
class BaseProactorEventLoop(base_events.BaseEventLoop):
def __init__(self, proactor):
super().__init__()
logger.debug('Using proactor: %s', proactor.__class__.__name__)
self._proactor = proactor
self._selector = proactor # convenient alias
self._self_reading_future = None
self._accept_futures = {} # socket file descriptor => Future
proactor.set_loop(self)
self._make_self_pipe()
if threading.current_thread() is threading.main_thread():
            # the wakeup fd can only be installed from the main thread
signal.set_wakeup_fd(self._csock.fileno())
def _make_socket_transport(self, sock, protocol, waiter=None,
extra=None, server=None):
return _ProactorSocketTransport(self, sock, protocol, waiter,
extra, server)
def _make_ssl_transport(
self, rawsock, protocol, sslcontext, waiter=None,
*, server_side=False, server_hostname=None,
extra=None, server=None,
ssl_handshake_timeout=None):
ssl_protocol = sslproto.SSLProtocol(
self, protocol, sslcontext, waiter,
server_side, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout)
_ProactorSocketTransport(self, rawsock, ssl_protocol,
extra=extra, server=server)
return ssl_protocol._app_transport
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
return _ProactorDatagramTransport(self, sock, protocol, address,
waiter, extra)
def _make_duplex_pipe_transport(self, sock, protocol, waiter=None,
extra=None):
return _ProactorDuplexPipeTransport(self,
sock, protocol, waiter, extra)
def _make_read_pipe_transport(self, sock, protocol, waiter=None,
extra=None):
return _ProactorReadPipeTransport(self, sock, protocol, waiter, extra)
def _make_write_pipe_transport(self, sock, protocol, waiter=None,
extra=None):
# We want connection_lost() to be called when other end closes
return _ProactorWritePipeTransport(self,
sock, protocol, waiter, extra)
def close(self):
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self.is_closed():
return
if threading.current_thread() is threading.main_thread():
signal.set_wakeup_fd(-1)
# Call these methods before closing the event loop (before calling
# BaseEventLoop.close), because they can schedule callbacks with
# call_soon(), which is forbidden when the event loop is closed.
self._stop_accept_futures()
self._close_self_pipe()
self._proactor.close()
self._proactor = None
self._selector = None
# Close the event loop
super().close()
async def sock_recv(self, sock, n):
return await self._proactor.recv(sock, n)
async def sock_recv_into(self, sock, buf):
return await self._proactor.recv_into(sock, buf)
async def sock_sendall(self, sock, data):
return await self._proactor.send(sock, data)
async def sock_connect(self, sock, address):
return await self._proactor.connect(sock, address)
async def sock_accept(self, sock):
return await self._proactor.accept(sock)
async def _sock_sendfile_native(self, sock, file, offset, count):
try:
fileno = file.fileno()
except (AttributeError, io.UnsupportedOperation) as err:
raise exceptions.SendfileNotAvailableError("not a regular file")
try:
fsize = os.fstat(fileno).st_size
except OSError as err:
raise exceptions.SendfileNotAvailableError("not a regular file")
blocksize = count if count else fsize
if not blocksize:
return 0 # empty file
blocksize = min(blocksize, 0xffff_ffff)
end_pos = min(offset + count, fsize) if count else fsize
offset = min(offset, fsize)
total_sent = 0
try:
while True:
blocksize = min(end_pos - offset, blocksize)
if blocksize <= 0:
return total_sent
await self._proactor.sendfile(sock, file, offset, blocksize)
offset += blocksize
total_sent += blocksize
finally:
if total_sent > 0:
file.seek(offset)
async def _sendfile_native(self, transp, file, offset, count):
resume_reading = transp.is_reading()
transp.pause_reading()
await transp._make_empty_waiter()
try:
return await self.sock_sendfile(transp._sock, file, offset, count,
fallback=False)
finally:
transp._reset_empty_waiter()
if resume_reading:
transp.resume_reading()
def _close_self_pipe(self):
if self._self_reading_future is not None:
self._self_reading_future.cancel()
self._self_reading_future = None
self._ssock.close()
self._ssock = None
self._csock.close()
self._csock = None
self._internal_fds -= 1
def _make_self_pipe(self):
# A self-socket, really. :-)
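        # This is the classic self-pipe trick: _write_to_self() sends a byte
        # on _csock (possibly from another thread) to wake up the proactor,
        # while _loop_self_reading() keeps a recv() pending on _ssock to
        # consume those wakeup bytes.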
self._ssock, self._csock = socket.socketpair()
self._ssock.setblocking(False)
self._csock.setblocking(False)
self._internal_fds += 1
def _loop_self_reading(self, f=None):
try:
if f is not None:
f.result() # may raise
if self._self_reading_future is not f:
# When we scheduled this Future, we assigned it to
# _self_reading_future. If it's not there now, something has
# tried to cancel the loop while this callback was still in the
# queue (see windows_events.ProactorEventLoop.run_forever). In
# that case stop here instead of continuing to schedule a new
# iteration.
return
f = self._proactor.recv(self._ssock, 4096)
except exceptions.CancelledError:
# _close_self_pipe() has been called, stop waiting for data
return
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
self.call_exception_handler({
'message': 'Error on reading from the event loop self pipe',
'exception': exc,
'loop': self,
})
else:
self._self_reading_future = f
f.add_done_callback(self._loop_self_reading)
def _write_to_self(self):
# This may be called from a different thread, possibly after
# _close_self_pipe() has been called or even while it is
# running. Guard for self._csock being None or closed. When
# a socket is closed, send() raises OSError (with errno set to
# EBADF, but let's not rely on the exact error code).
csock = self._csock
if csock is None:
return
try:
csock.send(b'\0')
except OSError:
if self._debug:
logger.debug("Fail to write a null byte into the "
"self-pipe socket",
exc_info=True)
def _start_serving(self, protocol_factory, sock,
sslcontext=None, server=None, backlog=100,
ssl_handshake_timeout=None):
def loop(f=None):
try:
if f is not None:
conn, addr = f.result()
if self._debug:
logger.debug("%r got a new connection from %r: %r",
server, addr, conn)
protocol = protocol_factory()
if sslcontext is not None:
self._make_ssl_transport(
conn, protocol, sslcontext, server_side=True,
extra={'peername': addr}, server=server,
ssl_handshake_timeout=ssl_handshake_timeout)
else:
self._make_socket_transport(
conn, protocol,
extra={'peername': addr}, server=server)
if self.is_closed():
return
f = self._proactor.accept(sock)
except OSError as exc:
if sock.fileno() != -1:
self.call_exception_handler({
'message': 'Accept failed on a socket',
'exception': exc,
'socket': trsock.TransportSocket(sock),
})
sock.close()
elif self._debug:
logger.debug("Accept failed on socket %r",
sock, exc_info=True)
except exceptions.CancelledError:
sock.close()
else:
self._accept_futures[sock.fileno()] = f
f.add_done_callback(loop)
self.call_soon(loop)
def _process_events(self, event_list):
# Events are processed in the IocpProactor._poll() method
pass
def _stop_accept_futures(self):
for future in self._accept_futures.values():
future.cancel()
self._accept_futures.clear()
def _stop_serving(self, sock):
future = self._accept_futures.pop(sock.fileno(), None)
if future:
future.cancel()
self._proactor._stop_serving(sock)
sock.close()
__pycache__/compat.cpython-36.opt-1.pyc
# [Compiled CPython 3.6 bytecode for asyncio/compat.py; the binary payload is
# unreadable here. The source recoverable from the embedded strings is
# reconstructed below as a best effort, not a byte-exact original.]
"""Compatibility helpers for the different Python versions."""

import sys

PY34 = sys.version_info >= (3, 4)
PY35 = sys.version_info >= (3, 5)
PY352 = sys.version_info >= (3, 5, 2)


def flatten_list_bytes(list_of_data):
    """Concatenate a sequence of bytes-like objects."""
    if not PY34:
        # On Python 3.3, bytes.join() doesn't handle memoryview.
        list_of_data = (
            bytes(data) if isinstance(data, memoryview) else data
            for data in list_of_data)
    return b''.join(list_of_data)
__pycache__/base_events.cpython-36.pyc
# [Compiled CPython 3.6 bytecode for asyncio/base_events.py; the binary
# payload is omitted. Recoverable from the embedded strings: the module
# docstring below, plus small helpers such as _format_handle(),
# _format_pipe() and _set_reuseport(), all of which duplicate the
# base_events.py source.]
"""Base implementation of event loop.

The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.

Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called.  This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""