Update prebuilt Clang to r416183b from Android.

https://android.googlesource.com/platform/prebuilts/clang/host/linux-x86/+/06a71ddac05c22edb2d10b590e1769b3f8619bef

clang 12.0.5 (based on r416183b) from build 7284624.

Change-Id: I277a316abcf47307562d8b748b84870f31a72866
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/__init__.py b/linux-x64/clang/python3/lib/python3.9/asyncio/__init__.py
new file mode 100644
index 0000000..eb84bfb
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/__init__.py
@@ -0,0 +1,47 @@
+"""The asyncio package, tracking PEP 3156."""
+
+# flake8: noqa
+
+import sys
+
+# This relies on each of the submodules having an __all__ variable.
+from .base_events import *
+from .coroutines import *
+from .events import *
+from .exceptions import *
+from .futures import *
+from .locks import *
+from .protocols import *
+from .runners import *
+from .queues import *
+from .streams import *
+from .subprocess import *
+from .tasks import *
+from .threads import *
+from .transports import *
+
+# Exposed for _asynciomodule.c to implement the now-deprecated
+# Task.all_tasks() method.  This function is slated for removal.
+from .tasks import _all_tasks_compat  # NoQA
+
+__all__ = (base_events.__all__ +
+           coroutines.__all__ +
+           events.__all__ +
+           exceptions.__all__ +
+           futures.__all__ +
+           locks.__all__ +
+           protocols.__all__ +
+           runners.__all__ +
+           queues.__all__ +
+           streams.__all__ +
+           subprocess.__all__ +
+           tasks.__all__ +
+           threads.__all__ +
+           transports.__all__)
+
+if sys.platform == 'win32':  # pragma: no cover
+    from .windows_events import *
+    __all__ += windows_events.__all__
+else:
+    from .unix_events import *  # pragma: no cover
+    __all__ += unix_events.__all__
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/__main__.py b/linux-x64/clang/python3/lib/python3.9/asyncio/__main__.py
new file mode 100644
index 0000000..18bb87a
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/__main__.py
@@ -0,0 +1,125 @@
+import ast
+import asyncio
+import code
+import concurrent.futures
+import inspect
+import sys
+import threading
+import types
+import warnings
+
+from . import futures
+
+
+class AsyncIOInteractiveConsole(code.InteractiveConsole):
+
+    def __init__(self, locals, loop):
+        super().__init__(locals)
+        self.compile.compiler.flags |= ast.PyCF_ALLOW_TOP_LEVEL_AWAIT
+
+        self.loop = loop
+
+    def runcode(self, code):
+        future = concurrent.futures.Future()
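+        # The REPL runs in a separate thread: hand the compiled code to the
+        # event-loop thread via loop.call_soon_threadsafe() below, then block
+        # here on a concurrent.futures.Future until the result is available.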
+
+        def callback():
+            global repl_future
+            global repl_future_interrupted
+
+            repl_future = None
+            repl_future_interrupted = False
+
+            func = types.FunctionType(code, self.locals)
+            try:
+                coro = func()
+            except SystemExit:
+                raise
+            except KeyboardInterrupt as ex:
+                repl_future_interrupted = True
+                future.set_exception(ex)
+                return
+            except BaseException as ex:
+                future.set_exception(ex)
+                return
+
+            if not inspect.iscoroutine(coro):
+                future.set_result(coro)
+                return
+
+            try:
+                repl_future = self.loop.create_task(coro)
+                futures._chain_future(repl_future, future)
+            except BaseException as exc:
+                future.set_exception(exc)
+
+        loop.call_soon_threadsafe(callback)
+
+        try:
+            return future.result()
+        except SystemExit:
+            raise
+        except BaseException:
+            if repl_future_interrupted:
+                self.write("\nKeyboardInterrupt\n")
+            else:
+                self.showtraceback()
+
+
+class REPLThread(threading.Thread):
+
+    def run(self):
+        try:
+            banner = (
+                f'asyncio REPL {sys.version} on {sys.platform}\n'
+                f'Use "await" directly instead of "asyncio.run()".\n'
+                f'Type "help", "copyright", "credits" or "license" '
+                f'for more information.\n'
+                f'{getattr(sys, "ps1", ">>> ")}import asyncio'
+            )
+
+            console.interact(
+                banner=banner,
+                exitmsg='exiting asyncio REPL...')
+        finally:
+            warnings.filterwarnings(
+                'ignore',
+                message=r'^coroutine .* was never awaited$',
+                category=RuntimeWarning)
+
+            loop.call_soon_threadsafe(loop.stop)
+
+
+if __name__ == '__main__':
+    loop = asyncio.new_event_loop()
+    asyncio.set_event_loop(loop)
+
+    repl_locals = {'asyncio': asyncio}
+    for key in {'__name__', '__package__',
+                '__loader__', '__spec__',
+                '__builtins__', '__file__'}:
+        repl_locals[key] = locals()[key]
+
+    console = AsyncIOInteractiveConsole(repl_locals, loop)
+
+    repl_future = None
+    repl_future_interrupted = False
+
+    try:
+        import readline  # NoQA
+    except ImportError:
+        pass
+
+    repl_thread = REPLThread()
+    repl_thread.daemon = True
+    repl_thread.start()
+
+    while True:
+        try:
+            loop.run_forever()
+        except KeyboardInterrupt:
+            if repl_future and not repl_future.done():
+                repl_future.cancel()
+                repl_future_interrupted = True
+            continue
+        else:
+            break
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/base_events.py b/linux-x64/clang/python3/lib/python3.9/asyncio/base_events.py
new file mode 100644
index 0000000..b2d446a
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/base_events.py
@@ -0,0 +1,1915 @@
+"""Base implementation of event loop.
+
+The event loop can be broken up into a multiplexer (the part
+responsible for notifying us of I/O events) and the event loop proper,
+which wraps a multiplexer with functionality for scheduling callbacks,
+immediately or at a given time in the future.
+
+Whenever a public API takes a callback, subsequent positional
+arguments will be passed to the callback if/when it is called.  This
+avoids the proliferation of trivial lambdas implementing closures.
+Keyword arguments for the callback are not supported; this is a
+conscious design decision, leaving the door open for keyword arguments
+to modify the meaning of the API call itself.
+"""
+
+import collections
+import collections.abc
+import concurrent.futures
+import functools
+import heapq
+import itertools
+import os
+import socket
+import stat
+import subprocess
+import threading
+import time
+import traceback
+import sys
+import warnings
+import weakref
+
+try:
+    import ssl
+except ImportError:  # pragma: no cover
+    ssl = None
+
+from . import constants
+from . import coroutines
+from . import events
+from . import exceptions
+from . import futures
+from . import protocols
+from . import sslproto
+from . import staggered
+from . import tasks
+from . import transports
+from . import trsock
+from .log import logger
+
+
+__all__ = 'BaseEventLoop',
+
+
+# Minimum number of _scheduled timer handles before cleanup of
+# cancelled handles is performed.
+_MIN_SCHEDULED_TIMER_HANDLES = 100
+
+# Minimum fraction of _scheduled timer handles that are cancelled
+# before cleanup of cancelled handles is performed.
+_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
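+# For example, with 200 scheduled timer handles of which 120 are cancelled,
+# both thresholds are met (200 >= 100 and 120/200 = 0.6 >= 0.5), so the
+# cancelled handles are swept out of the heap in one pass.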
+
+
+_HAS_IPv6 = hasattr(socket, 'AF_INET6')
+
+# Maximum timeout passed to select to avoid OS limitations
+MAXIMUM_SELECT_TIMEOUT = 24 * 3600
+
+# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s
+# *reuse_address* parameter
+_unset = object()
+
+
+def _format_handle(handle):
+    cb = handle._callback
+    if isinstance(getattr(cb, '__self__', None), tasks.Task):
+        # format the task
+        return repr(cb.__self__)
+    else:
+        return str(handle)
+
+
+def _format_pipe(fd):
+    if fd == subprocess.PIPE:
+        return '<pipe>'
+    elif fd == subprocess.STDOUT:
+        return '<stdout>'
+    else:
+        return repr(fd)
+
+
+def _set_reuseport(sock):
+    if not hasattr(socket, 'SO_REUSEPORT'):
+        raise ValueError('reuse_port not supported by socket module')
+    else:
+        try:
+            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+        except OSError:
+            raise ValueError('reuse_port not supported by socket module, '
+                             'SO_REUSEPORT defined but not implemented.')
+
+
+def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
+    # Try to skip getaddrinfo if "host" is already an IP. Users might have
+    # handled name resolution in their own code and pass in resolved IPs.
+    if not hasattr(socket, 'inet_pton'):
+        return
+
+    if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
+            host is None:
+        return None
+
+    if type == socket.SOCK_STREAM:
+        proto = socket.IPPROTO_TCP
+    elif type == socket.SOCK_DGRAM:
+        proto = socket.IPPROTO_UDP
+    else:
+        return None
+
+    if port is None:
+        port = 0
+    elif isinstance(port, bytes) and port == b'':
+        port = 0
+    elif isinstance(port, str) and port == '':
+        port = 0
+    else:
+        # If port's a service name like "http", don't skip getaddrinfo.
+        try:
+            port = int(port)
+        except (TypeError, ValueError):
+            return None
+
+    if family == socket.AF_UNSPEC:
+        afs = [socket.AF_INET]
+        if _HAS_IPv6:
+            afs.append(socket.AF_INET6)
+    else:
+        afs = [family]
+
+    if isinstance(host, bytes):
+        host = host.decode('idna')
+    if '%' in host:
+        # Linux's inet_pton doesn't accept an IPv6 zone index after host,
+        # like '::1%lo0'.
+        return None
+
+    for af in afs:
+        try:
+            socket.inet_pton(af, host)
+            # The host has already been resolved.
+            if _HAS_IPv6 and af == socket.AF_INET6:
+                return af, type, proto, '', (host, port, flowinfo, scopeid)
+            else:
+                return af, type, proto, '', (host, port)
+        except OSError:
+            pass
+
+    # "host" is not an IP address.
+    return None
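+# A quick illustration of the fast path above (results follow directly from
+# the code; shown for reference only):
+#
+#     _ipaddr_info('127.0.0.1', 80, socket.AF_UNSPEC, socket.SOCK_STREAM, 0)
+#     # -> (AF_INET, SOCK_STREAM, IPPROTO_TCP, '', ('127.0.0.1', 80))
+#     _ipaddr_info('example.com', 80, socket.AF_UNSPEC, socket.SOCK_STREAM, 0)
+#     # -> None, so the caller falls back to getaddrinfo()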
+
+
+def _interleave_addrinfos(addrinfos, first_address_family_count=1):
+    """Interleave list of addrinfo tuples by family."""
+    # Group addresses by family
+    addrinfos_by_family = collections.OrderedDict()
+    for addr in addrinfos:
+        family = addr[0]
+        if family not in addrinfos_by_family:
+            addrinfos_by_family[family] = []
+        addrinfos_by_family[family].append(addr)
+    addrinfos_lists = list(addrinfos_by_family.values())
+
+    reordered = []
+    if first_address_family_count > 1:
+        reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])
+        del addrinfos_lists[0][:first_address_family_count - 1]
+    reordered.extend(
+        a for a in itertools.chain.from_iterable(
+            itertools.zip_longest(*addrinfos_lists)
+        ) if a is not None)
+    return reordered
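+# For example, interleaving [v4_a, v4_b, v6_a] (two AF_INET entries followed
+# by one AF_INET6 entry) yields [v4_a, v6_a, v4_b]: one address per family in
+# turn, which is the ordering Happy Eyeballs connection attempts expect.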
+
+
+def _run_until_complete_cb(fut):
+    if not fut.cancelled():
+        exc = fut.exception()
+        if isinstance(exc, (SystemExit, KeyboardInterrupt)):
+            # Issue #22429: run_forever() already finished, no need to
+            # stop it.
+            return
+    futures._get_loop(fut).stop()
+
+
+if hasattr(socket, 'TCP_NODELAY'):
+    def _set_nodelay(sock):
+        if (sock.family in {socket.AF_INET, socket.AF_INET6} and
+                sock.type == socket.SOCK_STREAM and
+                sock.proto == socket.IPPROTO_TCP):
+            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+else:
+    def _set_nodelay(sock):
+        pass
+
+
+class _SendfileFallbackProtocol(protocols.Protocol):
+    def __init__(self, transp):
+        if not isinstance(transp, transports._FlowControlMixin):
+            raise TypeError("transport should be _FlowControlMixin instance")
+        self._transport = transp
+        self._proto = transp.get_protocol()
+        self._should_resume_reading = transp.is_reading()
+        self._should_resume_writing = transp._protocol_paused
+        transp.pause_reading()
+        transp.set_protocol(self)
+        if self._should_resume_writing:
+            self._write_ready_fut = self._transport._loop.create_future()
+        else:
+            self._write_ready_fut = None
+
+    async def drain(self):
+        if self._transport.is_closing():
+            raise ConnectionError("Connection closed by peer")
+        fut = self._write_ready_fut
+        if fut is None:
+            return
+        await fut
+
+    def connection_made(self, transport):
+        raise RuntimeError("Invalid state: "
+                           "connection should have been established already.")
+
+    def connection_lost(self, exc):
+        if self._write_ready_fut is not None:
+            # This never happens if the peer disconnects after the whole
+            # content has been sent; thus, from the user's perspective,
+            # disconnection here is always an exception.
+            if exc is None:
+                self._write_ready_fut.set_exception(
+                    ConnectionError("Connection is closed by peer"))
+            else:
+                self._write_ready_fut.set_exception(exc)
+        self._proto.connection_lost(exc)
+
+    def pause_writing(self):
+        if self._write_ready_fut is not None:
+            return
+        self._write_ready_fut = self._transport._loop.create_future()
+
+    def resume_writing(self):
+        if self._write_ready_fut is None:
+            return
+        self._write_ready_fut.set_result(False)
+        self._write_ready_fut = None
+
+    def data_received(self, data):
+        raise RuntimeError("Invalid state: reading should be paused")
+
+    def eof_received(self):
+        raise RuntimeError("Invalid state: reading should be paused")
+
+    async def restore(self):
+        self._transport.set_protocol(self._proto)
+        if self._should_resume_reading:
+            self._transport.resume_reading()
+        if self._write_ready_fut is not None:
+            # Cancel the future.
+            # Basically it has no effect because protocol is switched back,
+            # no code should wait for it anymore.
+            self._write_ready_fut.cancel()
+        if self._should_resume_writing:
+            self._proto.resume_writing()
+
+
+class Server(events.AbstractServer):
+
+    def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
+                 ssl_handshake_timeout):
+        self._loop = loop
+        self._sockets = sockets
+        self._active_count = 0
+        self._waiters = []
+        self._protocol_factory = protocol_factory
+        self._backlog = backlog
+        self._ssl_context = ssl_context
+        self._ssl_handshake_timeout = ssl_handshake_timeout
+        self._serving = False
+        self._serving_forever_fut = None
+
+    def __repr__(self):
+        return f'<{self.__class__.__name__} sockets={self.sockets!r}>'
+
+    def _attach(self):
+        assert self._sockets is not None
+        self._active_count += 1
+
+    def _detach(self):
+        assert self._active_count > 0
+        self._active_count -= 1
+        if self._active_count == 0 and self._sockets is None:
+            self._wakeup()
+
+    def _wakeup(self):
+        waiters = self._waiters
+        self._waiters = None
+        for waiter in waiters:
+            if not waiter.done():
+                waiter.set_result(waiter)
+
+    def _start_serving(self):
+        if self._serving:
+            return
+        self._serving = True
+        for sock in self._sockets:
+            sock.listen(self._backlog)
+            self._loop._start_serving(
+                self._protocol_factory, sock, self._ssl_context,
+                self, self._backlog, self._ssl_handshake_timeout)
+
+    def get_loop(self):
+        return self._loop
+
+    def is_serving(self):
+        return self._serving
+
+    @property
+    def sockets(self):
+        if self._sockets is None:
+            return ()
+        return tuple(trsock.TransportSocket(s) for s in self._sockets)
+
+    def close(self):
+        sockets = self._sockets
+        if sockets is None:
+            return
+        self._sockets = None
+
+        for sock in sockets:
+            self._loop._stop_serving(sock)
+
+        self._serving = False
+
+        if (self._serving_forever_fut is not None and
+                not self._serving_forever_fut.done()):
+            self._serving_forever_fut.cancel()
+            self._serving_forever_fut = None
+
+        if self._active_count == 0:
+            self._wakeup()
+
+    async def start_serving(self):
+        self._start_serving()
+        # Skip one loop iteration so that all 'loop.add_reader'
+        # calls go through.
+        await tasks.sleep(0, loop=self._loop)
+
+    async def serve_forever(self):
+        if self._serving_forever_fut is not None:
+            raise RuntimeError(
+                f'server {self!r} is already being awaited on serve_forever()')
+        if self._sockets is None:
+            raise RuntimeError(f'server {self!r} is closed')
+
+        self._start_serving()
+        self._serving_forever_fut = self._loop.create_future()
+
+        try:
+            await self._serving_forever_fut
+        except exceptions.CancelledError:
+            try:
+                self.close()
+                await self.wait_closed()
+            finally:
+                raise
+        finally:
+            self._serving_forever_fut = None
+
+    async def wait_closed(self):
+        if self._sockets is None or self._waiters is None:
+            return
+        waiter = self._loop.create_future()
+        self._waiters.append(waiter)
+        await waiter
+
+
+class BaseEventLoop(events.AbstractEventLoop):
+
+    def __init__(self):
+        self._timer_cancelled_count = 0
+        self._closed = False
+        self._stopping = False
+        self._ready = collections.deque()
+        self._scheduled = []
+        self._default_executor = None
+        self._internal_fds = 0
+        # Identifier of the thread running the event loop, or None if the
+        # event loop is not running
+        self._thread_id = None
+        self._clock_resolution = time.get_clock_info('monotonic').resolution
+        self._exception_handler = None
+        self.set_debug(coroutines._is_debug_mode())
+        # In debug mode, if the execution of a callback or a step of a task
+        # exceeds this duration in seconds, the slow callback/task is logged.
+        self.slow_callback_duration = 0.1
+        self._current_handle = None
+        self._task_factory = None
+        self._coroutine_origin_tracking_enabled = False
+        self._coroutine_origin_tracking_saved_depth = None
+
+        # A weak set of all asynchronous generators that are
+        # being iterated by the loop.
+        self._asyncgens = weakref.WeakSet()
+        # Set to True when `loop.shutdown_asyncgens` is called.
+        self._asyncgens_shutdown_called = False
+        # Set to True when `loop.shutdown_default_executor` is called.
+        self._executor_shutdown_called = False
+
+    def __repr__(self):
+        return (
+            f'<{self.__class__.__name__} running={self.is_running()} '
+            f'closed={self.is_closed()} debug={self.get_debug()}>'
+        )
+
+    def create_future(self):
+        """Create a Future object attached to the loop."""
+        return futures.Future(loop=self)
+
+    def create_task(self, coro, *, name=None):
+        """Schedule a coroutine object.
+
+        Return a task object.
+        """
+        self._check_closed()
+        if self._task_factory is None:
+            task = tasks.Task(coro, loop=self, name=name)
+            if task._source_traceback:
+                del task._source_traceback[-1]
+        else:
+            task = self._task_factory(self, coro)
+            tasks._set_task_name(task, name)
+
+        return task
+
+    def set_task_factory(self, factory):
+        """Set a task factory that will be used by loop.create_task().
+
+        If factory is None the default task factory will be set.
+
+        If factory is a callable, it should have a signature matching
+        '(loop, coro)', where 'loop' will be a reference to the active
+        event loop, 'coro' will be a coroutine object.  The callable
+        must return a Future.
+        """
+        if factory is not None and not callable(factory):
+            raise TypeError('task factory must be a callable or None')
+        self._task_factory = factory
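+    # A minimal sketch of a conforming factory (hypothetical, shown only to
+    # illustrate the '(loop, coro)' signature documented above):
+    #
+    #     def naming_factory(loop, coro):
+    #         return tasks.Task(coro, loop=loop, name='my-task')
+    #
+    #     loop.set_task_factory(naming_factory)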
+
+    def get_task_factory(self):
+        """Return a task factory, or None if the default one is in use."""
+        return self._task_factory
+
+    def _make_socket_transport(self, sock, protocol, waiter=None, *,
+                               extra=None, server=None):
+        """Create socket transport."""
+        raise NotImplementedError
+
+    def _make_ssl_transport(
+            self, rawsock, protocol, sslcontext, waiter=None,
+            *, server_side=False, server_hostname=None,
+            extra=None, server=None,
+            ssl_handshake_timeout=None,
+            call_connection_made=True):
+        """Create SSL transport."""
+        raise NotImplementedError
+
+    def _make_datagram_transport(self, sock, protocol,
+                                 address=None, waiter=None, extra=None):
+        """Create datagram transport."""
+        raise NotImplementedError
+
+    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
+                                  extra=None):
+        """Create read pipe transport."""
+        raise NotImplementedError
+
+    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
+                                   extra=None):
+        """Create write pipe transport."""
+        raise NotImplementedError
+
+    async def _make_subprocess_transport(self, protocol, args, shell,
+                                         stdin, stdout, stderr, bufsize,
+                                         extra=None, **kwargs):
+        """Create subprocess transport."""
+        raise NotImplementedError
+
+    def _write_to_self(self):
+        """Write a byte to self-pipe, to wake up the event loop.
+
+        This may be called from a different thread.
+
+        The subclass is responsible for implementing the self-pipe.
+        """
+        raise NotImplementedError
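+    # (This is the "self-pipe trick": the selector blocks until a file
+    # descriptor becomes ready, so writing a single byte to a pipe the loop
+    # monitors wakes it up; call_soon_threadsafe() relies on it.)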
+
+    def _process_events(self, event_list):
+        """Process selector events."""
+        raise NotImplementedError
+
+    def _check_closed(self):
+        if self._closed:
+            raise RuntimeError('Event loop is closed')
+
+    def _check_default_executor(self):
+        if self._executor_shutdown_called:
+            raise RuntimeError('Executor shutdown has been called')
+
+    def _asyncgen_finalizer_hook(self, agen):
+        self._asyncgens.discard(agen)
+        if not self.is_closed():
+            self.call_soon_threadsafe(self.create_task, agen.aclose())
+
+    def _asyncgen_firstiter_hook(self, agen):
+        if self._asyncgens_shutdown_called:
+            warnings.warn(
+                f"asynchronous generator {agen!r} was scheduled after "
+                f"loop.shutdown_asyncgens() call",
+                ResourceWarning, source=self)
+
+        self._asyncgens.add(agen)
+
+    async def shutdown_asyncgens(self):
+        """Shutdown all active asynchronous generators."""
+        self._asyncgens_shutdown_called = True
+
+        if not len(self._asyncgens):
+            # Nothing to do: no asynchronous generators are alive.
+            return
+
+        closing_agens = list(self._asyncgens)
+        self._asyncgens.clear()
+
+        results = await tasks.gather(
+            *[ag.aclose() for ag in closing_agens],
+            return_exceptions=True,
+            loop=self)
+
+        for result, agen in zip(results, closing_agens):
+            if isinstance(result, Exception):
+                self.call_exception_handler({
+                    'message': f'an error occurred during closing of '
+                               f'asynchronous generator {agen!r}',
+                    'exception': result,
+                    'asyncgen': agen
+                })
+
+    async def shutdown_default_executor(self):
+        """Schedule the shutdown of the default executor."""
+        self._executor_shutdown_called = True
+        if self._default_executor is None:
+            return
+        future = self.create_future()
+        thread = threading.Thread(target=self._do_shutdown, args=(future,))
+        thread.start()
+        try:
+            await future
+        finally:
+            thread.join()
+
+    def _do_shutdown(self, future):
+        try:
+            self._default_executor.shutdown(wait=True)
+            self.call_soon_threadsafe(future.set_result, None)
+        except Exception as ex:
+            self.call_soon_threadsafe(future.set_exception, ex)
+
+    def _check_running(self):
+        if self.is_running():
+            raise RuntimeError('This event loop is already running')
+        if events._get_running_loop() is not None:
+            raise RuntimeError(
+                'Cannot run the event loop while another loop is running')
+
+    def run_forever(self):
+        """Run until stop() is called."""
+        self._check_closed()
+        self._check_running()
+        self._set_coroutine_origin_tracking(self._debug)
+        self._thread_id = threading.get_ident()
+
+        old_agen_hooks = sys.get_asyncgen_hooks()
+        sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
+                               finalizer=self._asyncgen_finalizer_hook)
+        try:
+            events._set_running_loop(self)
+            while True:
+                self._run_once()
+                if self._stopping:
+                    break
+        finally:
+            self._stopping = False
+            self._thread_id = None
+            events._set_running_loop(None)
+            self._set_coroutine_origin_tracking(False)
+            sys.set_asyncgen_hooks(*old_agen_hooks)
+
+    def run_until_complete(self, future):
+        """Run until the Future is done.
+
+        If the argument is a coroutine, it is wrapped in a Task.
+
+        WARNING: It would be disastrous to call run_until_complete()
+        with the same coroutine twice -- it would wrap it in two
+        different Tasks and that can't be good.
+
+        Return the Future's result, or raise its exception.
+        """
+        self._check_closed()
+        self._check_running()
+
+        new_task = not futures.isfuture(future)
+        future = tasks.ensure_future(future, loop=self)
+        if new_task:
+            # An exception is raised if the future didn't complete, so there
+            # is no need to log the "destroy pending task" message
+            future._log_destroy_pending = False
+
+        future.add_done_callback(_run_until_complete_cb)
+        try:
+            self.run_forever()
+        except:
+            if new_task and future.done() and not future.cancelled():
+                # The coroutine raised a BaseException. Consume the exception
+                # to not log a warning, the caller doesn't have access to the
+                # local task.
+                future.exception()
+            raise
+        finally:
+            future.remove_done_callback(_run_until_complete_cb)
+        if not future.done():
+            raise RuntimeError('Event loop stopped before Future completed.')
+
+        return future.result()
+
+    def stop(self):
+        """Stop running the event loop.
+
+        Every callback already scheduled will still run.  This simply informs
+        run_forever to stop looping after a complete iteration.
+        """
+        self._stopping = True
+
+    def close(self):
+        """Close the event loop.
+
+        This clears the queues and shuts down the executor,
+        but does not wait for the executor to finish.
+
+        The event loop must not be running.
+        """
+        if self.is_running():
+            raise RuntimeError("Cannot close a running event loop")
+        if self._closed:
+            return
+        if self._debug:
+            logger.debug("Close %r", self)
+        self._closed = True
+        self._ready.clear()
+        self._scheduled.clear()
+        self._executor_shutdown_called = True
+        executor = self._default_executor
+        if executor is not None:
+            self._default_executor = None
+            executor.shutdown(wait=False)
+
+    def is_closed(self):
+        """Returns True if the event loop was closed."""
+        return self._closed
+
+    def __del__(self, _warn=warnings.warn):
+        if not self.is_closed():
+            _warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
+            if not self.is_running():
+                self.close()
+
+    def is_running(self):
+        """Returns True if the event loop is running."""
+        return (self._thread_id is not None)
+
+    def time(self):
+        """Return the time according to the event loop's clock.
+
+        This is a float expressed in seconds since an epoch, but the
+        epoch, precision, accuracy and drift are unspecified and may
+        differ per event loop.
+        """
+        return time.monotonic()
+
+    def call_later(self, delay, callback, *args, context=None):
+        """Arrange for a callback to be called at a given time.
+
+        Return a Handle: an opaque object with a cancel() method that
+        can be used to cancel the call.
+
+        The delay can be an int or float, expressed in seconds.  It is
+        always relative to the current time.
+
+        Each callback will be called exactly once.  If two callbacks
+        are scheduled for exactly the same time, it is undefined which
+        will be called first.
+
+        Any positional arguments after the callback will be passed to
+        the callback when it is called.
+        """
+        timer = self.call_at(self.time() + delay, callback, *args,
+                             context=context)
+        if timer._source_traceback:
+            del timer._source_traceback[-1]
+        return timer
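+    # For example (hypothetical callback, for illustration only):
+    #
+    #     handle = loop.call_later(5.0, report, 'timed out')
+    #     handle.cancel()   # the returned handle can cancel the call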
+
+    def call_at(self, when, callback, *args, context=None):
+        """Like call_later(), but uses an absolute time.
+
+        Absolute time corresponds to the event loop's time() method.
+        """
+        self._check_closed()
+        if self._debug:
+            self._check_thread()
+            self._check_callback(callback, 'call_at')
+        timer = events.TimerHandle(when, callback, args, self, context)
+        if timer._source_traceback:
+            del timer._source_traceback[-1]
+        heapq.heappush(self._scheduled, timer)
+        timer._scheduled = True
+        return timer
+
+    def call_soon(self, callback, *args, context=None):
+        """Arrange for a callback to be called as soon as possible.
+
+        This operates as a FIFO queue: callbacks are called in the
+        order in which they are registered.  Each callback will be
+        called exactly once.
+
+        Any positional arguments after the callback will be passed to
+        the callback when it is called.
+        """
+        self._check_closed()
+        if self._debug:
+            self._check_thread()
+            self._check_callback(callback, 'call_soon')
+        handle = self._call_soon(callback, args, context)
+        if handle._source_traceback:
+            del handle._source_traceback[-1]
+        return handle
+
+    def _check_callback(self, callback, method):
+        if (coroutines.iscoroutine(callback) or
+                coroutines.iscoroutinefunction(callback)):
+            raise TypeError(
+                f"coroutines cannot be used with {method}()")
+        if not callable(callback):
+            raise TypeError(
+                f'a callable object was expected by {method}(), '
+                f'got {callback!r}')
+
+    def _call_soon(self, callback, args, context):
+        handle = events.Handle(callback, args, self, context)
+        if handle._source_traceback:
+            del handle._source_traceback[-1]
+        self._ready.append(handle)
+        return handle
+
+    def _check_thread(self):
+        """Check that the current thread is the thread running the event loop.
+
+        Non-thread-safe methods of this class make this assumption and will
+        likely behave incorrectly when the assumption is violated.
+
+        Should only be called when (self._debug == True).  The caller is
+        responsible for checking this condition for performance reasons.
+        """
+        if self._thread_id is None:
+            return
+        thread_id = threading.get_ident()
+        if thread_id != self._thread_id:
+            raise RuntimeError(
+                "Non-thread-safe operation invoked on an event loop other "
+                "than the current one")
+
+    def call_soon_threadsafe(self, callback, *args, context=None):
+        """Like call_soon(), but thread-safe."""
+        self._check_closed()
+        if self._debug:
+            self._check_callback(callback, 'call_soon_threadsafe')
+        handle = self._call_soon(callback, args, context)
+        if handle._source_traceback:
+            del handle._source_traceback[-1]
+        self._write_to_self()
+        return handle
+
+    def run_in_executor(self, executor, func, *args):
+        self._check_closed()
+        if self._debug:
+            self._check_callback(func, 'run_in_executor')
+        if executor is None:
+            executor = self._default_executor
+            # Only check when the default executor is being used
+            self._check_default_executor()
+            if executor is None:
+                executor = concurrent.futures.ThreadPoolExecutor(
+                    thread_name_prefix='asyncio'
+                )
+                self._default_executor = executor
+        return futures.wrap_future(
+            executor.submit(func, *args), loop=self)
+
+    def set_default_executor(self, executor):
+        if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
+            warnings.warn(
+                'Using a default executor that is not an instance of '
+                'ThreadPoolExecutor is deprecated and will be prohibited '
+                'in Python 3.9',
+                DeprecationWarning, 2)
+        self._default_executor = executor
+
+    def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
+        msg = [f"{host}:{port!r}"]
+        if family:
+            msg.append(f'family={family!r}')
+        if type:
+            msg.append(f'type={type!r}')
+        if proto:
+            msg.append(f'proto={proto!r}')
+        if flags:
+            msg.append(f'flags={flags!r}')
+        msg = ', '.join(msg)
+        logger.debug('Get address info %s', msg)
+
+        t0 = self.time()
+        addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
+        dt = self.time() - t0
+
+        msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
+        if dt >= self.slow_callback_duration:
+            logger.info(msg)
+        else:
+            logger.debug(msg)
+        return addrinfo
+
+    async def getaddrinfo(self, host, port, *,
+                          family=0, type=0, proto=0, flags=0):
+        if self._debug:
+            getaddr_func = self._getaddrinfo_debug
+        else:
+            getaddr_func = socket.getaddrinfo
+
+        return await self.run_in_executor(
+            None, getaddr_func, host, port, family, type, proto, flags)
+
+    async def getnameinfo(self, sockaddr, flags=0):
+        return await self.run_in_executor(
+            None, socket.getnameinfo, sockaddr, flags)
+
+    async def sock_sendfile(self, sock, file, offset=0, count=None,
+                            *, fallback=True):
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+        self._check_sendfile_params(sock, file, offset, count)
+        try:
+            return await self._sock_sendfile_native(sock, file,
+                                                    offset, count)
+        except exceptions.SendfileNotAvailableError as exc:
+            if not fallback:
+                raise
+        return await self._sock_sendfile_fallback(sock, file,
+                                                  offset, count)
+
+    async def _sock_sendfile_native(self, sock, file, offset, count):
+        # NB: sendfile syscall is not supported for SSL sockets and
+        # non-mmap files even if sendfile is supported by OS
+        raise exceptions.SendfileNotAvailableError(
+            f"syscall sendfile is not available for socket {sock!r} "
+            "and file {file!r} combination")
+
+    async def _sock_sendfile_fallback(self, sock, file, offset, count):
+        if offset:
+            file.seek(offset)
+        blocksize = (
+            min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
+            if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
+        )
+        buf = bytearray(blocksize)
+        total_sent = 0
+        try:
+            while True:
+                if count:
+                    blocksize = min(count - total_sent, blocksize)
+                    if blocksize <= 0:
+                        break
+                view = memoryview(buf)[:blocksize]
+                read = await self.run_in_executor(None, file.readinto, view)
+                if not read:
+                    break  # EOF
+                await self.sock_sendall(sock, view[:read])
+                total_sent += read
+            return total_sent
+        finally:
+            if total_sent > 0 and hasattr(file, 'seek'):
+                file.seek(offset + total_sent)
+
+    def _check_sendfile_params(self, sock, file, offset, count):
+        if 'b' not in getattr(file, 'mode', 'b'):
+            raise ValueError("file should be opened in binary mode")
+        if not sock.type == socket.SOCK_STREAM:
+            raise ValueError("only SOCK_STREAM type sockets are supported")
+        if count is not None:
+            if not isinstance(count, int):
+                raise TypeError(
+                    "count must be a positive integer (got {!r})".format(count))
+            if count <= 0:
+                raise ValueError(
+                    "count must be a positive integer (got {!r})".format(count))
+        if not isinstance(offset, int):
+            raise TypeError(
+                "offset must be a non-negative integer (got {!r})".format(
+                    offset))
+        if offset < 0:
+            raise ValueError(
+                "offset must be a non-negative integer (got {!r})".format(
+                    offset))
+
+    async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
+        """Create, bind and connect one socket."""
+        my_exceptions = []
+        exceptions.append(my_exceptions)
+        family, type_, proto, _, address = addr_info
+        sock = None
+        try:
+            sock = socket.socket(family=family, type=type_, proto=proto)
+            sock.setblocking(False)
+            if local_addr_infos is not None:
+                for _, _, _, _, laddr in local_addr_infos:
+                    try:
+                        sock.bind(laddr)
+                        break
+                    except OSError as exc:
+                        msg = (
+                            f'error while attempting to bind on '
+                            f'address {laddr!r}: '
+                            f'{exc.strerror.lower()}'
+                        )
+                        exc = OSError(exc.errno, msg)
+                        my_exceptions.append(exc)
+                else:  # all bind attempts failed
+                    raise my_exceptions.pop()
+            await self.sock_connect(sock, address)
+            return sock
+        except OSError as exc:
+            my_exceptions.append(exc)
+            if sock is not None:
+                sock.close()
+            raise
+        except:
+            if sock is not None:
+                sock.close()
+            raise
+
+    async def create_connection(
+            self, protocol_factory, host=None, port=None,
+            *, ssl=None, family=0,
+            proto=0, flags=0, sock=None,
+            local_addr=None, server_hostname=None,
+            ssl_handshake_timeout=None,
+            happy_eyeballs_delay=None, interleave=None):
+        """Connect to a TCP server.
+
+        Create a streaming transport connection to a given Internet host and
+        port: socket family AF_INET or AF_INET6 depending on host (or
+        family if specified), socket type SOCK_STREAM. protocol_factory must
+        be a callable returning a protocol instance.
+
+        This method is a coroutine which will try to establish the connection
+        in the background.  When successful, the coroutine returns a
+        (transport, protocol) pair.
+        """
+        if server_hostname is not None and not ssl:
+            raise ValueError('server_hostname is only meaningful with ssl')
+
+        if server_hostname is None and ssl:
+            # Use host as default for server_hostname.  It is an error
+            # if host is empty or not set, e.g. when an
+            # already-connected socket was passed or when only a port
+            # is given.  To avoid this error, you can pass
+            # server_hostname='' -- this will bypass the hostname
+            # check.  (This also means that if host is a numeric
+            # IP/IPv6 address, we will attempt to verify that exact
+            # address; this will probably fail, but it is possible to
+            # create a certificate for a specific IP address, so we
+            # don't judge it here.)
+            if not host:
+                raise ValueError('You must set server_hostname '
+                                 'when using ssl without a host')
+            server_hostname = host
+
+        if ssl_handshake_timeout is not None and not ssl:
+            raise ValueError(
+                'ssl_handshake_timeout is only meaningful with ssl')
+
+        if happy_eyeballs_delay is not None and interleave is None:
+            # If using happy eyeballs, default to interleave addresses by family
+            interleave = 1
+
+        if host is not None or port is not None:
+            if sock is not None:
+                raise ValueError(
+                    'host/port and sock can not be specified at the same time')
+
+            infos = await self._ensure_resolved(
+                (host, port), family=family,
+                type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
+            if not infos:
+                raise OSError('getaddrinfo() returned empty list')
+
+            if local_addr is not None:
+                laddr_infos = await self._ensure_resolved(
+                    local_addr, family=family,
+                    type=socket.SOCK_STREAM, proto=proto,
+                    flags=flags, loop=self)
+                if not laddr_infos:
+                    raise OSError('getaddrinfo() returned empty list')
+            else:
+                laddr_infos = None
+
+            if interleave:
+                infos = _interleave_addrinfos(infos, interleave)
+
+            exceptions = []
+            if happy_eyeballs_delay is None:
+                # not using happy eyeballs
+                for addrinfo in infos:
+                    try:
+                        sock = await self._connect_sock(
+                            exceptions, addrinfo, laddr_infos)
+                        break
+                    except OSError:
+                        continue
+            else:  # using happy eyeballs
+                sock, _, _ = await staggered.staggered_race(
+                    (functools.partial(self._connect_sock,
+                                       exceptions, addrinfo, laddr_infos)
+                     for addrinfo in infos),
+                    happy_eyeballs_delay, loop=self)
+
+            if sock is None:
+                exceptions = [exc for sub in exceptions for exc in sub]
+                if len(exceptions) == 1:
+                    raise exceptions[0]
+                else:
+                    # If they all have the same str(), raise one.
+                    model = str(exceptions[0])
+                    if all(str(exc) == model for exc in exceptions):
+                        raise exceptions[0]
+                    # Raise a combined exception so the user can see all
+                    # the various error messages.
+                    raise OSError('Multiple exceptions: {}'.format(
+                        ', '.join(str(exc) for exc in exceptions)))
+
+        else:
+            if sock is None:
+                raise ValueError(
+                    'host and port were not specified and no sock specified')
+            if sock.type != socket.SOCK_STREAM:
+                # We allow AF_INET, AF_INET6, AF_UNIX as long as they
+                # are SOCK_STREAM.
+                # We support passing AF_UNIX sockets even though we have
+                # a dedicated API for that: create_unix_connection.
+                # Disallowing AF_UNIX in this method would break backwards
+                # compatibility.
+                raise ValueError(
+                    f'A Stream Socket was expected, got {sock!r}')
+
+        transport, protocol = await self._create_connection_transport(
+            sock, protocol_factory, ssl, server_hostname,
+            ssl_handshake_timeout=ssl_handshake_timeout)
+        if self._debug:
+            # Get the socket from the transport because SSL transport closes
+            # the old socket and creates a new SSL socket
+            sock = transport.get_extra_info('socket')
+            logger.debug("%r connected to %s:%r: (%r, %r)",
+                         sock, host, port, transport, protocol)
+        return transport, protocol
+
+    async def _create_connection_transport(
+            self, sock, protocol_factory, ssl,
+            server_hostname, server_side=False,
+            ssl_handshake_timeout=None):
+
+        sock.setblocking(False)
+
+        protocol = protocol_factory()
+        waiter = self.create_future()
+        if ssl:
+            sslcontext = None if isinstance(ssl, bool) else ssl
+            transport = self._make_ssl_transport(
+                sock, protocol, sslcontext, waiter,
+                server_side=server_side, server_hostname=server_hostname,
+                ssl_handshake_timeout=ssl_handshake_timeout)
+        else:
+            transport = self._make_socket_transport(sock, protocol, waiter)
+
+        try:
+            await waiter
+        except:
+            transport.close()
+            raise
+
+        return transport, protocol
+
+    async def sendfile(self, transport, file, offset=0, count=None,
+                       *, fallback=True):
+        """Send a file to transport.
+
+        Return the total number of bytes which were sent.
+
+        The method uses high-performance os.sendfile if available.
+
+        file must be a regular file object opened in binary mode.
+
+        offset tells from where to start reading the file. If specified,
+        count is the total number of bytes to transmit, as opposed to
+        sending the file until EOF is reached. The file position is updated
+        on return, including on error, in which case file.tell() can be
+        used to figure out how many bytes were sent.
+
+        Setting fallback to True makes asyncio read and send the file
+        manually when the platform does not support the sendfile syscall
+        (e.g. Windows or an SSL socket on Unix).
+
+        Raise SendfileNotAvailableError if the system does not support the
+        sendfile syscall and fallback is False.
+        """
+        if transport.is_closing():
+            raise RuntimeError("Transport is closing")
+        mode = getattr(transport, '_sendfile_compatible',
+                       constants._SendfileMode.UNSUPPORTED)
+        if mode is constants._SendfileMode.UNSUPPORTED:
+            raise RuntimeError(
+                f"sendfile is not supported for transport {transport!r}")
+        if mode is constants._SendfileMode.TRY_NATIVE:
+            try:
+                return await self._sendfile_native(transport, file,
+                                                   offset, count)
+            except exceptions.SendfileNotAvailableError as exc:
+                if not fallback:
+                    raise
+
+        if not fallback:
+            raise RuntimeError(
+                f"fallback is disabled and native sendfile is not "
+                f"supported for transport {transport!r}")
+
+        return await self._sendfile_fallback(transport, file,
+                                             offset, count)
+
+    async def _sendfile_native(self, transp, file, offset, count):
+        raise exceptions.SendfileNotAvailableError(
+            "sendfile syscall is not supported")
+
+    async def _sendfile_fallback(self, transp, file, offset, count):
+        if offset:
+            file.seek(offset)
+        blocksize = min(count, 16384) if count else 16384
+        buf = bytearray(blocksize)
+        total_sent = 0
+        proto = _SendfileFallbackProtocol(transp)
+        try:
+            while True:
+                if count:
+                    blocksize = min(count - total_sent, blocksize)
+                    if blocksize <= 0:
+                        return total_sent
+                view = memoryview(buf)[:blocksize]
+                read = await self.run_in_executor(None, file.readinto, view)
+                if not read:
+                    return total_sent  # EOF
+                await proto.drain()
+                transp.write(view[:read])
+                total_sent += read
+        finally:
+            if total_sent > 0 and hasattr(file, 'seek'):
+                file.seek(offset + total_sent)
+            await proto.restore()
+
+    async def start_tls(self, transport, protocol, sslcontext, *,
+                        server_side=False,
+                        server_hostname=None,
+                        ssl_handshake_timeout=None):
+        """Upgrade transport to TLS.
+
+        Return a new transport that *protocol* should start using
+        immediately.
+        """
+        if ssl is None:
+            raise RuntimeError('Python ssl module is not available')
+
+        if not isinstance(sslcontext, ssl.SSLContext):
+            raise TypeError(
+                f'sslcontext is expected to be an instance of ssl.SSLContext, '
+                f'got {sslcontext!r}')
+
+        if not getattr(transport, '_start_tls_compatible', False):
+            raise TypeError(
+                f'transport {transport!r} is not supported by start_tls()')
+
+        waiter = self.create_future()
+        ssl_protocol = sslproto.SSLProtocol(
+            self, protocol, sslcontext, waiter,
+            server_side, server_hostname,
+            ssl_handshake_timeout=ssl_handshake_timeout,
+            call_connection_made=False)
+
+        # Pause early so that "ssl_protocol.data_received()" doesn't
+        # have a chance to get called before "ssl_protocol.connection_made()".
+        transport.pause_reading()
+
+        transport.set_protocol(ssl_protocol)
+        conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
+        resume_cb = self.call_soon(transport.resume_reading)
+
+        try:
+            await waiter
+        except BaseException:
+            transport.close()
+            conmade_cb.cancel()
+            resume_cb.cancel()
+            raise
+
+        return ssl_protocol._app_transport
+
+    async def create_datagram_endpoint(self, protocol_factory,
+                                       local_addr=None, remote_addr=None, *,
+                                       family=0, proto=0, flags=0,
+                                       reuse_address=_unset, reuse_port=None,
+                                       allow_broadcast=None, sock=None):
+        """Create datagram connection."""
+        if sock is not None:
+            if sock.type != socket.SOCK_DGRAM:
+                raise ValueError(
+                    f'A UDP Socket was expected, got {sock!r}')
+            if (local_addr or remote_addr or
+                    family or proto or flags or
+                    reuse_port or allow_broadcast):
+                # show the problematic kwargs in exception msg
+                opts = dict(local_addr=local_addr, remote_addr=remote_addr,
+                            family=family, proto=proto, flags=flags,
+                            reuse_address=reuse_address, reuse_port=reuse_port,
+                            allow_broadcast=allow_broadcast)
+                problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
+                raise ValueError(
+                    f'socket modifier keyword arguments can not be used '
+                    f'when sock is specified. ({problems})')
+            sock.setblocking(False)
+            r_addr = None
+        else:
+            if not (local_addr or remote_addr):
+                if family == 0:
+                    raise ValueError('unexpected address family')
+                addr_pairs_info = (((family, proto), (None, None)),)
+            elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
+                for addr in (local_addr, remote_addr):
+                    if addr is not None and not isinstance(addr, str):
+                        raise TypeError('string is expected')
+
+                if local_addr and local_addr[0] not in (0, '\x00'):
+                    try:
+                        if stat.S_ISSOCK(os.stat(local_addr).st_mode):
+                            os.remove(local_addr)
+                    except FileNotFoundError:
+                        pass
+                    except OSError as err:
+                        # Directory may have permissions only to create socket.
+                        logger.error('Unable to check or remove stale UNIX '
+                                     'socket %r: %r',
+                                     local_addr, err)
+
+                addr_pairs_info = (((family, proto),
+                                    (local_addr, remote_addr)), )
+            else:
+                # join address by (family, protocol)
+                addr_infos = {}  # Using order preserving dict
+                for idx, addr in ((0, local_addr), (1, remote_addr)):
+                    if addr is not None:
+                        assert isinstance(addr, tuple) and len(addr) == 2, (
+                            '2-tuple is expected')
+
+                        infos = await self._ensure_resolved(
+                            addr, family=family, type=socket.SOCK_DGRAM,
+                            proto=proto, flags=flags, loop=self)
+                        if not infos:
+                            raise OSError('getaddrinfo() returned empty list')
+
+                        for fam, _, pro, _, address in infos:
+                            key = (fam, pro)
+                            if key not in addr_infos:
+                                addr_infos[key] = [None, None]
+                            addr_infos[key][idx] = address
+
+                # each addr has to have info for each (family, proto) pair
+                addr_pairs_info = [
+                    (key, addr_pair) for key, addr_pair in addr_infos.items()
+                    if not ((local_addr and addr_pair[0] is None) or
+                            (remote_addr and addr_pair[1] is None))]
+
+                if not addr_pairs_info:
+                    raise ValueError('can not get address information')
+
+            exceptions = []
+
+            # bpo-37228
+            if reuse_address is not _unset:
+                if reuse_address:
+                    raise ValueError("Passing `reuse_address=True` is no "
+                                     "longer supported, as the usage of "
+                                     "SO_REUSEPORT in UDP poses a significant "
+                                     "security concern.")
+                else:
+                    warnings.warn("The *reuse_address* parameter has been "
+                                  "deprecated as of 3.5.10 and is scheduled "
+                                  "for removal in 3.11.", DeprecationWarning,
+                                  stacklevel=2)
+
+            for ((family, proto),
+                 (local_address, remote_address)) in addr_pairs_info:
+                sock = None
+                r_addr = None
+                try:
+                    sock = socket.socket(
+                        family=family, type=socket.SOCK_DGRAM, proto=proto)
+                    if reuse_port:
+                        _set_reuseport(sock)
+                    if allow_broadcast:
+                        sock.setsockopt(
+                            socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+                    sock.setblocking(False)
+
+                    if local_addr:
+                        sock.bind(local_address)
+                    if remote_addr:
+                        if not allow_broadcast:
+                            await self.sock_connect(sock, remote_address)
+                        r_addr = remote_address
+                except OSError as exc:
+                    if sock is not None:
+                        sock.close()
+                    exceptions.append(exc)
+                except:
+                    if sock is not None:
+                        sock.close()
+                    raise
+                else:
+                    break
+            else:
+                raise exceptions[0]
+
+        protocol = protocol_factory()
+        waiter = self.create_future()
+        transport = self._make_datagram_transport(
+            sock, protocol, r_addr, waiter)
+        if self._debug:
+            if local_addr:
+                logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
+                            "created: (%r, %r)",
+                            local_addr, remote_addr, transport, protocol)
+            else:
+                logger.debug("Datagram endpoint remote_addr=%r created: "
+                             "(%r, %r)",
+                             remote_addr, transport, protocol)
+
+        try:
+            await waiter
+        except:
+            transport.close()
+            raise
+
+        return transport, protocol
+
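+    # Editor's note: a minimal usage sketch for create_datagram_endpoint()
+    # above (illustrative only, not part of the upstream module; the
+    # protocol class name is hypothetical):
+    #
+    #     class EchoDatagramProtocol(asyncio.DatagramProtocol):
+    #         def connection_made(self, transport):
+    #             self.transport = transport
+    #         def datagram_received(self, data, addr):
+    #             self.transport.sendto(data, addr)  # echo back
+    #
+    #     async def main():
+    #         loop = asyncio.get_running_loop()
+    #         transport, protocol = await loop.create_datagram_endpoint(
+    #             EchoDatagramProtocol, local_addr=('127.0.0.1', 9999))
+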
+    async def _ensure_resolved(self, address, *,
+                               family=0, type=socket.SOCK_STREAM,
+                               proto=0, flags=0, loop):
+        host, port = address[:2]
+        info = _ipaddr_info(host, port, family, type, proto, *address[2:])
+        if info is not None:
+            # "host" is already a resolved IP.
+            return [info]
+        else:
+            return await loop.getaddrinfo(host, port, family=family, type=type,
+                                          proto=proto, flags=flags)
+
+    async def _create_server_getaddrinfo(self, host, port, family, flags):
+        infos = await self._ensure_resolved((host, port), family=family,
+                                            type=socket.SOCK_STREAM,
+                                            flags=flags, loop=self)
+        if not infos:
+            raise OSError(f'getaddrinfo({host!r}) returned empty list')
+        return infos
+
+    async def create_server(
+            self, protocol_factory, host=None, port=None,
+            *,
+            family=socket.AF_UNSPEC,
+            flags=socket.AI_PASSIVE,
+            sock=None,
+            backlog=100,
+            ssl=None,
+            reuse_address=None,
+            reuse_port=None,
+            ssl_handshake_timeout=None,
+            start_serving=True):
+        """Create a TCP server.
+
+        The host parameter can be a string, in which case the TCP server is
+        bound to host and port.
+
+        The host parameter can also be a sequence of strings, in which case
+        the TCP server is bound to all hosts of the sequence. If a host
+        appears multiple times (possibly indirectly, e.g. when hostnames
+        resolve to the same IP address), the server is only bound once to
+        that host.
+
+        Return a Server object which can be used to stop the service.
+
+        This method is a coroutine.
+        """
+        if isinstance(ssl, bool):
+            raise TypeError('ssl argument must be an SSLContext or None')
+
+        if ssl_handshake_timeout is not None and ssl is None:
+            raise ValueError(
+                'ssl_handshake_timeout is only meaningful with ssl')
+
+        if host is not None or port is not None:
+            if sock is not None:
+                raise ValueError(
+                    'host/port and sock can not be specified at the same time')
+
+            if reuse_address is None:
+                reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
+            sockets = []
+            if host == '':
+                hosts = [None]
+            elif (isinstance(host, str) or
+                  not isinstance(host, collections.abc.Iterable)):
+                hosts = [host]
+            else:
+                hosts = host
+
+            fs = [self._create_server_getaddrinfo(host, port, family=family,
+                                                  flags=flags)
+                  for host in hosts]
+            infos = await tasks.gather(*fs, loop=self)
+            infos = set(itertools.chain.from_iterable(infos))
+
+            completed = False
+            try:
+                for res in infos:
+                    af, socktype, proto, canonname, sa = res
+                    try:
+                        sock = socket.socket(af, socktype, proto)
+                    except socket.error:
+                        # Assume it's a bad family/type/protocol combination.
+                        if self._debug:
+                            logger.warning('create_server() failed to create '
+                                           'socket.socket(%r, %r, %r)',
+                                           af, socktype, proto, exc_info=True)
+                        continue
+                    sockets.append(sock)
+                    if reuse_address:
+                        sock.setsockopt(
+                            socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
+                    if reuse_port:
+                        _set_reuseport(sock)
+                    # Disable IPv4/IPv6 dual stack support (enabled by
+                    # default on Linux) which makes a single socket
+                    # listen on both address families.
+                    if (_HAS_IPv6 and
+                            af == socket.AF_INET6 and
+                            hasattr(socket, 'IPPROTO_IPV6')):
+                        sock.setsockopt(socket.IPPROTO_IPV6,
+                                        socket.IPV6_V6ONLY,
+                                        True)
+                    try:
+                        sock.bind(sa)
+                    except OSError as err:
+                        raise OSError(err.errno, 'error while attempting '
+                                      'to bind on address %r: %s'
+                                      % (sa, err.strerror.lower())) from None
+                completed = True
+            finally:
+                if not completed:
+                    for sock in sockets:
+                        sock.close()
+        else:
+            if sock is None:
+                raise ValueError('Neither host/port nor sock was specified')
+            if sock.type != socket.SOCK_STREAM:
+                raise ValueError(f'A Stream Socket was expected, got {sock!r}')
+            sockets = [sock]
+
+        for sock in sockets:
+            sock.setblocking(False)
+
+        server = Server(self, sockets, protocol_factory,
+                        ssl, backlog, ssl_handshake_timeout)
+        if start_serving:
+            server._start_serving()
+            # Skip one loop iteration so that all 'loop.add_reader'
+            # calls go through.
+            await tasks.sleep(0, loop=self)
+
+        if self._debug:
+            logger.info("%r is serving", server)
+        return server
+
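+    # Editor's note: a minimal usage sketch for create_server() above
+    # (illustrative only, not part of the upstream module; EchoProtocol
+    # is a hypothetical name):
+    #
+    #     class EchoProtocol(asyncio.Protocol):
+    #         def connection_made(self, transport):
+    #             self.transport = transport
+    #         def data_received(self, data):
+    #             self.transport.write(data)  # echo back
+    #
+    #     async def main():
+    #         loop = asyncio.get_running_loop()
+    #         server = await loop.create_server(
+    #             EchoProtocol, host='127.0.0.1', port=8888)
+    #         async with server:
+    #             await server.serve_forever()
+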
+    async def connect_accepted_socket(
+            self, protocol_factory, sock,
+            *, ssl=None,
+            ssl_handshake_timeout=None):
+        """Handle an accepted connection.
+
+        This is used by servers that accept connections outside of
+        asyncio but that use asyncio to handle connections.
+
+        This method is a coroutine.  When completed, the coroutine
+        returns a (transport, protocol) pair.
+        """
+        if sock.type != socket.SOCK_STREAM:
+            raise ValueError(f'A Stream Socket was expected, got {sock!r}')
+
+        if ssl_handshake_timeout is not None and not ssl:
+            raise ValueError(
+                'ssl_handshake_timeout is only meaningful with ssl')
+
+        transport, protocol = await self._create_connection_transport(
+            sock, protocol_factory, ssl, '', server_side=True,
+            ssl_handshake_timeout=ssl_handshake_timeout)
+        if self._debug:
+            # Get the socket from the transport because SSL transport closes
+            # the old socket and creates a new SSL socket
+            sock = transport.get_extra_info('socket')
+            logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
+        return transport, protocol
+
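+    # Editor's note: a hypothetical sketch of handing a socket accepted
+    # outside asyncio to the loop (illustrative only, not part of the
+    # upstream module):
+    #
+    #     lsock = socket.socket()
+    #     lsock.bind(('127.0.0.1', 0))
+    #     lsock.listen()
+    #     conn, _ = lsock.accept()  # accepted without asyncio
+    #     transport, protocol = await loop.connect_accepted_socket(
+    #         MyProtocol, conn)     # MyProtocol: any asyncio.Protocol
+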
+    async def connect_read_pipe(self, protocol_factory, pipe):
+        protocol = protocol_factory()
+        waiter = self.create_future()
+        transport = self._make_read_pipe_transport(pipe, protocol, waiter)
+
+        try:
+            await waiter
+        except:
+            transport.close()
+            raise
+
+        if self._debug:
+            logger.debug('Read pipe %r connected: (%r, %r)',
+                         pipe.fileno(), transport, protocol)
+        return transport, protocol
+
+    async def connect_write_pipe(self, protocol_factory, pipe):
+        protocol = protocol_factory()
+        waiter = self.create_future()
+        transport = self._make_write_pipe_transport(pipe, protocol, waiter)
+
+        try:
+            await waiter
+        except:
+            transport.close()
+            raise
+
+        if self._debug:
+            logger.debug('Write pipe %r connected: (%r, %r)',
+                         pipe.fileno(), transport, protocol)
+        return transport, protocol
+
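+    # Editor's note: a minimal sketch for the two pipe methods above
+    # (illustrative only, not part of the upstream module), assuming an
+    # os.pipe() pair wrapped in file objects:
+    #
+    #     r_fd, w_fd = os.pipe()
+    #     read_transport, read_proto = await loop.connect_read_pipe(
+    #         asyncio.Protocol, os.fdopen(r_fd, 'rb'))
+    #     write_transport, write_proto = await loop.connect_write_pipe(
+    #         asyncio.BaseProtocol, os.fdopen(w_fd, 'wb'))
+    #     write_transport.write(b'ping')  # delivered to read_proto
+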
+    def _log_subprocess(self, msg, stdin, stdout, stderr):
+        info = [msg]
+        if stdin is not None:
+            info.append(f'stdin={_format_pipe(stdin)}')
+        if stdout is not None and stderr == subprocess.STDOUT:
+            info.append(f'stdout=stderr={_format_pipe(stdout)}')
+        else:
+            if stdout is not None:
+                info.append(f'stdout={_format_pipe(stdout)}')
+            if stderr is not None:
+                info.append(f'stderr={_format_pipe(stderr)}')
+        logger.debug(' '.join(info))
+
+    async def subprocess_shell(self, protocol_factory, cmd, *,
+                               stdin=subprocess.PIPE,
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.PIPE,
+                               universal_newlines=False,
+                               shell=True, bufsize=0,
+                               encoding=None, errors=None, text=None,
+                               **kwargs):
+        if not isinstance(cmd, (bytes, str)):
+            raise ValueError("cmd must be a string")
+        if universal_newlines:
+            raise ValueError("universal_newlines must be False")
+        if not shell:
+            raise ValueError("shell must be True")
+        if bufsize != 0:
+            raise ValueError("bufsize must be 0")
+        if text:
+            raise ValueError("text must be False")
+        if encoding is not None:
+            raise ValueError("encoding must be None")
+        if errors is not None:
+            raise ValueError("errors must be None")
+
+        protocol = protocol_factory()
+        debug_log = None
+        if self._debug:
+            # don't log parameters: they may contain sensitive information
+            # (password) and may be too long
+            debug_log = 'run shell command %r' % cmd
+            self._log_subprocess(debug_log, stdin, stdout, stderr)
+        transport = await self._make_subprocess_transport(
+            protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
+        if self._debug and debug_log is not None:
+            logger.info('%s: %r', debug_log, transport)
+        return transport, protocol
+
+    async def subprocess_exec(self, protocol_factory, program, *args,
+                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE, universal_newlines=False,
+                              shell=False, bufsize=0,
+                              encoding=None, errors=None, text=None,
+                              **kwargs):
+        if universal_newlines:
+            raise ValueError("universal_newlines must be False")
+        if shell:
+            raise ValueError("shell must be False")
+        if bufsize != 0:
+            raise ValueError("bufsize must be 0")
+        if text:
+            raise ValueError("text must be False")
+        if encoding is not None:
+            raise ValueError("encoding must be None")
+        if errors is not None:
+            raise ValueError("errors must be None")
+
+        popen_args = (program,) + args
+        protocol = protocol_factory()
+        debug_log = None
+        if self._debug:
+            # don't log parameters: they may contain sensitive information
+            # (password) and may be too long
+            debug_log = f'execute program {program!r}'
+            self._log_subprocess(debug_log, stdin, stdout, stderr)
+        transport = await self._make_subprocess_transport(
+            protocol, popen_args, False, stdin, stdout, stderr,
+            bufsize, **kwargs)
+        if self._debug and debug_log is not None:
+            logger.info('%s: %r', debug_log, transport)
+        return transport, protocol
+
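+    # Editor's note: a minimal SubprocessProtocol sketch driving
+    # subprocess_exec() above (illustrative only, not part of the
+    # upstream module; Collector is a hypothetical name):
+    #
+    #     class Collector(asyncio.SubprocessProtocol):
+    #         def __init__(self, exited):
+    #             self.exited = exited
+    #             self.output = bytearray()
+    #         def pipe_data_received(self, fd, data):
+    #             self.output.extend(data)
+    #         def process_exited(self):
+    #             self.exited.set_result(True)
+    #
+    #     exited = loop.create_future()
+    #     transport, proto = await loop.subprocess_exec(
+    #         lambda: Collector(exited), 'echo', 'hello', stdin=None)
+    #     await exited
+    #     transport.close()
+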
+    def get_exception_handler(self):
+        """Return an exception handler, or None if the default one is in use.
+        """
+        return self._exception_handler
+
+    def set_exception_handler(self, handler):
+        """Set handler as the new event loop exception handler.
+
+        If handler is None, the default exception handler will
+        be set.
+
+        If handler is a callable object, it should have a
+        signature matching '(loop, context)', where 'loop'
+        will be a reference to the active event loop, 'context'
+        will be a dict object (see `call_exception_handler()`
+        documentation for details about context).
+        """
+        if handler is not None and not callable(handler):
+            raise TypeError(f'A callable object or None is expected, '
+                            f'got {handler!r}')
+        self._exception_handler = handler
+
+    def default_exception_handler(self, context):
+        """Default exception handler.
+
+        This is called when an exception occurs and no exception
+        handler is set, and can be called by a custom exception
+        handler that wants to defer to the default behavior.
+
+        This default handler logs the error message and other
+        context-dependent information.  In debug mode, a truncated
+        stack trace is also appended showing where the given object
+        (e.g. a handle or future or task) was created, if any.
+
+        The context parameter has the same meaning as in
+        `call_exception_handler()`.
+        """
+        message = context.get('message')
+        if not message:
+            message = 'Unhandled exception in event loop'
+
+        exception = context.get('exception')
+        if exception is not None:
+            exc_info = (type(exception), exception, exception.__traceback__)
+        else:
+            exc_info = False
+
+        if ('source_traceback' not in context and
+                self._current_handle is not None and
+                self._current_handle._source_traceback):
+            context['handle_traceback'] = \
+                self._current_handle._source_traceback
+
+        log_lines = [message]
+        for key in sorted(context):
+            if key in {'message', 'exception'}:
+                continue
+            value = context[key]
+            if key == 'source_traceback':
+                tb = ''.join(traceback.format_list(value))
+                value = 'Object created at (most recent call last):\n'
+                value += tb.rstrip()
+            elif key == 'handle_traceback':
+                tb = ''.join(traceback.format_list(value))
+                value = 'Handle created at (most recent call last):\n'
+                value += tb.rstrip()
+            else:
+                value = repr(value)
+            log_lines.append(f'{key}: {value}')
+
+        logger.error('\n'.join(log_lines), exc_info=exc_info)
+
+    def call_exception_handler(self, context):
+        """Call the current event loop's exception handler.
+
+        The context argument is a dict containing the following keys:
+
+        - 'message': Error message;
+        - 'exception' (optional): Exception object;
+        - 'future' (optional): Future instance;
+        - 'task' (optional): Task instance;
+        - 'handle' (optional): Handle instance;
+        - 'protocol' (optional): Protocol instance;
+        - 'transport' (optional): Transport instance;
+        - 'socket' (optional): Socket instance;
+        - 'asyncgen' (optional): Asynchronous generator that caused
+                                 the exception.
+
+        New keys may be introduced in the future.
+
+        Note: do not overload this method in an event loop subclass.
+        For custom exception handling, use the
+        `set_exception_handler()` method.
+        """
+        if self._exception_handler is None:
+            try:
+                self.default_exception_handler(context)
+            except (SystemExit, KeyboardInterrupt):
+                raise
+            except BaseException:
+                # Second protection layer for unexpected errors
+                # in the default implementation, as well as for subclassed
+                # event loops with overloaded "default_exception_handler".
+                logger.error('Exception in default exception handler',
+                             exc_info=True)
+        else:
+            try:
+                self._exception_handler(self, context)
+            except (SystemExit, KeyboardInterrupt):
+                raise
+            except BaseException as exc:
+                # Exception in the user-set custom exception handler.
+                try:
+                    # Let's try default handler.
+                    self.default_exception_handler({
+                        'message': 'Unhandled error in exception handler',
+                        'exception': exc,
+                        'context': context,
+                    })
+                except (SystemExit, KeyboardInterrupt):
+                    raise
+                except BaseException:
+                    # Guard 'default_exception_handler' in case it is
+                    # overloaded.
+                    logger.error('Exception in default exception handler '
+                                 'while handling an unexpected error '
+                                 'in custom exception handler',
+                                 exc_info=True)
+
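+    # Editor's note: a minimal custom handler sketch matching the
+    # '(loop, context)' signature documented above (illustrative only,
+    # not part of the upstream module):
+    #
+    #     def handler(loop, context):
+    #         logger.error('caught: %s', context['message'])
+    #         loop.default_exception_handler(context)  # keep default logging
+    #
+    #     loop.set_exception_handler(handler)
+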
+    def _add_callback(self, handle):
+        """Add a Handle to _scheduled (TimerHandle) or _ready."""
+        assert isinstance(handle, events.Handle), 'A Handle is required here'
+        if handle._cancelled:
+            return
+        assert not isinstance(handle, events.TimerHandle)
+        self._ready.append(handle)
+
+    def _add_callback_signalsafe(self, handle):
+        """Like _add_callback() but called from a signal handler."""
+        self._add_callback(handle)
+        self._write_to_self()
+
+    def _timer_handle_cancelled(self, handle):
+        """Notification that a TimerHandle has been cancelled."""
+        if handle._scheduled:
+            self._timer_cancelled_count += 1
+
+    def _run_once(self):
+        """Run one full iteration of the event loop.
+
+        This calls all currently ready callbacks, polls for I/O,
+        schedules the resulting callbacks, and finally schedules
+        'call_later' callbacks.
+        """
+
+        sched_count = len(self._scheduled)
+        if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
+            self._timer_cancelled_count / sched_count >
+                _MIN_CANCELLED_TIMER_HANDLES_FRACTION):
+            # Remove delayed calls that were cancelled if their number
+            # is too high
+            new_scheduled = []
+            for handle in self._scheduled:
+                if handle._cancelled:
+                    handle._scheduled = False
+                else:
+                    new_scheduled.append(handle)
+
+            heapq.heapify(new_scheduled)
+            self._scheduled = new_scheduled
+            self._timer_cancelled_count = 0
+        else:
+            # Remove delayed calls that were cancelled from head of queue.
+            while self._scheduled and self._scheduled[0]._cancelled:
+                self._timer_cancelled_count -= 1
+                handle = heapq.heappop(self._scheduled)
+                handle._scheduled = False
+
+        timeout = None
+        if self._ready or self._stopping:
+            timeout = 0
+        elif self._scheduled:
+            # Compute the desired timeout.
+            when = self._scheduled[0]._when
+            timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
+
+        event_list = self._selector.select(timeout)
+        self._process_events(event_list)
+
+        # Handle 'later' callbacks that are ready.
+        end_time = self.time() + self._clock_resolution
+        while self._scheduled:
+            handle = self._scheduled[0]
+            if handle._when >= end_time:
+                break
+            handle = heapq.heappop(self._scheduled)
+            handle._scheduled = False
+            self._ready.append(handle)
+
+        # This is the only place where callbacks are actually *called*.
+        # All other places just add them to ready.
+        # Note: We run all currently scheduled callbacks, but not any
+        # callbacks scheduled by callbacks run this time around --
+        # they will be run the next time (after another I/O poll).
+        # Use an idiom that is thread-safe without using locks.
+        ntodo = len(self._ready)
+        for i in range(ntodo):
+            handle = self._ready.popleft()
+            if handle._cancelled:
+                continue
+            if self._debug:
+                try:
+                    self._current_handle = handle
+                    t0 = self.time()
+                    handle._run()
+                    dt = self.time() - t0
+                    if dt >= self.slow_callback_duration:
+                        logger.warning('Executing %s took %.3f seconds',
+                                       _format_handle(handle), dt)
+                finally:
+                    self._current_handle = None
+            else:
+                handle._run()
+        handle = None  # Needed to break cycles when an exception occurs.
+
+    def _set_coroutine_origin_tracking(self, enabled):
+        if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
+            return
+
+        if enabled:
+            self._coroutine_origin_tracking_saved_depth = (
+                sys.get_coroutine_origin_tracking_depth())
+            sys.set_coroutine_origin_tracking_depth(
+                constants.DEBUG_STACK_DEPTH)
+        else:
+            sys.set_coroutine_origin_tracking_depth(
+                self._coroutine_origin_tracking_saved_depth)
+
+        self._coroutine_origin_tracking_enabled = enabled
+
+    def get_debug(self):
+        return self._debug
+
+    def set_debug(self, enabled):
+        self._debug = enabled
+
+        if self.is_running():
+            self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/base_futures.py b/linux-x64/clang/python3/lib/python3.9/asyncio/base_futures.py
new file mode 100644
index 0000000..2c01ac9
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/base_futures.py
@@ -0,0 +1,80 @@
+__all__ = ()
+
+import reprlib
+from _thread import get_ident
+
+from . import format_helpers
+
+# States for Future.
+_PENDING = 'PENDING'
+_CANCELLED = 'CANCELLED'
+_FINISHED = 'FINISHED'
+
+
+def isfuture(obj):
+    """Check for a Future.
+
+    This returns True when obj is a Future instance or is advertising
+    itself as duck-type compatible by setting _asyncio_future_blocking.
+    See comment in Future for more details.
+    """
+    return (hasattr(obj.__class__, '_asyncio_future_blocking') and
+            obj._asyncio_future_blocking is not None)
+
+
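+# Editor's note: isfuture() backs the public asyncio.isfuture(); a
+# sketch of the duck-typing check described above (illustrative only,
+# not part of the upstream module):
+#
+#     fut = loop.create_future()
+#     isfuture(fut)          # True: Future sets _asyncio_future_blocking
+#     isfuture(object())     # False: the attribute is absent
+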
+def _format_callbacks(cb):
+    """helper function for Future.__repr__"""
+    size = len(cb)
+    if not size:
+        cb = ''
+
+    def format_cb(callback):
+        return format_helpers._format_callback_source(callback, ())
+
+    if size == 1:
+        cb = format_cb(cb[0][0])
+    elif size == 2:
+        cb = '{}, {}'.format(format_cb(cb[0][0]), format_cb(cb[1][0]))
+    elif size > 2:
+        cb = '{}, <{} more>, {}'.format(format_cb(cb[0][0]),
+                                        size - 2,
+                                        format_cb(cb[-1][0]))
+    return f'cb=[{cb}]'
+
+
+# bpo-42183: _repr_running is needed for repr protection
+# when a Future or Task result contains itself directly or indirectly.
+# The logic is borrowed from @reprlib.recursive_repr decorator.
+# Unfortunately, the direct decorator usage is impossible because of
+# AttributeError: '_asyncio.Task' object has no attribute '__module__' error.
+#
+# After fixing this thing we can return to the decorator based approach.
+_repr_running = set()
+
+
+def _future_repr_info(future):
+    # (Future) -> str
+    """helper function for Future.__repr__"""
+    info = [future._state.lower()]
+    if future._state == _FINISHED:
+        if future._exception is not None:
+            info.append(f'exception={future._exception!r}')
+        else:
+            key = id(future), get_ident()
+            if key in _repr_running:
+                result = '...'
+            else:
+                _repr_running.add(key)
+                try:
+                    # use reprlib to limit the length of the output, especially
+                    # for very long strings
+                    result = reprlib.repr(future._result)
+                finally:
+                    _repr_running.discard(key)
+            info.append(f'result={result}')
+    if future._callbacks:
+        info.append(_format_callbacks(future._callbacks))
+    if future._source_traceback:
+        frame = future._source_traceback[-1]
+        info.append(f'created at {frame[0]}:{frame[1]}')
+    return info
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/base_subprocess.py b/linux-x64/clang/python3/lib/python3.9/asyncio/base_subprocess.py
new file mode 100644
index 0000000..14d5051
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/base_subprocess.py
@@ -0,0 +1,285 @@
+import collections
+import subprocess
+import warnings
+
+from . import protocols
+from . import transports
+from .log import logger
+
+
+class BaseSubprocessTransport(transports.SubprocessTransport):
+
+    def __init__(self, loop, protocol, args, shell,
+                 stdin, stdout, stderr, bufsize,
+                 waiter=None, extra=None, **kwargs):
+        super().__init__(extra)
+        self._closed = False
+        self._protocol = protocol
+        self._loop = loop
+        self._proc = None
+        self._pid = None
+        self._returncode = None
+        self._exit_waiters = []
+        self._pending_calls = collections.deque()
+        self._pipes = {}
+        self._finished = False
+
+        if stdin == subprocess.PIPE:
+            self._pipes[0] = None
+        if stdout == subprocess.PIPE:
+            self._pipes[1] = None
+        if stderr == subprocess.PIPE:
+            self._pipes[2] = None
+
+        # Create the child process: set the _proc attribute
+        try:
+            self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
+                        stderr=stderr, bufsize=bufsize, **kwargs)
+        except:
+            self.close()
+            raise
+
+        self._pid = self._proc.pid
+        self._extra['subprocess'] = self._proc
+
+        if self._loop.get_debug():
+            if isinstance(args, (bytes, str)):
+                program = args
+            else:
+                program = args[0]
+            logger.debug('process %r created: pid %s',
+                         program, self._pid)
+
+        self._loop.create_task(self._connect_pipes(waiter))
+
+    def __repr__(self):
+        info = [self.__class__.__name__]
+        if self._closed:
+            info.append('closed')
+        if self._pid is not None:
+            info.append(f'pid={self._pid}')
+        if self._returncode is not None:
+            info.append(f'returncode={self._returncode}')
+        elif self._pid is not None:
+            info.append('running')
+        else:
+            info.append('not started')
+
+        stdin = self._pipes.get(0)
+        if stdin is not None:
+            info.append(f'stdin={stdin.pipe}')
+
+        stdout = self._pipes.get(1)
+        stderr = self._pipes.get(2)
+        if stdout is not None and stderr is stdout:
+            info.append(f'stdout=stderr={stdout.pipe}')
+        else:
+            if stdout is not None:
+                info.append(f'stdout={stdout.pipe}')
+            if stderr is not None:
+                info.append(f'stderr={stderr.pipe}')
+
+        return '<{}>'.format(' '.join(info))
+
+    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
+        raise NotImplementedError
+
+    def set_protocol(self, protocol):
+        self._protocol = protocol
+
+    def get_protocol(self):
+        return self._protocol
+
+    def is_closing(self):
+        return self._closed
+
+    def close(self):
+        if self._closed:
+            return
+        self._closed = True
+
+        for proto in self._pipes.values():
+            if proto is None:
+                continue
+            proto.pipe.close()
+
+        if (self._proc is not None and
+                # has the child process finished?
+                self._returncode is None and
+                # the child process has finished, but the
+                # transport hasn't been notified yet?
+                self._proc.poll() is None):
+
+            if self._loop.get_debug():
+                logger.warning('Close running child process: kill %r', self)
+
+            try:
+                self._proc.kill()
+            except ProcessLookupError:
+                pass
+
+            # Don't clear the _proc reference yet: _post_init() may still run
+
+    def __del__(self, _warn=warnings.warn):
+        if not self._closed:
+            _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
+            self.close()
+
+    def get_pid(self):
+        return self._pid
+
+    def get_returncode(self):
+        return self._returncode
+
+    def get_pipe_transport(self, fd):
+        if fd in self._pipes:
+            return self._pipes[fd].pipe
+        else:
+            return None
+
+    def _check_proc(self):
+        if self._proc is None:
+            raise ProcessLookupError()
+
+    def send_signal(self, signal):
+        self._check_proc()
+        self._proc.send_signal(signal)
+
+    def terminate(self):
+        self._check_proc()
+        self._proc.terminate()
+
+    def kill(self):
+        self._check_proc()
+        self._proc.kill()
+
+    async def _connect_pipes(self, waiter):
+        try:
+            proc = self._proc
+            loop = self._loop
+
+            if proc.stdin is not None:
+                _, pipe = await loop.connect_write_pipe(
+                    lambda: WriteSubprocessPipeProto(self, 0),
+                    proc.stdin)
+                self._pipes[0] = pipe
+
+            if proc.stdout is not None:
+                _, pipe = await loop.connect_read_pipe(
+                    lambda: ReadSubprocessPipeProto(self, 1),
+                    proc.stdout)
+                self._pipes[1] = pipe
+
+            if proc.stderr is not None:
+                _, pipe = await loop.connect_read_pipe(
+                    lambda: ReadSubprocessPipeProto(self, 2),
+                    proc.stderr)
+                self._pipes[2] = pipe
+
+            assert self._pending_calls is not None
+
+            loop.call_soon(self._protocol.connection_made, self)
+            for callback, data in self._pending_calls:
+                loop.call_soon(callback, *data)
+            self._pending_calls = None
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            if waiter is not None and not waiter.cancelled():
+                waiter.set_exception(exc)
+        else:
+            if waiter is not None and not waiter.cancelled():
+                waiter.set_result(None)
+
+    def _call(self, cb, *data):
+        if self._pending_calls is not None:
+            self._pending_calls.append((cb, data))
+        else:
+            self._loop.call_soon(cb, *data)
+
+    def _pipe_connection_lost(self, fd, exc):
+        self._call(self._protocol.pipe_connection_lost, fd, exc)
+        self._try_finish()
+
+    def _pipe_data_received(self, fd, data):
+        self._call(self._protocol.pipe_data_received, fd, data)
+
+    def _process_exited(self, returncode):
+        assert returncode is not None, returncode
+        assert self._returncode is None, self._returncode
+        if self._loop.get_debug():
+            logger.info('%r exited with return code %r', self, returncode)
+        self._returncode = returncode
+        if self._proc.returncode is None:
+            # asyncio uses a child watcher: copy the status into the Popen
+            # object. On Python 3.6, it is required to avoid a ResourceWarning.
+            self._proc.returncode = returncode
+        self._call(self._protocol.process_exited)
+        self._try_finish()
+
+        # wake up futures waiting for wait()
+        for waiter in self._exit_waiters:
+            if not waiter.cancelled():
+                waiter.set_result(returncode)
+        self._exit_waiters = None
+
+    async def _wait(self):
+        """Wait until the process exit and return the process return code.
+
+        This method is a coroutine."""
+        if self._returncode is not None:
+            return self._returncode
+
+        waiter = self._loop.create_future()
+        self._exit_waiters.append(waiter)
+        return await waiter
+
+    def _try_finish(self):
+        assert not self._finished
+        if self._returncode is None:
+            return
+        if all(p is not None and p.disconnected
+               for p in self._pipes.values()):
+            self._finished = True
+            self._call(self._call_connection_lost, None)
+
+    def _call_connection_lost(self, exc):
+        try:
+            self._protocol.connection_lost(exc)
+        finally:
+            self._loop = None
+            self._proc = None
+            self._protocol = None
+
+
+class WriteSubprocessPipeProto(protocols.BaseProtocol):
+
+    def __init__(self, proc, fd):
+        self.proc = proc
+        self.fd = fd
+        self.pipe = None
+        self.disconnected = False
+
+    def connection_made(self, transport):
+        self.pipe = transport
+
+    def __repr__(self):
+        return f'<{self.__class__.__name__} fd={self.fd} pipe={self.pipe!r}>'
+
+    def connection_lost(self, exc):
+        self.disconnected = True
+        self.proc._pipe_connection_lost(self.fd, exc)
+        self.proc = None
+
+    def pause_writing(self):
+        self.proc._protocol.pause_writing()
+
+    def resume_writing(self):
+        self.proc._protocol.resume_writing()
+
+
+class ReadSubprocessPipeProto(WriteSubprocessPipeProto,
+                              protocols.Protocol):
+
+    def data_received(self, data):
+        self.proc._pipe_data_received(self.fd, data)
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/base_tasks.py b/linux-x64/clang/python3/lib/python3.9/asyncio/base_tasks.py
new file mode 100644
index 0000000..09bb171
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/base_tasks.py
@@ -0,0 +1,85 @@
+import linecache
+import traceback
+
+from . import base_futures
+from . import coroutines
+
+
+def _task_repr_info(task):
+    info = base_futures._future_repr_info(task)
+
+    if task._must_cancel:
+        # replace status
+        info[0] = 'cancelling'
+
+    info.insert(1, 'name=%r' % task.get_name())
+
+    coro = coroutines._format_coroutine(task._coro)
+    info.insert(2, f'coro=<{coro}>')
+
+    if task._fut_waiter is not None:
+        info.insert(3, f'wait_for={task._fut_waiter!r}')
+    return info
+
+
+def _task_get_stack(task, limit):
+    frames = []
+    if hasattr(task._coro, 'cr_frame'):
+        # case 1: 'async def' coroutines
+        f = task._coro.cr_frame
+    elif hasattr(task._coro, 'gi_frame'):
+        # case 2: legacy coroutines
+        f = task._coro.gi_frame
+    elif hasattr(task._coro, 'ag_frame'):
+        # case 3: async generators
+        f = task._coro.ag_frame
+    else:
+        # case 4: unknown objects
+        f = None
+    if f is not None:
+        while f is not None:
+            if limit is not None:
+                if limit <= 0:
+                    break
+                limit -= 1
+            frames.append(f)
+            f = f.f_back
+        frames.reverse()
+    elif task._exception is not None:
+        tb = task._exception.__traceback__
+        while tb is not None:
+            if limit is not None:
+                if limit <= 0:
+                    break
+                limit -= 1
+            frames.append(tb.tb_frame)
+            tb = tb.tb_next
+    return frames
+
+
+def _task_print_stack(task, limit, file):
+    extracted_list = []
+    checked = set()
+    for f in task.get_stack(limit=limit):
+        lineno = f.f_lineno
+        co = f.f_code
+        filename = co.co_filename
+        name = co.co_name
+        if filename not in checked:
+            checked.add(filename)
+            linecache.checkcache(filename)
+        line = linecache.getline(filename, lineno, f.f_globals)
+        extracted_list.append((filename, lineno, name, line))
+
+    exc = task._exception
+    if not extracted_list:
+        print(f'No stack for {task!r}', file=file)
+    elif exc is not None:
+        print(f'Traceback for {task!r} (most recent call last):', file=file)
+    else:
+        print(f'Stack for {task!r} (most recent call last):', file=file)
+
+    traceback.print_list(extracted_list, file=file)
+    if exc is not None:
+        for line in traceback.format_exception_only(exc.__class__, exc):
+            print(line, file=file, end='')
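+
+# Editor's note: these helpers back Task.get_stack() and
+# Task.print_stack(); a hypothetical debugging sketch (illustrative
+# only, not part of the upstream module):
+#
+#     task = asyncio.ensure_future(some_coro())
+#     ...
+#     task.print_stack(limit=10)   # formats via _task_print_stack()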
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/constants.py b/linux-x64/clang/python3/lib/python3.9/asyncio/constants.py
new file mode 100644
index 0000000..33feed6
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/constants.py
@@ -0,0 +1,27 @@
+import enum
+
+# After the connection is lost, log warnings after this many write()s.
+LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5
+
+# Seconds to wait before retrying accept().
+ACCEPT_RETRY_DELAY = 1
+
+# Number of stack entries to capture in debug mode.
+# The larger the number, the slower the operation in debug mode
+# (see extract_stack() in format_helpers.py).
+DEBUG_STACK_DEPTH = 10
+
+# Number of seconds to wait for SSL handshake to complete
+# The default timeout matches that of Nginx.
+SSL_HANDSHAKE_TIMEOUT = 60.0
+
+# Used in sendfile fallback code.  We use fallback for platforms
+# that don't support sendfile, or for TLS connections.
+SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 256
+
+# The enum should be here to break circular dependencies between
+# base_events and sslproto
+class _SendfileMode(enum.Enum):
+    UNSUPPORTED = enum.auto()
+    TRY_NATIVE = enum.auto()
+    FALLBACK = enum.auto()
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/coroutines.py b/linux-x64/clang/python3/lib/python3.9/asyncio/coroutines.py
new file mode 100644
index 0000000..9664ea7
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/coroutines.py
@@ -0,0 +1,269 @@
+__all__ = 'coroutine', 'iscoroutinefunction', 'iscoroutine'
+
+import collections.abc
+import functools
+import inspect
+import os
+import sys
+import traceback
+import types
+import warnings
+
+from . import base_futures
+from . import constants
+from . import format_helpers
+from .log import logger
+
+
+def _is_debug_mode():
+    # If you set _DEBUG to true, @coroutine will wrap the resulting
+    # generator objects in a CoroWrapper instance (defined below).  That
+    # instance will log a message when the generator is never iterated
+    # over, which may happen when you forget to use "await" or "yield from"
+    # with a coroutine call.
+    # Note that the value of the _DEBUG flag is taken
+    # when the decorator is used, so to be of any use it must be set
+    # before you define your coroutines.  A downside of using this feature
+    # is that tracebacks show entries for the CoroWrapper.__next__ method
+    # when _DEBUG is true.
+    return sys.flags.dev_mode or (not sys.flags.ignore_environment and
+                                  bool(os.environ.get('PYTHONASYNCIODEBUG')))
+
+
+_DEBUG = _is_debug_mode()
+
+
+class CoroWrapper:
+    # Wrapper for coroutine object in _DEBUG mode.
+
+    def __init__(self, gen, func=None):
+        assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
+        self.gen = gen
+        self.func = func  # Used to unwrap @coroutine decorator
+        self._source_traceback = format_helpers.extract_stack(sys._getframe(1))
+        self.__name__ = getattr(gen, '__name__', None)
+        self.__qualname__ = getattr(gen, '__qualname__', None)
+
+    def __repr__(self):
+        coro_repr = _format_coroutine(self)
+        if self._source_traceback:
+            frame = self._source_traceback[-1]
+            coro_repr += f', created at {frame[0]}:{frame[1]}'
+
+        return f'<{self.__class__.__name__} {coro_repr}>'
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        return self.gen.send(None)
+
+    def send(self, value):
+        return self.gen.send(value)
+
+    def throw(self, type, value=None, traceback=None):
+        return self.gen.throw(type, value, traceback)
+
+    def close(self):
+        return self.gen.close()
+
+    @property
+    def gi_frame(self):
+        return self.gen.gi_frame
+
+    @property
+    def gi_running(self):
+        return self.gen.gi_running
+
+    @property
+    def gi_code(self):
+        return self.gen.gi_code
+
+    def __await__(self):
+        return self
+
+    @property
+    def gi_yieldfrom(self):
+        return self.gen.gi_yieldfrom
+
+    def __del__(self):
+        # Be careful accessing self.gen.gi_frame -- self.gen might not exist.
+        gen = getattr(self, 'gen', None)
+        frame = getattr(gen, 'gi_frame', None)
+        if frame is not None and frame.f_lasti == -1:
+            msg = f'{self!r} was never yielded from'
+            tb = getattr(self, '_source_traceback', ())
+            if tb:
+                tb = ''.join(traceback.format_list(tb))
+                msg += (f'\nCoroutine object created at '
+                        f'(most recent call last, truncated to '
+                        f'{constants.DEBUG_STACK_DEPTH} last lines):\n')
+                msg += tb.rstrip()
+            logger.error(msg)
+
+
+def coroutine(func):
+    """Decorator to mark coroutines.
+
+    If the coroutine is not yielded from before it is destroyed,
+    an error message is logged.
+    """
+    warnings.warn('"@coroutine" decorator is deprecated since Python 3.8, use "async def" instead',
+                  DeprecationWarning,
+                  stacklevel=2)
+    if inspect.iscoroutinefunction(func):
+        # In Python 3.5 that's all we need to do for coroutines
+        # defined with "async def".
+        return func
+
+    if inspect.isgeneratorfunction(func):
+        coro = func
+    else:
+        @functools.wraps(func)
+        def coro(*args, **kw):
+            res = func(*args, **kw)
+            if (base_futures.isfuture(res) or inspect.isgenerator(res) or
+                    isinstance(res, CoroWrapper)):
+                res = yield from res
+            else:
+                # If 'res' is an awaitable, run it.
+                try:
+                    await_meth = res.__await__
+                except AttributeError:
+                    pass
+                else:
+                    if isinstance(res, collections.abc.Awaitable):
+                        res = yield from await_meth()
+            return res
+
+    coro = types.coroutine(coro)
+    if not _DEBUG:
+        wrapper = coro
+    else:
+        @functools.wraps(func)
+        def wrapper(*args, **kwds):
+            w = CoroWrapper(coro(*args, **kwds), func=func)
+            if w._source_traceback:
+                del w._source_traceback[-1]
+            # Python < 3.5 does not implement __qualname__
+            # on generator objects, so we set it manually.
+            # We use getattr as some callables (such as
+            # functools.partial) may lack __qualname__.
+            w.__name__ = getattr(func, '__name__', None)
+            w.__qualname__ = getattr(func, '__qualname__', None)
+            return w
+
+    wrapper._is_coroutine = _is_coroutine  # For iscoroutinefunction().
+    return wrapper
+
+
+# A marker for iscoroutinefunction.
+_is_coroutine = object()
+
+
+def iscoroutinefunction(func):
+    """Return True if func is a decorated coroutine function."""
+    return (inspect.iscoroutinefunction(func) or
+            getattr(func, '_is_coroutine', None) is _is_coroutine)
+
+
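+# Editor's note: a quick sketch of the check above (illustrative only,
+# not part of the upstream module):
+#
+#     async def f():
+#         return 1
+#
+#     iscoroutinefunction(f)           # True (native coroutine function)
+#     iscoroutinefunction(lambda: 1)   # False
+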
+# Prioritize native coroutine check to speed-up
+# asyncio.iscoroutine.
+_COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType,
+                    collections.abc.Coroutine, CoroWrapper)
+_iscoroutine_typecache = set()
+
+
+def iscoroutine(obj):
+    """Return True if obj is a coroutine object."""
+    if type(obj) in _iscoroutine_typecache:
+        return True
+
+    if isinstance(obj, _COROUTINE_TYPES):
+        # Just in case, we don't want to cache more than 100
+        # positive types.  That shouldn't ever happen, unless
+        # someone is stressing the system on purpose.
+        if len(_iscoroutine_typecache) < 100:
+            _iscoroutine_typecache.add(type(obj))
+        return True
+    else:
+        return False
+
+
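+# Editor's note: a sketch of the positive-type cache above (illustrative
+# only, not part of the upstream module):
+#
+#     async def f():
+#         pass
+#
+#     c = f()
+#     iscoroutine(c)   # True; type(c) is now in _iscoroutine_typecache
+#     iscoroutine(c)   # True again, via the fast cache lookup
+#     c.close()        # suppress the "never awaited" warning
+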
+def _format_coroutine(coro):
+    assert iscoroutine(coro)
+
+    is_corowrapper = isinstance(coro, CoroWrapper)
+
+    def get_name(coro):
+        # Coroutines compiled with Cython sometimes don't have
+        # proper __qualname__ or __name__.  While that is a bug
+        # in Cython, asyncio shouldn't crash with an AttributeError
+        # in its __repr__ functions.
+        if is_corowrapper:
+            return format_helpers._format_callback(coro.func, (), {})
+
+        if hasattr(coro, '__qualname__') and coro.__qualname__:
+            coro_name = coro.__qualname__
+        elif hasattr(coro, '__name__') and coro.__name__:
+            coro_name = coro.__name__
+        else:
+            # Stop masking Cython bugs, expose them in a friendly way.
+            coro_name = f'<{type(coro).__name__} without __name__>'
+        return f'{coro_name}()'
+
+    def is_running(coro):
+        try:
+            return coro.cr_running
+        except AttributeError:
+            try:
+                return coro.gi_running
+            except AttributeError:
+                return False
+
+    coro_code = None
+    if hasattr(coro, 'cr_code') and coro.cr_code:
+        coro_code = coro.cr_code
+    elif hasattr(coro, 'gi_code') and coro.gi_code:
+        coro_code = coro.gi_code
+
+    coro_name = get_name(coro)
+
+    if not coro_code:
+        # Built-in types might not have __qualname__ or __name__.
+        if is_running(coro):
+            return f'{coro_name} running'
+        else:
+            return coro_name
+
+    coro_frame = None
+    if hasattr(coro, 'gi_frame') and coro.gi_frame:
+        coro_frame = coro.gi_frame
+    elif hasattr(coro, 'cr_frame') and coro.cr_frame:
+        coro_frame = coro.cr_frame
+
+    # If Cython's coroutine has a fake code object without proper
+    # co_filename -- expose that.
+    filename = coro_code.co_filename or '<empty co_filename>'
+
+    lineno = 0
+    if (is_corowrapper and
+            coro.func is not None and
+            not inspect.isgeneratorfunction(coro.func)):
+        source = format_helpers._get_function_source(coro.func)
+        if source is not None:
+            filename, lineno = source
+        if coro_frame is None:
+            coro_repr = f'{coro_name} done, defined at {filename}:{lineno}'
+        else:
+            coro_repr = f'{coro_name} running, defined at {filename}:{lineno}'
+
+    elif coro_frame is not None:
+        lineno = coro_frame.f_lineno
+        coro_repr = f'{coro_name} running at {filename}:{lineno}'
+
+    else:
+        lineno = coro_code.co_firstlineno
+        coro_repr = f'{coro_name} done, defined at {filename}:{lineno}'
+
+    return coro_repr
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/events.py b/linux-x64/clang/python3/lib/python3.9/asyncio/events.py
new file mode 100644
index 0000000..0dce87b
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/events.py
@@ -0,0 +1,795 @@
+"""Event loop and event loop policy."""
+
+__all__ = (
+    'AbstractEventLoopPolicy',
+    'AbstractEventLoop', 'AbstractServer',
+    'Handle', 'TimerHandle',
+    'get_event_loop_policy', 'set_event_loop_policy',
+    'get_event_loop', 'set_event_loop', 'new_event_loop',
+    'get_child_watcher', 'set_child_watcher',
+    '_set_running_loop', 'get_running_loop',
+    '_get_running_loop',
+)
+
+import contextvars
+import os
+import socket
+import subprocess
+import sys
+import threading
+
+from . import format_helpers
+
+
+class Handle:
+    """Object returned by callback registration methods."""
+
+    __slots__ = ('_callback', '_args', '_cancelled', '_loop',
+                 '_source_traceback', '_repr', '__weakref__',
+                 '_context')
+
+    def __init__(self, callback, args, loop, context=None):
+        if context is None:
+            context = contextvars.copy_context()
+        self._context = context
+        self._loop = loop
+        self._callback = callback
+        self._args = args
+        self._cancelled = False
+        self._repr = None
+        if self._loop.get_debug():
+            self._source_traceback = format_helpers.extract_stack(
+                sys._getframe(1))
+        else:
+            self._source_traceback = None
+
+    def _repr_info(self):
+        info = [self.__class__.__name__]
+        if self._cancelled:
+            info.append('cancelled')
+        if self._callback is not None:
+            info.append(format_helpers._format_callback_source(
+                self._callback, self._args))
+        if self._source_traceback:
+            frame = self._source_traceback[-1]
+            info.append(f'created at {frame[0]}:{frame[1]}')
+        return info
+
+    def __repr__(self):
+        if self._repr is not None:
+            return self._repr
+        info = self._repr_info()
+        return '<{}>'.format(' '.join(info))
+
+    def cancel(self):
+        if not self._cancelled:
+            self._cancelled = True
+            if self._loop.get_debug():
+                # Keep a representation in debug mode to keep callback and
+                # parameters. For example, to log the warning
+                # "Executing <Handle...> took 2.5 second"
+                self._repr = repr(self)
+            self._callback = None
+            self._args = None
+
+    def cancelled(self):
+        return self._cancelled
+
+    def _run(self):
+        try:
+            self._context.run(self._callback, *self._args)
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            cb = format_helpers._format_callback_source(
+                self._callback, self._args)
+            msg = f'Exception in callback {cb}'
+            context = {
+                'message': msg,
+                'exception': exc,
+                'handle': self,
+            }
+            if self._source_traceback:
+                context['source_traceback'] = self._source_traceback
+            self._loop.call_exception_handler(context)
+        self = None  # Needed to break cycles when an exception occurs.
+
+
+class TimerHandle(Handle):
+    """Object returned by timed callback registration methods."""
+
+    __slots__ = ['_scheduled', '_when']
+
+    def __init__(self, when, callback, args, loop, context=None):
+        assert when is not None
+        super().__init__(callback, args, loop, context)
+        if self._source_traceback:
+            del self._source_traceback[-1]
+        self._when = when
+        self._scheduled = False
+
+    def _repr_info(self):
+        info = super()._repr_info()
+        pos = 2 if self._cancelled else 1
+        info.insert(pos, f'when={self._when}')
+        return info
+
+    def __hash__(self):
+        return hash(self._when)
+
+    def __lt__(self, other):
+        if isinstance(other, TimerHandle):
+            return self._when < other._when
+        return NotImplemented
+
+    def __le__(self, other):
+        if isinstance(other, TimerHandle):
+            return self._when < other._when or self.__eq__(other)
+        return NotImplemented
+
+    def __gt__(self, other):
+        if isinstance(other, TimerHandle):
+            return self._when > other._when
+        return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, TimerHandle):
+            return self._when > other._when or self.__eq__(other)
+        return NotImplemented
+
+    def __eq__(self, other):
+        if isinstance(other, TimerHandle):
+            return (self._when == other._when and
+                    self._callback == other._callback and
+                    self._args == other._args and
+                    self._cancelled == other._cancelled)
+        return NotImplemented
+
+    def cancel(self):
+        if not self._cancelled:
+            self._loop._timer_handle_cancelled(self)
+        super().cancel()
+
+    def when(self):
+        """Return a scheduled callback time.
+
+        The time is an absolute timestamp, using the same time
+        reference as loop.time().
+        """
+        return self._when
+
+
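+# Editor's note: a minimal sketch of the handle API above (illustrative
+# only, not part of the upstream module):
+#
+#     handle = loop.call_later(5.0, callback)  # returns a TimerHandle
+#     handle.when()     # absolute deadline on the loop.time() clock
+#     handle.cancel()   # also notifies the loop via
+#                       # _timer_handle_cancelled()
+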
+class AbstractServer:
+    """Abstract server returned by create_server()."""
+
+    def close(self):
+        """Stop serving.  This leaves existing connections open."""
+        raise NotImplementedError
+
+    def get_loop(self):
+        """Get the event loop the Server object is attached to."""
+        raise NotImplementedError
+
+    def is_serving(self):
+        """Return True if the server is accepting connections."""
+        raise NotImplementedError
+
+    async def start_serving(self):
+        """Start accepting connections.
+
+        This method is idempotent, so it can be called when
+        the server is already serving.
+        """
+        raise NotImplementedError
+
+    async def serve_forever(self):
+        """Start accepting connections until the coroutine is cancelled.
+
+        The server is closed when the coroutine is cancelled.
+        """
+        raise NotImplementedError
+
+    async def wait_closed(self):
+        """Coroutine to wait until service is closed."""
+        raise NotImplementedError
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, *exc):
+        self.close()
+        await self.wait_closed()
+
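+# A minimal usage sketch (illustration only; not part of the upstream file).
+# __aenter__/__aexit__ make any AbstractServer usable with `async with`,
+# which guarantees close() and wait_closed() on exit.  Runnable standalone:
+#
+#     import asyncio
+#
+#     async def handle(reader, writer):
+#         writer.close()
+#
+#     async def main():
+#         server = await asyncio.start_server(handle, '127.0.0.1', 8888)
+#         async with server:           # on exit: close() + wait_closed()
+#             await server.start_serving()   # idempotent
+#             await asyncio.sleep(1)         # accept connections briefly
+#
+#     asyncio.run(main())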
+
+class AbstractEventLoop:
+    """Abstract event loop."""
+
+    # Running and stopping the event loop.
+
+    def run_forever(self):
+        """Run the event loop until stop() is called."""
+        raise NotImplementedError
+
+    def run_until_complete(self, future):
+        """Run the event loop until a Future is done.
+
+        Return the Future's result, or raise its exception.
+        """
+        raise NotImplementedError
+
+    def stop(self):
+        """Stop the event loop as soon as reasonable.
+
+        Exactly how soon that is may depend on the implementation, but
+        no more I/O callbacks should be scheduled.
+        """
+        raise NotImplementedError
+
+    def is_running(self):
+        """Return whether the event loop is currently running."""
+        raise NotImplementedError
+
+    def is_closed(self):
+        """Returns True if the event loop was closed."""
+        raise NotImplementedError
+
+    def close(self):
+        """Close the loop.
+
+        The loop should not be running.
+
+        This is idempotent and irreversible.
+
+        No other methods should be called after this one.
+        """
+        raise NotImplementedError
+
+    async def shutdown_asyncgens(self):
+        """Shutdown all active asynchronous generators."""
+        raise NotImplementedError
+
+    async def shutdown_default_executor(self):
+        """Schedule the shutdown of the default executor."""
+        raise NotImplementedError
+
+    # Methods scheduling callbacks.  All these return Handles.
+
+    def _timer_handle_cancelled(self, handle):
+        """Notification that a TimerHandle has been cancelled."""
+        raise NotImplementedError
+
+    def call_soon(self, callback, *args):
+        return self.call_later(0, callback, *args)
+
+    def call_later(self, delay, callback, *args):
+        raise NotImplementedError
+
+    def call_at(self, when, callback, *args):
+        raise NotImplementedError
+
+    def time(self):
+        raise NotImplementedError
+
+    def create_future(self):
+        raise NotImplementedError
+
+    # Method scheduling a coroutine object: create a task.
+
+    def create_task(self, coro, *, name=None):
+        raise NotImplementedError
+
+    # Methods for interacting with threads.
+
+    def call_soon_threadsafe(self, callback, *args):
+        raise NotImplementedError
+
+    def run_in_executor(self, executor, func, *args):
+        raise NotImplementedError
+
+    def set_default_executor(self, executor):
+        raise NotImplementedError
+
+    # Network I/O methods returning Futures.
+
+    async def getaddrinfo(self, host, port, *,
+                          family=0, type=0, proto=0, flags=0):
+        raise NotImplementedError
+
+    async def getnameinfo(self, sockaddr, flags=0):
+        raise NotImplementedError
+
+    async def create_connection(
+            self, protocol_factory, host=None, port=None,
+            *, ssl=None, family=0, proto=0,
+            flags=0, sock=None, local_addr=None,
+            server_hostname=None,
+            ssl_handshake_timeout=None,
+            happy_eyeballs_delay=None, interleave=None):
+        raise NotImplementedError
+
+    async def create_server(
+            self, protocol_factory, host=None, port=None,
+            *, family=socket.AF_UNSPEC,
+            flags=socket.AI_PASSIVE, sock=None, backlog=100,
+            ssl=None, reuse_address=None, reuse_port=None,
+            ssl_handshake_timeout=None,
+            start_serving=True):
+        """A coroutine which creates a TCP server bound to host and port.
+
+        The return value is a Server object which can be used to stop
+        the service.
+
+        If host is an empty string or None, all interfaces are assumed
+        and a list of multiple sockets will be returned (most likely
+        one for IPv4 and another one for IPv6). The host parameter can also be
+        a sequence (e.g. list) of hosts to bind to.
+
+        family can be set to either AF_INET or AF_INET6 to force the
+        socket to use IPv4 or IPv6. If not set it will be determined
+        from host (defaults to AF_UNSPEC).
+
+        flags is a bitmask for getaddrinfo().
+
+        sock can optionally be specified in order to use a preexisting
+        socket object.
+
+        backlog is the maximum number of queued connections passed to
+        listen() (defaults to 100).
+
+        ssl can be set to an SSLContext to enable SSL over the
+        accepted connections.
+
+        reuse_address tells the kernel to reuse a local socket in
+        TIME_WAIT state, without waiting for its natural timeout to
+        expire. If not specified, it will automatically be set to True on
+        UNIX.
+
+        reuse_port tells the kernel to allow this endpoint to be bound to
+        the same port as other existing endpoints are bound to, so long as
+        they all set this flag when being created. This option is not
+        supported on Windows.
+
+        ssl_handshake_timeout is the time in seconds that an SSL server
+        will wait for completion of the SSL handshake before aborting the
+        connection. Default is 60s.
+
+        start_serving set to True (default) causes the created server
+        to start accepting connections immediately.  When set to False,
+        the user should await Server.start_serving() or Server.serve_forever()
+        to make the server start accepting connections.
+        """
+        raise NotImplementedError
+
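+    # A minimal usage sketch for create_server() (illustration only; not
+    # part of the upstream file).  The protocol factory is any callable
+    # returning a Protocol instance.  Runnable as a standalone script:
+    #
+    #     import asyncio
+    #
+    #     class EchoProtocol(asyncio.Protocol):
+    #         def connection_made(self, transport):
+    #             self.transport = transport
+    #
+    #         def data_received(self, data):
+    #             self.transport.write(data)  # echo bytes back to the peer
+    #
+    #     async def main():
+    #         loop = asyncio.get_running_loop()
+    #         server = await loop.create_server(
+    #             EchoProtocol, '127.0.0.1', 8888)
+    #         async with server:
+    #             await asyncio.sleep(1)      # serve for one second
+    #
+    #     asyncio.run(main())
+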
+    async def sendfile(self, transport, file, offset=0, count=None,
+                       *, fallback=True):
+        """Send a file through a transport.
+
+        Return the total number of bytes sent.
+        """
+        raise NotImplementedError
+
+    async def start_tls(self, transport, protocol, sslcontext, *,
+                        server_side=False,
+                        server_hostname=None,
+                        ssl_handshake_timeout=None):
+        """Upgrade a transport to TLS.
+
+        Return a new transport that *protocol* should start using
+        immediately.
+        """
+        raise NotImplementedError
+
+    async def create_unix_connection(
+            self, protocol_factory, path=None, *,
+            ssl=None, sock=None,
+            server_hostname=None,
+            ssl_handshake_timeout=None):
+        raise NotImplementedError
+
+    async def create_unix_server(
+            self, protocol_factory, path=None, *,
+            sock=None, backlog=100, ssl=None,
+            ssl_handshake_timeout=None,
+            start_serving=True):
+        """A coroutine which creates a UNIX Domain Socket server.
+
+        The return value is a Server object, which can be used to stop
+        the service.
+
+        path is a str, representing a file system path to bind the
+        server socket to.
+
+        sock can optionally be specified in order to use a preexisting
+        socket object.
+
+        backlog is the maximum number of queued connections passed to
+        listen() (defaults to 100).
+
+        ssl can be set to an SSLContext to enable SSL over the
+        accepted connections.
+
+        ssl_handshake_timeout is the time in seconds that an SSL server
+        will wait for the SSL handshake to complete (defaults to 60s).
+
+        start_serving set to True (default) causes the created server
+        to start accepting connections immediately.  When set to False,
+        the user should await Server.start_serving() or Server.serve_forever()
+        to make the server start accepting connections.
+        """
+        raise NotImplementedError
+
+    async def create_datagram_endpoint(self, protocol_factory,
+                                       local_addr=None, remote_addr=None, *,
+                                       family=0, proto=0, flags=0,
+                                       reuse_address=None, reuse_port=None,
+                                       allow_broadcast=None, sock=None):
+        """A coroutine which creates a datagram endpoint.
+
+        This method will try to establish the endpoint in the background.
+        When successful, the coroutine returns a (transport, protocol) pair.
+
+        protocol_factory must be a callable returning a protocol instance.
+
+        The socket family will be AF_INET, socket.AF_INET6 or
+        socket.AF_UNIX, depending on host (or family, if specified); the
+        socket type will be SOCK_DGRAM.
+
+        reuse_address tells the kernel to reuse a local socket in
+        TIME_WAIT state, without waiting for its natural timeout to
+        expire. If not specified, it will automatically be set to True on
+        UNIX.
+
+        reuse_port tells the kernel to allow this endpoint to be bound to
+        the same port as other existing endpoints are bound to, so long as
+        they all set this flag when being created. This option is not
+        supported on Windows and some UNIXes. If the
+        :py:data:`~socket.SO_REUSEPORT` constant is not defined, then this
+        capability is unsupported.
+
+        allow_broadcast tells the kernel to allow this endpoint to send
+        messages to the broadcast address.
+
+        sock can optionally be specified in order to use a preexisting
+        socket object.
+        """
+        raise NotImplementedError
+
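+    # A minimal usage sketch for create_datagram_endpoint() (illustration
+    # only; not part of the upstream file).  Runnable standalone:
+    #
+    #     import asyncio
+    #
+    #     class EchoDatagram(asyncio.DatagramProtocol):
+    #         def connection_made(self, transport):
+    #             self.transport = transport
+    #
+    #         def datagram_received(self, data, addr):
+    #             self.transport.sendto(data, addr)  # echo to the sender
+    #
+    #     async def main():
+    #         loop = asyncio.get_running_loop()
+    #         transport, protocol = await loop.create_datagram_endpoint(
+    #             EchoDatagram, local_addr=('127.0.0.1', 9999))
+    #         await asyncio.sleep(1)
+    #         transport.close()
+    #
+    #     asyncio.run(main())
+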
+    # Pipes and subprocesses.
+
+    async def connect_read_pipe(self, protocol_factory, pipe):
+        """Register read pipe in event loop. Set the pipe to non-blocking mode.
+
+        protocol_factory should instantiate an object implementing the
+        Protocol interface.
+        pipe is a file-like object.
+        Return pair (transport, protocol), where transport supports the
+        ReadTransport interface."""
+        # The reason to accept a file-like object instead of just a file
+        # descriptor is that we need to own the pipe and close it when the
+        # transport is finished with it.  We can get complicated errors if
+        # we pass f.fileno(), close the fd in the pipe transport and then
+        # close f, or vice versa.
+        raise NotImplementedError
+
+    async def connect_write_pipe(self, protocol_factory, pipe):
+        """Register write pipe in event loop.
+
+        protocol_factory should instantiate an object implementing the
+        BaseProtocol interface.
+        pipe is a file-like object already switched to non-blocking mode.
+        Return pair (transport, protocol), where transport supports the
+        WriteTransport interface."""
+        # The reason to accept a file-like object instead of just a file
+        # descriptor is that we need to own the pipe and close it when the
+        # transport is finished with it.  We can get complicated errors if
+        # we pass f.fileno(), close the fd in the pipe transport and then
+        # close f, or vice versa.
+        raise NotImplementedError
+
+    async def subprocess_shell(self, protocol_factory, cmd, *,
+                               stdin=subprocess.PIPE,
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.PIPE,
+                               **kwargs):
+        raise NotImplementedError
+
+    async def subprocess_exec(self, protocol_factory, *args,
+                              stdin=subprocess.PIPE,
+                              stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE,
+                              **kwargs):
+        raise NotImplementedError
+
+    # Ready-based callback registration methods.
+    # The add_*() methods return None.
+    # The remove_*() methods return True if something was removed,
+    # False if there was nothing to delete.
+
+    def add_reader(self, fd, callback, *args):
+        raise NotImplementedError
+
+    def remove_reader(self, fd):
+        raise NotImplementedError
+
+    def add_writer(self, fd, callback, *args):
+        raise NotImplementedError
+
+    def remove_writer(self, fd):
+        raise NotImplementedError
+
+    # Completion based I/O methods returning Futures.
+
+    async def sock_recv(self, sock, nbytes):
+        raise NotImplementedError
+
+    async def sock_recv_into(self, sock, buf):
+        raise NotImplementedError
+
+    async def sock_sendall(self, sock, data):
+        raise NotImplementedError
+
+    async def sock_connect(self, sock, address):
+        raise NotImplementedError
+
+    async def sock_accept(self, sock):
+        raise NotImplementedError
+
+    async def sock_sendfile(self, sock, file, offset=0, count=None,
+                            *, fallback=None):
+        raise NotImplementedError
+
+    # Signal handling.
+
+    def add_signal_handler(self, sig, callback, *args):
+        raise NotImplementedError
+
+    def remove_signal_handler(self, sig):
+        raise NotImplementedError
+
+    # Task factory.
+
+    def set_task_factory(self, factory):
+        raise NotImplementedError
+
+    def get_task_factory(self):
+        raise NotImplementedError
+
+    # Error handlers.
+
+    def get_exception_handler(self):
+        raise NotImplementedError
+
+    def set_exception_handler(self, handler):
+        raise NotImplementedError
+
+    def default_exception_handler(self, context):
+        raise NotImplementedError
+
+    def call_exception_handler(self, context):
+        raise NotImplementedError
+
+    # Debug flag management.
+
+    def get_debug(self):
+        raise NotImplementedError
+
+    def set_debug(self, enabled):
+        raise NotImplementedError
+
+
+class AbstractEventLoopPolicy:
+    """Abstract policy for accessing the event loop."""
+
+    def get_event_loop(self):
+        """Get the event loop for the current context.
+
+        Returns an event loop object implementing the BaseEventLoop interface,
+        or raises an exception in case no event loop has been set for the
+        current context and the current policy does not specify to create one.
+
+        It should never return None."""
+        raise NotImplementedError
+
+    def set_event_loop(self, loop):
+        """Set the event loop for the current context to loop."""
+        raise NotImplementedError
+
+    def new_event_loop(self):
+        """Create and return a new event loop object according to this
+        policy's rules. If this loop needs to be set as the event loop for
+        the current context, set_event_loop must be called explicitly."""
+        raise NotImplementedError
+
+    # Child processes handling (Unix only).
+
+    def get_child_watcher(self):
+        "Get the watcher for child processes."
+        raise NotImplementedError
+
+    def set_child_watcher(self, watcher):
+        """Set the watcher for child processes."""
+        raise NotImplementedError
+
+
+class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
+    """Default policy implementation for accessing the event loop.
+
+    In this policy, each thread has its own event loop.  However, we
+    only automatically create an event loop by default for the main
+    thread; other threads by default have no event loop.
+
+    Other policies may have different rules (e.g. a single global
+    event loop, or automatically creating an event loop per thread, or
+    using some other notion of context to which an event loop is
+    associated).
+    """
+
+    _loop_factory = None
+
+    class _Local(threading.local):
+        _loop = None
+        _set_called = False
+
+    def __init__(self):
+        self._local = self._Local()
+
+    def get_event_loop(self):
+        """Get the event loop for the current context.
+
+        Returns an instance of EventLoop or raises an exception.
+        """
+        if (self._local._loop is None and
+                not self._local._set_called and
+                threading.current_thread() is threading.main_thread()):
+            self.set_event_loop(self.new_event_loop())
+
+        if self._local._loop is None:
+            raise RuntimeError('There is no current event loop in thread %r.'
+                               % threading.current_thread().name)
+
+        return self._local._loop
+
+    def set_event_loop(self, loop):
+        """Set the event loop."""
+        self._local._set_called = True
+        assert loop is None or isinstance(loop, AbstractEventLoop)
+        self._local._loop = loop
+
+    def new_event_loop(self):
+        """Create a new event loop.
+
+        You must call set_event_loop() to make this the current event
+        loop.
+        """
+        return self._loop_factory()
+
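+# A minimal sketch of a custom policy (illustration only; not part of the
+# upstream file).  LoggingPolicy is a hypothetical subclass that reuses the
+# default per-thread bookkeeping but logs every loop creation:
+#
+#     import asyncio
+#
+#     class LoggingPolicy(asyncio.DefaultEventLoopPolicy):
+#         def new_event_loop(self):
+#             loop = super().new_event_loop()
+#             print(f'created {loop!r}')
+#             return loop
+#
+#     asyncio.set_event_loop_policy(LoggingPolicy())
+#     loop = asyncio.new_event_loop()  # dispatched through LoggingPolicy
+#     loop.close()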
+
+# Event loop policy.  The policy itself is always global, even if the
+# policy's rules say that there is an event loop per thread (or other
+# notion of context).  The default policy is installed by the first
+# call to get_event_loop_policy().
+_event_loop_policy = None
+
+# Lock for protecting the on-the-fly creation of the event loop policy.
+_lock = threading.Lock()
+
+
+# A TLS for the running event loop, used by _get_running_loop.
+class _RunningLoop(threading.local):
+    loop_pid = (None, None)
+
+
+_running_loop = _RunningLoop()
+
+
+def get_running_loop():
+    """Return the running event loop.  Raise a RuntimeError if there is none.
+
+    This function is thread-specific.
+    """
+    # NOTE: this function is implemented in C (see _asynciomodule.c)
+    loop = _get_running_loop()
+    if loop is None:
+        raise RuntimeError('no running event loop')
+    return loop
+
+
+def _get_running_loop():
+    """Return the running event loop or None.
+
+    This is a low-level function intended to be used by event loops.
+    This function is thread-specific.
+    """
+    # NOTE: this function is implemented in C (see _asynciomodule.c)
+    running_loop, pid = _running_loop.loop_pid
+    if running_loop is not None and pid == os.getpid():
+        return running_loop
+
+
+def _set_running_loop(loop):
+    """Set the running event loop.
+
+    This is a low-level function intended to be used by event loops.
+    This function is thread-specific.
+    """
+    # NOTE: this function is implemented in C (see _asynciomodule.c)
+    _running_loop.loop_pid = (loop, os.getpid())
+
+
+def _init_event_loop_policy():
+    global _event_loop_policy
+    with _lock:
+        if _event_loop_policy is None:  # pragma: no branch
+            from . import DefaultEventLoopPolicy
+            _event_loop_policy = DefaultEventLoopPolicy()
+
+
+def get_event_loop_policy():
+    """Get the current event loop policy."""
+    if _event_loop_policy is None:
+        _init_event_loop_policy()
+    return _event_loop_policy
+
+
+def set_event_loop_policy(policy):
+    """Set the current event loop policy.
+
+    If policy is None, the default policy is restored."""
+    global _event_loop_policy
+    assert policy is None or isinstance(policy, AbstractEventLoopPolicy)
+    _event_loop_policy = policy
+
+
+def get_event_loop():
+    """Return an asyncio event loop.
+
+    When called from a coroutine or a callback (e.g. scheduled with call_soon
+    or similar API), this function will always return the running event loop.
+
+    If there is no running event loop set, the function will return
+    the result of the `get_event_loop_policy().get_event_loop()` call.
+    """
+    # NOTE: this function is implemented in C (see _asynciomodule.c)
+    current_loop = _get_running_loop()
+    if current_loop is not None:
+        return current_loop
+    return get_event_loop_policy().get_event_loop()
+
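+# A minimal sketch of the lookup order above (illustration only; not part
+# of the upstream file).  Inside a coroutine, get_event_loop() returns the
+# running loop; outside, get_running_loop() raises while get_event_loop()
+# falls back to the policy.  Runnable standalone:
+#
+#     import asyncio
+#
+#     async def main():
+#         assert asyncio.get_event_loop() is asyncio.get_running_loop()
+#
+#     asyncio.run(main())
+#
+#     try:
+#         asyncio.get_running_loop()
+#     except RuntimeError:
+#         print('no running loop')  # reached: run() already finished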
+
+def set_event_loop(loop):
+    """Equivalent to calling get_event_loop_policy().set_event_loop(loop)."""
+    get_event_loop_policy().set_event_loop(loop)
+
+
+def new_event_loop():
+    """Equivalent to calling get_event_loop_policy().new_event_loop()."""
+    return get_event_loop_policy().new_event_loop()
+
+
+def get_child_watcher():
+    """Equivalent to calling get_event_loop_policy().get_child_watcher()."""
+    return get_event_loop_policy().get_child_watcher()
+
+
+def set_child_watcher(watcher):
+    """Equivalent to calling
+    get_event_loop_policy().set_child_watcher(watcher)."""
+    return get_event_loop_policy().set_child_watcher(watcher)
+
+
+# Alias pure-Python implementations for testing purposes.
+_py__get_running_loop = _get_running_loop
+_py__set_running_loop = _set_running_loop
+_py_get_running_loop = get_running_loop
+_py_get_event_loop = get_event_loop
+
+
+try:
+    # get_event_loop() is one of the most frequently called
+    # functions in asyncio.  The pure-Python implementation is
+    # about 4 times slower than the C-accelerated one.
+    from _asyncio import (_get_running_loop, _set_running_loop,
+                          get_running_loop, get_event_loop)
+except ImportError:
+    pass
+else:
+    # Alias C implementations for testing purposes.
+    _c__get_running_loop = _get_running_loop
+    _c__set_running_loop = _set_running_loop
+    _c_get_running_loop = get_running_loop
+    _c_get_event_loop = get_event_loop
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/exceptions.py b/linux-x64/clang/python3/lib/python3.9/asyncio/exceptions.py
new file mode 100644
index 0000000..f07e448
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/exceptions.py
@@ -0,0 +1,58 @@
+"""asyncio exceptions."""
+
+
+__all__ = ('CancelledError', 'InvalidStateError', 'TimeoutError',
+           'IncompleteReadError', 'LimitOverrunError',
+           'SendfileNotAvailableError')
+
+
+class CancelledError(BaseException):
+    """The Future or Task was cancelled."""
+
+
+class TimeoutError(Exception):
+    """The operation exceeded the given deadline."""
+
+
+class InvalidStateError(Exception):
+    """The operation is not allowed in this state."""
+
+
+class SendfileNotAvailableError(RuntimeError):
+    """Sendfile syscall is not available.
+
+    Raised if the OS does not support the sendfile syscall for the
+    given socket or file type.
+    """
+
+
+class IncompleteReadError(EOFError):
+    """
+    Incomplete read error. Attributes:
+
+    - partial: bytes read before the end of stream was reached
+    - expected: total number of expected bytes (or None if unknown)
+    """
+    def __init__(self, partial, expected):
+        r_expected = 'undefined' if expected is None else repr(expected)
+        super().__init__(f'{len(partial)} bytes read on a total of '
+                         f'{r_expected} expected bytes')
+        self.partial = partial
+        self.expected = expected
+
+    def __reduce__(self):
+        return type(self), (self.partial, self.expected)
+
+
+class LimitOverrunError(Exception):
+    """Reached the buffer limit while looking for a separator.
+
+    Attributes:
+    - consumed: total number of bytes to be consumed.
+    """
+    def __init__(self, message, consumed):
+        super().__init__(message)
+        self.consumed = consumed
+
+    def __reduce__(self):
+        return type(self), (self.args[0], self.consumed)
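+
+# A minimal sketch showing IncompleteReadError in practice (illustration
+# only; not part of the upstream file).  Runnable standalone:
+#
+#     import asyncio
+#
+#     async def main():
+#         reader = asyncio.StreamReader()
+#         reader.feed_data(b'abc')
+#         reader.feed_eof()                 # only 3 bytes will ever arrive
+#         try:
+#             await reader.readexactly(10)  # asks for more than available
+#         except asyncio.IncompleteReadError as exc:
+#             print(exc.partial, exc.expected)  # b'abc' 10
+#
+#     asyncio.run(main())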
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/format_helpers.py b/linux-x64/clang/python3/lib/python3.9/asyncio/format_helpers.py
new file mode 100644
index 0000000..27d11fd
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/format_helpers.py
@@ -0,0 +1,76 @@
+import functools
+import inspect
+import reprlib
+import sys
+import traceback
+
+from . import constants
+
+
+def _get_function_source(func):
+    func = inspect.unwrap(func)
+    if inspect.isfunction(func):
+        code = func.__code__
+        return (code.co_filename, code.co_firstlineno)
+    if isinstance(func, functools.partial):
+        return _get_function_source(func.func)
+    if isinstance(func, functools.partialmethod):
+        return _get_function_source(func.func)
+    return None
+
+
+def _format_callback_source(func, args):
+    func_repr = _format_callback(func, args, None)
+    source = _get_function_source(func)
+    if source:
+        func_repr += f' at {source[0]}:{source[1]}'
+    return func_repr
+
+
+def _format_args_and_kwargs(args, kwargs):
+    """Format function arguments and keyword arguments.
+
+    Special case for a single parameter: ('hello',) is formatted as ('hello').
+    """
+    # use reprlib to limit the length of the output
+    items = []
+    if args:
+        items.extend(reprlib.repr(arg) for arg in args)
+    if kwargs:
+        items.extend(f'{k}={reprlib.repr(v)}' for k, v in kwargs.items())
+    return '({})'.format(', '.join(items))
+
+
+def _format_callback(func, args, kwargs, suffix=''):
+    if isinstance(func, functools.partial):
+        suffix = _format_args_and_kwargs(args, kwargs) + suffix
+        return _format_callback(func.func, func.args, func.keywords, suffix)
+
+    if hasattr(func, '__qualname__') and func.__qualname__:
+        func_repr = func.__qualname__
+    elif hasattr(func, '__name__') and func.__name__:
+        func_repr = func.__name__
+    else:
+        func_repr = repr(func)
+
+    func_repr += _format_args_and_kwargs(args, kwargs)
+    if suffix:
+        func_repr += suffix
+    return func_repr
+
+
+def extract_stack(f=None, limit=None):
+    """Replacement for traceback.extract_stack() that only does the
+    necessary work for asyncio debug mode.
+    """
+    if f is None:
+        f = sys._getframe().f_back
+    if limit is None:
+        # Limit the amount of work to a reasonable amount, as extract_stack()
+        # can be called for each coroutine and future in debug mode.
+        limit = constants.DEBUG_STACK_DEPTH
+    stack = traceback.StackSummary.extract(traceback.walk_stack(f),
+                                           limit=limit,
+                                           lookup_lines=False)
+    stack.reverse()
+    return stack
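+
+# A sketch of the debug strings these private helpers build (illustration
+# only; not part of the upstream file; greet() is a hypothetical callback):
+#
+#     import functools
+#
+#     def greet(name, punctuation='!'):
+#         pass
+#
+#     cb = functools.partial(greet, 'world', punctuation='?')
+#     print(_format_callback_source(cb, ()))
+#     # e.g. "greet('world', punctuation='?')() at /path/to/script.py:3"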
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/futures.py b/linux-x64/clang/python3/lib/python3.9/asyncio/futures.py
new file mode 100644
index 0000000..bed4da5
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/futures.py
@@ -0,0 +1,423 @@
+"""A Future class similar to the one in PEP 3148."""
+
+__all__ = (
+    'Future', 'wrap_future', 'isfuture',
+)
+
+import concurrent.futures
+import contextvars
+import logging
+import sys
+
+from . import base_futures
+from . import events
+from . import exceptions
+from . import format_helpers
+
+
+isfuture = base_futures.isfuture
+
+
+_PENDING = base_futures._PENDING
+_CANCELLED = base_futures._CANCELLED
+_FINISHED = base_futures._FINISHED
+
+
+STACK_DEBUG = logging.DEBUG - 1  # heavy-duty debugging
+
+
+class Future:
+    """This class is *almost* compatible with concurrent.futures.Future.
+
+    Differences:
+
+    - This class is not thread-safe.
+
+    - result() and exception() do not take a timeout argument and
+      raise an exception when the future isn't done yet.
+
+    - Callbacks registered with add_done_callback() are always called
+      via the event loop's call_soon().
+
+    - This class is not compatible with the wait() and as_completed()
+      methods in the concurrent.futures package.
+
+    (In Python 3.4 or later we may be able to unify the implementations.)
+    """
+
+    # Class variables serving as defaults for instance variables.
+    _state = _PENDING
+    _result = None
+    _exception = None
+    _loop = None
+    _source_traceback = None
+    _cancel_message = None
+    # A saved CancelledError for later chaining as an exception context.
+    _cancelled_exc = None
+
+    # This field is used for a dual purpose:
+    # - Its presence is a marker to declare that a class implements
+    #   the Future protocol (i.e. is intended to be duck-type compatible).
+    #   The value must also be not-None, to enable a subclass to declare
+    #   that it is not compatible by setting this to None.
+    # - It is set by __iter__() below so that Task._step() can tell
+    #   the difference between
+    #   `await Future()` or `yield from Future()` (correct) vs.
+    #   `yield Future()` (incorrect).
+    _asyncio_future_blocking = False
+
+    __log_traceback = False
+
+    def __init__(self, *, loop=None):
+        """Initialize the future.
+
+        The optional loop argument allows explicitly setting the event
+        loop object used by the future. If it's not provided, the future uses
+        the default event loop.
+        """
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+        self._callbacks = []
+        if self._loop.get_debug():
+            self._source_traceback = format_helpers.extract_stack(
+                sys._getframe(1))
+
+    _repr_info = base_futures._future_repr_info
+
+    def __repr__(self):
+        return '<{} {}>'.format(self.__class__.__name__,
+                                ' '.join(self._repr_info()))
+
+    def __del__(self):
+        if not self.__log_traceback:
+            # set_exception() was not called, or result() or exception()
+            # has consumed the exception
+            return
+        exc = self._exception
+        context = {
+            'message':
+                f'{self.__class__.__name__} exception was never retrieved',
+            'exception': exc,
+            'future': self,
+        }
+        if self._source_traceback:
+            context['source_traceback'] = self._source_traceback
+        self._loop.call_exception_handler(context)
+
+    def __class_getitem__(cls, type):
+        return cls
+
+    @property
+    def _log_traceback(self):
+        return self.__log_traceback
+
+    @_log_traceback.setter
+    def _log_traceback(self, val):
+        if bool(val):
+            raise ValueError('_log_traceback can only be set to False')
+        self.__log_traceback = False
+
+    def get_loop(self):
+        """Return the event loop the Future is bound to."""
+        loop = self._loop
+        if loop is None:
+            raise RuntimeError("Future object is not initialized.")
+        return loop
+
+    def _make_cancelled_error(self):
+        """Create the CancelledError to raise if the Future is cancelled.
+
+        This should only be called once when handling a cancellation since
+        it erases the saved context exception value.
+        """
+        if self._cancel_message is None:
+            exc = exceptions.CancelledError()
+        else:
+            exc = exceptions.CancelledError(self._cancel_message)
+        exc.__context__ = self._cancelled_exc
+        # Remove the reference since we don't need this anymore.
+        self._cancelled_exc = None
+        return exc
+
+    def cancel(self, msg=None):
+        """Cancel the future and schedule callbacks.
+
+        If the future is already done or cancelled, return False.  Otherwise,
+        change the future's state to cancelled, schedule the callbacks and
+        return True.
+        """
+        self.__log_traceback = False
+        if self._state != _PENDING:
+            return False
+        self._state = _CANCELLED
+        self._cancel_message = msg
+        self.__schedule_callbacks()
+        return True
+
+    def __schedule_callbacks(self):
+        """Internal: Ask the event loop to call all callbacks.
+
+        The callbacks are scheduled to be called as soon as possible. Also
+        clears the callback list.
+        """
+        callbacks = self._callbacks[:]
+        if not callbacks:
+            return
+
+        self._callbacks[:] = []
+        for callback, ctx in callbacks:
+            self._loop.call_soon(callback, self, context=ctx)
+
+    def cancelled(self):
+        """Return True if the future was cancelled."""
+        return self._state == _CANCELLED
+
+    # Don't implement running(); see http://bugs.python.org/issue18699
+
+    def done(self):
+        """Return True if the future is done.
+
+        Done means either that a result or exception is available, or
+        that the future was cancelled.
+        """
+        return self._state != _PENDING
+
+    def result(self):
+        """Return the result this future represents.
+
+        If the future has been cancelled, raises CancelledError.  If the
+        future's result isn't yet available, raises InvalidStateError.  If
+        the future is done and has an exception set, this exception is raised.
+        """
+        if self._state == _CANCELLED:
+            exc = self._make_cancelled_error()
+            raise exc
+        if self._state != _FINISHED:
+            raise exceptions.InvalidStateError('Result is not ready.')
+        self.__log_traceback = False
+        if self._exception is not None:
+            raise self._exception
+        return self._result
+
+    def exception(self):
+        """Return the exception that was set on this future.
+
+        The exception (or None if no exception was set) is returned only if
+        the future is done.  If the future has been cancelled, raises
+        CancelledError.  If the future isn't done yet, raises
+        InvalidStateError.
+        """
+        if self._state == _CANCELLED:
+            exc = self._make_cancelled_error()
+            raise exc
+        if self._state != _FINISHED:
+            raise exceptions.InvalidStateError('Exception is not set.')
+        self.__log_traceback = False
+        return self._exception
+
+    def add_done_callback(self, fn, *, context=None):
+        """Add a callback to be run when the future becomes done.
+
+        The callback is called with a single argument - the future object. If
+        the future is already done when this is called, the callback is
+        scheduled with call_soon.
+        """
+        if self._state != _PENDING:
+            self._loop.call_soon(fn, self, context=context)
+        else:
+            if context is None:
+                context = contextvars.copy_context()
+            self._callbacks.append((fn, context))
+
+    # New method not in PEP 3148.
+
+    def remove_done_callback(self, fn):
+        """Remove all instances of a callback from the "call when done" list.
+
+        Returns the number of callbacks removed.
+        """
+        filtered_callbacks = [(f, ctx)
+                              for (f, ctx) in self._callbacks
+                              if f != fn]
+        removed_count = len(self._callbacks) - len(filtered_callbacks)
+        if removed_count:
+            self._callbacks[:] = filtered_callbacks
+        return removed_count
+
+    # So-called internal methods (note: no set_running_or_notify_cancel()).
+
+    def set_result(self, result):
+        """Mark the future done and set its result.
+
+        If the future is already done when this method is called, raises
+        InvalidStateError.
+        """
+        if self._state != _PENDING:
+            raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
+        self._result = result
+        self._state = _FINISHED
+        self.__schedule_callbacks()
+
+    def set_exception(self, exception):
+        """Mark the future done and set an exception.
+
+        If the future is already done when this method is called, raises
+        InvalidStateError.
+        """
+        if self._state != _PENDING:
+            raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
+        if isinstance(exception, type):
+            exception = exception()
+        if type(exception) is StopIteration:
+            raise TypeError("StopIteration interacts badly with generators "
+                            "and cannot be raised into a Future")
+        self._exception = exception
+        self._state = _FINISHED
+        self.__schedule_callbacks()
+        self.__log_traceback = True
+
+    def __await__(self):
+        if not self.done():
+            self._asyncio_future_blocking = True
+            yield self  # This tells Task to wait for completion.
+        if not self.done():
+            raise RuntimeError("await wasn't used with future")
+        return self.result()  # May raise too.
+
+    __iter__ = __await__  # make compatible with 'yield from'.
+
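+# A minimal usage sketch (illustration only; not part of the upstream
+# file).  Futures are created through the loop, callbacks run via
+# call_soon(), and awaiting the future yields its result.  Runnable
+# standalone:
+#
+#     import asyncio
+#
+#     async def main():
+#         loop = asyncio.get_running_loop()
+#         fut = loop.create_future()
+#         fut.add_done_callback(lambda f: print('done:', f.result()))
+#         loop.call_soon(fut.set_result, 42)
+#         print(await fut)  # 42
+#
+#     asyncio.run(main())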
+
+# Needed for testing purposes.
+_PyFuture = Future
+
+
+def _get_loop(fut):
+    # Tries to call Future.get_loop() if it's available.
+    # Otherwise falls back to using the old '_loop' property.
+    try:
+        get_loop = fut.get_loop
+    except AttributeError:
+        pass
+    else:
+        return get_loop()
+    return fut._loop
+
+
+def _set_result_unless_cancelled(fut, result):
+    """Helper setting the result only if the future was not cancelled."""
+    if fut.cancelled():
+        return
+    fut.set_result(result)
+
+
+def _convert_future_exc(exc):
+    exc_class = type(exc)
+    if exc_class is concurrent.futures.CancelledError:
+        return exceptions.CancelledError(*exc.args)
+    elif exc_class is concurrent.futures.TimeoutError:
+        return exceptions.TimeoutError(*exc.args)
+    elif exc_class is concurrent.futures.InvalidStateError:
+        return exceptions.InvalidStateError(*exc.args)
+    else:
+        return exc
+
+
+def _set_concurrent_future_state(concurrent, source):
+    """Copy state from a future to a concurrent.futures.Future."""
+    assert source.done()
+    if source.cancelled():
+        concurrent.cancel()
+    if not concurrent.set_running_or_notify_cancel():
+        return
+    exception = source.exception()
+    if exception is not None:
+        concurrent.set_exception(_convert_future_exc(exception))
+    else:
+        result = source.result()
+        concurrent.set_result(result)
+
+
+def _copy_future_state(source, dest):
+    """Internal helper to copy state from another Future.
+
+    The other Future may be a concurrent.futures.Future.
+    """
+    assert source.done()
+    if dest.cancelled():
+        return
+    assert not dest.done()
+    if source.cancelled():
+        dest.cancel()
+    else:
+        exception = source.exception()
+        if exception is not None:
+            dest.set_exception(_convert_future_exc(exception))
+        else:
+            result = source.result()
+            dest.set_result(result)
+
+
+def _chain_future(source, destination):
+    """Chain two futures so that when one completes, so does the other.
+
+    The result (or exception) of source will be copied to destination.
+    If destination is cancelled, source gets cancelled too.
+    Compatible with both asyncio.Future and concurrent.futures.Future.
+    """
+    if not isfuture(source) and not isinstance(source,
+                                               concurrent.futures.Future):
+        raise TypeError('A future is required for source argument')
+    if not isfuture(destination) and not isinstance(destination,
+                                                    concurrent.futures.Future):
+        raise TypeError('A future is required for destination argument')
+    source_loop = _get_loop(source) if isfuture(source) else None
+    dest_loop = _get_loop(destination) if isfuture(destination) else None
+
+    def _set_state(future, other):
+        if isfuture(future):
+            _copy_future_state(other, future)
+        else:
+            _set_concurrent_future_state(future, other)
+
+    def _call_check_cancel(destination):
+        if destination.cancelled():
+            if source_loop is None or source_loop is dest_loop:
+                source.cancel()
+            else:
+                source_loop.call_soon_threadsafe(source.cancel)
+
+    def _call_set_state(source):
+        if (destination.cancelled() and
+                dest_loop is not None and dest_loop.is_closed()):
+            return
+        if dest_loop is None or dest_loop is source_loop:
+            _set_state(destination, source)
+        else:
+            dest_loop.call_soon_threadsafe(_set_state, destination, source)
+
+    destination.add_done_callback(_call_check_cancel)
+    source.add_done_callback(_call_set_state)
+
+
+def wrap_future(future, *, loop=None):
+    """Wrap concurrent.futures.Future object."""
+    if isfuture(future):
+        return future
+    assert isinstance(future, concurrent.futures.Future), \
+        f'concurrent.futures.Future is expected, got {future!r}'
+    if loop is None:
+        loop = events.get_event_loop()
+    new_future = loop.create_future()
+    _chain_future(future, new_future)
+    return new_future
+
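+# A minimal usage sketch for wrap_future() (illustration only; not part of
+# the upstream file): awaiting work submitted to a thread pool.  Runnable
+# standalone:
+#
+#     import asyncio
+#     import concurrent.futures
+#     import time
+#
+#     def blocking():
+#         time.sleep(0.1)
+#         return 'done'
+#
+#     async def main():
+#         with concurrent.futures.ThreadPoolExecutor() as pool:
+#             cf = pool.submit(blocking)            # concurrent Future
+#             print(await asyncio.wrap_future(cf))  # chained to asyncio
+#
+#     asyncio.run(main())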
+
+try:
+    import _asyncio
+except ImportError:
+    pass
+else:
+    # _CFuture is needed for tests.
+    Future = _CFuture = _asyncio.Future
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/locks.py b/linux-x64/clang/python3/lib/python3.9/asyncio/locks.py
new file mode 100644
index 0000000..f1ce732
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/locks.py
@@ -0,0 +1,451 @@
+"""Synchronization primitives."""
+
+__all__ = ('Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore')
+
+import collections
+import warnings
+
+from . import events
+from . import exceptions
+
+
+class _ContextManagerMixin:
+    async def __aenter__(self):
+        await self.acquire()
+        # We have no use for the "as ..."  clause in the with
+        # statement for locks.
+        return None
+
+    async def __aexit__(self, exc_type, exc, tb):
+        self.release()
+
+
+class Lock(_ContextManagerMixin):
+    """Primitive lock objects.
+
+    A primitive lock is a synchronization primitive that is not owned
+    by a particular coroutine when locked.  A primitive lock is in one
+    of two states, 'locked' or 'unlocked'.
+
+    It is created in the unlocked state.  It has two basic methods,
+    acquire() and release().  When the state is unlocked, acquire()
+    changes the state to locked and returns immediately.  When the
+    state is locked, acquire() blocks until a call to release() in
+    another coroutine changes it to unlocked, then the acquire() call
+    resets it to locked and returns.  The release() method should only
+    be called in the locked state; it changes the state to unlocked
+    and returns immediately.  If an attempt is made to release an
+    unlocked lock, a RuntimeError will be raised.
+
+    When more than one coroutine is blocked in acquire() waiting for
+    the state to turn to unlocked, only one coroutine proceeds when a
+    release() call resets the state to unlocked; the first coroutine
+    that blocked in acquire() is the one that proceeds.
+
+    acquire() is a coroutine and should be called with 'await'.
+
+    Locks also support the asynchronous context management protocol.
+    The 'async with lock' statement should be used.
+
+    Usage:
+
+        lock = Lock()
+        ...
+        await lock.acquire()
+        try:
+            ...
+        finally:
+            lock.release()
+
+    Context manager usage:
+
+        lock = Lock()
+        ...
+        async with lock:
+             ...
+
+    Lock objects can be tested for locking state:
+
+        if not lock.locked():
+           await lock.acquire()
+        else:
+           # lock is acquired
+           ...
+
+    """
+
+    def __init__(self, *, loop=None):
+        self._waiters = None
+        self._locked = False
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
+
+    def __repr__(self):
+        res = super().__repr__()
+        extra = 'locked' if self._locked else 'unlocked'
+        if self._waiters:
+            extra = f'{extra}, waiters:{len(self._waiters)}'
+        return f'<{res[1:-1]} [{extra}]>'
+
+    def locked(self):
+        """Return True if lock is acquired."""
+        return self._locked
+
+    async def acquire(self):
+        """Acquire a lock.
+
+        This method blocks until the lock is unlocked, then sets it to
+        locked and returns True.
+        """
+        if (not self._locked and (self._waiters is None or
+                all(w.cancelled() for w in self._waiters))):
+            self._locked = True
+            return True
+
+        if self._waiters is None:
+            self._waiters = collections.deque()
+        fut = self._loop.create_future()
+        self._waiters.append(fut)
+
+        # Finally block should be called before the CancelledError
+        # handling as we don't want CancelledError to call
+        # _wake_up_first() and attempt to wake up itself.
+        try:
+            try:
+                await fut
+            finally:
+                self._waiters.remove(fut)
+        except exceptions.CancelledError:
+            if not self._locked:
+                self._wake_up_first()
+            raise
+
+        self._locked = True
+        return True
+
+    def release(self):
+        """Release a lock.
+
+        When the lock is locked, reset it to unlocked, and return.
+        If any other coroutines are blocked waiting for the lock to become
+        unlocked, allow exactly one of them to proceed.
+
+        When invoked on an unlocked lock, a RuntimeError is raised.
+
+        There is no return value.
+        """
+        if self._locked:
+            self._locked = False
+            self._wake_up_first()
+        else:
+            raise RuntimeError('Lock is not acquired.')
+
+    def _wake_up_first(self):
+        """Wake up the first waiter if it isn't done."""
+        if not self._waiters:
+            return
+        try:
+            fut = next(iter(self._waiters))
+        except StopIteration:
+            return
+
+        # .done() necessarily means that a waiter will wake up later on and
+        # either take the lock, or, if it was cancelled and lock wasn't
+        # taken already, will hit this again and wake up a new waiter.
+        if not fut.done():
+            fut.set_result(True)
+
+
+class Event:
+    """Asynchronous equivalent to threading.Event.
+
+    Class implementing event objects. An event manages a flag that can be set
+    to true with the set() method and reset to false with the clear() method.
+    The wait() method blocks until the flag is true. The flag is initially
+    false.
+    """
+
+    def __init__(self, *, loop=None):
+        self._waiters = collections.deque()
+        self._value = False
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
+
+    def __repr__(self):
+        res = super().__repr__()
+        extra = 'set' if self._value else 'unset'
+        if self._waiters:
+            extra = f'{extra}, waiters:{len(self._waiters)}'
+        return f'<{res[1:-1]} [{extra}]>'
+
+    def is_set(self):
+        """Return True if and only if the internal flag is true."""
+        return self._value
+
+    def set(self):
+        """Set the internal flag to true. All coroutines waiting for it to
+        become true are awakened. Coroutines that call wait() once the flag is
+        true will not block at all.
+        """
+        if not self._value:
+            self._value = True
+
+            for fut in self._waiters:
+                if not fut.done():
+                    fut.set_result(True)
+
+    def clear(self):
+        """Reset the internal flag to false. Subsequently, coroutines calling
+        wait() will block until set() is called to set the internal flag
+        to true again."""
+        self._value = False
+
+    async def wait(self):
+        """Block until the internal flag is true.
+
+        If the internal flag is true on entry, return True
+        immediately.  Otherwise, block until another coroutine calls
+        set() to set the flag to true, then return True.
+        """
+        if self._value:
+            return True
+
+        fut = self._loop.create_future()
+        self._waiters.append(fut)
+        try:
+            await fut
+            return True
+        finally:
+            self._waiters.remove(fut)
+
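+# A minimal usage sketch (illustration only; not part of the upstream
+# file).  set() wakes every coroutine blocked in wait().  Runnable
+# standalone:
+#
+#     import asyncio
+#
+#     async def waiter(event):
+#         await event.wait()        # blocks until the flag becomes true
+#         print('event fired')
+#
+#     async def main():
+#         event = asyncio.Event()
+#         task = asyncio.ensure_future(waiter(event))
+#         await asyncio.sleep(0)    # let the waiter start and block
+#         event.set()               # wakes every waiting coroutine
+#         await task
+#
+#     asyncio.run(main())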
+
+class Condition(_ContextManagerMixin):
+    """Asynchronous equivalent to threading.Condition.
+
+    This class implements condition variable objects. A condition variable
+    allows one or more coroutines to wait until they are notified by another
+    coroutine.
+
+    A new Lock object is created and used as the underlying lock.
+    """
+
+    def __init__(self, lock=None, *, loop=None):
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
+
+        if lock is None:
+            lock = Lock(loop=loop)
+        elif lock._loop is not self._loop:
+            raise ValueError("loop argument must agree with lock")
+
+        self._lock = lock
+        # Export the lock's locked(), acquire() and release() methods.
+        self.locked = lock.locked
+        self.acquire = lock.acquire
+        self.release = lock.release
+
+        self._waiters = collections.deque()
+
+    def __repr__(self):
+        res = super().__repr__()
+        extra = 'locked' if self.locked() else 'unlocked'
+        if self._waiters:
+            extra = f'{extra}, waiters:{len(self._waiters)}'
+        return f'<{res[1:-1]} [{extra}]>'
+
+    async def wait(self):
+        """Wait until notified.
+
+        If the calling coroutine has not acquired the lock when this
+        method is called, a RuntimeError is raised.
+
+        This method releases the underlying lock, and then blocks
+        until it is awakened by a notify() or notify_all() call for
+        the same condition variable in another coroutine.  Once
+        awakened, it re-acquires the lock and returns True.
+        """
+        if not self.locked():
+            raise RuntimeError('cannot wait on un-acquired lock')
+
+        self.release()
+        try:
+            fut = self._loop.create_future()
+            self._waiters.append(fut)
+            try:
+                await fut
+                return True
+            finally:
+                self._waiters.remove(fut)
+
+        finally:
+            # Must reacquire lock even if wait is cancelled
+            cancelled = False
+            while True:
+                try:
+                    await self.acquire()
+                    break
+                except exceptions.CancelledError:
+                    cancelled = True
+
+            if cancelled:
+                raise exceptions.CancelledError
+
+    async def wait_for(self, predicate):
+        """Wait until a predicate becomes true.
+
+        The predicate should be a callable whose result will be
+        interpreted as a boolean value.  The final predicate value is
+        the return value.
+        """
+        result = predicate()
+        while not result:
+            await self.wait()
+            result = predicate()
+        return result
+
+    def notify(self, n=1):
+        """By default, wake up one coroutine waiting on this condition, if any.
+        If the calling coroutine has not acquired the lock when this method
+        is called, a RuntimeError is raised.
+
+        This method wakes up at most n of the coroutines waiting for the
+        condition variable; it is a no-op if no coroutines are waiting.
+
+        Note: an awakened coroutine does not actually return from its
+        wait() call until it can reacquire the lock. Since notify() does
+        not release the lock, its caller should.
+        """
+        if not self.locked():
+            raise RuntimeError('cannot notify on un-acquired lock')
+
+        idx = 0
+        for fut in self._waiters:
+            if idx >= n:
+                break
+
+            if not fut.done():
+                idx += 1
+                fut.set_result(False)
+
+    def notify_all(self):
+        """Wake up all threads waiting on this condition. This method acts
+        like notify(), but wakes up all waiting threads instead of one. If the
+        calling thread has not acquired the lock when this method is called,
+        a RuntimeError is raised.
+        """
+        self.notify(len(self._waiters))
+
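+# A minimal producer/consumer sketch (illustration only; not part of the
+# upstream file).  The lock must be held to wait or notify.  Runnable
+# standalone:
+#
+#     import asyncio
+#
+#     async def main():
+#         cond = asyncio.Condition()
+#         items = []
+#
+#         async def consumer():
+#             async with cond:
+#                 await cond.wait_for(lambda: items)  # releases the lock
+#                 print('got', items.pop())
+#
+#         async def producer():
+#             async with cond:
+#                 items.append(1)
+#                 cond.notify()                       # wake one waiter
+#
+#         await asyncio.gather(consumer(), producer())
+#
+#     asyncio.run(main())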
+
+class Semaphore(_ContextManagerMixin):
+    """A Semaphore implementation.
+
+    A semaphore manages an internal counter which is decremented by each
+    acquire() call and incremented by each release() call. The counter
+    can never go below zero; when acquire() finds that it is zero, it blocks,
+    waiting until some other coroutine calls release().
+
+    Semaphores also support the context management protocol.
+
+    The optional argument gives the initial value for the internal
+    counter; it defaults to 1. If the value given is less than 0,
+    ValueError is raised.
+    """
+
+    def __init__(self, value=1, *, loop=None):
+        if value < 0:
+            raise ValueError("Semaphore initial value must be >= 0")
+        self._value = value
+        self._waiters = collections.deque()
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
+
+    def __repr__(self):
+        res = super().__repr__()
+        extra = 'locked' if self.locked() else f'unlocked, value:{self._value}'
+        if self._waiters:
+            extra = f'{extra}, waiters:{len(self._waiters)}'
+        return f'<{res[1:-1]} [{extra}]>'
+
+    def _wake_up_next(self):
+        while self._waiters:
+            waiter = self._waiters.popleft()
+            if not waiter.done():
+                waiter.set_result(None)
+                return
+
+    def locked(self):
+        """Returns True if semaphore can not be acquired immediately."""
+        return self._value == 0
+
+    async def acquire(self):
+        """Acquire a semaphore.
+
+        If the internal counter is larger than zero on entry,
+        decrement it by one and return True immediately.  If it is
+        zero on entry, block, waiting until some other coroutine has
+        called release() to make it larger than 0, and then return
+        True.
+        """
+        while self._value <= 0:
+            fut = self._loop.create_future()
+            self._waiters.append(fut)
+            try:
+                await fut
+            except:
+                # See the similar code in Queue.get.
+                fut.cancel()
+                if self._value > 0 and not fut.cancelled():
+                    self._wake_up_next()
+                raise
+        self._value -= 1
+        return True
+
+    def release(self):
+        """Release a semaphore, incrementing the internal counter by one.
+        When it was zero on entry and another coroutine is waiting for it to
+        become larger than zero again, wake up that coroutine.
+        """
+        self._value += 1
+        self._wake_up_next()
+
+
+class BoundedSemaphore(Semaphore):
+    """A bounded semaphore implementation.
+
+    This raises ValueError in release() if it would increase the value
+    above the initial value.
+    """
+
+    def __init__(self, value=1, *, loop=None):
+        if loop:
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
+
+        self._bound_value = value
+        super().__init__(value, loop=loop)
+
+    def release(self):
+        if self._value >= self._bound_value:
+            raise ValueError('BoundedSemaphore released too many times')
+        super().release()
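+
+# A minimal usage sketch (illustration only; not part of the upstream
+# file): a Semaphore capping how many coroutines run a section at once.
+# Runnable standalone:
+#
+#     import asyncio
+#
+#     async def main():
+#         sem = asyncio.Semaphore(2)     # at most two workers at a time
+#
+#         async def worker(n):
+#             async with sem:            # acquire()/release() via mixin
+#                 await asyncio.sleep(0.1)
+#                 print('worker', n, 'done')
+#
+#         await asyncio.gather(*(worker(n) for n in range(5)))
+#
+#     asyncio.run(main())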
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/log.py b/linux-x64/clang/python3/lib/python3.9/asyncio/log.py
new file mode 100644
index 0000000..23a7074
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/log.py
@@ -0,0 +1,7 @@
+"""Logging configuration."""
+
+import logging
+
+
+# Name the logger after the package.
+logger = logging.getLogger(__package__)
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/proactor_events.py b/linux-x64/clang/python3/lib/python3.9/asyncio/proactor_events.py
new file mode 100644
index 0000000..b4cd414
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/proactor_events.py
@@ -0,0 +1,868 @@
+"""Event loop using a proactor and related classes.
+
+A proactor is a "notify-on-completion" multiplexer.  Currently a
+proactor is only implemented on Windows with IOCP.
+"""
+
+__all__ = 'BaseProactorEventLoop',
+
+import io
+import os
+import socket
+import warnings
+import signal
+import threading
+import collections
+
+from . import base_events
+from . import constants
+from . import futures
+from . import exceptions
+from . import protocols
+from . import sslproto
+from . import transports
+from . import trsock
+from .log import logger
+
+
+def _set_socket_extra(transport, sock):
+    transport._extra['socket'] = trsock.TransportSocket(sock)
+
+    try:
+        transport._extra['sockname'] = sock.getsockname()
+    except socket.error:
+        if transport._loop.get_debug():
+            logger.warning(
+                "getsockname() failed on %r", sock, exc_info=True)
+
+    if 'peername' not in transport._extra:
+        try:
+            transport._extra['peername'] = sock.getpeername()
+        except socket.error:
+            # UDP sockets may not have a peer name
+            transport._extra['peername'] = None
+
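+# The extras stored above surface through Transport.get_extra_info(); a
+# hedged sketch of how application code reads them:
+#
+#     sock = transport.get_extra_info('socket')    # TransportSocket wrapper
+#     peer = transport.get_extra_info('peername')  # None for unconnected UDP
+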
+
+class _ProactorBasePipeTransport(transports._FlowControlMixin,
+                                 transports.BaseTransport):
+    """Base class for pipe and socket transports."""
+
+    def __init__(self, loop, sock, protocol, waiter=None,
+                 extra=None, server=None):
+        super().__init__(extra, loop)
+        self._set_extra(sock)
+        self._sock = sock
+        self.set_protocol(protocol)
+        self._server = server
+        self._buffer = None  # None or bytearray.
+        self._read_fut = None
+        self._write_fut = None
+        self._pending_write = 0
+        self._conn_lost = 0
+        self._closing = False  # Set when close() called.
+        self._eof_written = False
+        if self._server is not None:
+            self._server._attach()
+        self._loop.call_soon(self._protocol.connection_made, self)
+        if waiter is not None:
+            # only wake up the waiter when connection_made() has been called
+            self._loop.call_soon(futures._set_result_unless_cancelled,
+                                 waiter, None)
+
+    def __repr__(self):
+        info = [self.__class__.__name__]
+        if self._sock is None:
+            info.append('closed')
+        elif self._closing:
+            info.append('closing')
+        if self._sock is not None:
+            info.append(f'fd={self._sock.fileno()}')
+        if self._read_fut is not None:
+            info.append(f'read={self._read_fut!r}')
+        if self._write_fut is not None:
+            info.append(f'write={self._write_fut!r}')
+        if self._buffer:
+            info.append(f'write_bufsize={len(self._buffer)}')
+        if self._eof_written:
+            info.append('EOF written')
+        return '<{}>'.format(' '.join(info))
+
+    def _set_extra(self, sock):
+        self._extra['pipe'] = sock
+
+    def set_protocol(self, protocol):
+        self._protocol = protocol
+
+    def get_protocol(self):
+        return self._protocol
+
+    def is_closing(self):
+        return self._closing
+
+    def close(self):
+        if self._closing:
+            return
+        self._closing = True
+        self._conn_lost += 1
+        if not self._buffer and self._write_fut is None:
+            self._loop.call_soon(self._call_connection_lost, None)
+        if self._read_fut is not None:
+            self._read_fut.cancel()
+            self._read_fut = None
+
+    def __del__(self, _warn=warnings.warn):
+        if self._sock is not None:
+            _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
+            self.close()
+
+    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
+        try:
+            if isinstance(exc, OSError):
+                if self._loop.get_debug():
+                    logger.debug("%r: %s", self, message, exc_info=True)
+            else:
+                self._loop.call_exception_handler({
+                    'message': message,
+                    'exception': exc,
+                    'transport': self,
+                    'protocol': self._protocol,
+                })
+        finally:
+            self._force_close(exc)
+
+    def _force_close(self, exc):
+        if self._empty_waiter is not None and not self._empty_waiter.done():
+            if exc is None:
+                self._empty_waiter.set_result(None)
+            else:
+                self._empty_waiter.set_exception(exc)
+        if self._closing:
+            return
+        self._closing = True
+        self._conn_lost += 1
+        if self._write_fut:
+            self._write_fut.cancel()
+            self._write_fut = None
+        if self._read_fut:
+            self._read_fut.cancel()
+            self._read_fut = None
+        self._pending_write = 0
+        self._buffer = None
+        self._loop.call_soon(self._call_connection_lost, exc)
+
+    def _call_connection_lost(self, exc):
+        try:
+            self._protocol.connection_lost(exc)
+        finally:
+            # XXX If there is a pending overlapped read on the other
+            # end then it may fail with ERROR_NETNAME_DELETED if we
+            # just close our end.  First calling shutdown() seems to
+            # cure it, but maybe using DisconnectEx() would be better.
+            if hasattr(self._sock, 'shutdown'):
+                self._sock.shutdown(socket.SHUT_RDWR)
+            self._sock.close()
+            self._sock = None
+            server = self._server
+            if server is not None:
+                server._detach()
+                self._server = None
+
+    def get_write_buffer_size(self):
+        size = self._pending_write
+        if self._buffer is not None:
+            size += len(self._buffer)
+        return size
+
+
+class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
+                                 transports.ReadTransport):
+    """Transport for read pipes."""
+
+    def __init__(self, loop, sock, protocol, waiter=None,
+                 extra=None, server=None):
+        self._pending_data = None
+        self._paused = True
+        super().__init__(loop, sock, protocol, waiter, extra, server)
+
+        self._loop.call_soon(self._loop_reading)
+        self._paused = False
+
+    def is_reading(self):
+        return not self._paused and not self._closing
+
+    def pause_reading(self):
+        if self._closing or self._paused:
+            return
+        self._paused = True
+
+        # bpo-33694: Don't cancel self._read_fut because cancelling an
+        # overlapped WSASend() silently loses data with the current proactor
+        # implementation.
+        #
+        # If CancelIoEx() fails with ERROR_NOT_FOUND, it means that WSASend()
+        # completed (even if HasOverlappedIoCompleted() returns 0), but
+        # Overlapped.cancel() currently silently ignores the ERROR_NOT_FOUND
+        # error. Once the overlapped is ignored, the IOCP loop ignores the
+        # completion I/O event and so never reads the result of the overlapped
+        # WSARecv().
+
+        if self._loop.get_debug():
+            logger.debug("%r pauses reading", self)
+
+    def resume_reading(self):
+        if self._closing or not self._paused:
+            return
+
+        self._paused = False
+        if self._read_fut is None:
+            self._loop.call_soon(self._loop_reading, None)
+
+        data = self._pending_data
+        self._pending_data = None
+        if data is not None:
+            # Call the protocol method after calling _loop_reading(),
+            # since the protocol can decide to pause reading again.
+            self._loop.call_soon(self._data_received, data)
+
+        if self._loop.get_debug():
+            logger.debug("%r resumes reading", self)
+
+    def _eof_received(self):
+        if self._loop.get_debug():
+            logger.debug("%r received EOF", self)
+
+        try:
+            keep_open = self._protocol.eof_received()
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            self._fatal_error(
+                exc, 'Fatal error: protocol.eof_received() call failed.')
+            return
+
+        if not keep_open:
+            self.close()
+
+    def _data_received(self, data):
+        if self._paused:
+            # Don't call any protocol method while reading is paused.
+            # The protocol will be called on resume_reading().
+            assert self._pending_data is None
+            self._pending_data = data
+            return
+
+        if not data:
+            self._eof_received()
+            return
+
+        if isinstance(self._protocol, protocols.BufferedProtocol):
+            try:
+                protocols._feed_data_to_buffered_proto(self._protocol, data)
+            except (SystemExit, KeyboardInterrupt):
+                raise
+            except BaseException as exc:
+                self._fatal_error(exc,
+                                  'Fatal error: protocol.buffer_updated() '
+                                  'call failed.')
+                return
+        else:
+            self._protocol.data_received(data)
+
+    def _loop_reading(self, fut=None):
+        data = None
+        try:
+            if fut is not None:
+                assert self._read_fut is fut or (self._read_fut is None and
+                                                 self._closing)
+                self._read_fut = None
+                if fut.done():
+                    # deliver data later in "finally" clause
+                    data = fut.result()
+                else:
+                    # the future will be replaced by next proactor.recv call
+                    fut.cancel()
+
+            if self._closing:
+                # since close() has been called we ignore any read data
+                data = None
+                return
+
+            if data == b'':
+                # we got end-of-file so no need to reschedule a new read
+                return
+
+            # bpo-33694: buffer_updated() currently has no fast path because
+            # of a data loss issue caused by overlapped WSASend() cancellation.
+
+            if not self._paused:
+                # reschedule a new read
+                self._read_fut = self._loop._proactor.recv(self._sock, 32768)
+        except ConnectionAbortedError as exc:
+            if not self._closing:
+                self._fatal_error(exc, 'Fatal read error on pipe transport')
+            elif self._loop.get_debug():
+                logger.debug("Read error on pipe transport while closing",
+                             exc_info=True)
+        except ConnectionResetError as exc:
+            self._force_close(exc)
+        except OSError as exc:
+            self._fatal_error(exc, 'Fatal read error on pipe transport')
+        except exceptions.CancelledError:
+            if not self._closing:
+                raise
+        else:
+            if not self._paused:
+                self._read_fut.add_done_callback(self._loop_reading)
+        finally:
+            if data is not None:
+                self._data_received(data)
+
+
+class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
+                                      transports.WriteTransport):
+    """Transport for write pipes."""
+
+    _start_tls_compatible = True
+
+    def __init__(self, *args, **kw):
+        super().__init__(*args, **kw)
+        self._empty_waiter = None
+
+    def write(self, data):
+        if not isinstance(data, (bytes, bytearray, memoryview)):
+            raise TypeError(
+                f"data argument must be a bytes-like object, "
+                f"not {type(data).__name__}")
+        if self._eof_written:
+            raise RuntimeError('write_eof() already called')
+        if self._empty_waiter is not None:
+            raise RuntimeError('unable to write; sendfile is in progress')
+
+        if not data:
+            return
+
+        if self._conn_lost:
+            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
+                logger.warning('socket.send() raised exception.')
+            self._conn_lost += 1
+            return
+
+        # Observable states:
+        # 1. IDLE: _write_fut and _buffer both None
+        # 2. WRITING: _write_fut set; _buffer None
+        # 3. BACKED UP: _write_fut set; _buffer a bytearray
+        # We always copy the data, so the caller can't modify it
+        # while we're still waiting for the I/O to happen.
+        if self._write_fut is None:  # IDLE -> WRITING
+            assert self._buffer is None
+            # Pass a copy, except if it's already immutable.
+            self._loop_writing(data=bytes(data))
+        elif not self._buffer:  # WRITING -> BACKED UP
+            # Make a mutable copy which we can extend.
+            self._buffer = bytearray(data)
+            self._maybe_pause_protocol()
+        else:  # BACKED UP
+            # Append to buffer (also copies).
+            self._buffer.extend(data)
+            self._maybe_pause_protocol()
+
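+    # A sketch of the state transitions described in write() above
+    # (illustrative):
+    #
+    #     t.write(b'a')   # IDLE -> WRITING: send started immediately
+    #     t.write(b'b')   # WRITING -> BACKED UP: buffered in a bytearray
+    #     t.write(b'c')   # BACKED UP: appended to the buffer
+    #     ...             # send completes -> _loop_writing() drains the buffer
+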
+    def _loop_writing(self, f=None, data=None):
+        try:
+            if f is not None and self._write_fut is None and self._closing:
+                # XXX most likely self._force_close() has been called, and
+                # it has set self._write_fut to None.
+                return
+            assert f is self._write_fut
+            self._write_fut = None
+            self._pending_write = 0
+            if f:
+                f.result()
+            if data is None:
+                data = self._buffer
+                self._buffer = None
+            if not data:
+                if self._closing:
+                    self._loop.call_soon(self._call_connection_lost, None)
+                if self._eof_written:
+                    self._sock.shutdown(socket.SHUT_WR)
+                # Now that we've reduced the buffer size, tell the
+                # protocol to resume writing if it was paused.  Note that
+                # we do this last since the callback is called immediately
+                # and it may add more data to the buffer (even causing the
+                # protocol to be paused again).
+                self._maybe_resume_protocol()
+            else:
+                self._write_fut = self._loop._proactor.send(self._sock, data)
+                if not self._write_fut.done():
+                    assert self._pending_write == 0
+                    self._pending_write = len(data)
+                    self._write_fut.add_done_callback(self._loop_writing)
+                    self._maybe_pause_protocol()
+                else:
+                    self._write_fut.add_done_callback(self._loop_writing)
+            if self._empty_waiter is not None and self._write_fut is None:
+                self._empty_waiter.set_result(None)
+        except ConnectionResetError as exc:
+            self._force_close(exc)
+        except OSError as exc:
+            self._fatal_error(exc, 'Fatal write error on pipe transport')
+
+    def can_write_eof(self):
+        return True
+
+    def write_eof(self):
+        self.close()
+
+    def abort(self):
+        self._force_close(None)
+
+    def _make_empty_waiter(self):
+        if self._empty_waiter is not None:
+            raise RuntimeError("Empty waiter is already set")
+        self._empty_waiter = self._loop.create_future()
+        if self._write_fut is None:
+            self._empty_waiter.set_result(None)
+        return self._empty_waiter
+
+    def _reset_empty_waiter(self):
+        self._empty_waiter = None
+
+
+class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport):
+    def __init__(self, *args, **kw):
+        super().__init__(*args, **kw)
+        self._read_fut = self._loop._proactor.recv(self._sock, 16)
+        self._read_fut.add_done_callback(self._pipe_closed)
+
+    def _pipe_closed(self, fut):
+        if fut.cancelled():
+            # the transport has been closed
+            return
+        assert fut.result() == b''
+        if self._closing:
+            assert self._read_fut is None
+            return
+        assert fut is self._read_fut, (fut, self._read_fut)
+        self._read_fut = None
+        if self._write_fut is not None:
+            self._force_close(BrokenPipeError())
+        else:
+            self.close()
+
+
+class _ProactorDatagramTransport(_ProactorBasePipeTransport):
+    max_size = 256 * 1024
+
+    def __init__(self, loop, sock, protocol, address=None,
+                 waiter=None, extra=None):
+        self._address = address
+        self._empty_waiter = None
+        # We don't need to call _protocol.connection_made() since our base
+        # constructor does it for us.
+        super().__init__(loop, sock, protocol, waiter=waiter, extra=extra)
+
+        # The base constructor sets _buffer = None, so we set it here
+        self._buffer = collections.deque()
+        self._loop.call_soon(self._loop_reading)
+
+    def _set_extra(self, sock):
+        _set_socket_extra(self, sock)
+
+    def get_write_buffer_size(self):
+        return sum(len(data) for data, _ in self._buffer)
+
+    def abort(self):
+        self._force_close(None)
+
+    def sendto(self, data, addr=None):
+        if not isinstance(data, (bytes, bytearray, memoryview)):
+            raise TypeError(
+                f'data argument must be a bytes-like object, '
+                f'not {type(data).__name__}')
+
+        if not data:
+            return
+
+        if self._address is not None and addr not in (None, self._address):
+            raise ValueError(
+                f'Invalid address: must be None or {self._address}')
+
+        if self._conn_lost and self._address:
+            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
+                logger.warning('socket.sendto() raised exception.')
+            self._conn_lost += 1
+            return
+
+        # Ensure that what we buffer is immutable.
+        self._buffer.append((bytes(data), addr))
+
+        if self._write_fut is None:
+            # No current write operations are active, kick one off
+            self._loop_writing()
+        # else: A write operation is already kicked off
+
+        self._maybe_pause_protocol()
+
+    def _loop_writing(self, fut=None):
+        try:
+            if self._conn_lost:
+                return
+
+            assert fut is self._write_fut
+            self._write_fut = None
+            if fut:
+                # We are in a _loop_writing() done callback, get the result
+                fut.result()
+
+            if not self._buffer or (self._conn_lost and self._address):
+                # The connection has been closed
+                if self._closing:
+                    self._loop.call_soon(self._call_connection_lost, None)
+                return
+
+            data, addr = self._buffer.popleft()
+            if self._address is not None:
+                self._write_fut = self._loop._proactor.send(self._sock,
+                                                            data)
+            else:
+                self._write_fut = self._loop._proactor.sendto(self._sock,
+                                                              data,
+                                                              addr=addr)
+        except OSError as exc:
+            self._protocol.error_received(exc)
+        except Exception as exc:
+            self._fatal_error(exc, 'Fatal write error on datagram transport')
+        else:
+            self._write_fut.add_done_callback(self._loop_writing)
+            self._maybe_resume_protocol()
+
+    def _loop_reading(self, fut=None):
+        data = None
+        try:
+            if self._conn_lost:
+                return
+
+            assert self._read_fut is fut or (self._read_fut is None and
+                                             self._closing)
+
+            self._read_fut = None
+            if fut is not None:
+                res = fut.result()
+
+                if self._closing:
+                    # since close() has been called we ignore any read data
+                    data = None
+                    return
+
+                if self._address is not None:
+                    data, addr = res, self._address
+                else:
+                    data, addr = res
+
+            if self._conn_lost:
+                return
+            if self._address is not None:
+                self._read_fut = self._loop._proactor.recv(self._sock,
+                                                           self.max_size)
+            else:
+                self._read_fut = self._loop._proactor.recvfrom(self._sock,
+                                                               self.max_size)
+        except OSError as exc:
+            self._protocol.error_received(exc)
+        except exceptions.CancelledError:
+            if not self._closing:
+                raise
+        else:
+            if self._read_fut is not None:
+                self._read_fut.add_done_callback(self._loop_reading)
+        finally:
+            if data:
+                self._protocol.datagram_received(data, addr)
+
+
+class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport,
+                                   _ProactorBaseWritePipeTransport,
+                                   transports.Transport):
+    """Transport for duplex pipes."""
+
+    def can_write_eof(self):
+        return False
+
+    def write_eof(self):
+        raise NotImplementedError
+
+
+class _ProactorSocketTransport(_ProactorReadPipeTransport,
+                               _ProactorBaseWritePipeTransport,
+                               transports.Transport):
+    """Transport for connected sockets."""
+
+    _sendfile_compatible = constants._SendfileMode.TRY_NATIVE
+
+    def __init__(self, loop, sock, protocol, waiter=None,
+                 extra=None, server=None):
+        super().__init__(loop, sock, protocol, waiter, extra, server)
+        base_events._set_nodelay(sock)
+
+    def _set_extra(self, sock):
+        _set_socket_extra(self, sock)
+
+    def can_write_eof(self):
+        return True
+
+    def write_eof(self):
+        if self._closing or self._eof_written:
+            return
+        self._eof_written = True
+        if self._write_fut is None:
+            self._sock.shutdown(socket.SHUT_WR)
+
+
+class BaseProactorEventLoop(base_events.BaseEventLoop):
+
+    def __init__(self, proactor):
+        super().__init__()
+        logger.debug('Using proactor: %s', proactor.__class__.__name__)
+        self._proactor = proactor
+        self._selector = proactor   # convenient alias
+        self._self_reading_future = None
+        self._accept_futures = {}   # socket file descriptor => Future
+        proactor.set_loop(self)
+        self._make_self_pipe()
+        if threading.current_thread() is threading.main_thread():
+            # The wakeup fd can only be installed from the main thread.
+            signal.set_wakeup_fd(self._csock.fileno())
+
+    def _make_socket_transport(self, sock, protocol, waiter=None,
+                               extra=None, server=None):
+        return _ProactorSocketTransport(self, sock, protocol, waiter,
+                                        extra, server)
+
+    def _make_ssl_transport(
+            self, rawsock, protocol, sslcontext, waiter=None,
+            *, server_side=False, server_hostname=None,
+            extra=None, server=None,
+            ssl_handshake_timeout=None):
+        ssl_protocol = sslproto.SSLProtocol(
+                self, protocol, sslcontext, waiter,
+                server_side, server_hostname,
+                ssl_handshake_timeout=ssl_handshake_timeout)
+        _ProactorSocketTransport(self, rawsock, ssl_protocol,
+                                 extra=extra, server=server)
+        return ssl_protocol._app_transport
+
+    def _make_datagram_transport(self, sock, protocol,
+                                 address=None, waiter=None, extra=None):
+        return _ProactorDatagramTransport(self, sock, protocol, address,
+                                          waiter, extra)
+
+    def _make_duplex_pipe_transport(self, sock, protocol, waiter=None,
+                                    extra=None):
+        return _ProactorDuplexPipeTransport(self,
+                                            sock, protocol, waiter, extra)
+
+    def _make_read_pipe_transport(self, sock, protocol, waiter=None,
+                                  extra=None):
+        return _ProactorReadPipeTransport(self, sock, protocol, waiter, extra)
+
+    def _make_write_pipe_transport(self, sock, protocol, waiter=None,
+                                   extra=None):
+        # We want connection_lost() to be called when the other end closes
+        return _ProactorWritePipeTransport(self,
+                                           sock, protocol, waiter, extra)
+
+    def close(self):
+        if self.is_running():
+            raise RuntimeError("Cannot close a running event loop")
+        if self.is_closed():
+            return
+
+        if threading.current_thread() is threading.main_thread():
+            signal.set_wakeup_fd(-1)
+        # Call these methods before closing the event loop (before calling
+        # BaseEventLoop.close), because they can schedule callbacks with
+        # call_soon(), which is forbidden when the event loop is closed.
+        self._stop_accept_futures()
+        self._close_self_pipe()
+        self._proactor.close()
+        self._proactor = None
+        self._selector = None
+
+        # Close the event loop
+        super().close()
+
+    async def sock_recv(self, sock, n):
+        return await self._proactor.recv(sock, n)
+
+    async def sock_recv_into(self, sock, buf):
+        return await self._proactor.recv_into(sock, buf)
+
+    async def sock_sendall(self, sock, data):
+        return await self._proactor.send(sock, data)
+
+    async def sock_connect(self, sock, address):
+        return await self._proactor.connect(sock, address)
+
+    async def sock_accept(self, sock):
+        return await self._proactor.accept(sock)
+
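+    # Usage sketch for the sock_*() coroutines above (assumes a connected,
+    # non-blocking socket and a running proactor event loop):
+    #
+    #     loop = asyncio.get_running_loop()
+    #     await loop.sock_sendall(sock, b'ping')
+    #     data = await loop.sock_recv(sock, 1024)
+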
+    async def _sock_sendfile_native(self, sock, file, offset, count):
+        try:
+            fileno = file.fileno()
+        except (AttributeError, io.UnsupportedOperation) as err:
+            raise exceptions.SendfileNotAvailableError("not a regular file")
+        try:
+            fsize = os.fstat(fileno).st_size
+        except OSError:
+            raise exceptions.SendfileNotAvailableError("not a regular file")
+        blocksize = count if count else fsize
+        if not blocksize:
+            return 0  # empty file
+
+        blocksize = min(blocksize, 0xffff_ffff)
+        end_pos = min(offset + count, fsize) if count else fsize
+        offset = min(offset, fsize)
+        total_sent = 0
+        try:
+            while True:
+                blocksize = min(end_pos - offset, blocksize)
+                if blocksize <= 0:
+                    return total_sent
+                await self._proactor.sendfile(sock, file, offset, blocksize)
+                offset += blocksize
+                total_sent += blocksize
+        finally:
+            if total_sent > 0:
+                file.seek(offset)
+
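+    # The chunked loop above backs loop.sock_sendfile(); a hedged usage
+    # sketch (the file must be a regular file opened in binary mode):
+    #
+    #     with open('payload.bin', 'rb') as f:
+    #         sent = await loop.sock_sendfile(sock, f)
+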
+    async def _sendfile_native(self, transp, file, offset, count):
+        resume_reading = transp.is_reading()
+        transp.pause_reading()
+        await transp._make_empty_waiter()
+        try:
+            return await self.sock_sendfile(transp._sock, file, offset, count,
+                                            fallback=False)
+        finally:
+            transp._reset_empty_waiter()
+            if resume_reading:
+                transp.resume_reading()
+
+    def _close_self_pipe(self):
+        if self._self_reading_future is not None:
+            self._self_reading_future.cancel()
+            self._self_reading_future = None
+        self._ssock.close()
+        self._ssock = None
+        self._csock.close()
+        self._csock = None
+        self._internal_fds -= 1
+
+    def _make_self_pipe(self):
+        # A self-socket, really. :-)
+        self._ssock, self._csock = socket.socketpair()
+        self._ssock.setblocking(False)
+        self._csock.setblocking(False)
+        self._internal_fds += 1
+
+    def _loop_self_reading(self, f=None):
+        try:
+            if f is not None:
+                f.result()  # may raise
+            if self._self_reading_future is not f:
+                # When we scheduled this Future, we assigned it to
+                # _self_reading_future. If it's not there now, something has
+                # tried to cancel the loop while this callback was still in the
+                # queue (see windows_events.ProactorEventLoop.run_forever). In
+                # that case stop here instead of continuing to schedule a new
+                # iteration.
+                return
+            f = self._proactor.recv(self._ssock, 4096)
+        except exceptions.CancelledError:
+            # _close_self_pipe() has been called, stop waiting for data
+            return
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            self.call_exception_handler({
+                'message': 'Error on reading from the event loop self pipe',
+                'exception': exc,
+                'loop': self,
+            })
+        else:
+            self._self_reading_future = f
+            f.add_done_callback(self._loop_self_reading)
+
+    def _write_to_self(self):
+        # This may be called from a different thread, possibly after
+        # _close_self_pipe() has been called or even while it is
+        # running.  Guard for self._csock being None or closed.  When
+        # a socket is closed, send() raises OSError (with errno set to
+        # EBADF, but let's not rely on the exact error code).
+        csock = self._csock
+        if csock is None:
+            return
+
+        try:
+            csock.send(b'\0')
+        except OSError:
+            if self._debug:
+                logger.debug("Fail to write a null byte into the "
+                             "self-pipe socket",
+                             exc_info=True)
+
+    def _start_serving(self, protocol_factory, sock,
+                       sslcontext=None, server=None, backlog=100,
+                       ssl_handshake_timeout=None):
+
+        def loop(f=None):
+            try:
+                if f is not None:
+                    conn, addr = f.result()
+                    if self._debug:
+                        logger.debug("%r got a new connection from %r: %r",
+                                     server, addr, conn)
+                    protocol = protocol_factory()
+                    if sslcontext is not None:
+                        self._make_ssl_transport(
+                            conn, protocol, sslcontext, server_side=True,
+                            extra={'peername': addr}, server=server,
+                            ssl_handshake_timeout=ssl_handshake_timeout)
+                    else:
+                        self._make_socket_transport(
+                            conn, protocol,
+                            extra={'peername': addr}, server=server)
+                if self.is_closed():
+                    return
+                f = self._proactor.accept(sock)
+            except OSError as exc:
+                if sock.fileno() != -1:
+                    self.call_exception_handler({
+                        'message': 'Accept failed on a socket',
+                        'exception': exc,
+                        'socket': trsock.TransportSocket(sock),
+                    })
+                    sock.close()
+                elif self._debug:
+                    logger.debug("Accept failed on socket %r",
+                                 sock, exc_info=True)
+            except exceptions.CancelledError:
+                sock.close()
+            else:
+                self._accept_futures[sock.fileno()] = f
+                f.add_done_callback(loop)
+
+        self.call_soon(loop)
+
+    def _process_events(self, event_list):
+        # Events are processed in the IocpProactor._poll() method
+        pass
+
+    def _stop_accept_futures(self):
+        for future in self._accept_futures.values():
+            future.cancel()
+        self._accept_futures.clear()
+
+    def _stop_serving(self, sock):
+        future = self._accept_futures.pop(sock.fileno(), None)
+        if future:
+            future.cancel()
+        self._proactor._stop_serving(sock)
+        sock.close()
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/protocols.py b/linux-x64/clang/python3/lib/python3.9/asyncio/protocols.py
new file mode 100644
index 0000000..69fa43e
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/protocols.py
@@ -0,0 +1,220 @@
+"""Abstract Protocol base classes."""
+
+__all__ = (
+    'BaseProtocol', 'Protocol', 'DatagramProtocol',
+    'SubprocessProtocol', 'BufferedProtocol',
+)
+
+
+class BaseProtocol:
+    """Common base class for protocol interfaces.
+
+    Usually the user implements protocols derived from BaseProtocol,
+    such as Protocol or SubprocessProtocol.
+
+    The only case where BaseProtocol should be implemented directly is
+    a write-only transport, like a write pipe.
+    """
+
+    __slots__ = ()
+
+    def connection_made(self, transport):
+        """Called when a connection is made.
+
+        The argument is the transport representing the pipe connection.
+        To receive data, wait for data_received() calls.
+        When the connection is closed, connection_lost() is called.
+        """
+
+    def connection_lost(self, exc):
+        """Called when the connection is lost or closed.
+
+        The argument is an exception object or None (the latter
+        meaning a regular EOF is received or the connection was
+        aborted or closed).
+        """
+
+    def pause_writing(self):
+        """Called when the transport's buffer goes over the high-water mark.
+
+        Pause and resume calls are paired -- pause_writing() is called
+        once when the buffer goes strictly over the high-water mark
+        (even if subsequent writes increase the buffer size even
+        more), and eventually resume_writing() is called once when the
+        buffer size reaches the low-water mark.
+
+        Note that if the buffer size equals the high-water mark,
+        pause_writing() is not called -- it must go strictly over.
+        Conversely, resume_writing() is called when the buffer size is
+        equal to or lower than the low-water mark.  These end conditions
+        are important to ensure that things go as expected when either
+        mark is zero.
+
+        NOTE: This is the only Protocol callback that is not called
+        through EventLoop.call_soon() -- if it were, it would have no
+        effect when it's most needed (when the app keeps writing
+        without yielding until pause_writing() is called).
+        """
+
+    def resume_writing(self):
+        """Called when the transport's buffer drains below the low-water mark.
+
+        See pause_writing() for details.
+        """
+
+
+class Protocol(BaseProtocol):
+    """Interface for stream protocol.
+
+    The user should implement this interface.  They can inherit from
+    this class but don't need to.  The implementations here do
+    nothing (they don't raise exceptions).
+
+    When the user wants to request a transport, they pass a protocol
+    factory to a utility function (e.g., EventLoop.create_connection()).
+
+    When the connection is made successfully, connection_made() is
+    called with a suitable transport object.  Then data_received()
+    will be called 0 or more times with data (bytes) received from the
+    transport; finally, connection_lost() will be called exactly once
+    with either an exception object or None as an argument.
+
+    State machine of calls:
+
+      start -> CM [-> DR*] [-> ER?] -> CL -> end
+
+    * CM: connection_made()
+    * DR: data_received()
+    * ER: eof_received()
+    * CL: connection_lost()
+    """
+
+    __slots__ = ()
+
+    def data_received(self, data):
+        """Called when some data is received.
+
+        The argument is a bytes object.
+        """
+
+    def eof_received(self):
+        """Called when the other end calls write_eof() or equivalent.
+
+        If this returns a false value (including None), the transport
+        will close itself.  If it returns a true value, closing the
+        transport is up to the protocol.
+        """
+
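+# A minimal concrete Protocol following the state machine above
+# (illustrative):
+#
+#     class Echo(Protocol):
+#         def connection_made(self, transport):   # CM
+#             self.transport = transport
+#
+#         def data_received(self, data):          # DR
+#             self.transport.write(data)
+#
+#         def connection_lost(self, exc):         # CL
+#             self.transport = None
+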
+
+class BufferedProtocol(BaseProtocol):
+    """Interface for stream protocol with manual buffer control.
+
+    Important: this has been added to asyncio in Python 3.7
+    *on a provisional basis*!  Consider it as an experimental API that
+    might be changed or removed in Python 3.8.
+
+    Event methods, such as `create_server` and `create_connection`,
+    accept factories that return protocols that implement this interface.
+
+    The idea of BufferedProtocol is that it allows manually allocating
+    and controlling the receive buffer.  Event loops can then use the
+    buffer provided by the protocol to avoid unnecessary data copies.
+    This can result in a noticeable performance improvement for
+    protocols that receive large amounts of data.  Sophisticated
+    protocols can allocate the buffer only once at creation time.
+
+    State machine of calls:
+
+      start -> CM [-> GB [-> BU?]]* [-> ER?] -> CL -> end
+
+    * CM: connection_made()
+    * GB: get_buffer()
+    * BU: buffer_updated()
+    * ER: eof_received()
+    * CL: connection_lost()
+    """
+
+    __slots__ = ()
+
+    def get_buffer(self, sizehint):
+        """Called to allocate a new receive buffer.
+
+        *sizehint* is a recommended minimal size for the returned
+        buffer.  When set to -1, the buffer size can be arbitrary.
+
+        Must return an object that implements the
+        :ref:`buffer protocol <bufferobjects>`.
+        It is an error to return a zero-sized buffer.
+        """
+
+    def buffer_updated(self, nbytes):
+        """Called when the buffer was updated with the received data.
+
+        *nbytes* is the total number of bytes that were written to
+        the buffer.
+        """
+
+    def eof_received(self):
+        """Called when the other end calls write_eof() or equivalent.
+
+        If this returns a false value (including None), the transport
+        will close itself.  If it returns a true value, closing the
+        transport is up to the protocol.
+        """
+
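+# A sketch of a BufferedProtocol that reuses one preallocated buffer
+# (illustrative; a real protocol would parse the received bytes):
+#
+#     class Preallocated(BufferedProtocol):
+#         def __init__(self):
+#             self._buf = bytearray(65536)
+#
+#         def get_buffer(self, sizehint):          # GB
+#             return self._buf
+#
+#         def buffer_updated(self, nbytes):        # BU
+#             data = bytes(self._buf[:nbytes])
+#             ...                                  # consume data
+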
+
+class DatagramProtocol(BaseProtocol):
+    """Interface for datagram protocol."""
+
+    __slots__ = ()
+
+    def datagram_received(self, data, addr):
+        """Called when some datagram is received."""
+
+    def error_received(self, exc):
+        """Called when a send or receive operation raises an OSError.
+
+        (Other than BlockingIOError or InterruptedError.)
+        """
+
+
+class SubprocessProtocol(BaseProtocol):
+    """Interface for protocol for subprocess calls."""
+
+    __slots__ = ()
+
+    def pipe_data_received(self, fd, data):
+        """Called when the subprocess writes data into stdout/stderr pipe.
+
+        fd is the int file descriptor.
+        data is a bytes object.
+        """
+
+    def pipe_connection_lost(self, fd, exc):
+        """Called when a file descriptor associated with the child process is
+        closed.
+
+        fd is the int file descriptor that was closed.
+        """
+
+    def process_exited(self):
+        """Called when subprocess has exited."""
+
+
+def _feed_data_to_buffered_proto(proto, data):
+    data_len = len(data)
+    while data_len:
+        buf = proto.get_buffer(data_len)
+        buf_len = len(buf)
+        if not buf_len:
+            raise RuntimeError('get_buffer() returned an empty buffer')
+
+        if buf_len >= data_len:
+            buf[:data_len] = data
+            proto.buffer_updated(data_len)
+            return
+        else:
+            buf[:buf_len] = data[:buf_len]
+            proto.buffer_updated(buf_len)
+            data = data[buf_len:]
+            data_len = len(data)
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/queues.py b/linux-x64/clang/python3/lib/python3.9/asyncio/queues.py
new file mode 100644
index 0000000..cd3f7c6
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/queues.py
@@ -0,0 +1,252 @@
+__all__ = ('Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty')
+
+import collections
+import heapq
+import warnings
+
+from . import events
+from . import locks
+
+
+class QueueEmpty(Exception):
+    """Raised when Queue.get_nowait() is called on an empty Queue."""
+    pass
+
+
+class QueueFull(Exception):
+    """Raised when the Queue.put_nowait() method is called on a full Queue."""
+    pass
+
+
+class Queue:
+    """A queue, useful for coordinating producer and consumer coroutines.
+
+    If maxsize is less than or equal to zero, the queue size is infinite. If it
+    is an integer greater than 0, then "await put()" will block when the
+    queue reaches maxsize, until an item is removed by get().
+
+    Unlike the standard library Queue, you can reliably know this Queue's size
+    with qsize(), since your single-threaded asyncio application won't be
+    interrupted between calling qsize() and doing an operation on the Queue.
+    """
+
+    def __init__(self, maxsize=0, *, loop=None):
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
+        self._maxsize = maxsize
+
+        # Futures.
+        self._getters = collections.deque()
+        # Futures.
+        self._putters = collections.deque()
+        self._unfinished_tasks = 0
+        self._finished = locks.Event(loop=loop)
+        self._finished.set()
+        self._init(maxsize)
+
+    # These three are overridable in subclasses.
+
+    def _init(self, maxsize):
+        self._queue = collections.deque()
+
+    def _get(self):
+        return self._queue.popleft()
+
+    def _put(self, item):
+        self._queue.append(item)
+
+    # End of the overridable methods.
+
+    def _wakeup_next(self, waiters):
+        # Wake up the next waiter (if any) that isn't cancelled.
+        while waiters:
+            waiter = waiters.popleft()
+            if not waiter.done():
+                waiter.set_result(None)
+                break
+
+    def __repr__(self):
+        return f'<{type(self).__name__} at {id(self):#x} {self._format()}>'
+
+    def __str__(self):
+        return f'<{type(self).__name__} {self._format()}>'
+
+    def __class_getitem__(cls, type):
+        return cls
+
+    def _format(self):
+        result = f'maxsize={self._maxsize!r}'
+        if getattr(self, '_queue', None):
+            result += f' _queue={list(self._queue)!r}'
+        if self._getters:
+            result += f' _getters[{len(self._getters)}]'
+        if self._putters:
+            result += f' _putters[{len(self._putters)}]'
+        if self._unfinished_tasks:
+            result += f' tasks={self._unfinished_tasks}'
+        return result
+
+    def qsize(self):
+        """Number of items in the queue."""
+        return len(self._queue)
+
+    @property
+    def maxsize(self):
+        """Number of items allowed in the queue."""
+        return self._maxsize
+
+    def empty(self):
+        """Return True if the queue is empty, False otherwise."""
+        return not self._queue
+
+    def full(self):
+        """Return True if there are maxsize items in the queue.
+
+        Note: if the Queue was initialized with maxsize=0 (the default),
+        then full() is never True.
+        """
+        if self._maxsize <= 0:
+            return False
+        else:
+            return self.qsize() >= self._maxsize
+
+    async def put(self, item):
+        """Put an item into the queue.
+
+        Put an item into the queue. If the queue is full, wait until a free
+        slot is available before adding the item.
+        """
+        while self.full():
+            putter = self._loop.create_future()
+            self._putters.append(putter)
+            try:
+                await putter
+            except:
+                putter.cancel()  # Just in case putter is not done yet.
+                try:
+                    # Clean self._putters from canceled putters.
+                    self._putters.remove(putter)
+                except ValueError:
+                    # The putter could be removed from self._putters by a
+                    # previous get_nowait call.
+                    pass
+                if not self.full() and not putter.cancelled():
+                    # We were woken up by get_nowait(), but can't take
+                    # the call.  Wake up the next in line.
+                    self._wakeup_next(self._putters)
+                raise
+        return self.put_nowait(item)
+
+    def put_nowait(self, item):
+        """Put an item into the queue without blocking.
+
+        If no free slot is immediately available, raise QueueFull.
+        """
+        if self.full():
+            raise QueueFull
+        self._put(item)
+        self._unfinished_tasks += 1
+        self._finished.clear()
+        self._wakeup_next(self._getters)
+
+    async def get(self):
+        """Remove and return an item from the queue.
+
+        If the queue is empty, wait until an item is available.
+        """
+        while self.empty():
+            getter = self._loop.create_future()
+            self._getters.append(getter)
+            try:
+                await getter
+            except:
+                getter.cancel()  # Just in case getter is not done yet.
+                try:
+                    # Clean self._getters from canceled getters.
+                    self._getters.remove(getter)
+                except ValueError:
+                    # The getter could be removed from self._getters by a
+                    # previous put_nowait call.
+                    pass
+                if not self.empty() and not getter.cancelled():
+                    # We were woken up by put_nowait(), but can't take
+                    # the call.  Wake up the next in line.
+                    self._wakeup_next(self._getters)
+                raise
+        return self.get_nowait()
+
+    def get_nowait(self):
+        """Remove and return an item from the queue.
+
+        Return an item if one is immediately available, else raise QueueEmpty.
+        """
+        if self.empty():
+            raise QueueEmpty
+        item = self._get()
+        self._wakeup_next(self._putters)
+        return item
+
+    def task_done(self):
+        """Indicate that a formerly enqueued task is complete.
+
+        Used by queue consumers. For each get() used to fetch a task,
+        a subsequent call to task_done() tells the queue that the processing
+        on the task is complete.
+
+        If a join() is currently blocking, it will resume when all items have
+        been processed (meaning that a task_done() call was received for every
+        item that had been put() into the queue).
+
+        Raises ValueError if called more times than there were items placed in
+        the queue.
+        """
+        if self._unfinished_tasks <= 0:
+            raise ValueError('task_done() called too many times')
+        self._unfinished_tasks -= 1
+        if self._unfinished_tasks == 0:
+            self._finished.set()
+
+    async def join(self):
+        """Block until all items in the queue have been gotten and processed.
+
+        The count of unfinished tasks goes up whenever an item is added to the
+        queue. The count goes down whenever a consumer calls task_done() to
+        indicate that the item was retrieved and all work on it is complete.
+        When the count of unfinished tasks drops to zero, join() unblocks.
+        """
+        if self._unfinished_tasks > 0:
+            await self._finished.wait()
+
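+# A producer/consumer sketch using the join()/task_done() protocol above
+# (illustrative):
+#
+#     import asyncio
+#
+#     async def consume(q):
+#         while True:
+#             item = await q.get()
+#             ...                        # process the item
+#             q.task_done()
+#
+#     async def main():
+#         q = asyncio.Queue(maxsize=10)
+#         worker = asyncio.create_task(consume(q))
+#         for i in range(100):
+#             await q.put(i)
+#         await q.join()                 # wait until every item is processed
+#         worker.cancel()
+#
+#     asyncio.run(main())
+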
+
+class PriorityQueue(Queue):
+    """A subclass of Queue; retrieves entries in priority order (lowest first).
+
+    Entries are typically tuples of the form: (priority number, data).
+    """
+
+    def _init(self, maxsize):
+        self._queue = []
+
+    def _put(self, item, heappush=heapq.heappush):
+        heappush(self._queue, item)
+
+    def _get(self, heappop=heapq.heappop):
+        return heappop(self._queue)
+
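+# Entries compare as tuples, so lower priority numbers are retrieved first
+# (illustrative):
+#
+#     q = PriorityQueue()
+#     q.put_nowait((2, 'low'))
+#     q.put_nowait((1, 'high'))
+#     q.get_nowait()                     # -> (1, 'high')
+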
+
+class LifoQueue(Queue):
+    """A subclass of Queue that retrieves most recently added entries first."""
+
+    def _init(self, maxsize):
+        self._queue = []
+
+    def _put(self, item):
+        self._queue.append(item)
+
+    def _get(self):
+        return self._queue.pop()
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/runners.py b/linux-x64/clang/python3/lib/python3.9/asyncio/runners.py
new file mode 100644
index 0000000..268635d
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/runners.py
@@ -0,0 +1,74 @@
+__all__ = 'run',
+
+from . import coroutines
+from . import events
+from . import tasks
+
+
+def run(main, *, debug=None):
+    """Execute the coroutine and return the result.
+
+    This function runs the passed coroutine, taking care of
+    managing the asyncio event loop and finalizing asynchronous
+    generators.
+
+    This function cannot be called when another asyncio event loop is
+    running in the same thread.
+
+    If debug is True, the event loop will be run in debug mode.
+
+    This function always creates a new event loop and closes it at the end.
+    It should be used as a main entry point for asyncio programs, and should
+    ideally only be called once.
+
+    Example:
+
+        async def main():
+            await asyncio.sleep(1)
+            print('hello')
+
+        asyncio.run(main())
+    """
+    if events._get_running_loop() is not None:
+        raise RuntimeError(
+            "asyncio.run() cannot be called from a running event loop")
+
+    if not coroutines.iscoroutine(main):
+        raise ValueError("a coroutine was expected, got {!r}".format(main))
+
+    loop = events.new_event_loop()
+    try:
+        events.set_event_loop(loop)
+        if debug is not None:
+            loop.set_debug(debug)
+        return loop.run_until_complete(main)
+    finally:
+        try:
+            _cancel_all_tasks(loop)
+            loop.run_until_complete(loop.shutdown_asyncgens())
+            loop.run_until_complete(loop.shutdown_default_executor())
+        finally:
+            events.set_event_loop(None)
+            loop.close()
+
+
+def _cancel_all_tasks(loop):
+    to_cancel = tasks.all_tasks(loop)
+    if not to_cancel:
+        return
+
+    for task in to_cancel:
+        task.cancel()
+
+    loop.run_until_complete(
+        tasks.gather(*to_cancel, loop=loop, return_exceptions=True))
+
+    for task in to_cancel:
+        if task.cancelled():
+            continue
+        if task.exception() is not None:
+            loop.call_exception_handler({
+                'message': 'unhandled exception during asyncio.run() shutdown',
+                'exception': task.exception(),
+                'task': task,
+            })
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/selector_events.py b/linux-x64/clang/python3/lib/python3.9/asyncio/selector_events.py
new file mode 100644
index 0000000..59cb6b1
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/selector_events.py
@@ -0,0 +1,1099 @@
+"""Event loop using a selector and related classes.
+
+A selector is a "notify-when-ready" multiplexer.  For a subclass which
+also includes support for signal handling, see the unix_events sub-module.
+"""
+
+__all__ = 'BaseSelectorEventLoop',
+
+import collections
+import errno
+import functools
+import selectors
+import socket
+import warnings
+import weakref
+try:
+    import ssl
+except ImportError:  # pragma: no cover
+    ssl = None
+
+from . import base_events
+from . import constants
+from . import events
+from . import futures
+from . import protocols
+from . import sslproto
+from . import transports
+from . import trsock
+from .log import logger
+
+
+def _test_selector_event(selector, fd, event):
+    # Test if the selector is monitoring 'event' events
+    # for the file descriptor 'fd'.
+    try:
+        key = selector.get_key(fd)
+    except KeyError:
+        return False
+    else:
+        return bool(key.events & event)
+
+
+def _check_ssl_socket(sock):
+    if ssl is not None and isinstance(sock, ssl.SSLSocket):
+        raise TypeError("Socket cannot be of type SSLSocket")
+
+
+class BaseSelectorEventLoop(base_events.BaseEventLoop):
+    """Selector event loop.
+
+    See events.EventLoop for API specification.
+    """
+
+    def __init__(self, selector=None):
+        super().__init__()
+
+        if selector is None:
+            selector = selectors.DefaultSelector()
+        logger.debug('Using selector: %s', selector.__class__.__name__)
+        self._selector = selector
+        self._make_self_pipe()
+        self._transports = weakref.WeakValueDictionary()
+
+    def _make_socket_transport(self, sock, protocol, waiter=None, *,
+                               extra=None, server=None):
+        return _SelectorSocketTransport(self, sock, protocol, waiter,
+                                        extra, server)
+
+    def _make_ssl_transport(
+            self, rawsock, protocol, sslcontext, waiter=None,
+            *, server_side=False, server_hostname=None,
+            extra=None, server=None,
+            ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT):
+        ssl_protocol = sslproto.SSLProtocol(
+                self, protocol, sslcontext, waiter,
+                server_side, server_hostname,
+                ssl_handshake_timeout=ssl_handshake_timeout)
+        _SelectorSocketTransport(self, rawsock, ssl_protocol,
+                                 extra=extra, server=server)
+        return ssl_protocol._app_transport
+
+    def _make_datagram_transport(self, sock, protocol,
+                                 address=None, waiter=None, extra=None):
+        return _SelectorDatagramTransport(self, sock, protocol,
+                                          address, waiter, extra)
+
+    def close(self):
+        if self.is_running():
+            raise RuntimeError("Cannot close a running event loop")
+        if self.is_closed():
+            return
+        self._close_self_pipe()
+        super().close()
+        if self._selector is not None:
+            self._selector.close()
+            self._selector = None
+
+    def _close_self_pipe(self):
+        self._remove_reader(self._ssock.fileno())
+        self._ssock.close()
+        self._ssock = None
+        self._csock.close()
+        self._csock = None
+        self._internal_fds -= 1
+
+    def _make_self_pipe(self):
+        # A self-socket, really. :-)
+        self._ssock, self._csock = socket.socketpair()
+        self._ssock.setblocking(False)
+        self._csock.setblocking(False)
+        self._internal_fds += 1
+        self._add_reader(self._ssock.fileno(), self._read_from_self)
+
+    def _process_self_data(self, data):
+        pass
+
+    def _read_from_self(self):
+        while True:
+            try:
+                data = self._ssock.recv(4096)
+                if not data:
+                    break
+                self._process_self_data(data)
+            except InterruptedError:
+                continue
+            except BlockingIOError:
+                break
+
+    def _write_to_self(self):
+        # This may be called from a different thread, possibly after
+        # _close_self_pipe() has been called or even while it is
+        # running.  Guard for self._csock being None or closed.  When
+        # a socket is closed, send() raises OSError (with errno set to
+        # EBADF, but let's not rely on the exact error code).
+        csock = self._csock
+        if csock is None:
+            return
+
+        try:
+            csock.send(b'\0')
+        except OSError:
+            if self._debug:
+                logger.debug("Fail to write a null byte into the "
+                             "self-pipe socket",
+                             exc_info=True)
+
+    def _start_serving(self, protocol_factory, sock,
+                       sslcontext=None, server=None, backlog=100,
+                       ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT):
+        self._add_reader(sock.fileno(), self._accept_connection,
+                         protocol_factory, sock, sslcontext, server, backlog,
+                         ssl_handshake_timeout)
+
+    def _accept_connection(
+            self, protocol_factory, sock,
+            sslcontext=None, server=None, backlog=100,
+            ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT):
+        # This method is only called once for each event loop tick where the
+        # listening socket has triggered an EVENT_READ. There may be multiple
+        # connections waiting for an .accept() so it is called in a loop.
+        # See https://bugs.python.org/issue27906 for more details.
+        for _ in range(backlog):
+            try:
+                conn, addr = sock.accept()
+                if self._debug:
+                    logger.debug("%r got a new connection from %r: %r",
+                                 server, addr, conn)
+                conn.setblocking(False)
+            except (BlockingIOError, InterruptedError, ConnectionAbortedError):
+                # Early exit because the socket accept buffer is empty.
+                return None
+            except OSError as exc:
+                # There's nowhere to send the error, so just log it.
+                if exc.errno in (errno.EMFILE, errno.ENFILE,
+                                 errno.ENOBUFS, errno.ENOMEM):
+                    # Some platforms (e.g. Linux) keep reporting the FD as
+                    # ready, so we remove the read handler temporarily.
+                    # We'll try again in a while.
+                    self.call_exception_handler({
+                        'message': 'socket.accept() out of system resource',
+                        'exception': exc,
+                        'socket': trsock.TransportSocket(sock),
+                    })
+                    self._remove_reader(sock.fileno())
+                    self.call_later(constants.ACCEPT_RETRY_DELAY,
+                                    self._start_serving,
+                                    protocol_factory, sock, sslcontext, server,
+                                    backlog, ssl_handshake_timeout)
+                else:
+                    raise  # The event loop will catch, log and ignore it.
+            else:
+                extra = {'peername': addr}
+                accept = self._accept_connection2(
+                    protocol_factory, conn, extra, sslcontext, server,
+                    ssl_handshake_timeout)
+                self.create_task(accept)
+
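+    # A hedged sketch of the drain-the-backlog idea used above: one readiness
+    # event may cover several queued connections, so accept() is retried
+    # until it would block (see bpo-27906). Standalone illustration only:
+    #
+    #     def drain_accepts(listening_sock, limit=100):
+    #         conns = []
+    #         for _ in range(limit):
+    #             try:
+    #                 conn, addr = listening_sock.accept()
+    #             except BlockingIOError:
+    #                 break                    # backlog drained
+    #             conn.setblocking(False)
+    #             conns.append((conn, addr))
+    #         return conns
+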
+    async def _accept_connection2(
+            self, protocol_factory, conn, extra,
+            sslcontext=None, server=None,
+            ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT):
+        protocol = None
+        transport = None
+        try:
+            protocol = protocol_factory()
+            waiter = self.create_future()
+            if sslcontext:
+                transport = self._make_ssl_transport(
+                    conn, protocol, sslcontext, waiter=waiter,
+                    server_side=True, extra=extra, server=server,
+                    ssl_handshake_timeout=ssl_handshake_timeout)
+            else:
+                transport = self._make_socket_transport(
+                    conn, protocol, waiter=waiter, extra=extra,
+                    server=server)
+
+            try:
+                await waiter
+            except BaseException:
+                transport.close()
+                raise
+
+            # It's now up to the protocol to handle the connection.
+
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            if self._debug:
+                context = {
+                    'message':
+                        'Error on transport creation for incoming connection',
+                    'exception': exc,
+                }
+                if protocol is not None:
+                    context['protocol'] = protocol
+                if transport is not None:
+                    context['transport'] = transport
+                self.call_exception_handler(context)
+
+    def _ensure_fd_no_transport(self, fd):
+        fileno = fd
+        if not isinstance(fileno, int):
+            try:
+                fileno = int(fileno.fileno())
+            except (AttributeError, TypeError, ValueError):
+                # This code matches selectors._fileobj_to_fd function.
+                raise ValueError(f"Invalid file object: {fd!r}") from None
+        try:
+            transport = self._transports[fileno]
+        except KeyError:
+            pass
+        else:
+            if not transport.is_closing():
+                raise RuntimeError(
+                    f'File descriptor {fd!r} is used by transport '
+                    f'{transport!r}')
+
+    def _add_reader(self, fd, callback, *args):
+        self._check_closed()
+        handle = events.Handle(callback, args, self, None)
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            self._selector.register(fd, selectors.EVENT_READ,
+                                    (handle, None))
+        else:
+            mask, (reader, writer) = key.events, key.data
+            self._selector.modify(fd, mask | selectors.EVENT_READ,
+                                  (handle, writer))
+            if reader is not None:
+                reader.cancel()
+        return handle
+
+    def _remove_reader(self, fd):
+        if self.is_closed():
+            return False
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            return False
+        else:
+            mask, (reader, writer) = key.events, key.data
+            mask &= ~selectors.EVENT_READ
+            if not mask:
+                self._selector.unregister(fd)
+            else:
+                self._selector.modify(fd, mask, (None, writer))
+
+            if reader is not None:
+                reader.cancel()
+                return True
+            else:
+                return False
+
+    def _add_writer(self, fd, callback, *args):
+        self._check_closed()
+        handle = events.Handle(callback, args, self, None)
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            self._selector.register(fd, selectors.EVENT_WRITE,
+                                    (None, handle))
+        else:
+            mask, (reader, writer) = key.events, key.data
+            self._selector.modify(fd, mask | selectors.EVENT_WRITE,
+                                  (reader, handle))
+            if writer is not None:
+                writer.cancel()
+        return handle
+
+    def _remove_writer(self, fd):
+        """Remove a writer callback."""
+        if self.is_closed():
+            return False
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            return False
+        else:
+            mask, (reader, writer) = key.events, key.data
+            # Remove both writer and connector.
+            mask &= ~selectors.EVENT_WRITE
+            if not mask:
+                self._selector.unregister(fd)
+            else:
+                self._selector.modify(fd, mask, (reader, None))
+
+            if writer is not None:
+                writer.cancel()
+                return True
+            else:
+                return False
+
+    def add_reader(self, fd, callback, *args):
+        """Add a reader callback."""
+        self._ensure_fd_no_transport(fd)
+        self._add_reader(fd, callback, *args)
+
+    def remove_reader(self, fd):
+        """Remove a reader callback."""
+        self._ensure_fd_no_transport(fd)
+        return self._remove_reader(fd)
+
+    def add_writer(self, fd, callback, *args):
+        """Add a writer callback.."""
+        self._ensure_fd_no_transport(fd)
+        self._add_writer(fd, callback, *args)
+
+    def remove_writer(self, fd):
+        """Remove a writer callback."""
+        self._ensure_fd_no_transport(fd)
+        return self._remove_writer(fd)
+
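+    # A hedged usage sketch for the reader-callback API above (the pipe and
+    # event names are illustrative only):
+    #
+    #     import asyncio, os
+    #
+    #     async def watch_pipe():
+    #         loop = asyncio.get_running_loop()
+    #         rfd, wfd = os.pipe()
+    #         os.set_blocking(rfd, False)
+    #         ready = asyncio.Event()
+    #         loop.add_reader(rfd, ready.set)  # runs when rfd is readable
+    #         os.write(wfd, b'x')
+    #         await ready.wait()
+    #         loop.remove_reader(rfd)
+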
+    async def sock_recv(self, sock, n):
+        """Receive data from the socket.
+
+        The return value is a bytes object representing the data received.
+        The maximum amount of data to be received at once is specified by
+        *n*.
+        """
+        _check_ssl_socket(sock)
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+        try:
+            return sock.recv(n)
+        except (BlockingIOError, InterruptedError):
+            pass
+        fut = self.create_future()
+        fd = sock.fileno()
+        self._ensure_fd_no_transport(fd)
+        handle = self._add_reader(fd, self._sock_recv, fut, sock, n)
+        fut.add_done_callback(
+            functools.partial(self._sock_read_done, fd, handle=handle))
+        return await fut
+
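+    # A hedged client-side sketch tying together the sock_* coroutines in
+    # this class (host, port and the request bytes are placeholders):
+    #
+    #     import asyncio, socket
+    #
+    #     async def fetch(host, port, request):
+    #         loop = asyncio.get_running_loop()
+    #         sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    #         sock.setblocking(False)          # required by the sock_* APIs
+    #         try:
+    #             await loop.sock_connect(sock, (host, port))
+    #             await loop.sock_sendall(sock, request)
+    #             return await loop.sock_recv(sock, 4096)
+    #         finally:
+    #             sock.close()
+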
+    def _sock_read_done(self, fd, fut, handle=None):
+        if handle is None or not handle.cancelled():
+            self.remove_reader(fd)
+
+    def _sock_recv(self, fut, sock, n):
+        # _sock_recv() can add itself as an I/O callback if the operation can't
+        # be done immediately. Don't use it directly, call sock_recv().
+        if fut.done():
+            return
+        try:
+            data = sock.recv(n)
+        except (BlockingIOError, InterruptedError):
+            return  # try again next time
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            fut.set_exception(exc)
+        else:
+            fut.set_result(data)
+
+    async def sock_recv_into(self, sock, buf):
+        """Receive data from the socket.
+
+        The received data is written into *buf* (a writable buffer).
+        The return value is the number of bytes written.
+        """
+        _check_ssl_socket(sock)
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+        try:
+            return sock.recv_into(buf)
+        except (BlockingIOError, InterruptedError):
+            pass
+        fut = self.create_future()
+        fd = sock.fileno()
+        self._ensure_fd_no_transport(fd)
+        handle = self._add_reader(fd, self._sock_recv_into, fut, sock, buf)
+        fut.add_done_callback(
+            functools.partial(self._sock_read_done, fd, handle=handle))
+        return await fut
+
+    def _sock_recv_into(self, fut, sock, buf):
+        # _sock_recv_into() can add itself as an I/O callback if the operation
+        # can't be done immediately. Don't use it directly, call
+        # sock_recv_into().
+        if fut.done():
+            return
+        try:
+            nbytes = sock.recv_into(buf)
+        except (BlockingIOError, InterruptedError):
+            return  # try again next time
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            fut.set_exception(exc)
+        else:
+            fut.set_result(nbytes)
+
+    async def sock_sendall(self, sock, data):
+        """Send data to the socket.
+
+        The socket must be connected to a remote socket. This method continues
+        to send data from *data* until either all data has been sent or an
+        error occurs. None is returned on success. On error, an exception is
+        raised, and there is no way to determine how much data, if any, was
+        successfully processed by the receiving end of the connection.
+        """
+        _check_ssl_socket(sock)
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+        try:
+            n = sock.send(data)
+        except (BlockingIOError, InterruptedError):
+            n = 0
+
+        if n == len(data):
+            # all data sent
+            return
+
+        fut = self.create_future()
+        fd = sock.fileno()
+        self._ensure_fd_no_transport(fd)
+        # A one-element list serves as a mutable cell so the callback can
+        # track how many bytes have been sent so far.
+        handle = self._add_writer(fd, self._sock_sendall, fut, sock,
+                                  memoryview(data), [n])
+        fut.add_done_callback(
+            functools.partial(self._sock_write_done, fd, handle=handle))
+        return await fut
+
+    def _sock_sendall(self, fut, sock, view, pos):
+        if fut.done():
+            # Future cancellation can be scheduled on previous loop iteration
+            return
+        start = pos[0]
+        try:
+            n = sock.send(view[start:])
+        except (BlockingIOError, InterruptedError):
+            return
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            fut.set_exception(exc)
+            return
+
+        start += n
+
+        if start == len(view):
+            fut.set_result(None)
+        else:
+            pos[0] = start
+
+    async def sock_connect(self, sock, address):
+        """Connect to a remote socket at address.
+
+        This method is a coroutine.
+        """
+        _check_ssl_socket(sock)
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+
+        if not hasattr(socket, 'AF_UNIX') or sock.family != socket.AF_UNIX:
+            resolved = await self._ensure_resolved(
+                address, family=sock.family, proto=sock.proto, loop=self)
+            _, _, _, _, address = resolved[0]
+
+        fut = self.create_future()
+        self._sock_connect(fut, sock, address)
+        return await fut
+
+    def _sock_connect(self, fut, sock, address):
+        fd = sock.fileno()
+        try:
+            sock.connect(address)
+        except (BlockingIOError, InterruptedError):
+            # Issue #23618: When the C function connect() fails with EINTR, the
+            # connection runs in background. We have to wait until the socket
+            # becomes writable to be notified when the connection succeeds or
+            # fails.
+            self._ensure_fd_no_transport(fd)
+            handle = self._add_writer(
+                fd, self._sock_connect_cb, fut, sock, address)
+            fut.add_done_callback(
+                functools.partial(self._sock_write_done, fd, handle=handle))
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            fut.set_exception(exc)
+        else:
+            fut.set_result(None)
+
+    def _sock_write_done(self, fd, fut, handle=None):
+        if handle is None or not handle.cancelled():
+            self.remove_writer(fd)
+
+    def _sock_connect_cb(self, fut, sock, address):
+        if fut.done():
+            return
+
+        try:
+            err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+            if err != 0:
+                # Jump to any except clause below.
+                raise OSError(err, f'Connect call failed {address}')
+        except (BlockingIOError, InterruptedError):
+            # socket is still registered, the callback will be retried later
+            pass
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            fut.set_exception(exc)
+        else:
+            fut.set_result(None)
+
+    async def sock_accept(self, sock):
+        """Accept a connection.
+
+        The socket must be bound to an address and listening for connections.
+        The return value is a pair (conn, address) where conn is a new socket
+        object usable to send and receive data on the connection, and address
+        is the address bound to the socket on the other end of the connection.
+        """
+        _check_ssl_socket(sock)
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+        fut = self.create_future()
+        self._sock_accept(fut, sock)
+        return await fut
+
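+    # A hedged server-side counterpart using sock_accept() (the listening
+    # socket is assumed bound, listening and non-blocking):
+    #
+    #     import asyncio
+    #
+    #     async def echo_once(listening_sock):
+    #         loop = asyncio.get_running_loop()
+    #         conn, _addr = await loop.sock_accept(listening_sock)
+    #         try:
+    #             data = await loop.sock_recv(conn, 4096)
+    #             await loop.sock_sendall(conn, data)
+    #         finally:
+    #             conn.close()
+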
+    def _sock_accept(self, fut, sock):
+        fd = sock.fileno()
+        try:
+            conn, address = sock.accept()
+            conn.setblocking(False)
+        except (BlockingIOError, InterruptedError):
+            self._ensure_fd_no_transport(fd)
+            handle = self._add_reader(fd, self._sock_accept, fut, sock)
+            fut.add_done_callback(
+                functools.partial(self._sock_read_done, fd, handle=handle))
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            fut.set_exception(exc)
+        else:
+            fut.set_result((conn, address))
+
+    async def _sendfile_native(self, transp, file, offset, count):
+        del self._transports[transp._sock_fd]
+        resume_reading = transp.is_reading()
+        transp.pause_reading()
+        await transp._make_empty_waiter()
+        try:
+            return await self.sock_sendfile(transp._sock, file, offset, count,
+                                            fallback=False)
+        finally:
+            transp._reset_empty_waiter()
+            if resume_reading:
+                transp.resume_reading()
+            self._transports[transp._sock_fd] = transp
+
+    def _process_events(self, event_list):
+        for key, mask in event_list:
+            fileobj, (reader, writer) = key.fileobj, key.data
+            if mask & selectors.EVENT_READ and reader is not None:
+                if reader._cancelled:
+                    self._remove_reader(fileobj)
+                else:
+                    self._add_callback(reader)
+            if mask & selectors.EVENT_WRITE and writer is not None:
+                if writer._cancelled:
+                    self._remove_writer(fileobj)
+                else:
+                    self._add_callback(writer)
+
+    def _stop_serving(self, sock):
+        self._remove_reader(sock.fileno())
+        sock.close()
+
+
+class _SelectorTransport(transports._FlowControlMixin,
+                         transports.Transport):
+
+    max_size = 256 * 1024  # Buffer size passed to recv().
+
+    _buffer_factory = bytearray  # Constructs initial value for self._buffer.
+
+    # Attribute used in the destructor: it must be set even if the constructor
+    # is not called (see _SelectorSslTransport which may start by raising an
+    # exception)
+    _sock = None
+
+    def __init__(self, loop, sock, protocol, extra=None, server=None):
+        super().__init__(extra, loop)
+        self._extra['socket'] = trsock.TransportSocket(sock)
+        try:
+            self._extra['sockname'] = sock.getsockname()
+        except OSError:
+            self._extra['sockname'] = None
+        if 'peername' not in self._extra:
+            try:
+                self._extra['peername'] = sock.getpeername()
+            except OSError:
+                self._extra['peername'] = None
+        self._sock = sock
+        self._sock_fd = sock.fileno()
+
+        self._protocol_connected = False
+        self.set_protocol(protocol)
+
+        self._server = server
+        self._buffer = self._buffer_factory()
+        self._conn_lost = 0  # Set when call to connection_lost scheduled.
+        self._closing = False  # Set when close() called.
+        if self._server is not None:
+            self._server._attach()
+        loop._transports[self._sock_fd] = self
+
+    def __repr__(self):
+        info = [self.__class__.__name__]
+        if self._sock is None:
+            info.append('closed')
+        elif self._closing:
+            info.append('closing')
+        info.append(f'fd={self._sock_fd}')
+        # test if the transport was closed
+        if self._loop is not None and not self._loop.is_closed():
+            polling = _test_selector_event(self._loop._selector,
+                                           self._sock_fd, selectors.EVENT_READ)
+            if polling:
+                info.append('read=polling')
+            else:
+                info.append('read=idle')
+
+            polling = _test_selector_event(self._loop._selector,
+                                           self._sock_fd,
+                                           selectors.EVENT_WRITE)
+            if polling:
+                state = 'polling'
+            else:
+                state = 'idle'
+
+            bufsize = self.get_write_buffer_size()
+            info.append(f'write=<{state}, bufsize={bufsize}>')
+        return '<{}>'.format(' '.join(info))
+
+    def abort(self):
+        self._force_close(None)
+
+    def set_protocol(self, protocol):
+        self._protocol = protocol
+        self._protocol_connected = True
+
+    def get_protocol(self):
+        return self._protocol
+
+    def is_closing(self):
+        return self._closing
+
+    def close(self):
+        if self._closing:
+            return
+        self._closing = True
+        self._loop._remove_reader(self._sock_fd)
+        if not self._buffer:
+            self._conn_lost += 1
+            self._loop._remove_writer(self._sock_fd)
+            self._loop.call_soon(self._call_connection_lost, None)
+
+    def __del__(self, _warn=warnings.warn):
+        if self._sock is not None:
+            _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
+            self._sock.close()
+
+    def _fatal_error(self, exc, message='Fatal error on transport'):
+        # Should be called from exception handler only.
+        if isinstance(exc, OSError):
+            if self._loop.get_debug():
+                logger.debug("%r: %s", self, message, exc_info=True)
+        else:
+            self._loop.call_exception_handler({
+                'message': message,
+                'exception': exc,
+                'transport': self,
+                'protocol': self._protocol,
+            })
+        self._force_close(exc)
+
+    def _force_close(self, exc):
+        if self._conn_lost:
+            return
+        if self._buffer:
+            self._buffer.clear()
+            self._loop._remove_writer(self._sock_fd)
+        if not self._closing:
+            self._closing = True
+            self._loop._remove_reader(self._sock_fd)
+        self._conn_lost += 1
+        self._loop.call_soon(self._call_connection_lost, exc)
+
+    def _call_connection_lost(self, exc):
+        try:
+            if self._protocol_connected:
+                self._protocol.connection_lost(exc)
+        finally:
+            self._sock.close()
+            self._sock = None
+            self._protocol = None
+            self._loop = None
+            server = self._server
+            if server is not None:
+                server._detach()
+                self._server = None
+
+    def get_write_buffer_size(self):
+        return len(self._buffer)
+
+    def _add_reader(self, fd, callback, *args):
+        if self._closing:
+            return
+
+        self._loop._add_reader(fd, callback, *args)
+
+
+class _SelectorSocketTransport(_SelectorTransport):
+
+    _start_tls_compatible = True
+    _sendfile_compatible = constants._SendfileMode.TRY_NATIVE
+
+    def __init__(self, loop, sock, protocol, waiter=None,
+                 extra=None, server=None):
+
+        self._read_ready_cb = None
+        super().__init__(loop, sock, protocol, extra, server)
+        self._eof = False
+        self._paused = False
+        self._empty_waiter = None
+
+        # Disable the Nagle algorithm -- small writes will be
+        # sent without waiting for the TCP ACK.  This generally
+        # decreases the latency (in some cases significantly).
+        base_events._set_nodelay(self._sock)
+
+        self._loop.call_soon(self._protocol.connection_made, self)
+        # only start reading when connection_made() has been called
+        self._loop.call_soon(self._add_reader,
+                             self._sock_fd, self._read_ready)
+        if waiter is not None:
+            # only wake up the waiter when connection_made() has been called
+            self._loop.call_soon(futures._set_result_unless_cancelled,
+                                 waiter, None)
+
+    def set_protocol(self, protocol):
+        if isinstance(protocol, protocols.BufferedProtocol):
+            self._read_ready_cb = self._read_ready__get_buffer
+        else:
+            self._read_ready_cb = self._read_ready__data_received
+
+        super().set_protocol(protocol)
+
+    def is_reading(self):
+        return not self._paused and not self._closing
+
+    def pause_reading(self):
+        if self._closing or self._paused:
+            return
+        self._paused = True
+        self._loop._remove_reader(self._sock_fd)
+        if self._loop.get_debug():
+            logger.debug("%r pauses reading", self)
+
+    def resume_reading(self):
+        if self._closing or not self._paused:
+            return
+        self._paused = False
+        self._add_reader(self._sock_fd, self._read_ready)
+        if self._loop.get_debug():
+            logger.debug("%r resumes reading", self)
+
+    def _read_ready(self):
+        self._read_ready_cb()
+
+    def _read_ready__get_buffer(self):
+        if self._conn_lost:
+            return
+
+        try:
+            buf = self._protocol.get_buffer(-1)
+            if not len(buf):
+                raise RuntimeError('get_buffer() returned an empty buffer')
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            self._fatal_error(
+                exc, 'Fatal error: protocol.get_buffer() call failed.')
+            return
+
+        try:
+            nbytes = self._sock.recv_into(buf)
+        except (BlockingIOError, InterruptedError):
+            return
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            self._fatal_error(exc, 'Fatal read error on socket transport')
+            return
+
+        if not nbytes:
+            self._read_ready__on_eof()
+            return
+
+        try:
+            self._protocol.buffer_updated(nbytes)
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            self._fatal_error(
+                exc, 'Fatal error: protocol.buffer_updated() call failed.')
+
+    def _read_ready__data_received(self):
+        if self._conn_lost:
+            return
+        try:
+            data = self._sock.recv(self.max_size)
+        except (BlockingIOError, InterruptedError):
+            return
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            self._fatal_error(exc, 'Fatal read error on socket transport')
+            return
+
+        if not data:
+            self._read_ready__on_eof()
+            return
+
+        try:
+            self._protocol.data_received(data)
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            self._fatal_error(
+                exc, 'Fatal error: protocol.data_received() call failed.')
+
+    def _read_ready__on_eof(self):
+        if self._loop.get_debug():
+            logger.debug("%r received EOF", self)
+
+        try:
+            keep_open = self._protocol.eof_received()
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            self._fatal_error(
+                exc, 'Fatal error: protocol.eof_received() call failed.')
+            return
+
+        if keep_open:
+            # We're keeping the connection open so the
+            # protocol can write more, but we still can't
+            # receive more, so remove the reader callback.
+            self._loop._remove_reader(self._sock_fd)
+        else:
+            self.close()
+
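+    # A hedged sketch of the eof_received() contract handled above: a true
+    # return value keeps the write side open after the peer half-closes.
+    #
+    #     import asyncio
+    #
+    #     class HalfCloseProto(asyncio.Protocol):
+    #         def eof_received(self):
+    #             # Peer sent EOF; keep writing a pending reply.
+    #             return True
+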
+    def write(self, data):
+        if not isinstance(data, (bytes, bytearray, memoryview)):
+            raise TypeError(f'data argument must be a bytes-like object, '
+                            f'not {type(data).__name__!r}')
+        if self._eof:
+            raise RuntimeError('Cannot call write() after write_eof()')
+        if self._empty_waiter is not None:
+            raise RuntimeError('unable to write; sendfile is in progress')
+        if not data:
+            return
+
+        if self._conn_lost:
+            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
+                logger.warning('socket.send() raised exception.')
+            self._conn_lost += 1
+            return
+
+        if not self._buffer:
+            # Optimization: try to send now.
+            try:
+                n = self._sock.send(data)
+            except (BlockingIOError, InterruptedError):
+                pass
+            except (SystemExit, KeyboardInterrupt):
+                raise
+            except BaseException as exc:
+                self._fatal_error(exc, 'Fatal write error on socket transport')
+                return
+            else:
+                data = data[n:]
+                if not data:
+                    return
+            # Not all was written; register write handler.
+            self._loop._add_writer(self._sock_fd, self._write_ready)
+
+        # Add it to the buffer.
+        self._buffer.extend(data)
+        self._maybe_pause_protocol()
+
+    def _write_ready(self):
+        assert self._buffer, 'Data should not be empty'
+
+        if self._conn_lost:
+            return
+        try:
+            n = self._sock.send(self._buffer)
+        except (BlockingIOError, InterruptedError):
+            pass
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            self._loop._remove_writer(self._sock_fd)
+            self._buffer.clear()
+            self._fatal_error(exc, 'Fatal write error on socket transport')
+            if self._empty_waiter is not None:
+                self._empty_waiter.set_exception(exc)
+        else:
+            if n:
+                del self._buffer[:n]
+            self._maybe_resume_protocol()  # May append to buffer.
+            if not self._buffer:
+                self._loop._remove_writer(self._sock_fd)
+                if self._empty_waiter is not None:
+                    self._empty_waiter.set_result(None)
+                if self._closing:
+                    self._call_connection_lost(None)
+                elif self._eof:
+                    self._sock.shutdown(socket.SHUT_WR)
+
+    def write_eof(self):
+        if self._closing or self._eof:
+            return
+        self._eof = True
+        if not self._buffer:
+            self._sock.shutdown(socket.SHUT_WR)
+
+    def can_write_eof(self):
+        return True
+
+    def _call_connection_lost(self, exc):
+        super()._call_connection_lost(exc)
+        if self._empty_waiter is not None:
+            self._empty_waiter.set_exception(
+                ConnectionError("Connection is closed by peer"))
+
+    def _make_empty_waiter(self):
+        if self._empty_waiter is not None:
+            raise RuntimeError("Empty waiter is already set")
+        self._empty_waiter = self._loop.create_future()
+        if not self._buffer:
+            self._empty_waiter.set_result(None)
+        return self._empty_waiter
+
+    def _reset_empty_waiter(self):
+        self._empty_waiter = None
+
+
+class _SelectorDatagramTransport(_SelectorTransport):
+
+    _buffer_factory = collections.deque
+
+    def __init__(self, loop, sock, protocol, address=None,
+                 waiter=None, extra=None):
+        super().__init__(loop, sock, protocol, extra)
+        self._address = address
+        self._loop.call_soon(self._protocol.connection_made, self)
+        # only start reading when connection_made() has been called
+        self._loop.call_soon(self._add_reader,
+                             self._sock_fd, self._read_ready)
+        if waiter is not None:
+            # only wake up the waiter when connection_made() has been called
+            self._loop.call_soon(futures._set_result_unless_cancelled,
+                                 waiter, None)
+
+    def get_write_buffer_size(self):
+        return sum(len(data) for data, _ in self._buffer)
+
+    def _read_ready(self):
+        if self._conn_lost:
+            return
+        try:
+            data, addr = self._sock.recvfrom(self.max_size)
+        except (BlockingIOError, InterruptedError):
+            pass
+        except OSError as exc:
+            self._protocol.error_received(exc)
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            self._fatal_error(exc, 'Fatal read error on datagram transport')
+        else:
+            self._protocol.datagram_received(data, addr)
+
+    def sendto(self, data, addr=None):
+        if not isinstance(data, (bytes, bytearray, memoryview)):
+            raise TypeError(f'data argument must be a bytes-like object, '
+                            f'not {type(data).__name__!r}')
+        if not data:
+            return
+
+        if self._address:
+            if addr not in (None, self._address):
+                raise ValueError(
+                    f'Invalid address: must be None or {self._address}')
+            addr = self._address
+
+        if self._conn_lost and self._address:
+            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
+                logger.warning('socket.send() raised exception.')
+            self._conn_lost += 1
+            return
+
+        if not self._buffer:
+            # Attempt to send it right away first.
+            try:
+                if self._extra['peername']:
+                    self._sock.send(data)
+                else:
+                    self._sock.sendto(data, addr)
+                return
+            except (BlockingIOError, InterruptedError):
+                self._loop._add_writer(self._sock_fd, self._sendto_ready)
+            except OSError as exc:
+                self._protocol.error_received(exc)
+                return
+            except (SystemExit, KeyboardInterrupt):
+                raise
+            except BaseException as exc:
+                self._fatal_error(
+                    exc, 'Fatal write error on datagram transport')
+                return
+
+        # Ensure that what we buffer is immutable.
+        self._buffer.append((bytes(data), addr))
+        self._maybe_pause_protocol()
+
+    def _sendto_ready(self):
+        while self._buffer:
+            data, addr = self._buffer.popleft()
+            try:
+                if self._extra['peername']:
+                    self._sock.send(data)
+                else:
+                    self._sock.sendto(data, addr)
+            except (BlockingIOError, InterruptedError):
+                self._buffer.appendleft((data, addr))  # Try again later.
+                break
+            except OSError as exc:
+                self._protocol.error_received(exc)
+                return
+            except (SystemExit, KeyboardInterrupt):
+                raise
+            except BaseException as exc:
+                self._fatal_error(
+                    exc, 'Fatal write error on datagram transport')
+                return
+
+        self._maybe_resume_protocol()  # May append to buffer.
+        if not self._buffer:
+            self._loop._remove_writer(self._sock_fd)
+            if self._closing:
+                self._call_connection_lost(None)
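+
+
+# A hedged sketch of how this datagram transport is usually driven through
+# the public API (the address is a placeholder):
+#
+#     import asyncio
+#
+#     class UDPEcho(asyncio.DatagramProtocol):
+#         def connection_made(self, transport):
+#             self.transport = transport
+#         def datagram_received(self, data, addr):
+#             self.transport.sendto(data, addr)  # buffered if send blocks
+#
+#     async def main():
+#         loop = asyncio.get_running_loop()
+#         transport, protocol = await loop.create_datagram_endpoint(
+#             UDPEcho, local_addr=('127.0.0.1', 9999))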
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/sslproto.py b/linux-x64/clang/python3/lib/python3.9/asyncio/sslproto.py
new file mode 100644
index 0000000..cad25b2
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/sslproto.py
@@ -0,0 +1,733 @@
+import collections
+import warnings
+try:
+    import ssl
+except ImportError:  # pragma: no cover
+    ssl = None
+
+from . import constants
+from . import protocols
+from . import transports
+from .log import logger
+
+
+def _create_transport_context(server_side, server_hostname):
+    if server_side:
+        raise ValueError('Server side SSL needs a valid SSLContext')
+
+    # Client side may pass ssl=True to use a default
+    # context; in that case the sslcontext passed is None.
+    # The default is secure for client connections.
+    # Python 3.4+: use up-to-date strong settings.
+    sslcontext = ssl.create_default_context()
+    if not server_hostname:
+        sslcontext.check_hostname = False
+    return sslcontext
+
+
+# States of an _SSLPipe.
+_UNWRAPPED = "UNWRAPPED"
+_DO_HANDSHAKE = "DO_HANDSHAKE"
+_WRAPPED = "WRAPPED"
+_SHUTDOWN = "SHUTDOWN"
+
+
+class _SSLPipe(object):
+    """An SSL "Pipe".
+
+    An SSL pipe allows you to communicate with an SSL/TLS protocol instance
+    through memory buffers. It can be used to implement a security layer for an
+    existing connection where you don't have access to the connection's file
+    descriptor, or for some reason you don't want to use it.
+
+    An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode,
+    data is passed through untransformed. In wrapped mode, application level
+    data is encrypted to SSL record level data and vice versa. The SSL record
+    level is the lowest level in the SSL protocol suite and is what travels
+    as-is over the wire.
+
+    An _SSLPipe is initially in "unwrapped" mode. To start SSL, call
+    do_handshake(). To shutdown SSL again, call unwrap().
+    """
+
+    max_size = 256 * 1024   # Buffer size passed to read()
+
+    def __init__(self, context, server_side, server_hostname=None):
+        """
+        The *context* argument specifies the ssl.SSLContext to use.
+
+        The *server_side* argument indicates whether this is a server side or
+        client side transport.
+
+        The optional *server_hostname* argument can be used to specify the
+        hostname you are connecting to. You may only specify this parameter if
+        the _ssl module supports Server Name Indication (SNI).
+        """
+        self._context = context
+        self._server_side = server_side
+        self._server_hostname = server_hostname
+        self._state = _UNWRAPPED
+        self._incoming = ssl.MemoryBIO()
+        self._outgoing = ssl.MemoryBIO()
+        self._sslobj = None
+        self._need_ssldata = False
+        self._handshake_cb = None
+        self._shutdown_cb = None
+
+    @property
+    def context(self):
+        """The SSL context passed to the constructor."""
+        return self._context
+
+    @property
+    def ssl_object(self):
+        """The internal ssl.SSLObject instance.
+
+        Return None if the pipe is not wrapped.
+        """
+        return self._sslobj
+
+    @property
+    def need_ssldata(self):
+        """Whether more record level data is needed to complete a handshake
+        that is currently in progress."""
+        return self._need_ssldata
+
+    @property
+    def wrapped(self):
+        """
+        Whether a security layer is currently in effect.
+
+        Return False during handshake.
+        """
+        return self._state == _WRAPPED
+
+    def do_handshake(self, callback=None):
+        """Start the SSL handshake.
+
+        Return a list of ssldata. A ssldata element is a list of buffers.
+
+        The optional *callback* argument can be used to install a callback that
+        will be called when the handshake is complete. The callback will be
+        called with None if successful, else an exception instance.
+        """
+        if self._state != _UNWRAPPED:
+            raise RuntimeError('handshake in progress or completed')
+        self._sslobj = self._context.wrap_bio(
+            self._incoming, self._outgoing,
+            server_side=self._server_side,
+            server_hostname=self._server_hostname)
+        self._state = _DO_HANDSHAKE
+        self._handshake_cb = callback
+        ssldata, appdata = self.feed_ssldata(b'', only_handshake=True)
+        assert len(appdata) == 0
+        return ssldata
+
+    def shutdown(self, callback=None):
+        """Start the SSL shutdown sequence.
+
+        Return a list of ssldata. A ssldata element is a list of buffers.
+
+        The optional *callback* argument can be used to install a callback that
+        will be called when the shutdown is complete. The callback will be
+        called without arguments.
+        """
+        if self._state == _UNWRAPPED:
+            raise RuntimeError('no security layer present')
+        if self._state == _SHUTDOWN:
+            raise RuntimeError('shutdown in progress')
+        assert self._state in (_WRAPPED, _DO_HANDSHAKE)
+        self._state = _SHUTDOWN
+        self._shutdown_cb = callback
+        ssldata, appdata = self.feed_ssldata(b'')
+        assert appdata == [] or appdata == [b'']
+        return ssldata
+
+    def feed_eof(self):
+        """Send a potentially "ragged" EOF.
+
+        This method will raise an SSL_ERROR_EOF exception if the EOF is
+        unexpected.
+        """
+        self._incoming.write_eof()
+        ssldata, appdata = self.feed_ssldata(b'')
+        assert appdata == [] or appdata == [b'']
+
+    def feed_ssldata(self, data, only_handshake=False):
+        """Feed SSL record level data into the pipe.
+
+        The data must be a bytes instance. It is OK to send an empty bytes
+        instance. This can be used to get ssldata for a handshake initiated by
+        this endpoint.
+
+        Return a (ssldata, appdata) tuple. The ssldata element is a list of
+        buffers containing SSL data that needs to be sent to the remote SSL.
+
+        The appdata element is a list of buffers containing plaintext data that
+        needs to be forwarded to the application. The appdata list may contain
+        an empty buffer indicating an SSL "close_notify" alert. This alert must
+        be acknowledged by calling shutdown().
+        """
+        if self._state == _UNWRAPPED:
+            # If unwrapped, pass plaintext data straight through.
+            if data:
+                appdata = [data]
+            else:
+                appdata = []
+            return ([], appdata)
+
+        self._need_ssldata = False
+        if data:
+            self._incoming.write(data)
+
+        ssldata = []
+        appdata = []
+        try:
+            if self._state == _DO_HANDSHAKE:
+                # Call do_handshake() until it doesn't raise anymore.
+                self._sslobj.do_handshake()
+                self._state = _WRAPPED
+                if self._handshake_cb:
+                    self._handshake_cb(None)
+                if only_handshake:
+                    return (ssldata, appdata)
+                # Handshake done: execute the wrapped block
+
+            if self._state == _WRAPPED:
+                # Main state: read data from SSL until close_notify
+                while True:
+                    chunk = self._sslobj.read(self.max_size)
+                    appdata.append(chunk)
+                    if not chunk:  # close_notify
+                        break
+
+            elif self._state == _SHUTDOWN:
+                # Call shutdown() until it doesn't raise anymore.
+                self._sslobj.unwrap()
+                self._sslobj = None
+                self._state = _UNWRAPPED
+                if self._shutdown_cb:
+                    self._shutdown_cb()
+
+            elif self._state == _UNWRAPPED:
+                # Drain possible plaintext data after close_notify.
+                appdata.append(self._incoming.read())
+        except (ssl.SSLError, ssl.CertificateError) as exc:
+            exc_errno = getattr(exc, 'errno', None)
+            if exc_errno not in (
+                    ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE,
+                    ssl.SSL_ERROR_SYSCALL):
+                if self._state == _DO_HANDSHAKE and self._handshake_cb:
+                    self._handshake_cb(exc)
+                raise
+            self._need_ssldata = (exc_errno == ssl.SSL_ERROR_WANT_READ)
+
+        # Check for record level data that needs to be sent back.
+        # Happens for the initial handshake and renegotiations.
+        if self._outgoing.pending:
+            ssldata.append(self._outgoing.read())
+        return (ssldata, appdata)
+
+    def feed_appdata(self, data, offset=0):
+        """Feed plaintext data into the pipe.
+
+        Return an (ssldata, offset) tuple. The ssldata element is a list of
+        buffers containing record level data that needs to be sent to the
+        remote SSL instance. The offset is the number of plaintext bytes that
+        were processed, which may be less than the length of data.
+
+        NOTE: In case of short writes, this call MUST be retried with the SAME
+        buffer passed into the *data* argument (i.e. the id() must be the
+        same). This is an OpenSSL requirement. A further particularity is that
+        a short write will always have offset == 0, because the _ssl module
+        does not enable partial writes. And even though the offset is zero,
+        there will still be encrypted data in ssldata.
+        """
+        assert 0 <= offset <= len(data)
+        if self._state == _UNWRAPPED:
+            # pass through data in unwrapped mode
+            if offset < len(data):
+                ssldata = [data[offset:]]
+            else:
+                ssldata = []
+            return (ssldata, len(data))
+
+        ssldata = []
+        view = memoryview(data)
+        while True:
+            self._need_ssldata = False
+            try:
+                if offset < len(view):
+                    offset += self._sslobj.write(view[offset:])
+            except ssl.SSLError as exc:
+                # It is not allowed to call write() after unwrap() until the
+                # close_notify is acknowledged. We return the condition to the
+                # caller as a short write.
+                exc_errno = getattr(exc, 'errno', None)
+                if exc.reason == 'PROTOCOL_IS_SHUTDOWN':
+                    exc_errno = exc.errno = ssl.SSL_ERROR_WANT_READ
+                if exc_errno not in (ssl.SSL_ERROR_WANT_READ,
+                                     ssl.SSL_ERROR_WANT_WRITE,
+                                     ssl.SSL_ERROR_SYSCALL):
+                    raise
+                self._need_ssldata = (exc_errno == ssl.SSL_ERROR_WANT_READ)
+
+            # See if there's any record level data back for us.
+            if self._outgoing.pending:
+                ssldata.append(self._outgoing.read())
+            if offset == len(view) or self._need_ssldata:
+                break
+        return (ssldata, offset)
+
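+# A hedged sketch of the ssl.MemoryBIO machinery _SSLPipe builds on: two
+# in-memory BIOs stand in for the network, and handshake bytes are pumped
+# between the endpoints by hand (context setup is assumed elsewhere):
+#
+#     import ssl
+#
+#     def make_endpoint(ctx, *, server_side, hostname=None):
+#         incoming, outgoing = ssl.MemoryBIO(), ssl.MemoryBIO()
+#         obj = ctx.wrap_bio(incoming, outgoing, server_side=server_side,
+#                            server_hostname=hostname)
+#         return obj, incoming, outgoing
+#
+#     def pump(src_outgoing, dst_incoming):
+#         data = src_outgoing.read()   # record-level bytes "on the wire"
+#         if data:
+#             dst_incoming.write(data)
+#         return bool(data)
+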
+
+class _SSLProtocolTransport(transports._FlowControlMixin,
+                            transports.Transport):
+
+    _sendfile_compatible = constants._SendfileMode.FALLBACK
+
+    def __init__(self, loop, ssl_protocol):
+        self._loop = loop
+        # SSLProtocol instance
+        self._ssl_protocol = ssl_protocol
+        self._closed = False
+
+    def get_extra_info(self, name, default=None):
+        """Get optional transport information."""
+        return self._ssl_protocol._get_extra_info(name, default)
+
+    def set_protocol(self, protocol):
+        self._ssl_protocol._set_app_protocol(protocol)
+
+    def get_protocol(self):
+        return self._ssl_protocol._app_protocol
+
+    def is_closing(self):
+        return self._closed
+
+    def close(self):
+        """Close the transport.
+
+        Buffered data will be flushed asynchronously.  No more data
+        will be received.  After all buffered data is flushed, the
+        protocol's connection_lost() method will (eventually) be called
+        with None as its argument.
+        """
+        self._closed = True
+        self._ssl_protocol._start_shutdown()
+
+    def __del__(self, _warn=warnings.warn):
+        if not self._closed:
+            _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
+            self.close()
+
+    def is_reading(self):
+        tr = self._ssl_protocol._transport
+        if tr is None:
+            raise RuntimeError('SSL transport has not been initialized yet')
+        return tr.is_reading()
+
+    def pause_reading(self):
+        """Pause the receiving end.
+
+        No data will be passed to the protocol's data_received()
+        method until resume_reading() is called.
+        """
+        self._ssl_protocol._transport.pause_reading()
+
+    def resume_reading(self):
+        """Resume the receiving end.
+
+        Data received will once again be passed to the protocol's
+        data_received() method.
+        """
+        self._ssl_protocol._transport.resume_reading()
+
+    def set_write_buffer_limits(self, high=None, low=None):
+        """Set the high- and low-water limits for write flow control.
+
+        These two values control when to call the protocol's
+        pause_writing() and resume_writing() methods.  If specified,
+        the low-water limit must be less than or equal to the
+        high-water limit.  Neither value can be negative.
+
+        The defaults are implementation-specific.  If only the
+        high-water limit is given, the low-water limit defaults to an
+        implementation-specific value less than or equal to the
+        high-water limit.  Setting high to zero forces low to zero as
+        well, and causes pause_writing() to be called whenever the
+        buffer becomes non-empty.  Setting low to zero causes
+        resume_writing() to be called only once the buffer is empty.
+        Use of zero for either limit is generally sub-optimal as it
+        reduces opportunities for doing I/O and computation
+        concurrently.
+        """
+        self._ssl_protocol._transport.set_write_buffer_limits(high, low)
+
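+    # A hedged sketch of the flow-control hooks those limits drive (the
+    # protocol class is illustrative):
+    #
+    #     import asyncio
+    #
+    #     class ThrottledProto(asyncio.Protocol):
+    #         def connection_made(self, transport):
+    #             transport.set_write_buffer_limits(high=64 * 1024)
+    #             self._can_write = asyncio.Event()
+    #             self._can_write.set()
+    #         def pause_writing(self):   # buffer rose above high-water mark
+    #             self._can_write.clear()
+    #         def resume_writing(self):  # buffer drained below low-water mark
+    #             self._can_write.set()
+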
+    def get_write_buffer_size(self):
+        """Return the current size of the write buffer."""
+        return self._ssl_protocol._transport.get_write_buffer_size()
+
+    @property
+    def _protocol_paused(self):
+        # Required for sendfile fallback pause_writing/resume_writing logic
+        return self._ssl_protocol._transport._protocol_paused
+
+    def write(self, data):
+        """Write some data bytes to the transport.
+
+        This does not block; it buffers the data and arranges for it
+        to be sent out asynchronously.
+        """
+        if not isinstance(data, (bytes, bytearray, memoryview)):
+            raise TypeError(f"data: expecting a bytes-like instance, "
+                            f"got {type(data).__name__}")
+        if not data:
+            return
+        self._ssl_protocol._write_appdata(data)
+
+    def can_write_eof(self):
+        """Return True if this transport supports write_eof(), False if not."""
+        return False
+
+    def abort(self):
+        """Close the transport immediately.
+
+        Buffered data will be lost.  No more data will be received.
+        The protocol's connection_lost() method will (eventually) be
+        called with None as its argument.
+        """
+        self._ssl_protocol._abort()
+        self._closed = True
+
+
+class SSLProtocol(protocols.Protocol):
+    """SSL protocol.
+
+    Implementation of SSL on top of a socket using incoming and outgoing
+    buffers which are ssl.MemoryBIO objects.
+    """
+
+    def __init__(self, loop, app_protocol, sslcontext, waiter,
+                 server_side=False, server_hostname=None,
+                 call_connection_made=True,
+                 ssl_handshake_timeout=None):
+        if ssl is None:
+            raise RuntimeError('stdlib ssl module not available')
+
+        if ssl_handshake_timeout is None:
+            ssl_handshake_timeout = constants.SSL_HANDSHAKE_TIMEOUT
+        elif ssl_handshake_timeout <= 0:
+            raise ValueError(
+                f"ssl_handshake_timeout should be a positive number, "
+                f"got {ssl_handshake_timeout}")
+
+        if not sslcontext:
+            sslcontext = _create_transport_context(
+                server_side, server_hostname)
+
+        self._server_side = server_side
+        if server_hostname and not server_side:
+            self._server_hostname = server_hostname
+        else:
+            self._server_hostname = None
+        self._sslcontext = sslcontext
+        # SSL-specific extra info. More info is set when the handshake
+        # completes.
+        self._extra = dict(sslcontext=sslcontext)
+
+        # App data write buffering
+        self._write_backlog = collections.deque()
+        self._write_buffer_size = 0
+
+        self._waiter = waiter
+        self._loop = loop
+        self._set_app_protocol(app_protocol)
+        self._app_transport = _SSLProtocolTransport(self._loop, self)
+        # _SSLPipe instance (None until the connection is made)
+        self._sslpipe = None
+        self._session_established = False
+        self._in_handshake = False
+        self._in_shutdown = False
+        # transport, e.g. _SelectorSocketTransport
+        self._transport = None
+        self._call_connection_made = call_connection_made
+        self._ssl_handshake_timeout = ssl_handshake_timeout
+
+    def _set_app_protocol(self, app_protocol):
+        self._app_protocol = app_protocol
+        self._app_protocol_is_buffer = \
+            isinstance(app_protocol, protocols.BufferedProtocol)
+
+    def _wakeup_waiter(self, exc=None):
+        if self._waiter is None:
+            return
+        if not self._waiter.cancelled():
+            if exc is not None:
+                self._waiter.set_exception(exc)
+            else:
+                self._waiter.set_result(None)
+        self._waiter = None
+
+    def connection_made(self, transport):
+        """Called when the low-level connection is made.
+
+        Start the SSL handshake.
+        """
+        self._transport = transport
+        self._sslpipe = _SSLPipe(self._sslcontext,
+                                 self._server_side,
+                                 self._server_hostname)
+        self._start_handshake()
+
+    def connection_lost(self, exc):
+        """Called when the low-level connection is lost or closed.
+
+        The argument is an exception object or None (the latter
+        meaning a regular EOF is received or the connection was
+        aborted or closed).
+        """
+        if self._session_established:
+            self._session_established = False
+            self._loop.call_soon(self._app_protocol.connection_lost, exc)
+        else:
+            # Most likely an exception occurred while in SSL handshake.
+            # Just mark the app transport as closed so that its __del__
+            # doesn't complain.
+            if self._app_transport is not None:
+                self._app_transport._closed = True
+        self._transport = None
+        self._app_transport = None
+        if getattr(self, '_handshake_timeout_handle', None):
+            self._handshake_timeout_handle.cancel()
+        self._wakeup_waiter(exc)
+        self._app_protocol = None
+        self._sslpipe = None
+
+    def pause_writing(self):
+        """Called when the low-level transport's buffer goes over
+        the high-water mark.
+        """
+        self._app_protocol.pause_writing()
+
+    def resume_writing(self):
+        """Called when the low-level transport's buffer drains below
+        the low-water mark.
+        """
+        self._app_protocol.resume_writing()
+
+    def data_received(self, data):
+        """Called when some SSL data is received.
+
+        The argument is a bytes object.
+        """
+        if self._sslpipe is None:
+            # transport closing, sslpipe is destroyed
+            return
+
+        try:
+            ssldata, appdata = self._sslpipe.feed_ssldata(data)
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as e:
+            self._fatal_error(e, 'SSL error in data received')
+            return
+
+        for chunk in ssldata:
+            self._transport.write(chunk)
+
+        for chunk in appdata:
+            if chunk:
+                try:
+                    if self._app_protocol_is_buffer:
+                        protocols._feed_data_to_buffered_proto(
+                            self._app_protocol, chunk)
+                    else:
+                        self._app_protocol.data_received(chunk)
+                except (SystemExit, KeyboardInterrupt):
+                    raise
+                except BaseException as ex:
+                    self._fatal_error(
+                        ex, 'application protocol failed to receive SSL data')
+                    return
+            else:
+                self._start_shutdown()
+                break
+
+    def eof_received(self):
+        """Called when the other end of the low-level stream
+        is half-closed.
+
+        If this returns a false value (including None), the transport
+        will close itself.  If it returns a true value, closing the
+        transport is up to the protocol.
+        """
+        try:
+            if self._loop.get_debug():
+                logger.debug("%r received EOF", self)
+
+            self._wakeup_waiter(ConnectionResetError)
+
+            if not self._in_handshake:
+                keep_open = self._app_protocol.eof_received()
+                if keep_open:
+                    logger.warning('returning true from eof_received() '
+                                   'has no effect when using ssl')
+        finally:
+            self._transport.close()
+
+    def _get_extra_info(self, name, default=None):
+        if name in self._extra:
+            return self._extra[name]
+        elif self._transport is not None:
+            return self._transport.get_extra_info(name, default)
+        else:
+            return default
+
+    def _start_shutdown(self):
+        if self._in_shutdown:
+            return
+        if self._in_handshake:
+            self._abort()
+        else:
+            self._in_shutdown = True
+            self._write_appdata(b'')
+
+    def _write_appdata(self, data):
+        self._write_backlog.append((data, 0))
+        self._write_buffer_size += len(data)
+        self._process_write_backlog()
+
+    def _start_handshake(self):
+        if self._loop.get_debug():
+            logger.debug("%r starts SSL handshake", self)
+            self._handshake_start_time = self._loop.time()
+        else:
+            self._handshake_start_time = None
+        self._in_handshake = True
+        # (b'', 1) is a special value in _process_write_backlog() to do
+        # the SSL handshake
+        self._write_backlog.append((b'', 1))
+        self._handshake_timeout_handle = \
+            self._loop.call_later(self._ssl_handshake_timeout,
+                                  self._check_handshake_timeout)
+        self._process_write_backlog()
+
+    def _check_handshake_timeout(self):
+        if self._in_handshake is True:
+            msg = (
+                f"SSL handshake is taking longer than "
+                f"{self._ssl_handshake_timeout} seconds: "
+                f"aborting the connection"
+            )
+            self._fatal_error(ConnectionAbortedError(msg))
+
+    def _on_handshake_complete(self, handshake_exc):
+        self._in_handshake = False
+        self._handshake_timeout_handle.cancel()
+
+        sslobj = self._sslpipe.ssl_object
+        try:
+            if handshake_exc is not None:
+                raise handshake_exc
+
+            peercert = sslobj.getpeercert()
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            if isinstance(exc, ssl.CertificateError):
+                msg = 'SSL handshake failed on verifying the certificate'
+            else:
+                msg = 'SSL handshake failed'
+            self._fatal_error(exc, msg)
+            return
+
+        if self._loop.get_debug():
+            dt = self._loop.time() - self._handshake_start_time
+            logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)
+
+        # Add extra info that becomes available after handshake.
+        self._extra.update(peercert=peercert,
+                           cipher=sslobj.cipher(),
+                           compression=sslobj.compression(),
+                           ssl_object=sslobj,
+                           )
+        if self._call_connection_made:
+            self._app_protocol.connection_made(self._app_transport)
+        self._wakeup_waiter()
+        self._session_established = True
+        # In case transport.write() was already called. Don't call
+        # immediately _process_write_backlog(), but schedule it:
+        # _on_handshake_complete() can be called indirectly from
+        # _process_write_backlog(), and _process_write_backlog() is not
+        # reentrant.
+        self._loop.call_soon(self._process_write_backlog)
+
+    def _process_write_backlog(self):
+        # Try to make progress on the write backlog.
+        if self._transport is None or self._sslpipe is None:
+            return
+
+        try:
+            for i in range(len(self._write_backlog)):
+                data, offset = self._write_backlog[0]
+                if data:
+                    ssldata, offset = self._sslpipe.feed_appdata(data, offset)
+                elif offset:
+                    ssldata = self._sslpipe.do_handshake(
+                        self._on_handshake_complete)
+                    offset = 1
+                else:
+                    ssldata = self._sslpipe.shutdown(self._finalize)
+                    offset = 1
+
+                for chunk in ssldata:
+                    self._transport.write(chunk)
+
+                if offset < len(data):
+                    self._write_backlog[0] = (data, offset)
+                    # A short write means that a write is blocked on a read.
+                    # We need to enable reading if it is paused!
+                    assert self._sslpipe.need_ssldata
+                    if self._transport._paused:
+                        self._transport.resume_reading()
+                    break
+
+                # An entire chunk from the backlog was processed. We can
+                # delete it and reduce the outstanding buffer size.
+                del self._write_backlog[0]
+                self._write_buffer_size -= len(data)
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            if self._in_handshake:
+                # Exceptions will be re-raised in _on_handshake_complete.
+                self._on_handshake_complete(exc)
+            else:
+                self._fatal_error(exc, 'Fatal error on SSL transport')
+
+    def _fatal_error(self, exc, message='Fatal error on transport'):
+        if isinstance(exc, OSError):
+            if self._loop.get_debug():
+                logger.debug("%r: %s", self, message, exc_info=True)
+        else:
+            self._loop.call_exception_handler({
+                'message': message,
+                'exception': exc,
+                'transport': self._transport,
+                'protocol': self,
+            })
+        if self._transport:
+            self._transport._force_close(exc)
+
+    def _finalize(self):
+        self._sslpipe = None
+
+        if self._transport is not None:
+            self._transport.close()
+
+    def _abort(self):
+        try:
+            if self._transport is not None:
+                self._transport.abort()
+        finally:
+            self._finalize()
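+
+# A minimal, illustrative sketch (not part of this module) of how the
+# handshake timeout enforced by _check_handshake_timeout() is configured
+# from user code; the host, port and timeout values are placeholders:
+#
+#     import asyncio, ssl
+#
+#     async def connect():
+#         ctx = ssl.create_default_context()
+#         loop = asyncio.get_running_loop()
+#         transport, protocol = await loop.create_connection(
+#             asyncio.Protocol, 'example.com', 443,
+#             ssl=ctx, ssl_handshake_timeout=10.0)
+#         transport.close()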
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/staggered.py b/linux-x64/clang/python3/lib/python3.9/asyncio/staggered.py
new file mode 100644
index 0000000..451a53a
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/staggered.py
@@ -0,0 +1,149 @@
+"""Support for running coroutines in parallel with staggered start times."""
+
+__all__ = 'staggered_race',
+
+import contextlib
+import typing
+
+from . import events
+from . import exceptions as exceptions_mod
+from . import locks
+from . import tasks
+
+
+async def staggered_race(
+        coro_fns: typing.Iterable[typing.Callable[[], typing.Awaitable]],
+        delay: typing.Optional[float],
+        *,
+        loop: events.AbstractEventLoop = None,
+) -> typing.Tuple[
+    typing.Any,
+    typing.Optional[int],
+    typing.List[typing.Optional[Exception]]
+]:
+    """Run coroutines with staggered start times and take the first to finish.
+
+    This function takes an iterable of coroutine functions. The first one is
+    started immediately. From then on, whenever the immediately preceding one
+    fails (raises an exception), or when *delay* seconds have passed, the next
+    coroutine is started. This continues until one of the coroutines completes
+    successfully, in which case all others are cancelled, or until all
+    coroutines fail.
+
+    The coroutines provided should be well-behaved in the following way:
+
+    * They should only ``return`` if completed successfully.
+
+    * They should always raise an exception if they did not complete
+      successfully. In particular, if they handle cancellation, they should
+      probably reraise, like this::
+
+        try:
+            # do work
+        except asyncio.CancelledError:
+            # undo partially completed work
+            raise
+
+    Args:
+        coro_fns: an iterable of coroutine functions, i.e. callables that
+            return a coroutine object when called. Use ``functools.partial`` or
+            lambdas to pass arguments.
+
+        delay: amount of time, in seconds, between starting coroutines. If
+            ``None``, the coroutines will run sequentially.
+
+        loop: the event loop to use.
+
+    Returns:
+        tuple *(winner_result, winner_index, exceptions)* where
+
+        - *winner_result*: the result of the winning coroutine, or ``None``
+          if no coroutines won.
+
+        - *winner_index*: the index of the winning coroutine in
+          ``coro_fns``, or ``None`` if no coroutines won. If the winning
+          coroutine may return None on success, *winner_index* can be used
+          to definitively determine whether any coroutine won.
+
+        - *exceptions*: list of exceptions returned by the coroutines.
+          ``len(exceptions)`` is equal to the number of coroutines actually
+          started, and the order is the same as in ``coro_fns``. The winning
+          coroutine's entry is ``None``.
+
+    """
+    # TODO: when we have aiter() and anext(), allow async iterables in coro_fns.
+    loop = loop or events.get_running_loop()
+    enum_coro_fns = enumerate(coro_fns)
+    winner_result = None
+    winner_index = None
+    exceptions = []
+    running_tasks = []
+
+    async def run_one_coro(
+            previous_failed: typing.Optional[locks.Event]) -> None:
+        # Wait for the previous task to finish, or for delay seconds
+        if previous_failed is not None:
+            with contextlib.suppress(exceptions_mod.TimeoutError):
+                # Use asyncio.wait_for() instead of asyncio.wait() here, so
+                # that if we get cancelled at this point, Event.wait() is also
+                # cancelled; otherwise there will be a "Task was destroyed but
+                # it is pending!" warning later.
+                await tasks.wait_for(previous_failed.wait(), delay)
+        # Get the next coroutine to run
+        try:
+            this_index, coro_fn = next(enum_coro_fns)
+        except StopIteration:
+            return
+        # Start task that will run the next coroutine
+        this_failed = locks.Event()
+        next_task = loop.create_task(run_one_coro(this_failed))
+        running_tasks.append(next_task)
+        assert len(running_tasks) == this_index + 2
+        # Prepare place to put this coroutine's exceptions if not won
+        exceptions.append(None)
+        assert len(exceptions) == this_index + 1
+
+        try:
+            result = await coro_fn()
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as e:
+            exceptions[this_index] = e
+            this_failed.set()  # Kickstart the next coroutine
+        else:
+            # Store winner's results
+            nonlocal winner_index, winner_result
+            assert winner_index is None
+            winner_index = this_index
+            winner_result = result
+            # Cancel all other tasks. We take care not to cancel the current
+            # task as well. If we did, then since there is no `await` after
+            # this point and CancelledError is usually raised at an `await`,
+            # we would hit a curious corner case where the current task ends
+            # up as done() == True, cancelled() == False, exception() ==
+            # asyncio.CancelledError. This behavior is described in
+            # https://bugs.python.org/issue30048
+            for i, t in enumerate(running_tasks):
+                if i != this_index:
+                    t.cancel()
+
+    first_task = loop.create_task(run_one_coro(None))
+    running_tasks.append(first_task)
+    try:
+        # Wait for a growing list of tasks to all finish: poor man's version of
+        # curio's TaskGroup or trio's nursery
+        done_count = 0
+        while done_count != len(running_tasks):
+            done, _ = await tasks.wait(running_tasks)
+            done_count = len(done)
+            # If run_one_coro raises an unhandled exception, it's probably a
+            # programming error, and I want to see it.
+            if __debug__:
+                for d in done:
+                    if d.done() and not d.cancelled() and d.exception():
+                        raise d.exception()
+        return winner_result, winner_index, exceptions
+    finally:
+        # Make sure no tasks are left running if we leave this function
+        for t in running_tasks:
+            t.cancel()
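+
+# A minimal usage sketch for staggered_race(); the coroutine functions and
+# the delay value are illustrative only:
+#
+#     async def main():
+#         async def fast():
+#             await tasks.sleep(0.1)
+#             return 'fast'
+#
+#         async def slow():
+#             await tasks.sleep(1.0)
+#             return 'slow'
+#
+#         winner, index, excs = await staggered_race([fast, slow], delay=0.5)
+#         # winner == 'fast', index == 0; only one coroutine was started,
+#         # so excs == [None]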
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/streams.py b/linux-x64/clang/python3/lib/python3.9/asyncio/streams.py
new file mode 100644
index 0000000..3c80bb8
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/streams.py
@@ -0,0 +1,741 @@
+__all__ = (
+    'StreamReader', 'StreamWriter', 'StreamReaderProtocol',
+    'open_connection', 'start_server')
+
+import socket
+import sys
+import warnings
+import weakref
+
+if hasattr(socket, 'AF_UNIX'):
+    __all__ += ('open_unix_connection', 'start_unix_server')
+
+from . import coroutines
+from . import events
+from . import exceptions
+from . import format_helpers
+from . import protocols
+from .log import logger
+from .tasks import sleep
+
+
+_DEFAULT_LIMIT = 2 ** 16  # 64 KiB
+
+
+async def open_connection(host=None, port=None, *,
+                          loop=None, limit=_DEFAULT_LIMIT, **kwds):
+    """A wrapper for create_connection() returning a (reader, writer) pair.
+
+    The reader returned is a StreamReader instance; the writer is a
+    StreamWriter instance.
+
+    The arguments are all the usual arguments to create_connection()
+    except protocol_factory; most common are positional host and port,
+    with various optional keyword arguments following.
+
+    Additional optional keyword arguments are loop (to set the event loop
+    instance to use) and limit (to set the buffer limit passed to the
+    StreamReader).
+
+    (If you want to customize the StreamReader and/or
+    StreamReaderProtocol classes, just copy the code -- there's
+    really nothing special here except some convenience.)
+    """
+    if loop is None:
+        loop = events.get_event_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8, "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning, stacklevel=2)
+    reader = StreamReader(limit=limit, loop=loop)
+    protocol = StreamReaderProtocol(reader, loop=loop)
+    transport, _ = await loop.create_connection(
+        lambda: protocol, host, port, **kwds)
+    writer = StreamWriter(transport, protocol, reader, loop)
+    return reader, writer
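+
+# A minimal client sketch for open_connection(); the address is a
+# placeholder and assumes some line-oriented server is already listening:
+#
+#     async def client():
+#         reader, writer = await open_connection('127.0.0.1', 8888)
+#         writer.write(b'ping\n')
+#         await writer.drain()
+#         reply = await reader.readline()
+#         writer.close()
+#         await writer.wait_closed()
+#         return reply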
+
+
+async def start_server(client_connected_cb, host=None, port=None, *,
+                       loop=None, limit=_DEFAULT_LIMIT, **kwds):
+    """Start a socket server, call back for each client connected.
+
+    The first parameter, `client_connected_cb`, takes two parameters:
+    client_reader, client_writer.  client_reader is a StreamReader
+    object, while client_writer is a StreamWriter object.  This
+    parameter can either be a plain callback function or a coroutine;
+    if it is a coroutine, it will be automatically converted into a
+    Task.
+
+    The rest of the arguments are all the usual arguments to
+    loop.create_server() except protocol_factory; most common are
+    positional host and port, with various optional keyword arguments
+    following.  The return value is the same as loop.create_server().
+
+    Additional optional keyword arguments are loop (to set the event loop
+    instance to use) and limit (to set the buffer limit passed to the
+    StreamReader).
+
+    The return value is the same as loop.create_server(), i.e. a
+    Server object which can be used to stop the service.
+    """
+    if loop is None:
+        loop = events.get_event_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8, "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning, stacklevel=2)
+
+    def factory():
+        reader = StreamReader(limit=limit, loop=loop)
+        protocol = StreamReaderProtocol(reader, client_connected_cb,
+                                        loop=loop)
+        return protocol
+
+    return await loop.create_server(factory, host, port, **kwds)
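+
+# A minimal echo-server sketch for start_server(); the callback takes the
+# (reader, writer) pair described above, and the address is a placeholder:
+#
+#     async def handle(reader, writer):
+#         data = await reader.readline()
+#         writer.write(data)
+#         await writer.drain()
+#         writer.close()
+#
+#     async def main():
+#         server = await start_server(handle, '127.0.0.1', 8888)
+#         async with server:
+#             await server.serve_forever()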
+
+
+if hasattr(socket, 'AF_UNIX'):
+    # UNIX Domain Sockets are supported on this platform
+
+    async def open_unix_connection(path=None, *,
+                                   loop=None, limit=_DEFAULT_LIMIT, **kwds):
+        """Similar to `open_connection` but works with UNIX Domain Sockets."""
+        if loop is None:
+            loop = events.get_event_loop()
+        else:
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
+        reader = StreamReader(limit=limit, loop=loop)
+        protocol = StreamReaderProtocol(reader, loop=loop)
+        transport, _ = await loop.create_unix_connection(
+            lambda: protocol, path, **kwds)
+        writer = StreamWriter(transport, protocol, reader, loop)
+        return reader, writer
+
+    async def start_unix_server(client_connected_cb, path=None, *,
+                                loop=None, limit=_DEFAULT_LIMIT, **kwds):
+        """Similar to `start_server` but works with UNIX Domain Sockets."""
+        if loop is None:
+            loop = events.get_event_loop()
+        else:
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
+
+        def factory():
+            reader = StreamReader(limit=limit, loop=loop)
+            protocol = StreamReaderProtocol(reader, client_connected_cb,
+                                            loop=loop)
+            return protocol
+
+        return await loop.create_unix_server(factory, path, **kwds)
+
+
+class FlowControlMixin(protocols.Protocol):
+    """Reusable flow control logic for StreamWriter.drain().
+
+    This implements the protocol methods pause_writing(),
+    resume_writing() and connection_lost().  If the subclass overrides
+    these it must call the super methods.
+
+    StreamWriter.drain() must wait for _drain_helper() coroutine.
+    """
+
+    def __init__(self, loop=None):
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+        self._paused = False
+        self._drain_waiter = None
+        self._connection_lost = False
+
+    def pause_writing(self):
+        assert not self._paused
+        self._paused = True
+        if self._loop.get_debug():
+            logger.debug("%r pauses writing", self)
+
+    def resume_writing(self):
+        assert self._paused
+        self._paused = False
+        if self._loop.get_debug():
+            logger.debug("%r resumes writing", self)
+
+        waiter = self._drain_waiter
+        if waiter is not None:
+            self._drain_waiter = None
+            if not waiter.done():
+                waiter.set_result(None)
+
+    def connection_lost(self, exc):
+        self._connection_lost = True
+        # Wake up the writer if currently paused.
+        if not self._paused:
+            return
+        waiter = self._drain_waiter
+        if waiter is None:
+            return
+        self._drain_waiter = None
+        if waiter.done():
+            return
+        if exc is None:
+            waiter.set_result(None)
+        else:
+            waiter.set_exception(exc)
+
+    async def _drain_helper(self):
+        if self._connection_lost:
+            raise ConnectionResetError('Connection lost')
+        if not self._paused:
+            return
+        waiter = self._drain_waiter
+        assert waiter is None or waiter.cancelled()
+        waiter = self._loop.create_future()
+        self._drain_waiter = waiter
+        await waiter
+
+    def _get_close_waiter(self, stream):
+        raise NotImplementedError
+
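+# A sketch of the subclassing contract described in FlowControlMixin's
+# docstring: overrides must chain to the superclass methods (the class
+# name and log messages are illustrative):
+#
+#     class LoggingProtocol(FlowControlMixin):
+#         def pause_writing(self):
+#             logger.debug('backpressure: pausing writes')
+#             super().pause_writing()
+#
+#         def resume_writing(self):
+#             logger.debug('backpressure: resuming writes')
+#             super().resume_writing()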
+
+class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
+    """Helper class to adapt between Protocol and StreamReader.
+
+    (This is a helper class instead of making StreamReader itself a
+    Protocol subclass, because the StreamReader has other potential
+    uses, and to prevent the user of the StreamReader from accidentally
+    calling inappropriate methods of the protocol.)
+    """
+
+    _source_traceback = None
+
+    def __init__(self, stream_reader, client_connected_cb=None, loop=None):
+        super().__init__(loop=loop)
+        if stream_reader is not None:
+            self._stream_reader_wr = weakref.ref(stream_reader)
+            self._source_traceback = stream_reader._source_traceback
+        else:
+            self._stream_reader_wr = None
+        if client_connected_cb is not None:
+            # This is a stream created by the `create_server()` function.
+            # Keep a strong reference to the reader until a connection
+            # is established.
+            self._strong_reader = stream_reader
+        self._reject_connection = False
+        self._stream_writer = None
+        self._transport = None
+        self._client_connected_cb = client_connected_cb
+        self._over_ssl = False
+        self._closed = self._loop.create_future()
+
+    @property
+    def _stream_reader(self):
+        if self._stream_reader_wr is None:
+            return None
+        return self._stream_reader_wr()
+
+    def connection_made(self, transport):
+        if self._reject_connection:
+            context = {
+                'message': ('An open stream was garbage collected prior to '
+                            'establishing network connection; '
+                            'call "stream.close()" explicitly.')
+            }
+            if self._source_traceback:
+                context['source_traceback'] = self._source_traceback
+            self._loop.call_exception_handler(context)
+            transport.abort()
+            return
+        self._transport = transport
+        reader = self._stream_reader
+        if reader is not None:
+            reader.set_transport(transport)
+        self._over_ssl = transport.get_extra_info('sslcontext') is not None
+        if self._client_connected_cb is not None:
+            self._stream_writer = StreamWriter(transport, self,
+                                               reader,
+                                               self._loop)
+            res = self._client_connected_cb(reader,
+                                            self._stream_writer)
+            if coroutines.iscoroutine(res):
+                self._loop.create_task(res)
+            self._strong_reader = None
+
+    def connection_lost(self, exc):
+        reader = self._stream_reader
+        if reader is not None:
+            if exc is None:
+                reader.feed_eof()
+            else:
+                reader.set_exception(exc)
+        if not self._closed.done():
+            if exc is None:
+                self._closed.set_result(None)
+            else:
+                self._closed.set_exception(exc)
+        super().connection_lost(exc)
+        self._stream_reader_wr = None
+        self._stream_writer = None
+        self._transport = None
+
+    def data_received(self, data):
+        reader = self._stream_reader
+        if reader is not None:
+            reader.feed_data(data)
+
+    def eof_received(self):
+        reader = self._stream_reader
+        if reader is not None:
+            reader.feed_eof()
+        if self._over_ssl:
+            # Prevent a warning in SSLProtocol.eof_received:
+            # "returning true from eof_received()
+            # has no effect when using ssl"
+            return False
+        return True
+
+    def _get_close_waiter(self, stream):
+        return self._closed
+
+    def __del__(self):
+        # Prevent reports about unhandled exceptions.
+        # Better than self._closed._log_traceback = False hack
+        closed = self._closed
+        if closed.done() and not closed.cancelled():
+            closed.exception()
+
+
+class StreamWriter:
+    """Wraps a Transport.
+
+    This exposes write(), writelines(), [can_]write_eof(),
+    get_extra_info() and close().  It adds drain() which returns an
+    optional Future on which you can wait for flow control.  It also
+    adds a transport property which references the Transport
+    directly.
+    """
+
+    def __init__(self, transport, protocol, reader, loop):
+        self._transport = transport
+        self._protocol = protocol
+        # drain() expects that the reader has an exception() method
+        assert reader is None or isinstance(reader, StreamReader)
+        self._reader = reader
+        self._loop = loop
+        self._complete_fut = self._loop.create_future()
+        self._complete_fut.set_result(None)
+
+    def __repr__(self):
+        info = [self.__class__.__name__, f'transport={self._transport!r}']
+        if self._reader is not None:
+            info.append(f'reader={self._reader!r}')
+        return '<{}>'.format(' '.join(info))
+
+    @property
+    def transport(self):
+        return self._transport
+
+    def write(self, data):
+        self._transport.write(data)
+
+    def writelines(self, data):
+        self._transport.writelines(data)
+
+    def write_eof(self):
+        return self._transport.write_eof()
+
+    def can_write_eof(self):
+        return self._transport.can_write_eof()
+
+    def close(self):
+        return self._transport.close()
+
+    def is_closing(self):
+        return self._transport.is_closing()
+
+    async def wait_closed(self):
+        await self._protocol._get_close_waiter(self)
+
+    def get_extra_info(self, name, default=None):
+        return self._transport.get_extra_info(name, default)
+
+    async def drain(self):
+        """Flush the write buffer.
+
+        The intended use is to write
+
+          w.write(data)
+          await w.drain()
+        """
+        if self._reader is not None:
+            exc = self._reader.exception()
+            if exc is not None:
+                raise exc
+        if self._transport.is_closing():
+            # Wait for protocol.connection_lost() call
+            # Raise connection closing error if any,
+            # ConnectionResetError otherwise
+            # Yield to the event loop so connection_lost() may be
+            # called.  Without this, _drain_helper() would return
+            # immediately, and code that calls
+            #     write(...); await drain()
+            # in a loop would never call connection_lost(), so it
+            # would not see an error when the socket is closed.
+            await sleep(0)
+        await self._protocol._drain_helper()
+
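+# A sketch of the write-then-drain pattern from StreamWriter.drain()'s
+# docstring applied to a large payload sent in chunks (the chunk size is
+# illustrative, and write_eof() assumes the transport supports it):
+#
+#     async def send_all(writer, payload, chunk_size=64 * 1024):
+#         for i in range(0, len(payload), chunk_size):
+#             writer.write(payload[i:i + chunk_size])
+#             await writer.drain()
+#         writer.write_eof()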
+
+class StreamReader:
+
+    _source_traceback = None
+
+    def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
+        # The line length limit is a security feature;
+        # it also doubles as half the buffer limit.
+
+        if limit <= 0:
+            raise ValueError('Limit cannot be <= 0')
+
+        self._limit = limit
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+        self._buffer = bytearray()
+        self._eof = False    # Whether we're done.
+        self._waiter = None  # A future used by _wait_for_data()
+        self._exception = None
+        self._transport = None
+        self._paused = False
+        if self._loop.get_debug():
+            self._source_traceback = format_helpers.extract_stack(
+                sys._getframe(1))
+
+    def __repr__(self):
+        info = ['StreamReader']
+        if self._buffer:
+            info.append(f'{len(self._buffer)} bytes')
+        if self._eof:
+            info.append('eof')
+        if self._limit != _DEFAULT_LIMIT:
+            info.append(f'limit={self._limit}')
+        if self._waiter:
+            info.append(f'waiter={self._waiter!r}')
+        if self._exception:
+            info.append(f'exception={self._exception!r}')
+        if self._transport:
+            info.append(f'transport={self._transport!r}')
+        if self._paused:
+            info.append('paused')
+        return '<{}>'.format(' '.join(info))
+
+    def exception(self):
+        return self._exception
+
+    def set_exception(self, exc):
+        self._exception = exc
+
+        waiter = self._waiter
+        if waiter is not None:
+            self._waiter = None
+            if not waiter.cancelled():
+                waiter.set_exception(exc)
+
+    def _wakeup_waiter(self):
+        """Wakeup read*() functions waiting for data or EOF."""
+        waiter = self._waiter
+        if waiter is not None:
+            self._waiter = None
+            if not waiter.cancelled():
+                waiter.set_result(None)
+
+    def set_transport(self, transport):
+        assert self._transport is None, 'Transport already set'
+        self._transport = transport
+
+    def _maybe_resume_transport(self):
+        if self._paused and len(self._buffer) <= self._limit:
+            self._paused = False
+            self._transport.resume_reading()
+
+    def feed_eof(self):
+        self._eof = True
+        self._wakeup_waiter()
+
+    def at_eof(self):
+        """Return True if the buffer is empty and 'feed_eof' was called."""
+        return self._eof and not self._buffer
+
+    def feed_data(self, data):
+        assert not self._eof, 'feed_data after feed_eof'
+
+        if not data:
+            return
+
+        self._buffer.extend(data)
+        self._wakeup_waiter()
+
+        if (self._transport is not None and
+                not self._paused and
+                len(self._buffer) > 2 * self._limit):
+            try:
+                self._transport.pause_reading()
+            except NotImplementedError:
+                # The transport can't be paused.
+                # We'll just have to buffer all data.
+                # Forget the transport so we don't keep trying.
+                self._transport = None
+            else:
+                self._paused = True
+
+    async def _wait_for_data(self, func_name):
+        """Wait until feed_data() or feed_eof() is called.
+
+        If stream was paused, automatically resume it.
+        """
+        # StreamReader uses a future to link the protocol feed_data() method
+        # to a read coroutine. Running two read coroutines at the same time
+        # would have unexpected behaviour. It would not be possible to know
+        # which coroutine would get the next data.
+        if self._waiter is not None:
+            raise RuntimeError(
+                f'{func_name}() called while another coroutine is '
+                f'already waiting for incoming data')
+
+        assert not self._eof, '_wait_for_data after EOF'
+
+        # Waiting for data while paused would deadlock, so prevent it.
+        # This is essential for readexactly(n) for case when n > self._limit.
+        if self._paused:
+            self._paused = False
+            self._transport.resume_reading()
+
+        self._waiter = self._loop.create_future()
+        try:
+            await self._waiter
+        finally:
+            self._waiter = None
+
+    async def readline(self):
+        """Read chunk of data from the stream until newline (b'\n') is found.
+
+        On success, return chunk that ends with newline. If only partial
+        line can be read due to EOF, return incomplete line without
+        terminating newline. When EOF was reached while no bytes read, empty
+        bytes object is returned.
+
+        If limit is reached, ValueError will be raised. In that case, if
+        newline was found, complete line including newline will be removed
+        from internal buffer. Else, internal buffer will be cleared. Limit is
+        compared against part of the line without newline.
+
+        If stream was paused, this function will automatically resume it if
+        needed.
+        """
+        sep = b'\n'
+        seplen = len(sep)
+        try:
+            line = await self.readuntil(sep)
+        except exceptions.IncompleteReadError as e:
+            return e.partial
+        except exceptions.LimitOverrunError as e:
+            if self._buffer.startswith(sep, e.consumed):
+                del self._buffer[:e.consumed + seplen]
+            else:
+                self._buffer.clear()
+            self._maybe_resume_transport()
+            raise ValueError(e.args[0])
+        return line
+
+    async def readuntil(self, separator=b'\n'):
+        """Read data from the stream until ``separator`` is found.
+
+        On success, the data and separator will be removed from the
+        internal buffer (consumed). Returned data will include the
+        separator at the end.
+
+        Configured stream limit is used to check result. Limit sets the
+        maximal length of data that can be returned, not counting the
+        separator.
+
+        If an EOF occurs and the complete separator is still not found,
+        an IncompleteReadError exception will be raised, and the internal
+        buffer will be reset.  The IncompleteReadError.partial attribute
+        may contain a portion of the separator.
+
+        If the data cannot be read because the limit is exceeded, a
+        LimitOverrunError exception will be raised, and the data
+        will be left in the internal buffer, so it can be read again.
+        """
+        seplen = len(separator)
+        if seplen == 0:
+            raise ValueError('Separator should be at least one-byte string')
+
+        if self._exception is not None:
+            raise self._exception
+
+        # Consume the whole buffer except for the last bytes, whose length
+        # is one less than seplen. Let's check the corner cases with
+        # separator='SEPARATOR':
+        # * we have received an almost complete separator (without the last
+        #   byte), i.e. buffer='some textSEPARATO'. In this case we
+        #   can safely consume len(separator) - 1 bytes.
+        # * the last byte of the buffer is the first byte of the separator,
+        #   i.e. buffer='abcdefghijklmnopqrS'. We could safely consume
+        #   everything except that last byte, but doing so would require
+        #   analyzing the bytes of the buffer that match a partial separator.
+        #   This is slow and/or requires an FSM. For this case our
+        #   implementation is not optimal, since it rescans data that is
+        #   known not to belong to the separator. In the real world, the
+        #   separator will not be long enough for this to cause performance
+        #   problems. Even when reading MIME-encoded messages :)
+
+        # `offset` is the number of bytes from the beginning of the buffer
+        # where there is no occurrence of `separator`.
+        offset = 0
+
+        # Loop until we find `separator` in the buffer, exceed the buffer size,
+        # or an EOF has happened.
+        while True:
+            buflen = len(self._buffer)
+
+            # Check if we now have enough data in the buffer for `separator` to
+            # fit.
+            if buflen - offset >= seplen:
+                isep = self._buffer.find(separator, offset)
+
+                if isep != -1:
+                    # `separator` is in the buffer. `isep` will be used later
+                    # to retrieve the data.
+                    break
+
+                # see upper comment for explanation.
+                offset = buflen + 1 - seplen
+                if offset > self._limit:
+                    raise exceptions.LimitOverrunError(
+                        'Separator is not found, and chunk exceed the limit',
+                        offset)
+
+            # Complete message (with full separator) may be present in buffer
+            # even when EOF flag is set. This may happen when the last chunk
+            # adds data which makes separator be found. That's why we check for
+            # EOF *after* inspecting the buffer.
+            if self._eof:
+                chunk = bytes(self._buffer)
+                self._buffer.clear()
+                raise exceptions.IncompleteReadError(chunk, None)
+
+            # _wait_for_data() will resume reading if stream was paused.
+            await self._wait_for_data('readuntil')
+
+        if isep > self._limit:
+            raise exceptions.LimitOverrunError(
+                'Separator is found, but chunk is longer than limit', isep)
+
+        chunk = self._buffer[:isep + seplen]
+        del self._buffer[:isep + seplen]
+        self._maybe_resume_transport()
+        return bytes(chunk)
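+
+    # A short worked example for readuntil() (values illustrative): with the
+    # internal buffer holding b'status: ok\r\n\r\nbody', calling
+    #     await reader.readuntil(b'\r\n\r\n')
+    # returns b'status: ok\r\n\r\n' and leaves b'body' buffered for later
+    # reads, provided the default 64 KiB limit is not exceeded.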
+
+    async def read(self, n=-1):
+        """Read up to `n` bytes from the stream.
+
+        If n is not provided, or set to -1, read until EOF and return all read
+        bytes. If EOF was received and the internal buffer is empty, return
+        an empty bytes object.
+
+        If n is zero, return an empty bytes object immediately.
+
+        If n is positive, this function tries to read up to `n` bytes, and may
+        return fewer bytes than requested, but at least one byte. If EOF was
+        received before any byte is read, this function returns an empty bytes
+        object.
+
+        The returned value is not limited by the limit configured at stream
+        creation.
+
+        If the stream was paused, this function will automatically resume it
+        if needed.
+        """
+
+        if self._exception is not None:
+            raise self._exception
+
+        if n == 0:
+            return b''
+
+        if n < 0:
+            # This used to just loop creating a new waiter hoping to
+            # collect everything in self._buffer, but that would
+            # deadlock if the subprocess sends more than self.limit
+            # bytes.  So just call self.read(self._limit) until EOF.
+            blocks = []
+            while True:
+                block = await self.read(self._limit)
+                if not block:
+                    break
+                blocks.append(block)
+            return b''.join(blocks)
+
+        if not self._buffer and not self._eof:
+            await self._wait_for_data('read')
+
+        # This will work right even if buffer is less than n bytes
+        data = bytes(self._buffer[:n])
+        del self._buffer[:n]
+
+        self._maybe_resume_transport()
+        return data
+
+    async def readexactly(self, n):
+        """Read exactly `n` bytes.
+
+        Raise an IncompleteReadError if EOF is reached before `n` bytes can be
+        read. The IncompleteReadError.partial attribute of the exception will
+        contain the partial read bytes.
+
+        If n is zero, return an empty bytes object.
+
+        The returned value is not limited by the limit configured at stream
+        creation.
+
+        If the stream was paused, this function will automatically resume it
+        if needed.
+        """
+        if n < 0:
+            raise ValueError('readexactly size can not be less than zero')
+
+        if self._exception is not None:
+            raise self._exception
+
+        if n == 0:
+            return b''
+
+        while len(self._buffer) < n:
+            if self._eof:
+                incomplete = bytes(self._buffer)
+                self._buffer.clear()
+                raise exceptions.IncompleteReadError(incomplete, n)
+
+            await self._wait_for_data('readexactly')
+
+        if len(self._buffer) == n:
+            data = bytes(self._buffer)
+            self._buffer.clear()
+        else:
+            data = bytes(self._buffer[:n])
+            del self._buffer[:n]
+        self._maybe_resume_transport()
+        return data
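+
+    # A sketch of handling the IncompleteReadError documented above; the
+    # framing scheme and helper name are illustrative:
+    #
+    #     async def read_frame(reader):
+    #         header = await reader.readexactly(4)
+    #         length = int.from_bytes(header, 'big')
+    #         try:
+    #             return await reader.readexactly(length)
+    #         except exceptions.IncompleteReadError as e:
+    #             # e.partial holds whatever arrived before EOF
+    #             raise ConnectionError('truncated frame') from e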
+
+    def __aiter__(self):
+        return self
+
+    async def __anext__(self):
+        val = await self.readline()
+        if val == b'':
+            raise StopAsyncIteration
+        return val
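+
+# Because StreamReader implements __aiter__/__anext__ above, a reader can be
+# consumed line by line with async iteration (sketch; `reader` would come
+# from e.g. open_connection(), and `process` is a placeholder):
+#
+#     async for line in reader:
+#         process(line)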
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/subprocess.py b/linux-x64/clang/python3/lib/python3.9/asyncio/subprocess.py
new file mode 100644
index 0000000..c9506b1
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/subprocess.py
@@ -0,0 +1,241 @@
+__all__ = 'create_subprocess_exec', 'create_subprocess_shell'
+
+import subprocess
+import warnings
+
+from . import events
+from . import protocols
+from . import streams
+from . import tasks
+from .log import logger
+
+
+PIPE = subprocess.PIPE
+STDOUT = subprocess.STDOUT
+DEVNULL = subprocess.DEVNULL
+
+
+class SubprocessStreamProtocol(streams.FlowControlMixin,
+                               protocols.SubprocessProtocol):
+    """Like StreamReaderProtocol, but for a subprocess."""
+
+    def __init__(self, limit, loop):
+        super().__init__(loop=loop)
+        self._limit = limit
+        self.stdin = self.stdout = self.stderr = None
+        self._transport = None
+        self._process_exited = False
+        self._pipe_fds = []
+        self._stdin_closed = self._loop.create_future()
+
+    def __repr__(self):
+        info = [self.__class__.__name__]
+        if self.stdin is not None:
+            info.append(f'stdin={self.stdin!r}')
+        if self.stdout is not None:
+            info.append(f'stdout={self.stdout!r}')
+        if self.stderr is not None:
+            info.append(f'stderr={self.stderr!r}')
+        return '<{}>'.format(' '.join(info))
+
+    def connection_made(self, transport):
+        self._transport = transport
+
+        stdout_transport = transport.get_pipe_transport(1)
+        if stdout_transport is not None:
+            self.stdout = streams.StreamReader(limit=self._limit,
+                                               loop=self._loop)
+            self.stdout.set_transport(stdout_transport)
+            self._pipe_fds.append(1)
+
+        stderr_transport = transport.get_pipe_transport(2)
+        if stderr_transport is not None:
+            self.stderr = streams.StreamReader(limit=self._limit,
+                                               loop=self._loop)
+            self.stderr.set_transport(stderr_transport)
+            self._pipe_fds.append(2)
+
+        stdin_transport = transport.get_pipe_transport(0)
+        if stdin_transport is not None:
+            self.stdin = streams.StreamWriter(stdin_transport,
+                                              protocol=self,
+                                              reader=None,
+                                              loop=self._loop)
+
+    def pipe_data_received(self, fd, data):
+        if fd == 1:
+            reader = self.stdout
+        elif fd == 2:
+            reader = self.stderr
+        else:
+            reader = None
+        if reader is not None:
+            reader.feed_data(data)
+
+    def pipe_connection_lost(self, fd, exc):
+        if fd == 0:
+            pipe = self.stdin
+            if pipe is not None:
+                pipe.close()
+            self.connection_lost(exc)
+            if exc is None:
+                self._stdin_closed.set_result(None)
+            else:
+                self._stdin_closed.set_exception(exc)
+            return
+        if fd == 1:
+            reader = self.stdout
+        elif fd == 2:
+            reader = self.stderr
+        else:
+            reader = None
+        if reader is not None:
+            if exc is None:
+                reader.feed_eof()
+            else:
+                reader.set_exception(exc)
+
+        if fd in self._pipe_fds:
+            self._pipe_fds.remove(fd)
+        self._maybe_close_transport()
+
+    def process_exited(self):
+        self._process_exited = True
+        self._maybe_close_transport()
+
+    def _maybe_close_transport(self):
+        if len(self._pipe_fds) == 0 and self._process_exited:
+            self._transport.close()
+            self._transport = None
+
+    def _get_close_waiter(self, stream):
+        if stream is self.stdin:
+            return self._stdin_closed
+
+
+class Process:
+    def __init__(self, transport, protocol, loop):
+        self._transport = transport
+        self._protocol = protocol
+        self._loop = loop
+        self.stdin = protocol.stdin
+        self.stdout = protocol.stdout
+        self.stderr = protocol.stderr
+        self.pid = transport.get_pid()
+
+    def __repr__(self):
+        return f'<{self.__class__.__name__} {self.pid}>'
+
+    @property
+    def returncode(self):
+        return self._transport.get_returncode()
+
+    async def wait(self):
+        """Wait until the process exit and return the process return code."""
+        return await self._transport._wait()
+
+    def send_signal(self, signal):
+        self._transport.send_signal(signal)
+
+    def terminate(self):
+        self._transport.terminate()
+
+    def kill(self):
+        self._transport.kill()
+
+    async def _feed_stdin(self, input):
+        debug = self._loop.get_debug()
+        self.stdin.write(input)
+        if debug:
+            logger.debug(
+                '%r communicate: feed stdin (%s bytes)', self, len(input))
+        try:
+            await self.stdin.drain()
+        except (BrokenPipeError, ConnectionResetError) as exc:
+            # communicate() ignores BrokenPipeError and ConnectionResetError
+            if debug:
+                logger.debug('%r communicate: stdin got %r', self, exc)
+
+        if debug:
+            logger.debug('%r communicate: close stdin', self)
+        self.stdin.close()
+
+    async def _noop(self):
+        return None
+
+    async def _read_stream(self, fd):
+        transport = self._transport.get_pipe_transport(fd)
+        if fd == 2:
+            stream = self.stderr
+        else:
+            assert fd == 1
+            stream = self.stdout
+        if self._loop.get_debug():
+            name = 'stdout' if fd == 1 else 'stderr'
+            logger.debug('%r communicate: read %s', self, name)
+        output = await stream.read()
+        if self._loop.get_debug():
+            name = 'stdout' if fd == 1 else 'stderr'
+            logger.debug('%r communicate: close %s', self, name)
+        transport.close()
+        return output
+
+    async def communicate(self, input=None):
+        if input is not None:
+            stdin = self._feed_stdin(input)
+        else:
+            stdin = self._noop()
+        if self.stdout is not None:
+            stdout = self._read_stream(1)
+        else:
+            stdout = self._noop()
+        if self.stderr is not None:
+            stderr = self._read_stream(2)
+        else:
+            stderr = self._noop()
+        stdin, stdout, stderr = await tasks.gather(stdin, stdout, stderr,
+                                                   loop=self._loop)
+        await self.wait()
+        return (stdout, stderr)
+
+
+async def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
+                                  loop=None, limit=streams._DEFAULT_LIMIT,
+                                  **kwds):
+    if loop is None:
+        loop = events.get_event_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8 "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning,
+                      stacklevel=2
+        )
+
+    protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
+                                                        loop=loop)
+    transport, protocol = await loop.subprocess_shell(
+        protocol_factory,
+        cmd, stdin=stdin, stdout=stdout,
+        stderr=stderr, **kwds)
+    return Process(transport, protocol, loop)
+
+
+async def create_subprocess_exec(program, *args, stdin=None, stdout=None,
+                                 stderr=None, loop=None,
+                                 limit=streams._DEFAULT_LIMIT, **kwds):
+    if loop is None:
+        loop = events.get_event_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8 "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning,
+                      stacklevel=2
+        )
+    protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
+                                                        loop=loop)
+    transport, protocol = await loop.subprocess_exec(
+        protocol_factory,
+        program, *args,
+        stdin=stdin, stdout=stdout,
+        stderr=stderr, **kwds)
+    return Process(transport, protocol, loop)
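+
+# A minimal usage sketch combining create_subprocess_exec() with
+# Process.communicate(); the command is a placeholder (POSIX 'cat'):
+#
+#     async def run():
+#         proc = await create_subprocess_exec('cat', stdin=PIPE, stdout=PIPE)
+#         out, err = await proc.communicate(input=b'hello')
+#         # out == b'hello', err is None since stderr was not piped
+#         return out, proc.returncode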
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/tasks.py b/linux-x64/clang/python3/lib/python3.9/asyncio/tasks.py
new file mode 100644
index 0000000..f486b67
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/tasks.py
@@ -0,0 +1,982 @@
+"""Support for tasks, coroutines and the scheduler."""
+
+__all__ = (
+    'Task', 'create_task',
+    'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
+    'wait', 'wait_for', 'as_completed', 'sleep',
+    'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe',
+    'current_task', 'all_tasks',
+    '_register_task', '_unregister_task', '_enter_task', '_leave_task',
+)
+
+import concurrent.futures
+import contextvars
+import functools
+import inspect
+import itertools
+import types
+import warnings
+import weakref
+
+from . import base_tasks
+from . import coroutines
+from . import events
+from . import exceptions
+from . import futures
+from .coroutines import _is_coroutine
+
+# Helper to generate new task names
+# This uses itertools.count() instead of a "+= 1" operation because the latter
+# is not thread safe. See bpo-11866 for a longer explanation.
+_task_name_counter = itertools.count(1).__next__
+
+
+def current_task(loop=None):
+    """Return a currently executed task."""
+    if loop is None:
+        loop = events.get_running_loop()
+    return _current_tasks.get(loop)
+
+
+def all_tasks(loop=None):
+    """Return a set of all tasks for the loop."""
+    if loop is None:
+        loop = events.get_running_loop()
+    # Looping over a WeakSet (_all_tasks) isn't safe as it can be updated from another
+    # thread while we do so. Therefore we cast it to list prior to filtering. The list
+    # cast itself requires iteration, so we repeat it several times ignoring
+    # RuntimeErrors (which are not very likely to occur). See issues 34970 and 36607 for
+    # details.
+    i = 0
+    while True:
+        try:
+            tasks = list(_all_tasks)
+        except RuntimeError:
+            i += 1
+            if i >= 1000:
+                raise
+        else:
+            break
+    return {t for t in tasks
+            if futures._get_loop(t) is loop and not t.done()}
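+
+# A small debugging sketch built on current_task()/all_tasks(); the dump
+# format is illustrative:
+#
+#     async def dump_tasks():
+#         me = current_task()
+#         for t in all_tasks():
+#             if t is not me:
+#                 print(t.get_name(), t.get_coro())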
+
+
+def _all_tasks_compat(loop=None):
+    # Differs from "all_tasks()" by returning *all* tasks, including
+    # the completed ones.  Used to implement the deprecated
+    # "Task.all_tasks()" method.
+    if loop is None:
+        loop = events.get_event_loop()
+    # Looping over a WeakSet (_all_tasks) isn't safe as it can be updated from another
+    # thread while we do so. Therefore we cast it to list prior to filtering. The list
+    # cast itself requires iteration, so we repeat it several times ignoring
+    # RuntimeErrors (which are not very likely to occur). See issues 34970 and 36607 for
+    # details.
+    i = 0
+    while True:
+        try:
+            tasks = list(_all_tasks)
+        except RuntimeError:
+            i += 1
+            if i >= 1000:
+                raise
+        else:
+            break
+    return {t for t in tasks if futures._get_loop(t) is loop}
+
+
+def _set_task_name(task, name):
+    if name is not None:
+        try:
+            set_name = task.set_name
+        except AttributeError:
+            pass
+        else:
+            set_name(name)
+
+
+class Task(futures._PyFuture):  # Inherit Python Task implementation
+                                # from a Python Future implementation.
+
+    """A coroutine wrapped in a Future."""
+
+    # An important invariant maintained while a Task is not done:
+    #
+    # - Either _fut_waiter is None, and _step() is scheduled;
+    # - or _fut_waiter is some Future, and _step() is *not* scheduled.
+    #
+    # The only transition from the latter to the former is through
+    # _wakeup().  When _fut_waiter is not None, one of its callbacks
+    # must be _wakeup().
+
+    # If False, don't log a message if the task is destroyed while its
+    # status is still pending.
+    _log_destroy_pending = True
+
+    def __init__(self, coro, *, loop=None, name=None):
+        super().__init__(loop=loop)
+        if self._source_traceback:
+            del self._source_traceback[-1]
+        if not coroutines.iscoroutine(coro):
+            # raise after Future.__init__(), attrs are required for __del__
+            # prevent logging for pending task in __del__
+            self._log_destroy_pending = False
+            raise TypeError(f"a coroutine was expected, got {coro!r}")
+
+        if name is None:
+            self._name = f'Task-{_task_name_counter()}'
+        else:
+            self._name = str(name)
+
+        self._must_cancel = False
+        self._fut_waiter = None
+        self._coro = coro
+        self._context = contextvars.copy_context()
+
+        self._loop.call_soon(self.__step, context=self._context)
+        _register_task(self)
+
+    def __del__(self):
+        if self._state == futures._PENDING and self._log_destroy_pending:
+            context = {
+                'task': self,
+                'message': 'Task was destroyed but it is pending!',
+            }
+            if self._source_traceback:
+                context['source_traceback'] = self._source_traceback
+            self._loop.call_exception_handler(context)
+        super().__del__()
+
+    def __class_getitem__(cls, type):
+        return cls
+
+    def _repr_info(self):
+        return base_tasks._task_repr_info(self)
+
+    def get_coro(self):
+        return self._coro
+
+    def get_name(self):
+        return self._name
+
+    def set_name(self, value):
+        self._name = str(value)
+
+    def set_result(self, result):
+        raise RuntimeError('Task does not support set_result operation')
+
+    def set_exception(self, exception):
+        raise RuntimeError('Task does not support set_exception operation')
+
+    def get_stack(self, *, limit=None):
+        """Return the list of stack frames for this task's coroutine.
+
+        If the coroutine is not done, this returns the stack where it is
+        suspended.  If the coroutine has completed successfully or was
+        cancelled, this returns an empty list.  If the coroutine was
+        terminated by an exception, this returns the list of traceback
+        frames.
+
+        The frames are always ordered from oldest to newest.
+
+        The optional limit gives the maximum number of frames to
+        return; by default all available frames are returned.  Its
+        meaning differs depending on whether a stack or a traceback is
+        returned: the newest frames of a stack are returned, but the
+        oldest frames of a traceback are returned.  (This matches the
+        behavior of the traceback module.)
+
+        For reasons beyond our control, only one stack frame is
+        returned for a suspended coroutine.
+        """
+        return base_tasks._task_get_stack(self, limit)
+
+    def print_stack(self, *, limit=None, file=None):
+        """Print the stack or traceback for this task's coroutine.
+
+        This produces output similar to that of the traceback module,
+        for the frames retrieved by get_stack().  The limit argument
+        is passed to get_stack().  The file argument is an I/O stream
+        to which the output is written; by default output is written
+        to sys.stderr.
+        """
+        return base_tasks._task_print_stack(self, limit, file)
+
+    def cancel(self, msg=None):
+        """Request that this task cancel itself.
+
+        This arranges for a CancelledError to be thrown into the
+        wrapped coroutine on the next cycle through the event loop.
+        The coroutine then has a chance to clean up or even deny
+        the request using try/except/finally.
+
+        Unlike Future.cancel, this does not guarantee that the
+        task will be cancelled: the exception might be caught and
+        acted upon, delaying cancellation of the task or preventing
+        cancellation completely.  The task may also return a value or
+        raise a different exception.
+
+        Immediately after this method is called, Task.cancelled() will
+        not return True (unless the task was already cancelled).  A
+        task will be marked as cancelled when the wrapped coroutine
+        terminates with a CancelledError exception (even if cancel()
+        was not called).
+        """
+        self._log_traceback = False
+        if self.done():
+            return False
+        if self._fut_waiter is not None:
+            if self._fut_waiter.cancel(msg=msg):
+                # Leave self._fut_waiter; it may be a Task that
+                # catches and ignores the cancellation so we may have
+                # to cancel it again later.
+                return True
+        # It must be the case that self.__step is already scheduled.
+        self._must_cancel = True
+        self._cancel_message = msg
+        return True
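+
+    # Editor's note (not part of upstream CPython): since cancel() only
+    # *requests* cancellation, user code that must observe the outcome
+    # usually awaits the task afterwards, e.g.:
+    #
+    #     task.cancel()
+    #     try:
+    #         await task
+    #     except asyncio.CancelledError:
+    #         pass  # the request was honoured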
+
+    def __step(self, exc=None):
+        if self.done():
+            raise exceptions.InvalidStateError(
+                f'_step(): already done: {self!r}, {exc!r}')
+        if self._must_cancel:
+            if not isinstance(exc, exceptions.CancelledError):
+                exc = self._make_cancelled_error()
+            self._must_cancel = False
+        coro = self._coro
+        self._fut_waiter = None
+
+        _enter_task(self._loop, self)
+        # Call either coro.throw(exc) or coro.send(None).
+        try:
+            if exc is None:
+                # We use the `send` method directly, because coroutines
+                # don't have `__iter__` and `__next__` methods.
+                result = coro.send(None)
+            else:
+                result = coro.throw(exc)
+        except StopIteration as exc:
+            if self._must_cancel:
+                # Task is cancelled right before coro stops.
+                self._must_cancel = False
+                super().cancel(msg=self._cancel_message)
+            else:
+                super().set_result(exc.value)
+        except exceptions.CancelledError as exc:
+            # Save the original exception so we can chain it later.
+            self._cancelled_exc = exc
+            super().cancel()  # I.e., Future.cancel(self).
+        except (KeyboardInterrupt, SystemExit) as exc:
+            super().set_exception(exc)
+            raise
+        except BaseException as exc:
+            super().set_exception(exc)
+        else:
+            blocking = getattr(result, '_asyncio_future_blocking', None)
+            if blocking is not None:
+                # Yielded Future must come from Future.__iter__().
+                if futures._get_loop(result) is not self._loop:
+                    new_exc = RuntimeError(
+                        f'Task {self!r} got Future '
+                        f'{result!r} attached to a different loop')
+                    self._loop.call_soon(
+                        self.__step, new_exc, context=self._context)
+                elif blocking:
+                    if result is self:
+                        new_exc = RuntimeError(
+                            f'Task cannot await on itself: {self!r}')
+                        self._loop.call_soon(
+                            self.__step, new_exc, context=self._context)
+                    else:
+                        result._asyncio_future_blocking = False
+                        result.add_done_callback(
+                            self.__wakeup, context=self._context)
+                        self._fut_waiter = result
+                        if self._must_cancel:
+                            if self._fut_waiter.cancel(
+                                    msg=self._cancel_message):
+                                self._must_cancel = False
+                else:
+                    new_exc = RuntimeError(
+                        f'yield was used instead of yield from '
+                        f'in task {self!r} with {result!r}')
+                    self._loop.call_soon(
+                        self.__step, new_exc, context=self._context)
+
+            elif result is None:
+                # Bare yield relinquishes control for one event loop iteration.
+                self._loop.call_soon(self.__step, context=self._context)
+            elif inspect.isgenerator(result):
+                # Yielding a generator is just wrong.
+                new_exc = RuntimeError(
+                    f'yield was used instead of yield from for '
+                    f'generator in task {self!r} with {result!r}')
+                self._loop.call_soon(
+                    self.__step, new_exc, context=self._context)
+            else:
+                # Yielding something else is an error.
+                new_exc = RuntimeError(f'Task got bad yield: {result!r}')
+                self._loop.call_soon(
+                    self.__step, new_exc, context=self._context)
+        finally:
+            _leave_task(self._loop, self)
+            self = None  # Needed to break cycles when an exception occurs.
+
+    def __wakeup(self, future):
+        try:
+            future.result()
+        except BaseException as exc:
+            # This may also be a cancellation.
+            self.__step(exc)
+        else:
+            # Don't pass the value of `future.result()` explicitly,
+            # as `Future.__iter__` and `Future.__await__` don't need it.
+            # If we call `_step(value, None)` instead of `_step()`,
+            # Python eval loop would use `.send(value)` method call,
+            # instead of `__next__()`, which is slower for futures
+            # that return non-generator iterators from their `__iter__`.
+            self.__step()
+        self = None  # Needed to break cycles when an exception occurs.
+
+
+_PyTask = Task
+
+
+try:
+    import _asyncio
+except ImportError:
+    pass
+else:
+    # _CTask is needed for tests.
+    Task = _CTask = _asyncio.Task
+
+
+def create_task(coro, *, name=None):
+    """Schedule the execution of a coroutine object in a spawn task.
+
+    Return a Task object.
+    """
+    loop = events.get_running_loop()
+    task = loop.create_task(coro)
+    _set_task_name(task, name)
+    return task
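+
+
+# Editor's illustrative sketch (not part of upstream CPython): minimal
+# use of create_task().  It must run under an active event loop; the
+# coroutine 'tick' is a hypothetical placeholder.
+async def _example_create_task():
+    async def tick():
+        await sleep(0.1)
+    task = create_task(tick(), name='ticker')
+    await task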
+
+
+# wait() and as_completed() similar to those in PEP 3148.
+
+FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED
+FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
+ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
+
+
+async def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
+    """Wait for the Futures and coroutines given by fs to complete.
+
+    The fs iterable must not be empty.
+
+    Coroutines will be wrapped in Tasks.
+
+    Returns two sets of Future: (done, pending).
+
+    Usage:
+
+        done, pending = await asyncio.wait(fs)
+
+    Note: This does not raise TimeoutError! Futures that aren't done
+    when the timeout occurs are returned in the second set.
+    """
+    if futures.isfuture(fs) or coroutines.iscoroutine(fs):
+        raise TypeError(f"expect a list of futures, not {type(fs).__name__}")
+    if not fs:
+        raise ValueError('Set of coroutines/Futures is empty.')
+    if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED):
+        raise ValueError(f'Invalid return_when value: {return_when}')
+
+    if loop is None:
+        loop = events.get_running_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8, "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning, stacklevel=2)
+
+    fs = set(fs)
+
+    if any(coroutines.iscoroutine(f) for f in fs):
+        warnings.warn("The explicit passing of coroutine objects to "
+                      "asyncio.wait() is deprecated since Python 3.8, and "
+                      "scheduled for removal in Python 3.11.",
+                      DeprecationWarning, stacklevel=2)
+
+    fs = {ensure_future(f, loop=loop) for f in fs}
+
+    return await _wait(fs, timeout, return_when, loop)
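+
+
+# Editor's illustrative sketch (not part of upstream CPython): wait()
+# never raises TimeoutError itself; whatever is unfinished after the
+# timeout simply comes back in the 'pending' set.
+async def _example_wait():
+    running = {create_task(sleep(d)) for d in (0.01, 10.0)}
+    done, pending = await wait(running, timeout=0.1)
+    for t in pending:
+        t.cancel()  # wait() leaves pending tasks running
+    return done, pending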
+
+
+def _release_waiter(waiter, *args):
+    if not waiter.done():
+        waiter.set_result(None)
+
+
+async def wait_for(fut, timeout, *, loop=None):
+    """Wait for the single Future or coroutine to complete, with timeout.
+
+    Coroutine will be wrapped in Task.
+
+    Returns result of the Future or coroutine.  When a timeout occurs,
+    it cancels the task and raises TimeoutError.  To avoid the task
+    cancellation, wrap it in shield().
+
+    If the wait is cancelled, the task is also cancelled.
+
+    This function is a coroutine.
+    """
+    if loop is None:
+        loop = events.get_running_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8, "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning, stacklevel=2)
+
+    if timeout is None:
+        return await fut
+
+    if timeout <= 0:
+        fut = ensure_future(fut, loop=loop)
+
+        if fut.done():
+            return fut.result()
+
+        await _cancel_and_wait(fut, loop=loop)
+        try:
+            fut.result()
+        except exceptions.CancelledError as exc:
+            raise exceptions.TimeoutError() from exc
+        else:
+            raise exceptions.TimeoutError()
+
+    waiter = loop.create_future()
+    timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
+    cb = functools.partial(_release_waiter, waiter)
+
+    fut = ensure_future(fut, loop=loop)
+    fut.add_done_callback(cb)
+
+    try:
+        # wait until the future completes or the timeout
+        try:
+            await waiter
+        except exceptions.CancelledError:
+            if fut.done():
+                return fut.result()
+            else:
+                fut.remove_done_callback(cb)
+                fut.cancel()
+                raise
+
+        if fut.done():
+            return fut.result()
+        else:
+            fut.remove_done_callback(cb)
+            # We must ensure that the task is not running
+            # after wait_for() returns.
+            # See https://bugs.python.org/issue32751
+            await _cancel_and_wait(fut, loop=loop)
+            # In case task cancellation failed with some
+            # exception, we should re-raise it
+            # See https://bugs.python.org/issue40607
+            try:
+                fut.result()
+            except exceptions.CancelledError as exc:
+                raise exceptions.TimeoutError() from exc
+            else:
+                raise exceptions.TimeoutError()
+    finally:
+        timeout_handle.cancel()
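+
+
+# Editor's illustrative sketch (not part of upstream CPython): on
+# timeout, wait_for() cancels the wrapped task before raising, so the
+# except block below runs only after the inner sleep() is cancelled.
+async def _example_wait_for():
+    try:
+        await wait_for(sleep(10.0), timeout=0.1)
+    except exceptions.TimeoutError:
+        pass  # the wrapped task has already been cancelled here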
+
+
+async def _wait(fs, timeout, return_when, loop):
+    """Internal helper for wait().
+
+    The fs argument must be a collection of Futures.
+    """
+    assert fs, 'Set of Futures is empty.'
+    waiter = loop.create_future()
+    timeout_handle = None
+    if timeout is not None:
+        timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
+    counter = len(fs)
+
+    def _on_completion(f):
+        nonlocal counter
+        counter -= 1
+        if (counter <= 0 or
+            return_when == FIRST_COMPLETED or
+            return_when == FIRST_EXCEPTION and (not f.cancelled() and
+                                                f.exception() is not None)):
+            if timeout_handle is not None:
+                timeout_handle.cancel()
+            if not waiter.done():
+                waiter.set_result(None)
+
+    for f in fs:
+        f.add_done_callback(_on_completion)
+
+    try:
+        await waiter
+    finally:
+        if timeout_handle is not None:
+            timeout_handle.cancel()
+        for f in fs:
+            f.remove_done_callback(_on_completion)
+
+    done, pending = set(), set()
+    for f in fs:
+        if f.done():
+            done.add(f)
+        else:
+            pending.add(f)
+    return done, pending
+
+
+async def _cancel_and_wait(fut, loop):
+    """Cancel the *fut* future or task and wait until it completes."""
+
+    waiter = loop.create_future()
+    cb = functools.partial(_release_waiter, waiter)
+    fut.add_done_callback(cb)
+
+    try:
+        fut.cancel()
+        # We cannot wait on *fut* directly to make
+        # sure _cancel_and_wait itself is reliably cancellable.
+        await waiter
+    finally:
+        fut.remove_done_callback(cb)
+
+
+# This is *not* a @coroutine!  It is just an iterator (yielding Futures).
+def as_completed(fs, *, loop=None, timeout=None):
+    """Return an iterator whose values are coroutines.
+
+    When waiting for the yielded coroutines you'll get the results (or
+    exceptions!) of the original Futures (or coroutines), in the order
+    in which and as soon as they complete.
+
+    This differs from PEP 3148; the proper way to use this is:
+
+        for f in as_completed(fs):
+            result = await f  # The 'await' may raise.
+            # Use result.
+
+    If a timeout is specified, the 'await' will raise
+    TimeoutError when the timeout occurs before all Futures are done.
+
+    Note: The futures 'f' are not necessarily members of fs.
+    """
+    if futures.isfuture(fs) or coroutines.iscoroutine(fs):
+        raise TypeError(f"expect an iterable of futures, not {type(fs).__name__}")
+
+    from .queues import Queue  # Import here to avoid circular import problem.
+    done = Queue(loop=loop)
+
+    if loop is None:
+        loop = events.get_event_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8, "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning, stacklevel=2)
+    todo = {ensure_future(f, loop=loop) for f in set(fs)}
+    timeout_handle = None
+
+    def _on_timeout():
+        for f in todo:
+            f.remove_done_callback(_on_completion)
+            done.put_nowait(None)  # Queue a dummy value for _wait_for_one().
+        todo.clear()  # Can't do todo.remove(f) in the loop.
+
+    def _on_completion(f):
+        if not todo:
+            return  # _on_timeout() was here first.
+        todo.remove(f)
+        done.put_nowait(f)
+        if not todo and timeout_handle is not None:
+            timeout_handle.cancel()
+
+    async def _wait_for_one():
+        f = await done.get()
+        if f is None:
+            # Dummy value from _on_timeout().
+            raise exceptions.TimeoutError
+        return f.result()  # May raise f.exception().
+
+    for f in todo:
+        f.add_done_callback(_on_completion)
+    if todo and timeout is not None:
+        timeout_handle = loop.call_later(timeout, _on_timeout)
+    for _ in range(len(todo)):
+        yield _wait_for_one()
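+
+
+# Editor's illustrative sketch (not part of upstream CPython): results
+# arrive in completion order, not submission order.
+async def _example_as_completed():
+    coros = [sleep(0.2, 'slow'), sleep(0.1, 'fast')]
+    return [await f for f in as_completed(coros)]  # ['fast', 'slow']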
+
+
+@types.coroutine
+def __sleep0():
+    """Skip one event loop run cycle.
+
+    This is a private helper for 'asyncio.sleep()', used
+    when the 'delay' is set to 0.  It uses a bare 'yield'
+    expression (which Task.__step knows how to handle)
+    instead of creating a Future object.
+    """
+    yield
+
+
+async def sleep(delay, result=None, *, loop=None):
+    """Coroutine that completes after a given time (in seconds)."""
+    if delay <= 0:
+        await __sleep0()
+        return result
+
+    if loop is None:
+        loop = events.get_running_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8, "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning, stacklevel=2)
+
+    future = loop.create_future()
+    h = loop.call_later(delay,
+                        futures._set_result_unless_cancelled,
+                        future, result)
+    try:
+        return await future
+    finally:
+        h.cancel()
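+
+
+# Editor's illustrative sketch (not part of upstream CPython): sleep(0)
+# takes the __sleep0() fast path, a bare yield with no timer involved.
+async def _example_sleep():
+    await sleep(0)                       # yield control for one iteration
+    return await sleep(0.01, result=42)  # timer path; resolves to 42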
+
+
+def ensure_future(coro_or_future, *, loop=None):
+    """Wrap a coroutine or an awaitable in a future.
+
+    If the argument is a Future, it is returned directly.
+    """
+    if coroutines.iscoroutine(coro_or_future):
+        if loop is None:
+            loop = events.get_event_loop()
+        task = loop.create_task(coro_or_future)
+        if task._source_traceback:
+            del task._source_traceback[-1]
+        return task
+    elif futures.isfuture(coro_or_future):
+        if loop is not None and loop is not futures._get_loop(coro_or_future):
+            raise ValueError('The future belongs to a different loop than '
+                             'the one specified as the loop argument')
+        return coro_or_future
+    elif inspect.isawaitable(coro_or_future):
+        return ensure_future(_wrap_awaitable(coro_or_future), loop=loop)
+    else:
+        raise TypeError('An asyncio.Future, a coroutine or an awaitable is '
+                        'required')
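+
+
+# Editor's illustrative sketch (not part of upstream CPython):
+# ensure_future() wraps coroutines in Tasks but returns Future
+# arguments unchanged.
+async def _example_ensure_future():
+    task = ensure_future(sleep(0))      # coroutine -> new Task
+    assert ensure_future(task) is task  # a Future/Task passes through
+    await task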
+
+
+@types.coroutine
+def _wrap_awaitable(awaitable):
+    """Helper for asyncio.ensure_future().
+
+    Wraps awaitable (an object with __await__) into a coroutine
+    that will later be wrapped in a Task by ensure_future().
+    """
+    return (yield from awaitable.__await__())
+
+_wrap_awaitable._is_coroutine = _is_coroutine
+
+
+class _GatheringFuture(futures.Future):
+    """Helper for gather().
+
+    This overrides cancel() to cancel all the children and act more
+    like Task.cancel(), which doesn't immediately mark itself as
+    cancelled.
+    """
+
+    def __init__(self, children, *, loop=None):
+        super().__init__(loop=loop)
+        self._children = children
+        self._cancel_requested = False
+
+    def cancel(self, msg=None):
+        if self.done():
+            return False
+        ret = False
+        for child in self._children:
+            if child.cancel(msg=msg):
+                ret = True
+        if ret:
+            # If any child tasks were actually cancelled, we should
+            # propagate the cancellation request regardless of
+            # *return_exceptions* argument.  See issue 32684.
+            self._cancel_requested = True
+        return ret
+
+
+def gather(*coros_or_futures, loop=None, return_exceptions=False):
+    """Return a future aggregating results from the given coroutines/futures.
+
+    Coroutines will be wrapped in a future and scheduled in the event
+    loop. They will not necessarily be scheduled in the same order as
+    passed in.
+
+    All futures must share the same event loop.  If all the tasks are
+    done successfully, the returned future's result is the list of
+    results (in the order of the original sequence, not necessarily
+    the order of results arrival).  If *return_exceptions* is True,
+    exceptions in the tasks are treated the same as successful
+    results, and gathered in the result list; otherwise, the first
+    raised exception will be immediately propagated to the returned
+    future.
+
+    Cancellation: if the outer Future is cancelled, all children (that
+    have not completed yet) are also cancelled.  If any child is
+    cancelled, this is treated as if it raised CancelledError --
+    the outer Future is *not* cancelled in this case.  (This is to
+    prevent the cancellation of one child to cause other children to
+    be cancelled.)
+
+    If *return_exceptions* is False, cancelling gather() after it
+    has been marked done won't cancel any submitted awaitables.
+    For instance, gather can be marked done after propagating an
+    exception to the caller; therefore, calling ``gather.cancel()``
+    after catching an exception (raised by one of the awaitables) from
+    gather won't cancel any other awaitables.
+    """
+    if not coros_or_futures:
+        if loop is None:
+            loop = events.get_event_loop()
+        else:
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
+        outer = loop.create_future()
+        outer.set_result([])
+        return outer
+
+    def _done_callback(fut):
+        nonlocal nfinished
+        nfinished += 1
+
+        if outer.done():
+            if not fut.cancelled():
+                # Mark exception retrieved.
+                fut.exception()
+            return
+
+        if not return_exceptions:
+            if fut.cancelled():
+                # Check if 'fut' is cancelled first, as
+                # 'fut.exception()' will *raise* a CancelledError
+                # instead of returning it.
+                exc = fut._make_cancelled_error()
+                outer.set_exception(exc)
+                return
+            else:
+                exc = fut.exception()
+                if exc is not None:
+                    outer.set_exception(exc)
+                    return
+
+        if nfinished == nfuts:
+            # All futures are done; create a list of results
+            # and set it to the 'outer' future.
+            results = []
+
+            for fut in children:
+                if fut.cancelled():
+                    # Check if 'fut' is cancelled first, as 'fut.exception()'
+                    # will *raise* a CancelledError instead of returning it.
+                    # Also, since we're adding the exception return value
+                    # to 'results' instead of raising it, don't bother
+                    # setting __context__.  This also lets us preserve
+                    # calling '_make_cancelled_error()' at most once.
+                    res = exceptions.CancelledError(
+                        '' if fut._cancel_message is None else
+                        fut._cancel_message)
+                else:
+                    res = fut.exception()
+                    if res is None:
+                        res = fut.result()
+                results.append(res)
+
+            if outer._cancel_requested:
+                # If gather is being cancelled we must propagate the
+                # cancellation regardless of *return_exceptions* argument.
+                # See issue 32684.
+                exc = fut._make_cancelled_error()
+                outer.set_exception(exc)
+            else:
+                outer.set_result(results)
+
+    arg_to_fut = {}
+    children = []
+    nfuts = 0
+    nfinished = 0
+    for arg in coros_or_futures:
+        if arg not in arg_to_fut:
+            fut = ensure_future(arg, loop=loop)
+            if loop is None:
+                loop = futures._get_loop(fut)
+            if fut is not arg:
+                # 'arg' was not a Future, therefore, 'fut' is a new
+                # Future created specifically for 'arg'.  Since the caller
+                # can't control it, disable the "destroy pending task"
+                # warning.
+                fut._log_destroy_pending = False
+
+            nfuts += 1
+            arg_to_fut[arg] = fut
+            fut.add_done_callback(_done_callback)
+
+        else:
+            # There's a duplicate Future object in coros_or_futures.
+            fut = arg_to_fut[arg]
+
+        children.append(fut)
+
+    outer = _GatheringFuture(children, loop=loop)
+    return outer
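+
+
+# Editor's illustrative sketch (not part of upstream CPython): gather()
+# keeps results in argument order, and with return_exceptions=True an
+# exception becomes an entry in the result list instead of propagating.
+async def _example_gather():
+    async def boom():
+        raise ValueError('boom')
+    results = await gather(sleep(0, 'ok'), boom(), return_exceptions=True)
+    return results  # ['ok', ValueError('boom')]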
+
+
+def shield(arg, *, loop=None):
+    """Wait for a future, shielding it from cancellation.
+
+    The statement
+
+        res = await shield(something())
+
+    is exactly equivalent to the statement
+
+        res = await something()
+
+    *except* that if the coroutine containing it is cancelled, the
+    task running in something() is not cancelled.  From the POV of
+    something(), the cancellation did not happen.  But its caller is
+    still cancelled, so the await expression still raises
+    CancelledError.  Note: If something() is cancelled by other means
+    this will still cancel shield().
+
+    If you want to completely ignore cancellation (not recommended)
+    you can combine shield() with a try/except clause, as follows:
+
+        try:
+            res = await shield(something())
+        except CancelledError:
+            res = None
+    """
+    if loop is not None:
+        warnings.warn("The loop argument is deprecated since Python 3.8, "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning, stacklevel=2)
+    inner = ensure_future(arg, loop=loop)
+    if inner.done():
+        # Shortcut.
+        return inner
+    loop = futures._get_loop(inner)
+    outer = loop.create_future()
+
+    def _inner_done_callback(inner):
+        if outer.cancelled():
+            if not inner.cancelled():
+                # Mark inner's result as retrieved.
+                inner.exception()
+            return
+
+        if inner.cancelled():
+            outer.cancel()
+        else:
+            exc = inner.exception()
+            if exc is not None:
+                outer.set_exception(exc)
+            else:
+                outer.set_result(inner.result())
+
+    def _outer_done_callback(outer):
+        if not inner.done():
+            inner.remove_done_callback(_inner_done_callback)
+
+    inner.add_done_callback(_inner_done_callback)
+    outer.add_done_callback(_outer_done_callback)
+    return outer
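+
+
+# Editor's illustrative sketch (not part of upstream CPython): shielding
+# an inner task from the cancellation that wait_for() performs on
+# timeout; only the shield is cancelled, the inner work keeps running.
+async def _example_shield():
+    inner = ensure_future(sleep(10.0))
+    try:
+        await wait_for(shield(inner), timeout=0.1)
+    except exceptions.TimeoutError:
+        pass  # 'inner' is still pending at this point
+    inner.cancel()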
+
+
+def run_coroutine_threadsafe(coro, loop):
+    """Submit a coroutine object to a given event loop.
+
+    Return a concurrent.futures.Future to access the result.
+    """
+    if not coroutines.iscoroutine(coro):
+        raise TypeError('A coroutine object is required')
+    future = concurrent.futures.Future()
+
+    def callback():
+        try:
+            futures._chain_future(ensure_future(coro, loop=loop), future)
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            if future.set_running_or_notify_cancel():
+                future.set_exception(exc)
+            raise
+
+    loop.call_soon_threadsafe(callback)
+    return future
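+
+
+# Editor's illustrative sketch (not part of upstream CPython): this
+# helper is meant to be called from a thread *other* than the one
+# running 'loop'; the returned concurrent.futures.Future can then be
+# waited on synchronously.
+def _example_run_coroutine_threadsafe(loop):
+    cf = run_coroutine_threadsafe(sleep(0, 'done'), loop)
+    return cf.result(timeout=5)  # blocks this thread, not the event loop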
+
+
+# WeakSet containing all alive tasks.
+_all_tasks = weakref.WeakSet()
+
+# Dictionary containing tasks that are currently active in
+# all running event loops.  {EventLoop: Task}
+_current_tasks = {}
+
+
+def _register_task(task):
+    """Register a new task in asyncio as executed by loop."""
+    _all_tasks.add(task)
+
+
+def _enter_task(loop, task):
+    current_task = _current_tasks.get(loop)
+    if current_task is not None:
+        raise RuntimeError(f"Cannot enter into task {task!r} while another "
+                           f"task {current_task!r} is being executed.")
+    _current_tasks[loop] = task
+
+
+def _leave_task(loop, task):
+    current_task = _current_tasks.get(loop)
+    if current_task is not task:
+        raise RuntimeError(f"Leaving task {task!r} does not match "
+                           f"the current task {current_task!r}.")
+    del _current_tasks[loop]
+
+
+def _unregister_task(task):
+    """Unregister a task."""
+    _all_tasks.discard(task)
+
+
+_py_register_task = _register_task
+_py_unregister_task = _unregister_task
+_py_enter_task = _enter_task
+_py_leave_task = _leave_task
+
+
+try:
+    from _asyncio import (_register_task, _unregister_task,
+                          _enter_task, _leave_task,
+                          _all_tasks, _current_tasks)
+except ImportError:
+    pass
+else:
+    _c_register_task = _register_task
+    _c_unregister_task = _unregister_task
+    _c_enter_task = _enter_task
+    _c_leave_task = _leave_task
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/threads.py b/linux-x64/clang/python3/lib/python3.9/asyncio/threads.py
new file mode 100644
index 0000000..34b7513
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/threads.py
@@ -0,0 +1,25 @@
+"""High-level support for working with threads in asyncio"""
+
+import functools
+import contextvars
+
+from . import events
+
+
+__all__ = "to_thread",
+
+
+async def to_thread(func, /, *args, **kwargs):
+    """Asynchronously run function *func* in a separate thread.
+
+    Any *args and **kwargs supplied for this function are directly passed
+    to *func*. Also, the current :class:`contextvars.Context` is propagated,
+    allowing context variables from the main thread to be accessed in the
+    separate thread.
+
+    Return a coroutine that can be awaited to get the eventual result of *func*.
+    """
+    loop = events.get_running_loop()
+    ctx = contextvars.copy_context()
+    func_call = functools.partial(ctx.run, func, *args, **kwargs)
+    return await loop.run_in_executor(None, func_call)
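+
+
+# Editor's illustrative sketch (not part of upstream CPython):
+# time.sleep stands in for any blocking callable that should not run
+# on the event loop thread.
+async def _example_to_thread():
+    import time
+    await to_thread(time.sleep, 0.1)  # the loop stays responsive meanwhile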
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/transports.py b/linux-x64/clang/python3/lib/python3.9/asyncio/transports.py
new file mode 100644
index 0000000..45e155c
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/transports.py
@@ -0,0 +1,329 @@
+"""Abstract Transport class."""
+
+__all__ = (
+    'BaseTransport', 'ReadTransport', 'WriteTransport',
+    'Transport', 'DatagramTransport', 'SubprocessTransport',
+)
+
+
+class BaseTransport:
+    """Base class for transports."""
+
+    __slots__ = ('_extra',)
+
+    def __init__(self, extra=None):
+        if extra is None:
+            extra = {}
+        self._extra = extra
+
+    def get_extra_info(self, name, default=None):
+        """Get optional transport information."""
+        return self._extra.get(name, default)
+
+    def is_closing(self):
+        """Return True if the transport is closing or closed."""
+        raise NotImplementedError
+
+    def close(self):
+        """Close the transport.
+
+        Buffered data will be flushed asynchronously.  No more data
+        will be received.  After all buffered data is flushed, the
+        protocol's connection_lost() method will (eventually) be
+        called with None as its argument.
+        """
+        raise NotImplementedError
+
+    def set_protocol(self, protocol):
+        """Set a new protocol."""
+        raise NotImplementedError
+
+    def get_protocol(self):
+        """Return the current protocol."""
+        raise NotImplementedError
+
+
+class ReadTransport(BaseTransport):
+    """Interface for read-only transports."""
+
+    __slots__ = ()
+
+    def is_reading(self):
+        """Return True if the transport is receiving."""
+        raise NotImplementedError
+
+    def pause_reading(self):
+        """Pause the receiving end.
+
+        No data will be passed to the protocol's data_received()
+        method until resume_reading() is called.
+        """
+        raise NotImplementedError
+
+    def resume_reading(self):
+        """Resume the receiving end.
+
+        Data received will once again be passed to the protocol's
+        data_received() method.
+        """
+        raise NotImplementedError
+
+
+class WriteTransport(BaseTransport):
+    """Interface for write-only transports."""
+
+    __slots__ = ()
+
+    def set_write_buffer_limits(self, high=None, low=None):
+        """Set the high- and low-water limits for write flow control.
+
+        These two values control when to call the protocol's
+        pause_writing() and resume_writing() methods.  If specified,
+        the low-water limit must be less than or equal to the
+        high-water limit.  Neither value can be negative.
+
+        The defaults are implementation-specific.  If only the
+        high-water limit is given, the low-water limit defaults to an
+        implementation-specific value less than or equal to the
+        high-water limit.  Setting high to zero forces low to zero as
+        well, and causes pause_writing() to be called whenever the
+        buffer becomes non-empty.  Setting low to zero causes
+        resume_writing() to be called only once the buffer is empty.
+        Use of zero for either limit is generally sub-optimal as it
+        reduces opportunities for doing I/O and computation
+        concurrently.
+        """
+        raise NotImplementedError
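+
+    # Editor's note (not part of upstream CPython): with the default
+    # derivation in _FlowControlMixin._set_write_buffer_limits() below,
+    # passing only high=65536 yields low == high // 4 == 16384, and
+    # passing only low=16384 yields high == 4 * low == 65536.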
+
+    def get_write_buffer_size(self):
+        """Return the current size of the write buffer."""
+        raise NotImplementedError
+
+    def write(self, data):
+        """Write some data bytes to the transport.
+
+        This does not block; it buffers the data and arranges for it
+        to be sent out asynchronously.
+        """
+        raise NotImplementedError
+
+    def writelines(self, list_of_data):
+        """Write a list (or any iterable) of data bytes to the transport.
+
+        The default implementation concatenates the arguments and
+        calls write() on the result.
+        """
+        data = b''.join(list_of_data)
+        self.write(data)
+
+    def write_eof(self):
+        """Close the write end after flushing buffered data.
+
+        (This is like typing ^D into a UNIX program reading from stdin.)
+
+        Data may still be received.
+        """
+        raise NotImplementedError
+
+    def can_write_eof(self):
+        """Return True if this transport supports write_eof(), False if not."""
+        raise NotImplementedError
+
+    def abort(self):
+        """Close the transport immediately.
+
+        Buffered data will be lost.  No more data will be received.
+        The protocol's connection_lost() method will (eventually) be
+        called with None as its argument.
+        """
+        raise NotImplementedError
+
+
+class Transport(ReadTransport, WriteTransport):
+    """Interface representing a bidirectional transport.
+
+    There may be several implementations, but typically, the user does
+    not implement new transports; rather, the platform provides some
+    useful transports that are implemented using the platform's best
+    practices.
+
+    The user never instantiates a transport directly; they call a
+    utility function, passing it a protocol factory and other
+    information necessary to create the transport and protocol.  (E.g.
+    EventLoop.create_connection() or EventLoop.create_server().)
+
+    The utility function will asynchronously create a transport and a
+    protocol and hook them up by calling the protocol's
+    connection_made() method, passing it the transport.
+
+    The implementation here raises NotImplementedError for every method
+    except writelines(), which concatenates the data and calls write().
+    """
+
+    __slots__ = ()
+
+
+class DatagramTransport(BaseTransport):
+    """Interface for datagram (UDP) transports."""
+
+    __slots__ = ()
+
+    def sendto(self, data, addr=None):
+        """Send data to the transport.
+
+        This does not block; it buffers the data and arranges for it
+        to be sent out asynchronously.
+        addr is the target socket address.
+        If addr is None, the target address given at transport creation
+        is used.
+        """
+        raise NotImplementedError
+
+    def abort(self):
+        """Close the transport immediately.
+
+        Buffered data will be lost.  No more data will be received.
+        The protocol's connection_lost() method will (eventually) be
+        called with None as its argument.
+        """
+        raise NotImplementedError
+
+
+class SubprocessTransport(BaseTransport):
+
+    __slots__ = ()
+
+    def get_pid(self):
+        """Get subprocess id."""
+        raise NotImplementedError
+
+    def get_returncode(self):
+        """Get subprocess returncode.
+
+        See also
+        http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
+        """
+        raise NotImplementedError
+
+    def get_pipe_transport(self, fd):
+        """Get transport for pipe with number fd."""
+        raise NotImplementedError
+
+    def send_signal(self, signal):
+        """Send signal to subprocess.
+
+        See also:
+        docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
+        """
+        raise NotImplementedError
+
+    def terminate(self):
+        """Stop the subprocess.
+
+        Alias for close() method.
+
+        On Posix OSs the method sends SIGTERM to the subprocess.
+        On Windows the Win32 API function TerminateProcess()
+        is called to stop the subprocess.
+
+        See also:
+        http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
+        """
+        raise NotImplementedError
+
+    def kill(self):
+        """Kill the subprocess.
+
+        On Posix OSs the function sends SIGKILL to the subprocess.
+        On Windows kill() is an alias for terminate().
+
+        See also:
+        http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
+        """
+        raise NotImplementedError
+
+
+class _FlowControlMixin(Transport):
+    """All the logic for (write) flow control in a mix-in base class.
+
+    The subclass must implement get_write_buffer_size().  It must call
+    _maybe_pause_protocol() whenever the write buffer size increases,
+    and _maybe_resume_protocol() whenever it decreases.  It may also
+    override set_write_buffer_limits() (e.g. to specify different
+    defaults).
+
+    The subclass constructor must call super().__init__(extra).  This
+    will call set_write_buffer_limits().
+
+    The user may call set_write_buffer_limits() and
+    get_write_buffer_size(), and their protocol's pause_writing() and
+    resume_writing() may be called.
+    """
+
+    __slots__ = ('_loop', '_protocol_paused', '_high_water', '_low_water')
+
+    def __init__(self, extra=None, loop=None):
+        super().__init__(extra)
+        assert loop is not None
+        self._loop = loop
+        self._protocol_paused = False
+        self._set_write_buffer_limits()
+
+    def _maybe_pause_protocol(self):
+        size = self.get_write_buffer_size()
+        if size <= self._high_water:
+            return
+        if not self._protocol_paused:
+            self._protocol_paused = True
+            try:
+                self._protocol.pause_writing()
+            except (SystemExit, KeyboardInterrupt):
+                raise
+            except BaseException as exc:
+                self._loop.call_exception_handler({
+                    'message': 'protocol.pause_writing() failed',
+                    'exception': exc,
+                    'transport': self,
+                    'protocol': self._protocol,
+                })
+
+    def _maybe_resume_protocol(self):
+        if (self._protocol_paused and
+                self.get_write_buffer_size() <= self._low_water):
+            self._protocol_paused = False
+            try:
+                self._protocol.resume_writing()
+            except (SystemExit, KeyboardInterrupt):
+                raise
+            except BaseException as exc:
+                self._loop.call_exception_handler({
+                    'message': 'protocol.resume_writing() failed',
+                    'exception': exc,
+                    'transport': self,
+                    'protocol': self._protocol,
+                })
+
+    def get_write_buffer_limits(self):
+        return (self._low_water, self._high_water)
+
+    def _set_write_buffer_limits(self, high=None, low=None):
+        if high is None:
+            if low is None:
+                high = 64 * 1024
+            else:
+                high = 4 * low
+        if low is None:
+            low = high // 4
+
+        if not high >= low >= 0:
+            raise ValueError(
+                f'high ({high!r}) must be >= low ({low!r}) must be >= 0')
+
+        self._high_water = high
+        self._low_water = low
+
+    def set_write_buffer_limits(self, high=None, low=None):
+        self._set_write_buffer_limits(high=high, low=low)
+        self._maybe_pause_protocol()
+
+    def get_write_buffer_size(self):
+        raise NotImplementedError
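+
+
+# Editor's illustrative sketch (not part of upstream CPython): the
+# contract _FlowControlMixin expects from a subclass.  '_buffer',
+# '_protocol' and '_on_data_sent' are hypothetical names a concrete
+# transport might use.
+class _SketchTransport(_FlowControlMixin):
+    def __init__(self, protocol, loop):
+        super().__init__(None, loop)
+        self._protocol = protocol
+        self._buffer = bytearray()
+
+    def get_write_buffer_size(self):
+        return len(self._buffer)
+
+    def write(self, data):
+        self._buffer.extend(data)
+        self._maybe_pause_protocol()   # buffer grew; maybe pause_writing()
+
+    def _on_data_sent(self, nbytes):
+        del self._buffer[:nbytes]
+        self._maybe_resume_protocol()  # buffer shrank; maybe resume_writing()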
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/trsock.py b/linux-x64/clang/python3/lib/python3.9/asyncio/trsock.py
new file mode 100644
index 0000000..e9ebcc3
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/trsock.py
@@ -0,0 +1,206 @@
+import socket
+import warnings
+
+
+class TransportSocket:
+
+    """A socket-like wrapper for exposing real transport sockets.
+
+    These objects can be safely returned by APIs like
+    `transport.get_extra_info('socket')`.  All potentially disruptive
+    operations (like "socket.close()") are banned.
+    """
+
+    __slots__ = ('_sock',)
+
+    def __init__(self, sock: socket.socket):
+        self._sock = sock
+
+    def _na(self, what):
+        warnings.warn(
+            f"Using {what} on sockets returned from get_extra_info('socket') "
+            f"will be prohibited in asyncio 3.9. Please report your use case "
+            f"to bugs.python.org.",
+            DeprecationWarning, source=self)
+
+    @property
+    def family(self):
+        return self._sock.family
+
+    @property
+    def type(self):
+        return self._sock.type
+
+    @property
+    def proto(self):
+        return self._sock.proto
+
+    def __repr__(self):
+        s = (
+            f"<asyncio.TransportSocket fd={self.fileno()}, "
+            f"family={self.family!s}, type={self.type!s}, "
+            f"proto={self.proto}"
+        )
+
+        if self.fileno() != -1:
+            try:
+                laddr = self.getsockname()
+                if laddr:
+                    s = f"{s}, laddr={laddr}"
+            except socket.error:
+                pass
+            try:
+                raddr = self.getpeername()
+                if raddr:
+                    s = f"{s}, raddr={raddr}"
+            except socket.error:
+                pass
+
+        return f"{s}>"
+
+    def __getstate__(self):
+        raise TypeError("Cannot serialize asyncio.TransportSocket object")
+
+    def fileno(self):
+        return self._sock.fileno()
+
+    def dup(self):
+        return self._sock.dup()
+
+    def get_inheritable(self):
+        return self._sock.get_inheritable()
+
+    def shutdown(self, how):
+        # asyncio doesn't currently provide a high-level transport API
+        # to shut down the connection.
+        self._sock.shutdown(how)
+
+    def getsockopt(self, *args, **kwargs):
+        return self._sock.getsockopt(*args, **kwargs)
+
+    def setsockopt(self, *args, **kwargs):
+        self._sock.setsockopt(*args, **kwargs)
+
+    def getpeername(self):
+        return self._sock.getpeername()
+
+    def getsockname(self):
+        return self._sock.getsockname()
+
+    def accept(self):
+        self._na('accept() method')
+        return self._sock.accept()
+
+    def connect(self, *args, **kwargs):
+        self._na('connect() method')
+        return self._sock.connect(*args, **kwargs)
+
+    def connect_ex(self, *args, **kwargs):
+        self._na('connect_ex() method')
+        return self._sock.connect_ex(*args, **kwargs)
+
+    def bind(self, *args, **kwargs):
+        self._na('bind() method')
+        return self._sock.bind(*args, **kwargs)
+
+    def ioctl(self, *args, **kwargs):
+        self._na('ioctl() method')
+        return self._sock.ioctl(*args, **kwargs)
+
+    def listen(self, *args, **kwargs):
+        self._na('listen() method')
+        return self._sock.listen(*args, **kwargs)
+
+    def makefile(self):
+        self._na('makefile() method')
+        return self._sock.makefile()
+
+    def sendfile(self, *args, **kwargs):
+        self._na('sendfile() method')
+        return self._sock.sendfile(*args, **kwargs)
+
+    def close(self):
+        self._na('close() method')
+        return self._sock.close()
+
+    def detach(self):
+        self._na('detach() method')
+        return self._sock.detach()
+
+    def sendmsg_afalg(self, *args, **kwargs):
+        self._na('sendmsg_afalg() method')
+        return self._sock.sendmsg_afalg(*args, **kwargs)
+
+    def sendmsg(self, *args, **kwargs):
+        self._na('sendmsg() method')
+        return self._sock.sendmsg(*args, **kwargs)
+
+    def sendto(self, *args, **kwargs):
+        self._na('sendto() method')
+        return self._sock.sendto(*args, **kwargs)
+
+    def send(self, *args, **kwargs):
+        self._na('send() method')
+        return self._sock.send(*args, **kwargs)
+
+    def sendall(self, *args, **kwargs):
+        self._na('sendall() method')
+        return self._sock.sendall(*args, **kwargs)
+
+    def set_inheritable(self, *args, **kwargs):
+        self._na('set_inheritable() method')
+        return self._sock.set_inheritable(*args, **kwargs)
+
+    def share(self, process_id):
+        self._na('share() method')
+        return self._sock.share(process_id)
+
+    def recv_into(self, *args, **kwargs):
+        self._na('recv_into() method')
+        return self._sock.recv_into(*args, **kwargs)
+
+    def recvfrom_into(self, *args, **kwargs):
+        self._na('recvfrom_into() method')
+        return self._sock.recvfrom_into(*args, **kwargs)
+
+    def recvmsg_into(self, *args, **kwargs):
+        self._na('recvmsg_into() method')
+        return self._sock.recvmsg_into(*args, **kwargs)
+
+    def recvmsg(self, *args, **kwargs):
+        self._na('recvmsg() method')
+        return self._sock.recvmsg(*args, **kwargs)
+
+    def recvfrom(self, *args, **kwargs):
+        self._na('recvfrom() method')
+        return self._sock.recvfrom(*args, **kwargs)
+
+    def recv(self, *args, **kwargs):
+        self._na('recv() method')
+        return self._sock.recv(*args, **kwargs)
+
+    def settimeout(self, value):
+        if value == 0:
+            return
+        raise ValueError(
+            'settimeout(): only 0 timeout is allowed on transport sockets')
+
+    def gettimeout(self):
+        return 0
+
+    def setblocking(self, flag):
+        if not flag:
+            return
+        raise ValueError(
+            'setblocking(): transport sockets cannot be blocking')
+
+    def __enter__(self):
+        self._na('context manager protocol')
+        return self._sock.__enter__()
+
+    def __exit__(self, *err):
+        self._na('context manager protocol')
+        return self._sock.__exit__(*err)
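+
+
+# Editor's illustrative sketch (not part of upstream CPython): protocols
+# normally reach a TransportSocket via get_extra_info(); read-only
+# queries are forwarded, while disruptive calls warn and blocking-mode
+# changes are rejected outright.
+def _example_transport_socket(transport):
+    sock = transport.get_extra_info('socket')  # a TransportSocket wrapper
+    name = sock.getsockname()  # safe, forwarded query
+    sock.settimeout(0)         # allowed; any other timeout raises
+    return name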
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/unix_events.py b/linux-x64/clang/python3/lib/python3.9/asyncio/unix_events.py
new file mode 100644
index 0000000..f34a5b4
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/unix_events.py
@@ -0,0 +1,1466 @@
+"""Selector event loop for Unix with signal handling."""
+
+import errno
+import io
+import itertools
+import os
+import selectors
+import signal
+import socket
+import stat
+import subprocess
+import sys
+import threading
+import warnings
+
+from . import base_events
+from . import base_subprocess
+from . import constants
+from . import coroutines
+from . import events
+from . import exceptions
+from . import futures
+from . import selector_events
+from . import tasks
+from . import transports
+from .log import logger
+
+
+__all__ = (
+    'SelectorEventLoop',
+    'AbstractChildWatcher', 'SafeChildWatcher',
+    'FastChildWatcher', 'PidfdChildWatcher',
+    'MultiLoopChildWatcher', 'ThreadedChildWatcher',
+    'DefaultEventLoopPolicy',
+)
+
+
+if sys.platform == 'win32':  # pragma: no cover
+    raise ImportError('Signals are not really supported on Windows')
+
+
+def _sighandler_noop(signum, frame):
+    """Dummy signal handler."""
+    pass
+
+
+class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
+    """Unix event loop.
+
+    Adds signal handling and UNIX Domain Socket support to SelectorEventLoop.
+    """
+
+    def __init__(self, selector=None):
+        super().__init__(selector)
+        self._signal_handlers = {}
+
+    def close(self):
+        super().close()
+        if not sys.is_finalizing():
+            for sig in list(self._signal_handlers):
+                self.remove_signal_handler(sig)
+        else:
+            if self._signal_handlers:
+                warnings.warn(f"Closing the loop {self!r} "
+                              f"on interpreter shutdown "
+                              f"stage, skipping signal handlers removal",
+                              ResourceWarning,
+                              source=self)
+                self._signal_handlers.clear()
+
+    def _process_self_data(self, data):
+        for signum in data:
+            if not signum:
+                # ignore null bytes written by _write_to_self()
+                continue
+            self._handle_signal(signum)
+
+    def add_signal_handler(self, sig, callback, *args):
+        """Add a handler for a signal.  UNIX only.
+
+        Raise ValueError if the signal number is invalid or uncatchable.
+        Raise RuntimeError if there is a problem setting up the handler.
+        """
+        if (coroutines.iscoroutine(callback) or
+                coroutines.iscoroutinefunction(callback)):
+            raise TypeError("coroutines cannot be used "
+                            "with add_signal_handler()")
+        self._check_signal(sig)
+        self._check_closed()
+        try:
+            # set_wakeup_fd() raises ValueError if this is not the
+            # main thread.  By calling it early we ensure that an
+            # event loop running in another thread cannot add a signal
+            # handler.
+            signal.set_wakeup_fd(self._csock.fileno())
+        except (ValueError, OSError) as exc:
+            raise RuntimeError(str(exc))
+
+        handle = events.Handle(callback, args, self, None)
+        self._signal_handlers[sig] = handle
+
+        try:
+            # Register a dummy signal handler to ask Python to write the signal
+            # number in the wakeup file descriptor. _process_self_data() will
+            # read signal numbers from this file descriptor to handle signals.
+            signal.signal(sig, _sighandler_noop)
+
+            # Set SA_RESTART to limit EINTR occurrences.
+            signal.siginterrupt(sig, False)
+        except OSError as exc:
+            del self._signal_handlers[sig]
+            if not self._signal_handlers:
+                try:
+                    signal.set_wakeup_fd(-1)
+                except (ValueError, OSError) as nexc:
+                    logger.info('set_wakeup_fd(-1) failed: %s', nexc)
+
+            if exc.errno == errno.EINVAL:
+                raise RuntimeError(f'sig {sig} cannot be caught')
+            else:
+                raise
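+
+    # Editor's illustrative sketch (not part of upstream CPython), kept
+    # as a comment because it sits inside the class body:
+    #
+    #     loop = asyncio.get_event_loop()
+    #     loop.add_signal_handler(signal.SIGTERM, loop.stop)
+    #
+    # Unlike a signal.signal() handler, the callback runs in the event
+    # loop rather than in the interrupted main thread.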
+
+    def _handle_signal(self, sig):
+        """Internal helper that is the actual signal handler."""
+        handle = self._signal_handlers.get(sig)
+        if handle is None:
+            return  # Assume it's some race condition.
+        if handle._cancelled:
+            self.remove_signal_handler(sig)  # Remove it properly.
+        else:
+            self._add_callback_signalsafe(handle)
+
+    def remove_signal_handler(self, sig):
+        """Remove a handler for a signal.  UNIX only.
+
+        Return True if a signal handler was removed, False if not.
+        """
+        self._check_signal(sig)
+        try:
+            del self._signal_handlers[sig]
+        except KeyError:
+            return False
+
+        if sig == signal.SIGINT:
+            handler = signal.default_int_handler
+        else:
+            handler = signal.SIG_DFL
+
+        try:
+            signal.signal(sig, handler)
+        except OSError as exc:
+            if exc.errno == errno.EINVAL:
+                raise RuntimeError(f'sig {sig} cannot be caught')
+            else:
+                raise
+
+        if not self._signal_handlers:
+            try:
+                signal.set_wakeup_fd(-1)
+            except (ValueError, OSError) as exc:
+                logger.info('set_wakeup_fd(-1) failed: %s', exc)
+
+        return True
+
+    def _check_signal(self, sig):
+        """Internal helper to validate a signal.
+
+        Raise ValueError if the signal number is invalid or uncatchable.
+        Raise RuntimeError if there is a problem setting up the handler.
+        """
+        if not isinstance(sig, int):
+            raise TypeError(f'sig must be an int, not {sig!r}')
+
+        if sig not in signal.valid_signals():
+            raise ValueError(f'invalid signal number {sig}')
+
+    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
+                                  extra=None):
+        return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra)
+
+    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
+                                   extra=None):
+        return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra)
+
+    async def _make_subprocess_transport(self, protocol, args, shell,
+                                         stdin, stdout, stderr, bufsize,
+                                         extra=None, **kwargs):
+        with events.get_child_watcher() as watcher:
+            if not watcher.is_active():
+                # Check early.
+                # Raising exception before process creation
+                # prevents subprocess execution if the watcher
+                # is not ready to handle it.
+                raise RuntimeError("asyncio.get_child_watcher() is not activated, "
+                                   "subprocess support is not installed.")
+            waiter = self.create_future()
+            transp = _UnixSubprocessTransport(self, protocol, args, shell,
+                                              stdin, stdout, stderr, bufsize,
+                                              waiter=waiter, extra=extra,
+                                              **kwargs)
+
+            watcher.add_child_handler(transp.get_pid(),
+                                      self._child_watcher_callback, transp)
+            try:
+                await waiter
+            except (SystemExit, KeyboardInterrupt):
+                raise
+            except BaseException:
+                transp.close()
+                await transp._wait()
+                raise
+
+        return transp
+
+    def _child_watcher_callback(self, pid, returncode, transp):
+        self.call_soon_threadsafe(transp._process_exited, returncode)
+
+    async def create_unix_connection(
+            self, protocol_factory, path=None, *,
+            ssl=None, sock=None,
+            server_hostname=None,
+            ssl_handshake_timeout=None):
+        assert server_hostname is None or isinstance(server_hostname, str)
+        if ssl:
+            if server_hostname is None:
+                raise ValueError(
+                    'you have to pass server_hostname when using ssl')
+        else:
+            if server_hostname is not None:
+                raise ValueError('server_hostname is only meaningful with ssl')
+            if ssl_handshake_timeout is not None:
+                raise ValueError(
+                    'ssl_handshake_timeout is only meaningful with ssl')
+
+        if path is not None:
+            if sock is not None:
+                raise ValueError(
+                    'path and sock can not be specified at the same time')
+
+            path = os.fspath(path)
+            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
+            try:
+                sock.setblocking(False)
+                await self.sock_connect(sock, path)
+            except:
+                sock.close()
+                raise
+
+        else:
+            if sock is None:
+                raise ValueError('neither path nor sock was specified')
+            if (sock.family != socket.AF_UNIX or
+                    sock.type != socket.SOCK_STREAM):
+                raise ValueError(
+                    f'A UNIX Domain Stream Socket was expected, got {sock!r}')
+            sock.setblocking(False)
+
+        transport, protocol = await self._create_connection_transport(
+            sock, protocol_factory, ssl, server_hostname,
+            ssl_handshake_timeout=ssl_handshake_timeout)
+        return transport, protocol
+
+    async def create_unix_server(
+            self, protocol_factory, path=None, *,
+            sock=None, backlog=100, ssl=None,
+            ssl_handshake_timeout=None,
+            start_serving=True):
+        if isinstance(ssl, bool):
+            raise TypeError('ssl argument must be an SSLContext or None')
+
+        if ssl_handshake_timeout is not None and not ssl:
+            raise ValueError(
+                'ssl_handshake_timeout is only meaningful with ssl')
+
+        if path is not None:
+            if sock is not None:
+                raise ValueError(
+                    'path and sock can not be specified at the same time')
+
+            path = os.fspath(path)
+            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+
+            # Check for abstract socket. `str` and `bytes` paths are supported.
+            if path[0] not in (0, '\x00'):
+                try:
+                    if stat.S_ISSOCK(os.stat(path).st_mode):
+                        os.remove(path)
+                except FileNotFoundError:
+                    pass
+                except OSError as err:
+                    # The directory may grant permission only to create the socket.
+                    logger.error('Unable to check or remove stale UNIX socket '
+                                 '%r: %r', path, err)
+
+            try:
+                sock.bind(path)
+            except OSError as exc:
+                sock.close()
+                if exc.errno == errno.EADDRINUSE:
+                    # Improve the error message by including
+                    # the exact address on which it occurs.
+                    msg = f'Address {path!r} is already in use'
+                    raise OSError(errno.EADDRINUSE, msg) from None
+                else:
+                    raise
+            except:
+                sock.close()
+                raise
+        else:
+            if sock is None:
+                raise ValueError(
+                    'path was not specified, and no sock was specified')
+
+            if (sock.family != socket.AF_UNIX or
+                    sock.type != socket.SOCK_STREAM):
+                raise ValueError(
+                    f'A UNIX Domain Stream Socket was expected, got {sock!r}')
+
+        sock.setblocking(False)
+        server = base_events.Server(self, [sock], protocol_factory,
+                                    ssl, backlog, ssl_handshake_timeout)
+        if start_serving:
+            server._start_serving()
+            # Skip one loop iteration so that all 'loop.add_reader'
+            # callbacks go through.
+            await tasks.sleep(0, loop=self)
+
+        return server
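+
+    # Illustrative usage sketch (not part of the original module; the socket
+    # path below is hypothetical):
+    #
+    #     import asyncio
+    #
+    #     async def main():
+    #         loop = asyncio.get_running_loop()
+    #         server = await loop.create_unix_server(
+    #             asyncio.Protocol, '/tmp/example.sock')
+    #         async with server:
+    #             await server.serve_forever()
+    #
+    #     asyncio.run(main())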
+
+    async def _sock_sendfile_native(self, sock, file, offset, count):
+        try:
+            os.sendfile
+        except AttributeError:
+            raise exceptions.SendfileNotAvailableError(
+                "os.sendfile() is not available")
+        try:
+            fileno = file.fileno()
+        except (AttributeError, io.UnsupportedOperation) as err:
+            raise exceptions.SendfileNotAvailableError("not a regular file")
+        try:
+            fsize = os.fstat(fileno).st_size
+        except OSError:
+            raise exceptions.SendfileNotAvailableError("not a regular file")
+        blocksize = count if count else fsize
+        if not blocksize:
+            return 0  # empty file
+
+        fut = self.create_future()
+        self._sock_sendfile_native_impl(fut, None, sock, fileno,
+                                        offset, count, blocksize, 0)
+        return await fut
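+
+    # Illustrative usage sketch (not part of the original module): callers
+    # normally reach this native sendfile() path via loop.sock_sendfile(),
+    # which can fall back to plain send() when os.sendfile() is unusable.
+    # 'sock' is assumed to be a connected non-blocking socket and 'data.bin'
+    # a hypothetical file:
+    #
+    #     with open('data.bin', 'rb') as f:
+    #         sent = await loop.sock_sendfile(sock, f)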
+
+    def _sock_sendfile_native_impl(self, fut, registered_fd, sock, fileno,
+                                   offset, count, blocksize, total_sent):
+        fd = sock.fileno()
+        if registered_fd is not None:
+            # Remove the callback early.  It should be rare that the
+            # selector says the fd is ready but the call still returns
+            # EAGAIN, and I am willing to take a hit in that case in
+            # order to simplify the common case.
+            self.remove_writer(registered_fd)
+        if fut.cancelled():
+            self._sock_sendfile_update_filepos(fileno, offset, total_sent)
+            return
+        if count:
+            blocksize = count - total_sent
+            if blocksize <= 0:
+                self._sock_sendfile_update_filepos(fileno, offset, total_sent)
+                fut.set_result(total_sent)
+                return
+
+        try:
+            sent = os.sendfile(fd, fileno, offset, blocksize)
+        except (BlockingIOError, InterruptedError):
+            if registered_fd is None:
+                self._sock_add_cancellation_callback(fut, sock)
+            self.add_writer(fd, self._sock_sendfile_native_impl, fut,
+                            fd, sock, fileno,
+                            offset, count, blocksize, total_sent)
+        except OSError as exc:
+            if (registered_fd is not None and
+                    exc.errno == errno.ENOTCONN and
+                    type(exc) is not ConnectionError):
+                # If we have an ENOTCONN and this isn't a first call to
+                # sendfile(), i.e. the connection was closed in the middle
+                # of the operation, normalize the error to ConnectionError
+                # to make it consistent across all Posix systems.
+                new_exc = ConnectionError(
+                    "socket is not connected", errno.ENOTCONN)
+                new_exc.__cause__ = exc
+                exc = new_exc
+            if total_sent == 0:
+                # We can get here for different reasons, the main
+                # one being 'file' is not a regular mmap(2)-like
+                # file, in which case we'll fall back on using
+                # plain send().
+                err = exceptions.SendfileNotAvailableError(
+                    "os.sendfile call failed")
+                self._sock_sendfile_update_filepos(fileno, offset, total_sent)
+                fut.set_exception(err)
+            else:
+                self._sock_sendfile_update_filepos(fileno, offset, total_sent)
+                fut.set_exception(exc)
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            self._sock_sendfile_update_filepos(fileno, offset, total_sent)
+            fut.set_exception(exc)
+        else:
+            if sent == 0:
+                # EOF
+                self._sock_sendfile_update_filepos(fileno, offset, total_sent)
+                fut.set_result(total_sent)
+            else:
+                offset += sent
+                total_sent += sent
+                if registered_fd is None:
+                    self._sock_add_cancellation_callback(fut, sock)
+                self.add_writer(fd, self._sock_sendfile_native_impl, fut,
+                                fd, sock, fileno,
+                                offset, count, blocksize, total_sent)
+
+    def _sock_sendfile_update_filepos(self, fileno, offset, total_sent):
+        if total_sent > 0:
+            os.lseek(fileno, offset, os.SEEK_SET)
+
+    def _sock_add_cancellation_callback(self, fut, sock):
+        def cb(fut):
+            if fut.cancelled():
+                fd = sock.fileno()
+                if fd != -1:
+                    self.remove_writer(fd)
+        fut.add_done_callback(cb)
+
+
+class _UnixReadPipeTransport(transports.ReadTransport):
+
+    max_size = 256 * 1024  # max bytes we read in one event loop iteration
+
+    def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
+        super().__init__(extra)
+        self._extra['pipe'] = pipe
+        self._loop = loop
+        self._pipe = pipe
+        self._fileno = pipe.fileno()
+        self._protocol = protocol
+        self._closing = False
+        self._paused = False
+
+        mode = os.fstat(self._fileno).st_mode
+        if not (stat.S_ISFIFO(mode) or
+                stat.S_ISSOCK(mode) or
+                stat.S_ISCHR(mode)):
+            self._pipe = None
+            self._fileno = None
+            self._protocol = None
+            raise ValueError("Pipe transport is for pipes/sockets only.")
+
+        os.set_blocking(self._fileno, False)
+
+        self._loop.call_soon(self._protocol.connection_made, self)
+        # only start reading when connection_made() has been called
+        self._loop.call_soon(self._loop._add_reader,
+                             self._fileno, self._read_ready)
+        if waiter is not None:
+            # only wake up the waiter when connection_made() has been called
+            self._loop.call_soon(futures._set_result_unless_cancelled,
+                                 waiter, None)
+
+    def __repr__(self):
+        info = [self.__class__.__name__]
+        if self._pipe is None:
+            info.append('closed')
+        elif self._closing:
+            info.append('closing')
+        info.append(f'fd={self._fileno}')
+        selector = getattr(self._loop, '_selector', None)
+        if self._pipe is not None and selector is not None:
+            polling = selector_events._test_selector_event(
+                selector, self._fileno, selectors.EVENT_READ)
+            if polling:
+                info.append('polling')
+            else:
+                info.append('idle')
+        elif self._pipe is not None:
+            info.append('open')
+        else:
+            info.append('closed')
+        return '<{}>'.format(' '.join(info))
+
+    def _read_ready(self):
+        try:
+            data = os.read(self._fileno, self.max_size)
+        except (BlockingIOError, InterruptedError):
+            pass
+        except OSError as exc:
+            self._fatal_error(exc, 'Fatal read error on pipe transport')
+        else:
+            if data:
+                self._protocol.data_received(data)
+            else:
+                if self._loop.get_debug():
+                    logger.info("%r was closed by peer", self)
+                self._closing = True
+                self._loop._remove_reader(self._fileno)
+                self._loop.call_soon(self._protocol.eof_received)
+                self._loop.call_soon(self._call_connection_lost, None)
+
+    def pause_reading(self):
+        if self._closing or self._paused:
+            return
+        self._paused = True
+        self._loop._remove_reader(self._fileno)
+        if self._loop.get_debug():
+            logger.debug("%r pauses reading", self)
+
+    def resume_reading(self):
+        if self._closing or not self._paused:
+            return
+        self._paused = False
+        self._loop._add_reader(self._fileno, self._read_ready)
+        if self._loop.get_debug():
+            logger.debug("%r resumes reading", self)
+
+    def set_protocol(self, protocol):
+        self._protocol = protocol
+
+    def get_protocol(self):
+        return self._protocol
+
+    def is_closing(self):
+        return self._closing
+
+    def close(self):
+        if not self._closing:
+            self._close(None)
+
+    def __del__(self, _warn=warnings.warn):
+        if self._pipe is not None:
+            _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
+            self._pipe.close()
+
+    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
+        # should be called by exception handler only
+        if (isinstance(exc, OSError) and exc.errno == errno.EIO):
+            if self._loop.get_debug():
+                logger.debug("%r: %s", self, message, exc_info=True)
+        else:
+            self._loop.call_exception_handler({
+                'message': message,
+                'exception': exc,
+                'transport': self,
+                'protocol': self._protocol,
+            })
+        self._close(exc)
+
+    def _close(self, exc):
+        self._closing = True
+        self._loop._remove_reader(self._fileno)
+        self._loop.call_soon(self._call_connection_lost, exc)
+
+    def _call_connection_lost(self, exc):
+        try:
+            self._protocol.connection_lost(exc)
+        finally:
+            self._pipe.close()
+            self._pipe = None
+            self._protocol = None
+            self._loop = None
+
+
+class _UnixWritePipeTransport(transports._FlowControlMixin,
+                              transports.WriteTransport):
+
+    def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
+        super().__init__(extra, loop)
+        self._extra['pipe'] = pipe
+        self._pipe = pipe
+        self._fileno = pipe.fileno()
+        self._protocol = protocol
+        self._buffer = bytearray()
+        self._conn_lost = 0
+        self._closing = False  # Set when close() or write_eof() called.
+
+        mode = os.fstat(self._fileno).st_mode
+        is_char = stat.S_ISCHR(mode)
+        is_fifo = stat.S_ISFIFO(mode)
+        is_socket = stat.S_ISSOCK(mode)
+        if not (is_char or is_fifo or is_socket):
+            self._pipe = None
+            self._fileno = None
+            self._protocol = None
+            raise ValueError("Pipe transport is only for "
+                             "pipes, sockets and character devices")
+
+        os.set_blocking(self._fileno, False)
+        self._loop.call_soon(self._protocol.connection_made, self)
+
+        # On AIX, the reader trick (to be notified when the read end of the
+        # socket is closed) only works for sockets. On other platforms it
+        # works for pipes and sockets. (Exception: OS X 10.4?  Issue #19294.)
+        if is_socket or (is_fifo and not sys.platform.startswith("aix")):
+            # only start reading when connection_made() has been called
+            self._loop.call_soon(self._loop._add_reader,
+                                 self._fileno, self._read_ready)
+
+        if waiter is not None:
+            # only wake up the waiter when connection_made() has been called
+            self._loop.call_soon(futures._set_result_unless_cancelled,
+                                 waiter, None)
+
+    def __repr__(self):
+        info = [self.__class__.__name__]
+        if self._pipe is None:
+            info.append('closed')
+        elif self._closing:
+            info.append('closing')
+        info.append(f'fd={self._fileno}')
+        selector = getattr(self._loop, '_selector', None)
+        if self._pipe is not None and selector is not None:
+            polling = selector_events._test_selector_event(
+                selector, self._fileno, selectors.EVENT_WRITE)
+            if polling:
+                info.append('polling')
+            else:
+                info.append('idle')
+
+            bufsize = self.get_write_buffer_size()
+            info.append(f'bufsize={bufsize}')
+        elif self._pipe is not None:
+            info.append('open')
+        else:
+            info.append('closed')
+        return '<{}>'.format(' '.join(info))
+
+    def get_write_buffer_size(self):
+        return len(self._buffer)
+
+    def _read_ready(self):
+        # Pipe was closed by peer.
+        if self._loop.get_debug():
+            logger.info("%r was closed by peer", self)
+        if self._buffer:
+            self._close(BrokenPipeError())
+        else:
+            self._close()
+
+    def write(self, data):
+        assert isinstance(data, (bytes, bytearray, memoryview)), repr(data)
+        if isinstance(data, bytearray):
+            data = memoryview(data)
+        if not data:
+            return
+
+        if self._conn_lost or self._closing:
+            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
+                logger.warning('pipe closed by peer or '
+                               'os.write(pipe, data) raised exception.')
+            self._conn_lost += 1
+            return
+
+        if not self._buffer:
+            # Attempt to send it right away first.
+            try:
+                n = os.write(self._fileno, data)
+            except (BlockingIOError, InterruptedError):
+                n = 0
+            except (SystemExit, KeyboardInterrupt):
+                raise
+            except BaseException as exc:
+                self._conn_lost += 1
+                self._fatal_error(exc, 'Fatal write error on pipe transport')
+                return
+            if n == len(data):
+                return
+            elif n > 0:
+                data = memoryview(data)[n:]
+            self._loop._add_writer(self._fileno, self._write_ready)
+
+        self._buffer += data
+        self._maybe_pause_protocol()
+
+    def _write_ready(self):
+        assert self._buffer, 'Data should not be empty'
+
+        try:
+            n = os.write(self._fileno, self._buffer)
+        except (BlockingIOError, InterruptedError):
+            pass
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            self._buffer.clear()
+            self._conn_lost += 1
+            # Remove the writer here; _fatal_error() doesn't do it
+            # because _buffer is empty.
+            self._loop._remove_writer(self._fileno)
+            self._fatal_error(exc, 'Fatal write error on pipe transport')
+        else:
+            if n == len(self._buffer):
+                self._buffer.clear()
+                self._loop._remove_writer(self._fileno)
+                self._maybe_resume_protocol()  # May append to buffer.
+                if self._closing:
+                    self._loop._remove_reader(self._fileno)
+                    self._call_connection_lost(None)
+                return
+            elif n > 0:
+                del self._buffer[:n]
+
+    def can_write_eof(self):
+        return True
+
+    def write_eof(self):
+        if self._closing:
+            return
+        assert self._pipe
+        self._closing = True
+        if not self._buffer:
+            self._loop._remove_reader(self._fileno)
+            self._loop.call_soon(self._call_connection_lost, None)
+
+    def set_protocol(self, protocol):
+        self._protocol = protocol
+
+    def get_protocol(self):
+        return self._protocol
+
+    def is_closing(self):
+        return self._closing
+
+    def close(self):
+        if self._pipe is not None and not self._closing:
+            # write_eof is all we need to close the write pipe
+            self.write_eof()
+
+    def __del__(self, _warn=warnings.warn):
+        if self._pipe is not None:
+            _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
+            self._pipe.close()
+
+    def abort(self):
+        self._close(None)
+
+    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
+        # should be called by exception handler only
+        if isinstance(exc, OSError):
+            if self._loop.get_debug():
+                logger.debug("%r: %s", self, message, exc_info=True)
+        else:
+            self._loop.call_exception_handler({
+                'message': message,
+                'exception': exc,
+                'transport': self,
+                'protocol': self._protocol,
+            })
+        self._close(exc)
+
+    def _close(self, exc=None):
+        self._closing = True
+        if self._buffer:
+            self._loop._remove_writer(self._fileno)
+        self._buffer.clear()
+        self._loop._remove_reader(self._fileno)
+        self._loop.call_soon(self._call_connection_lost, exc)
+
+    def _call_connection_lost(self, exc):
+        try:
+            self._protocol.connection_lost(exc)
+        finally:
+            self._pipe.close()
+            self._pipe = None
+            self._protocol = None
+            self._loop = None
+
+
+class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):
+
+    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
+        stdin_w = None
+        if stdin == subprocess.PIPE:
+            # Use a socket pair for stdin, since not all platforms
+            # support selecting read events on the write end of a
+            # socket (which we use in order to detect closing of the
+            # other end).  Notably this is needed on AIX, and works
+            # just fine on other platforms.
+            stdin, stdin_w = socket.socketpair()
+        try:
+            self._proc = subprocess.Popen(
+                args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
+                universal_newlines=False, bufsize=bufsize, **kwargs)
+            if stdin_w is not None:
+                stdin.close()
+                self._proc.stdin = open(stdin_w.detach(), 'wb', buffering=bufsize)
+                stdin_w = None
+        finally:
+            if stdin_w is not None:
+                stdin.close()
+                stdin_w.close()
+
+
+class AbstractChildWatcher:
+    """Abstract base class for monitoring child processes.
+
+    Objects derived from this class monitor a collection of subprocesses and
+    report their termination or interruption by a signal.
+
+    New callbacks are registered with .add_child_handler(). Starting a new
+    process must be done within a 'with' block to allow the watcher to suspend
+    its activity until the new process is fully registered (this is needed to
+    prevent a race condition in some implementations).
+
+    Example:
+        with watcher:
+            proc = subprocess.Popen("sleep 1")
+            watcher.add_child_handler(proc.pid, callback)
+
+    Notes:
+        Implementations of this class must be thread-safe.
+
+        Since child watcher objects may catch the SIGCHLD signal and call
+        waitpid(-1), there should be only one active object per process.
+    """
+
+    def add_child_handler(self, pid, callback, *args):
+        """Register a new child handler.
+
+        Arrange for callback(pid, returncode, *args) to be called when
+        process 'pid' terminates. Specifying another callback for the same
+        process replaces the previous handler.
+
+        Note: callback() must be thread-safe.
+        """
+        raise NotImplementedError()
+
+    def remove_child_handler(self, pid):
+        """Removes the handler for process 'pid'.
+
+        The function returns True if the handler was successfully removed,
+        False if there was nothing to remove."""
+
+        raise NotImplementedError()
+
+    def attach_loop(self, loop):
+        """Attach the watcher to an event loop.
+
+        If the watcher was previously attached to an event loop, then it is
+        first detached before attaching to the new loop.
+
+        Note: loop may be None.
+        """
+        raise NotImplementedError()
+
+    def close(self):
+        """Close the watcher.
+
+        This must be called to make sure that any underlying resource is freed.
+        """
+        raise NotImplementedError()
+
+    def is_active(self):
+        """Return ``True`` if the watcher is active and is used by the event loop.
+
+        Return True if the watcher is installed and ready to handle process exit
+        notifications.
+
+        """
+        raise NotImplementedError()
+
+    def __enter__(self):
+        """Enter the watcher's context and allow starting new processes
+
+        This function must return self"""
+        raise NotImplementedError()
+
+    def __exit__(self, a, b, c):
+        """Exit the watcher's context"""
+        raise NotImplementedError()
+
+
+class PidfdChildWatcher(AbstractChildWatcher):
+    """Child watcher implementation using Linux's pid file descriptors.
+
+    This child watcher polls process file descriptors (pidfds) to await child
+    process termination. In some respects, PidfdChildWatcher is a "Goldilocks"
+    child watcher implementation. It doesn't require signals or threads, doesn't
+    interfere with any processes launched outside the event loop, and scales
+    linearly with the number of subprocesses launched by the event loop. The
+    main disadvantage is that pidfds are specific to Linux, and only work on
+    recent (5.3+) kernels.
+    """
+
+    def __init__(self):
+        self._loop = None
+        self._callbacks = {}
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, exc_traceback):
+        pass
+
+    def is_active(self):
+        return self._loop is not None and self._loop.is_running()
+
+    def close(self):
+        self.attach_loop(None)
+
+    def attach_loop(self, loop):
+        if self._loop is not None and loop is None and self._callbacks:
+            warnings.warn(
+                'A loop is being detached '
+                'from a child watcher with pending handlers',
+                RuntimeWarning)
+        for pidfd, _, _ in self._callbacks.values():
+            self._loop._remove_reader(pidfd)
+            os.close(pidfd)
+        self._callbacks.clear()
+        self._loop = loop
+
+    def add_child_handler(self, pid, callback, *args):
+        existing = self._callbacks.get(pid)
+        if existing is not None:
+            self._callbacks[pid] = existing[0], callback, args
+        else:
+            pidfd = os.pidfd_open(pid)
+            self._loop._add_reader(pidfd, self._do_wait, pid)
+            self._callbacks[pid] = pidfd, callback, args
+
+    def _do_wait(self, pid):
+        pidfd, callback, args = self._callbacks.pop(pid)
+        self._loop._remove_reader(pidfd)
+        try:
+            _, status = os.waitpid(pid, 0)
+        except ChildProcessError:
+            # The child process is already reaped
+            # (may happen if waitpid() is called elsewhere).
+            returncode = 255
+            logger.warning(
+                "child process pid %d exit status already read: "
+                " will report returncode 255",
+                pid)
+        else:
+            returncode = _compute_returncode(status)
+
+        os.close(pidfd)
+        callback(pid, returncode, *args)
+
+    def remove_child_handler(self, pid):
+        try:
+            pidfd, _, _ = self._callbacks.pop(pid)
+        except KeyError:
+            return False
+        self._loop._remove_reader(pidfd)
+        os.close(pidfd)
+        return True
+
+
+def _compute_returncode(status):
+    if os.WIFSIGNALED(status):
+        # The child process died because of a signal.
+        return -os.WTERMSIG(status)
+    elif os.WIFEXITED(status):
+        # The child process exited (e.g. sys.exit()).
+        return os.WEXITSTATUS(status)
+    else:
+        # The child exited, but we don't understand its status.
+        # This shouldn't happen, but if it does, let's just
+        # return that status; perhaps that helps debug it.
+        return status
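+
+# For example (illustrative): a child killed by SIGKILL (signal 9) yields -9,
+# while a child that exits via sys.exit(3) yields 3.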
+
+
+class BaseChildWatcher(AbstractChildWatcher):
+
+    def __init__(self):
+        self._loop = None
+        self._callbacks = {}
+
+    def close(self):
+        self.attach_loop(None)
+
+    def is_active(self):
+        return self._loop is not None and self._loop.is_running()
+
+    def _do_waitpid(self, expected_pid):
+        raise NotImplementedError()
+
+    def _do_waitpid_all(self):
+        raise NotImplementedError()
+
+    def attach_loop(self, loop):
+        assert loop is None or isinstance(loop, events.AbstractEventLoop)
+
+        if self._loop is not None and loop is None and self._callbacks:
+            warnings.warn(
+                'A loop is being detached '
+                'from a child watcher with pending handlers',
+                RuntimeWarning)
+
+        if self._loop is not None:
+            self._loop.remove_signal_handler(signal.SIGCHLD)
+
+        self._loop = loop
+        if loop is not None:
+            loop.add_signal_handler(signal.SIGCHLD, self._sig_chld)
+
+            # Prevent a race condition in case a child terminated
+            # during the switch.
+            self._do_waitpid_all()
+
+    def _sig_chld(self):
+        try:
+            self._do_waitpid_all()
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            # self._loop should always be available here
+            # as '_sig_chld' is added as a signal handler
+            # in 'attach_loop'
+            self._loop.call_exception_handler({
+                'message': 'Unknown exception in SIGCHLD handler',
+                'exception': exc,
+            })
+
+
+class SafeChildWatcher(BaseChildWatcher):
+    """'Safe' child watcher implementation.
+
+    This implementation avoids disrupting other code spawning processes by
+    polling explicitly each process in the SIGCHLD handler instead of calling
+    os.waitpid(-1).
+
+    This is a safe solution, but it has significant overhead when handling a
+    large number of children (O(n) each time SIGCHLD is raised).
+    """
+
+    def close(self):
+        self._callbacks.clear()
+        super().close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, a, b, c):
+        pass
+
+    def add_child_handler(self, pid, callback, *args):
+        self._callbacks[pid] = (callback, args)
+
+        # Prevent a race condition in case the child is already terminated.
+        self._do_waitpid(pid)
+
+    def remove_child_handler(self, pid):
+        try:
+            del self._callbacks[pid]
+            return True
+        except KeyError:
+            return False
+
+    def _do_waitpid_all(self):
+
+        for pid in list(self._callbacks):
+            self._do_waitpid(pid)
+
+    def _do_waitpid(self, expected_pid):
+        assert expected_pid > 0
+
+        try:
+            pid, status = os.waitpid(expected_pid, os.WNOHANG)
+        except ChildProcessError:
+            # The child process is already reaped
+            # (may happen if waitpid() is called elsewhere).
+            pid = expected_pid
+            returncode = 255
+            logger.warning(
+                "Unknown child process pid %d, will report returncode 255",
+                pid)
+        else:
+            if pid == 0:
+                # The child process is still alive.
+                return
+
+            returncode = _compute_returncode(status)
+            if self._loop.get_debug():
+                logger.debug('process %s exited with returncode %s',
+                             expected_pid, returncode)
+
+        try:
+            callback, args = self._callbacks.pop(pid)
+        except KeyError:  # pragma: no cover
+            # May happen if .remove_child_handler() is called
+            # after os.waitpid() returns.
+            if self._loop.get_debug():
+                logger.warning("Child watcher got an unexpected pid: %r",
+                               pid, exc_info=True)
+        else:
+            callback(pid, returncode, *args)
+
+
+class FastChildWatcher(BaseChildWatcher):
+    """'Fast' child watcher implementation.
+
+    This implementation reaps every terminated process by calling
+    os.waitpid(-1) directly, possibly breaking other code spawning processes
+    and waiting for their termination.
+
+    There is no noticeable overhead when handling a large number of children
+    (O(1) each time a child terminates).
+    """
+    def __init__(self):
+        super().__init__()
+        self._lock = threading.Lock()
+        self._zombies = {}
+        self._forks = 0
+
+    def close(self):
+        self._callbacks.clear()
+        self._zombies.clear()
+        super().close()
+
+    def __enter__(self):
+        with self._lock:
+            self._forks += 1
+
+            return self
+
+    def __exit__(self, a, b, c):
+        with self._lock:
+            self._forks -= 1
+
+            if self._forks or not self._zombies:
+                return
+
+            collateral_victims = str(self._zombies)
+            self._zombies.clear()
+
+        logger.warning(
+            "Caught subprocesses termination from unknown pids: %s",
+            collateral_victims)
+
+    def add_child_handler(self, pid, callback, *args):
+        assert self._forks, "Must use the context manager"
+
+        with self._lock:
+            try:
+                returncode = self._zombies.pop(pid)
+            except KeyError:
+                # The child is running.
+                self._callbacks[pid] = callback, args
+                return
+
+        # The child is dead already. We can fire the callback.
+        callback(pid, returncode, *args)
+
+    def remove_child_handler(self, pid):
+        try:
+            del self._callbacks[pid]
+            return True
+        except KeyError:
+            return False
+
+    def _do_waitpid_all(self):
+        # Because of signal coalescing, we must keep calling waitpid() as
+        # long as we're able to reap a child.
+        while True:
+            try:
+                pid, status = os.waitpid(-1, os.WNOHANG)
+            except ChildProcessError:
+                # No more child processes exist.
+                return
+            else:
+                if pid == 0:
+                    # A child process is still alive.
+                    return
+
+                returncode = _compute_returncode(status)
+
+            with self._lock:
+                try:
+                    callback, args = self._callbacks.pop(pid)
+                except KeyError:
+                    # unknown child
+                    if self._forks:
+                        # It may not be registered yet.
+                        self._zombies[pid] = returncode
+                        if self._loop.get_debug():
+                            logger.debug('unknown process %s exited '
+                                         'with returncode %s',
+                                         pid, returncode)
+                        continue
+                    callback = None
+                else:
+                    if self._loop.get_debug():
+                        logger.debug('process %s exited with returncode %s',
+                                     pid, returncode)
+
+            if callback is None:
+                logger.warning(
+                    "Caught subprocess termination from unknown pid: "
+                    "%d -> %d", pid, returncode)
+            else:
+                callback(pid, returncode, *args)
+
+
+class MultiLoopChildWatcher(AbstractChildWatcher):
+    """A watcher that doesn't require running loop in the main thread.
+
+    This implementation registers a SIGCHLD signal handler on
+    instantiation (which may conflict with other code that
+    install own handler for this signal).
+
+    The solution is safe but it has a significant overhead when
+    handling a big number of processes (*O(n)* each time a
+    SIGCHLD is received).
+    """
+
+    # Implementation note:
+    # The class keeps compatibility with the AbstractChildWatcher ABC.
+    # To achieve this it has an empty attach_loop() method and
+    # doesn't accept an explicit loop argument
+    # for add_child_handler()/remove_child_handler(),
+    # but retrieves the current loop via get_running_loop().
+
+    def __init__(self):
+        self._callbacks = {}
+        self._saved_sighandler = None
+
+    def is_active(self):
+        return self._saved_sighandler is not None
+
+    def close(self):
+        self._callbacks.clear()
+        if self._saved_sighandler is not None:
+            handler = signal.getsignal(signal.SIGCHLD)
+            if handler != self._sig_chld:
+                logger.warning("SIGCHLD handler was changed by outside code")
+            else:
+                signal.signal(signal.SIGCHLD, self._saved_sighandler)
+            self._saved_sighandler = None
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        pass
+
+    def add_child_handler(self, pid, callback, *args):
+        loop = events.get_running_loop()
+        self._callbacks[pid] = (loop, callback, args)
+
+        # Prevent a race condition in case the child is already terminated.
+        self._do_waitpid(pid)
+
+    def remove_child_handler(self, pid):
+        try:
+            del self._callbacks[pid]
+            return True
+        except KeyError:
+            return False
+
+    def attach_loop(self, loop):
+        # Don't save the loop; just initialize the watcher the first time
+        # this is called.  attach_loop() is called by the unix policy only
+        # for the main thread, and the main thread is required to
+        # subscribe to the SIGCHLD signal.
+        if self._saved_sighandler is None:
+            self._saved_sighandler = signal.signal(signal.SIGCHLD, self._sig_chld)
+            if self._saved_sighandler is None:
+                logger.warning("Previous SIGCHLD handler was set by non-Python code, "
+                               "restore to default handler on watcher close.")
+                self._saved_sighandler = signal.SIG_DFL
+
+            # Set SA_RESTART to limit EINTR occurrences.
+            signal.siginterrupt(signal.SIGCHLD, False)
+
+    def _do_waitpid_all(self):
+        for pid in list(self._callbacks):
+            self._do_waitpid(pid)
+
+    def _do_waitpid(self, expected_pid):
+        assert expected_pid > 0
+
+        try:
+            pid, status = os.waitpid(expected_pid, os.WNOHANG)
+        except ChildProcessError:
+            # The child process is already reaped
+            # (may happen if waitpid() is called elsewhere).
+            pid = expected_pid
+            returncode = 255
+            logger.warning(
+                "Unknown child process pid %d, will report returncode 255",
+                pid)
+            debug_log = False
+        else:
+            if pid == 0:
+                # The child process is still alive.
+                return
+
+            returncode = _compute_returncode(status)
+            debug_log = True
+        try:
+            loop, callback, args = self._callbacks.pop(pid)
+        except KeyError:  # pragma: no cover
+            # May happen if .remove_child_handler() is called
+            # after os.waitpid() returns.
+            logger.warning("Child watcher got an unexpected pid: %r",
+                           pid, exc_info=True)
+        else:
+            if loop.is_closed():
+                logger.warning("Loop %r that handles pid %r is closed", loop, pid)
+            else:
+                if debug_log and loop.get_debug():
+                    logger.debug('process %s exited with returncode %s',
+                                 expected_pid, returncode)
+                loop.call_soon_threadsafe(callback, pid, returncode, *args)
+
+    def _sig_chld(self, signum, frame):
+        try:
+            self._do_waitpid_all()
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException:
+            logger.warning('Unknown exception in SIGCHLD handler', exc_info=True)
+
+
+class ThreadedChildWatcher(AbstractChildWatcher):
+    """Threaded child watcher implementation.
+
+    The watcher uses a thread per process
+    to wait for the process to finish.
+
+    It doesn't require subscription to a POSIX signal,
+    but thread creation is not free.
+
+    The watcher has O(1) complexity; its performance doesn't depend
+    on the number of spawned processes.
+    """
+
+    def __init__(self):
+        self._pid_counter = itertools.count(0)
+        self._threads = {}
+
+    def is_active(self):
+        return True
+
+    def close(self):
+        self._join_threads()
+
+    def _join_threads(self):
+        """Internal: Join all non-daemon threads"""
+        threads = [thread for thread in list(self._threads.values())
+                   if thread.is_alive() and not thread.daemon]
+        for thread in threads:
+            thread.join()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        pass
+
+    def __del__(self, _warn=warnings.warn):
+        threads = [thread for thread in list(self._threads.values())
+                   if thread.is_alive()]
+        if threads:
+            _warn(f"{self.__class__} has registered but not finished child processes",
+                  ResourceWarning,
+                  source=self)
+
+    def add_child_handler(self, pid, callback, *args):
+        loop = events.get_running_loop()
+        thread = threading.Thread(target=self._do_waitpid,
+                                  name=f"waitpid-{next(self._pid_counter)}",
+                                  args=(loop, pid, callback, args),
+                                  daemon=True)
+        self._threads[pid] = thread
+        thread.start()
+
+    def remove_child_handler(self, pid):
+        # asyncio never calls remove_child_handler()!
+        # The method is a no-op but is implemented because
+        # the abstract base class requires it.
+        return True
+
+    def attach_loop(self, loop):
+        pass
+
+    def _do_waitpid(self, loop, expected_pid, callback, args):
+        assert expected_pid > 0
+
+        try:
+            pid, status = os.waitpid(expected_pid, 0)
+        except ChildProcessError:
+            # The child process is already reaped
+            # (may happen if waitpid() is called elsewhere).
+            pid = expected_pid
+            returncode = 255
+            logger.warning(
+                "Unknown child process pid %d, will report returncode 255",
+                pid)
+        else:
+            returncode = _compute_returncode(status)
+            if loop.get_debug():
+                logger.debug('process %s exited with returncode %s',
+                             expected_pid, returncode)
+
+        if loop.is_closed():
+            logger.warning("Loop %r that handles pid %r is closed", loop, pid)
+        else:
+            loop.call_soon_threadsafe(callback, pid, returncode, *args)
+
+        self._threads.pop(expected_pid)
+
+
+class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
+    """UNIX event loop policy with a watcher for child processes."""
+    _loop_factory = _UnixSelectorEventLoop
+
+    def __init__(self):
+        super().__init__()
+        self._watcher = None
+
+    def _init_watcher(self):
+        with events._lock:
+            if self._watcher is None:  # pragma: no branch
+                self._watcher = ThreadedChildWatcher()
+                if threading.current_thread() is threading.main_thread():
+                    self._watcher.attach_loop(self._local._loop)
+
+    def set_event_loop(self, loop):
+        """Set the event loop.
+
+        As a side effect, if a child watcher was set before, then calling
+        .set_event_loop() from the main thread will call .attach_loop(loop) on
+        the child watcher.
+        """
+
+        super().set_event_loop(loop)
+
+        if (self._watcher is not None and
+                threading.current_thread() is threading.main_thread()):
+            self._watcher.attach_loop(loop)
+
+    def get_child_watcher(self):
+        """Get the watcher for child processes.
+
+        If not yet set, a ThreadedChildWatcher object is automatically created.
+        """
+        if self._watcher is None:
+            self._init_watcher()
+
+        return self._watcher
+
+    def set_child_watcher(self, watcher):
+        """Set the watcher for child processes."""
+
+        assert watcher is None or isinstance(watcher, AbstractChildWatcher)
+
+        if self._watcher is not None:
+            self._watcher.close()
+
+        self._watcher = watcher
+
+
+SelectorEventLoop = _UnixSelectorEventLoop
+DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy
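+
+# Illustrative sketch (not part of the original module): on UNIX the default
+# policy lazily creates a ThreadedChildWatcher on first use:
+#
+#     import asyncio
+#
+#     policy = asyncio.get_event_loop_policy()
+#     watcher = policy.get_child_watcher()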
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/windows_events.py b/linux-x64/clang/python3/lib/python3.9/asyncio/windows_events.py
new file mode 100644
index 0000000..5e7cd79
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/windows_events.py
@@ -0,0 +1,908 @@
+"""Selector and proactor event loops for Windows."""
+
+import _overlapped
+import _winapi
+import errno
+import math
+import msvcrt
+import socket
+import struct
+import time
+import weakref
+
+from . import events
+from . import base_subprocess
+from . import futures
+from . import exceptions
+from . import proactor_events
+from . import selector_events
+from . import tasks
+from . import windows_utils
+from .log import logger
+
+
+__all__ = (
+    'SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',
+    'DefaultEventLoopPolicy', 'WindowsSelectorEventLoopPolicy',
+    'WindowsProactorEventLoopPolicy',
+)
+
+
+NULL = 0
+INFINITE = 0xffffffff
+ERROR_CONNECTION_REFUSED = 1225
+ERROR_CONNECTION_ABORTED = 1236
+
+# Initial delay in seconds for connect_pipe() before retrying to connect
+CONNECT_PIPE_INIT_DELAY = 0.001
+
+# Maximum delay in seconds for connect_pipe() before retrying to connect
+CONNECT_PIPE_MAX_DELAY = 0.100
+
+
+class _OverlappedFuture(futures.Future):
+    """Subclass of Future which represents an overlapped operation.
+
+    Cancelling it will immediately cancel the overlapped operation.
+    """
+
+    def __init__(self, ov, *, loop=None):
+        super().__init__(loop=loop)
+        if self._source_traceback:
+            del self._source_traceback[-1]
+        self._ov = ov
+
+    def _repr_info(self):
+        info = super()._repr_info()
+        if self._ov is not None:
+            state = 'pending' if self._ov.pending else 'completed'
+            info.insert(1, f'overlapped=<{state}, {self._ov.address:#x}>')
+        return info
+
+    def _cancel_overlapped(self):
+        if self._ov is None:
+            return
+        try:
+            self._ov.cancel()
+        except OSError as exc:
+            context = {
+                'message': 'Cancelling an overlapped future failed',
+                'exception': exc,
+                'future': self,
+            }
+            if self._source_traceback:
+                context['source_traceback'] = self._source_traceback
+            self._loop.call_exception_handler(context)
+        self._ov = None
+
+    def cancel(self, msg=None):
+        self._cancel_overlapped()
+        return super().cancel(msg=msg)
+
+    def set_exception(self, exception):
+        super().set_exception(exception)
+        self._cancel_overlapped()
+
+    def set_result(self, result):
+        super().set_result(result)
+        self._ov = None
+
+
+class _BaseWaitHandleFuture(futures.Future):
+    """Subclass of Future which represents a wait handle."""
+
+    def __init__(self, ov, handle, wait_handle, *, loop=None):
+        super().__init__(loop=loop)
+        if self._source_traceback:
+            del self._source_traceback[-1]
+        # Keep a reference to the Overlapped object to keep it alive until the
+        # wait is unregistered
+        self._ov = ov
+        self._handle = handle
+        self._wait_handle = wait_handle
+
+        # Should we call UnregisterWaitEx() if the wait completes
+        # or is cancelled?
+        self._registered = True
+
+    def _poll(self):
+        # non-blocking wait: use a timeout of 0 millisecond
+        return (_winapi.WaitForSingleObject(self._handle, 0) ==
+                _winapi.WAIT_OBJECT_0)
+
+    def _repr_info(self):
+        info = super()._repr_info()
+        info.append(f'handle={self._handle:#x}')
+        if self._handle is not None:
+            state = 'signaled' if self._poll() else 'waiting'
+            info.append(state)
+        if self._wait_handle is not None:
+            info.append(f'wait_handle={self._wait_handle:#x}')
+        return info
+
+    def _unregister_wait_cb(self, fut):
+        # The wait was unregistered: it's not safe to destroy the Overlapped
+        # object
+        self._ov = None
+
+    def _unregister_wait(self):
+        if not self._registered:
+            return
+        self._registered = False
+
+        wait_handle = self._wait_handle
+        self._wait_handle = None
+        try:
+            _overlapped.UnregisterWait(wait_handle)
+        except OSError as exc:
+            if exc.winerror != _overlapped.ERROR_IO_PENDING:
+                context = {
+                    'message': 'Failed to unregister the wait handle',
+                    'exception': exc,
+                    'future': self,
+                }
+                if self._source_traceback:
+                    context['source_traceback'] = self._source_traceback
+                self._loop.call_exception_handler(context)
+                return
+            # ERROR_IO_PENDING means that the unregister is pending
+
+        self._unregister_wait_cb(None)
+
+    def cancel(self, msg=None):
+        self._unregister_wait()
+        return super().cancel(msg=msg)
+
+    def set_exception(self, exception):
+        self._unregister_wait()
+        super().set_exception(exception)
+
+    def set_result(self, result):
+        self._unregister_wait()
+        super().set_result(result)
+
+
+class _WaitCancelFuture(_BaseWaitHandleFuture):
+    """Subclass of Future which represents a wait for the cancellation of a
+    _WaitHandleFuture using an event.
+    """
+
+    def __init__(self, ov, event, wait_handle, *, loop=None):
+        super().__init__(ov, event, wait_handle, loop=loop)
+
+        self._done_callback = None
+
+    def cancel(self):
+        raise RuntimeError("_WaitCancelFuture must not be cancelled")
+
+    def set_result(self, result):
+        super().set_result(result)
+        if self._done_callback is not None:
+            self._done_callback(self)
+
+    def set_exception(self, exception):
+        super().set_exception(exception)
+        if self._done_callback is not None:
+            self._done_callback(self)
+
+
+class _WaitHandleFuture(_BaseWaitHandleFuture):
+    def __init__(self, ov, handle, wait_handle, proactor, *, loop=None):
+        super().__init__(ov, handle, wait_handle, loop=loop)
+        self._proactor = proactor
+        self._unregister_proactor = True
+        self._event = _overlapped.CreateEvent(None, True, False, None)
+        self._event_fut = None
+
+    def _unregister_wait_cb(self, fut):
+        if self._event is not None:
+            _winapi.CloseHandle(self._event)
+            self._event = None
+            self._event_fut = None
+
+        # If the wait was cancelled, the wait may never be signalled, so
+        # it's required to unregister it. Otherwise, IocpProactor.close() will
+        # wait forever for an event which will never come.
+        #
+        # If the IocpProactor already received the event, it's safe to call
+        # _unregister() because we kept a reference to the Overlapped object
+        # which is used as a unique key.
+        self._proactor._unregister(self._ov)
+        self._proactor = None
+
+        super()._unregister_wait_cb(fut)
+
+    def _unregister_wait(self):
+        if not self._registered:
+            return
+        self._registered = False
+
+        wait_handle = self._wait_handle
+        self._wait_handle = None
+        try:
+            _overlapped.UnregisterWaitEx(wait_handle, self._event)
+        except OSError as exc:
+            if exc.winerror != _overlapped.ERROR_IO_PENDING:
+                context = {
+                    'message': 'Failed to unregister the wait handle',
+                    'exception': exc,
+                    'future': self,
+                }
+                if self._source_traceback:
+                    context['source_traceback'] = self._source_traceback
+                self._loop.call_exception_handler(context)
+                return
+            # ERROR_IO_PENDING is not an error, the wait was unregistered
+
+        self._event_fut = self._proactor._wait_cancel(self._event,
+                                                      self._unregister_wait_cb)
+
+
+class PipeServer(object):
+    """Class representing a pipe server.
+
+    This is much like a bound, listening socket.
+    """
+    def __init__(self, address):
+        self._address = address
+        self._free_instances = weakref.WeakSet()
+        # initialize the pipe attribute before calling _server_pipe_handle()
+        # because this function can raise an exception and the destructor calls
+        # the close() method
+        self._pipe = None
+        self._accept_pipe_future = None
+        self._pipe = self._server_pipe_handle(True)
+
+    def _get_unconnected_pipe(self):
+        # Create a new instance and return the previous one.  This ensures
+        # that (until the server is closed) there is always at least
+        # one pipe handle for the address.  Therefore if a client attempts
+        # to connect it will not fail with FileNotFoundError.
+        tmp, self._pipe = self._pipe, self._server_pipe_handle(False)
+        return tmp
+
+    def _server_pipe_handle(self, first):
+        # Return a wrapper for a new pipe handle.
+        if self.closed():
+            return None
+        flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
+        if first:
+            flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
+        h = _winapi.CreateNamedPipe(
+            self._address, flags,
+            _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
+            _winapi.PIPE_WAIT,
+            _winapi.PIPE_UNLIMITED_INSTANCES,
+            windows_utils.BUFSIZE, windows_utils.BUFSIZE,
+            _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
+        pipe = windows_utils.PipeHandle(h)
+        self._free_instances.add(pipe)
+        return pipe
+
+    def closed(self):
+        return (self._address is None)
+
+    def close(self):
+        if self._accept_pipe_future is not None:
+            self._accept_pipe_future.cancel()
+            self._accept_pipe_future = None
+        # Close all instances which have not been connected to by a client.
+        if self._address is not None:
+            for pipe in self._free_instances:
+                pipe.close()
+            self._pipe = None
+            self._address = None
+            self._free_instances.clear()
+
+    __del__ = close
+
+
+class _WindowsSelectorEventLoop(selector_events.BaseSelectorEventLoop):
+    """Windows version of selector event loop."""
+
+
+class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
+    """Windows version of proactor event loop using IOCP."""
+
+    def __init__(self, proactor=None):
+        if proactor is None:
+            proactor = IocpProactor()
+        super().__init__(proactor)
+
+    def run_forever(self):
+        try:
+            assert self._self_reading_future is None
+            self.call_soon(self._loop_self_reading)
+            super().run_forever()
+        finally:
+            if self._self_reading_future is not None:
+                ov = self._self_reading_future._ov
+                self._self_reading_future.cancel()
+                # self_reading_future was just cancelled so if it hasn't been
+                # finished yet, it never will be (it's possible that it has
+                # already finished and its callback is waiting in the queue,
+                # where it could still be invoked if the event loop is restarted).
+                # Unregister it, otherwise IocpProactor.close() will wait for
+                # it forever.
+                if ov is not None:
+                    self._proactor._unregister(ov)
+                self._self_reading_future = None
+
+    async def create_pipe_connection(self, protocol_factory, address):
+        f = self._proactor.connect_pipe(address)
+        pipe = await f
+        protocol = protocol_factory()
+        trans = self._make_duplex_pipe_transport(pipe, protocol,
+                                                 extra={'addr': address})
+        return trans, protocol
+
+    async def start_serving_pipe(self, protocol_factory, address):
+        server = PipeServer(address)
+
+        def loop_accept_pipe(f=None):
+            pipe = None
+            try:
+                if f:
+                    pipe = f.result()
+                    server._free_instances.discard(pipe)
+
+                    if server.closed():
+                        # A client connected before the server was closed:
+                        # drop the client (close the pipe) and exit
+                        pipe.close()
+                        return
+
+                    protocol = protocol_factory()
+                    self._make_duplex_pipe_transport(
+                        pipe, protocol, extra={'addr': address})
+
+                pipe = server._get_unconnected_pipe()
+                if pipe is None:
+                    return
+
+                f = self._proactor.accept_pipe(pipe)
+            except OSError as exc:
+                if pipe and pipe.fileno() != -1:
+                    self.call_exception_handler({
+                        'message': 'Pipe accept failed',
+                        'exception': exc,
+                        'pipe': pipe,
+                    })
+                    pipe.close()
+                elif self._debug:
+                    logger.warning("Accept pipe failed on pipe %r",
+                                   pipe, exc_info=True)
+            except exceptions.CancelledError:
+                if pipe:
+                    pipe.close()
+            else:
+                server._accept_pipe_future = f
+                f.add_done_callback(loop_accept_pipe)
+
+        self.call_soon(loop_accept_pipe)
+        return [server]
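+
+    # Illustrative sketch, not part of the stock module: serving on a named
+    # pipe, again assuming a running loop and a hypothetical `MyProtocol`:
+    #
+    #     [server] = await loop.start_serving_pipe(
+    #         MyProtocol, r'\\.\pipe\example-pipe')
+    #     ...
+    #     server.close()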
+
+    async def _make_subprocess_transport(self, protocol, args, shell,
+                                         stdin, stdout, stderr, bufsize,
+                                         extra=None, **kwargs):
+        waiter = self.create_future()
+        transp = _WindowsSubprocessTransport(self, protocol, args, shell,
+                                             stdin, stdout, stderr, bufsize,
+                                             waiter=waiter, extra=extra,
+                                             **kwargs)
+        try:
+            await waiter
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException:
+            transp.close()
+            await transp._wait()
+            raise
+
+        return transp
+
+
+class IocpProactor:
+    """Proactor implementation using IOCP."""
+
+    def __init__(self, concurrency=0xffffffff):
+        self._loop = None
+        self._results = []
+        self._iocp = _overlapped.CreateIoCompletionPort(
+            _overlapped.INVALID_HANDLE_VALUE, NULL, 0, concurrency)
+        self._cache = {}
+        self._registered = weakref.WeakSet()
+        self._unregistered = []
+        self._stopped_serving = weakref.WeakSet()
+
+    def _check_closed(self):
+        if self._iocp is None:
+            raise RuntimeError('IocpProactor is closed')
+
+    def __repr__(self):
+        info = ['overlapped#=%s' % len(self._cache),
+                'result#=%s' % len(self._results)]
+        if self._iocp is None:
+            info.append('closed')
+        return '<%s %s>' % (self.__class__.__name__, " ".join(info))
+
+    def set_loop(self, loop):
+        self._loop = loop
+
+    def select(self, timeout=None):
+        if not self._results:
+            self._poll(timeout)
+        tmp = self._results
+        self._results = []
+        return tmp
+
+    def _result(self, value):
+        fut = self._loop.create_future()
+        fut.set_result(value)
+        return fut
+
+    def recv(self, conn, nbytes, flags=0):
+        self._register_with_iocp(conn)
+        ov = _overlapped.Overlapped(NULL)
+        try:
+            if isinstance(conn, socket.socket):
+                ov.WSARecv(conn.fileno(), nbytes, flags)
+            else:
+                ov.ReadFile(conn.fileno(), nbytes)
+        except BrokenPipeError:
+            return self._result(b'')
+
+        def finish_recv(trans, key, ov):
+            try:
+                return ov.getresult()
+            except OSError as exc:
+                if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
+                                    _overlapped.ERROR_OPERATION_ABORTED):
+                    raise ConnectionResetError(*exc.args)
+                else:
+                    raise
+
+        return self._register(ov, conn, finish_recv)
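+
+    # finish_recv above (and the matching finish callbacks below) map
+    # ERROR_NETNAME_DELETED and ERROR_OPERATION_ABORTED to
+    # ConnectionResetError, so callers see the usual cross-platform
+    # exception rather than a raw Windows error code.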
+
+    def recv_into(self, conn, buf, flags=0):
+        self._register_with_iocp(conn)
+        ov = _overlapped.Overlapped(NULL)
+        try:
+            if isinstance(conn, socket.socket):
+                ov.WSARecvInto(conn.fileno(), buf, flags)
+            else:
+                ov.ReadFileInto(conn.fileno(), buf)
+        except BrokenPipeError:
+            return self._result(0)
+
+        def finish_recv(trans, key, ov):
+            try:
+                return ov.getresult()
+            except OSError as exc:
+                if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
+                                    _overlapped.ERROR_OPERATION_ABORTED):
+                    raise ConnectionResetError(*exc.args)
+                else:
+                    raise
+
+        return self._register(ov, conn, finish_recv)
+
+    def recvfrom(self, conn, nbytes, flags=0):
+        self._register_with_iocp(conn)
+        ov = _overlapped.Overlapped(NULL)
+        try:
+            ov.WSARecvFrom(conn.fileno(), nbytes, flags)
+        except BrokenPipeError:
+            return self._result((b'', None))
+
+        def finish_recv(trans, key, ov):
+            try:
+                return ov.getresult()
+            except OSError as exc:
+                if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
+                                    _overlapped.ERROR_OPERATION_ABORTED):
+                    raise ConnectionResetError(*exc.args)
+                else:
+                    raise
+
+        return self._register(ov, conn, finish_recv)
+
+    def sendto(self, conn, buf, flags=0, addr=None):
+        self._register_with_iocp(conn)
+        ov = _overlapped.Overlapped(NULL)
+
+        ov.WSASendTo(conn.fileno(), buf, flags, addr)
+
+        def finish_send(trans, key, ov):
+            try:
+                return ov.getresult()
+            except OSError as exc:
+                if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
+                                    _overlapped.ERROR_OPERATION_ABORTED):
+                    raise ConnectionResetError(*exc.args)
+                else:
+                    raise
+
+        return self._register(ov, conn, finish_send)
+
+    def send(self, conn, buf, flags=0):
+        self._register_with_iocp(conn)
+        ov = _overlapped.Overlapped(NULL)
+        if isinstance(conn, socket.socket):
+            ov.WSASend(conn.fileno(), buf, flags)
+        else:
+            ov.WriteFile(conn.fileno(), buf)
+
+        def finish_send(trans, key, ov):
+            try:
+                return ov.getresult()
+            except OSError as exc:
+                if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
+                                    _overlapped.ERROR_OPERATION_ABORTED):
+                    raise ConnectionResetError(*exc.args)
+                else:
+                    raise
+
+        return self._register(ov, conn, finish_send)
+
+    def accept(self, listener):
+        self._register_with_iocp(listener)
+        conn = self._get_accept_socket(listener.family)
+        ov = _overlapped.Overlapped(NULL)
+        ov.AcceptEx(listener.fileno(), conn.fileno())
+
+        def finish_accept(trans, key, ov):
+            ov.getresult()
+            # Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work.
+            buf = struct.pack('@P', listener.fileno())
+            conn.setsockopt(socket.SOL_SOCKET,
+                            _overlapped.SO_UPDATE_ACCEPT_CONTEXT, buf)
+            conn.settimeout(listener.gettimeout())
+            return conn, conn.getpeername()
+
+        async def accept_coro(future, conn):
+            # Coroutine closing the accept socket if the future is cancelled
+            try:
+                await future
+            except exceptions.CancelledError:
+                conn.close()
+                raise
+
+        future = self._register(ov, listener, finish_accept)
+        coro = accept_coro(future, conn)
+        tasks.ensure_future(coro, loop=self._loop)
+        return future
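+
+    # Note the pattern above: cancelling the returned future runs
+    # accept_coro's CancelledError handler, which closes the pre-created
+    # accept socket so a half-open connection cannot leak.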
+
+    def connect(self, conn, address):
+        if conn.type == socket.SOCK_DGRAM:
+            # WSAConnect will complete immediately for UDP sockets so we don't
+            # need to register any IOCP operation
+            _overlapped.WSAConnect(conn.fileno(), address)
+            fut = self._loop.create_future()
+            fut.set_result(None)
+            return fut
+
+        self._register_with_iocp(conn)
+        # The socket needs to be locally bound before we call ConnectEx().
+        try:
+            _overlapped.BindLocal(conn.fileno(), conn.family)
+        except OSError as e:
+            if e.winerror != errno.WSAEINVAL:
+                raise
+            # Probably already locally bound; check using getsockname().
+            if conn.getsockname()[1] == 0:
+                raise
+        ov = _overlapped.Overlapped(NULL)
+        ov.ConnectEx(conn.fileno(), address)
+
+        def finish_connect(trans, key, ov):
+            ov.getresult()
+            # Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work.
+            conn.setsockopt(socket.SOL_SOCKET,
+                            _overlapped.SO_UPDATE_CONNECT_CONTEXT, 0)
+            return conn
+
+        return self._register(ov, conn, finish_connect)
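+
+    # Illustrative sketch, not part of the stock module: from a coroutine
+    # running on the loop, a bare socket could be connected through this
+    # proactor (the address and `proactor` name are assumptions):
+    #
+    #     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    #     sock = await proactor.connect(sock, ('127.0.0.1', 8080))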
+
+    def sendfile(self, sock, file, offset, count):
+        self._register_with_iocp(sock)
+        ov = _overlapped.Overlapped(NULL)
+        offset_low = offset & 0xffff_ffff
+        offset_high = (offset >> 32) & 0xffff_ffff
+        ov.TransmitFile(sock.fileno(),
+                        msvcrt.get_osfhandle(file.fileno()),
+                        offset_low, offset_high,
+                        count, 0, 0)
+
+        def finish_sendfile(trans, key, ov):
+            try:
+                return ov.getresult()
+            except OSError as exc:
+                if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
+                                    _overlapped.ERROR_OPERATION_ABORTED):
+                    raise ConnectionResetError(*exc.args)
+                else:
+                    raise
+        return self._register(ov, sock, finish_sendfile)
+
+    def accept_pipe(self, pipe):
+        self._register_with_iocp(pipe)
+        ov = _overlapped.Overlapped(NULL)
+        connected = ov.ConnectNamedPipe(pipe.fileno())
+
+        if connected:
+            # ConnectNamedPipe() failed with ERROR_PIPE_CONNECTED, which means
+            # that the pipe is connected. There is no need to wait for the
+            # completion of the connection.
+            return self._result(pipe)
+
+        def finish_accept_pipe(trans, key, ov):
+            ov.getresult()
+            return pipe
+
+        return self._register(ov, pipe, finish_accept_pipe)
+
+    async def connect_pipe(self, address):
+        delay = CONNECT_PIPE_INIT_DELAY
+        while True:
+            # Unfortunately there is no way to do an overlapped connect to
+            # a pipe.  Call CreateFile() in a loop until it doesn't fail with
+            # ERROR_PIPE_BUSY.
+            try:
+                handle = _overlapped.ConnectPipe(address)
+                break
+            except OSError as exc:
+                if exc.winerror != _overlapped.ERROR_PIPE_BUSY:
+                    raise
+
+            # ConnectPipe() failed with ERROR_PIPE_BUSY: retry later
+            delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)
+            await tasks.sleep(delay)
+
+        return windows_utils.PipeHandle(handle)
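+
+    # The retry loop above is plain exponential backoff: the delay doubles
+    # after every ERROR_PIPE_BUSY, capped at CONNECT_PIPE_MAX_DELAY, so a
+    # busy pipe is re-polled at increasingly long intervals instead of
+    # being hammered in a tight loop.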
+
+    def wait_for_handle(self, handle, timeout=None):
+        """Wait for a handle.
+
+        Return a Future object. The result of the future is True if the wait
+        completed, or False if the wait did not complete (on timeout).
+        """
+        return self._wait_for_handle(handle, timeout, False)
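+
+    # Illustrative sketch, not part of the stock module: this is how
+    # _WindowsSubprocessTransport below waits for a child process, and the
+    # same call works for any waitable handle (`proactor` and `proc` are
+    # assumptions):
+    #
+    #     fut = proactor.wait_for_handle(int(proc._handle), timeout=5.0)
+    #     finished = await fut  # True if signalled, False on timeout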
+
+    def _wait_cancel(self, event, done_callback):
+        fut = self._wait_for_handle(event, None, True)
+        # add_done_callback() cannot be used because the wait may only complete
+        # in IocpProactor.close(), while the event loop is not running.
+        fut._done_callback = done_callback
+        return fut
+
+    def _wait_for_handle(self, handle, timeout, _is_cancel):
+        self._check_closed()
+
+        if timeout is None:
+            ms = _winapi.INFINITE
+        else:
+            # RegisterWaitForSingleObject() has a resolution of 1 millisecond,
+            # round away from zero to wait *at least* timeout seconds.
+            ms = math.ceil(timeout * 1e3)
+
+        # We only create ov so we can use ov.address as a key for the cache.
+        ov = _overlapped.Overlapped(NULL)
+        wait_handle = _overlapped.RegisterWaitWithQueue(
+            handle, self._iocp, ov.address, ms)
+        if _is_cancel:
+            f = _WaitCancelFuture(ov, handle, wait_handle, loop=self._loop)
+        else:
+            f = _WaitHandleFuture(ov, handle, wait_handle, self,
+                                  loop=self._loop)
+        if f._source_traceback:
+            del f._source_traceback[-1]
+
+        def finish_wait_for_handle(trans, key, ov):
+            # Note that this second wait means that we should only use
+            # this with handle types where a successful wait has no
+            # effect.  So events or processes are all right, but locks
+            # or semaphores are not.  Also note that if the handle is
+            # signalled and then quickly reset, we may return False
+            # even though we have not timed out.
+            return f._poll()
+
+        self._cache[ov.address] = (f, ov, 0, finish_wait_for_handle)
+        return f
+
+    def _register_with_iocp(self, obj):
+        # To get notifications of finished ops on this object sent to the
+        # completion port, we must register the handle.
+        if obj not in self._registered:
+            self._registered.add(obj)
+            _overlapped.CreateIoCompletionPort(obj.fileno(), self._iocp, 0, 0)
+            # XXX We could also use SetFileCompletionNotificationModes()
+            # to avoid sending notifications to completion port of ops
+            # that succeed immediately.
+
+    def _register(self, ov, obj, callback):
+        self._check_closed()
+
+        # Return a future which will be set with the result of the
+        # operation when it completes.  The future's value is actually
+        # the value returned by callback().
+        f = _OverlappedFuture(ov, loop=self._loop)
+        if f._source_traceback:
+            del f._source_traceback[-1]
+        if not ov.pending:
+            # The operation has completed, so there is no need to postpone
+            # the work.  We cannot take this shortcut if we need the
+            # NumberOfBytes and CompletionKey values returned by
+            # PostQueuedCompletionStatus().
+            try:
+                value = callback(None, None, ov)
+            except OSError as e:
+                f.set_exception(e)
+            else:
+                f.set_result(value)
+            # Even if GetOverlappedResult() was called, we have to wait for the
+            # notification of the completion in GetQueuedCompletionStatus().
+            # Register the overlapped operation to keep a reference to the
+            # OVERLAPPED object, otherwise the memory is freed and Windows may
+            # read uninitialized memory.
+
+        # Register the overlapped operation for later.  Note that
+        # we only store obj to prevent it from being garbage
+        # collected too early.
+        self._cache[ov.address] = (f, ov, obj, callback)
+        return f
+
+    def _unregister(self, ov):
+        """Unregister an overlapped object.
+
+        Call this method when its future has been cancelled. The event can
+        already be signalled (pending in the proactor event queue). It is also
+        safe if the event is never signalled (because it was cancelled).
+        """
+        self._check_closed()
+        self._unregistered.append(ov)
+
+    def _get_accept_socket(self, family):
+        s = socket.socket(family)
+        s.settimeout(0)
+        return s
+
+    def _poll(self, timeout=None):
+        if timeout is None:
+            ms = INFINITE
+        elif timeout < 0:
+            raise ValueError("negative timeout")
+        else:
+            # GetQueuedCompletionStatus() has a resolution of 1 millisecond,
+            # round away from zero to wait *at least* timeout seconds.
+            ms = math.ceil(timeout * 1e3)
+            if ms >= INFINITE:
+                raise ValueError("timeout too big")
+
+        while True:
+            status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms)
+            if status is None:
+                break
+            ms = 0
+
+            err, transferred, key, address = status
+            try:
+                f, ov, obj, callback = self._cache.pop(address)
+            except KeyError:
+                if self._loop.get_debug():
+                    self._loop.call_exception_handler({
+                        'message': ('GetQueuedCompletionStatus() returned an '
+                                    'unexpected event'),
+                        'status': ('err=%s transferred=%s key=%#x address=%#x'
+                                   % (err, transferred, key, address)),
+                    })
+
+                # key is either zero, or it is used to return a pipe
+                # handle which should be closed to avoid a leak.
+                if key not in (0, _overlapped.INVALID_HANDLE_VALUE):
+                    _winapi.CloseHandle(key)
+                continue
+
+            if obj in self._stopped_serving:
+                f.cancel()
+            # Don't call the callback if _register() already read the result or
+            # if the overlapped has been cancelled
+            elif not f.done():
+                try:
+                    value = callback(transferred, key, ov)
+                except OSError as e:
+                    f.set_exception(e)
+                    self._results.append(f)
+                else:
+                    f.set_result(value)
+                    self._results.append(f)
+
+        # Remove unregistered futures
+        for ov in self._unregistered:
+            self._cache.pop(ov.address, None)
+        self._unregistered.clear()
+
+    def _stop_serving(self, obj):
+        # obj is a socket or pipe handle.  It will be closed in
+        # BaseProactorEventLoop._stop_serving() which will make any
+        # pending operations fail quickly.
+        self._stopped_serving.add(obj)
+
+    def close(self):
+        if self._iocp is None:
+            # already closed
+            return
+
+        # Cancel remaining registered operations.
+        for address, (fut, ov, obj, callback) in list(self._cache.items()):
+            if fut.cancelled():
+                # Nothing to do with cancelled futures
+                pass
+            elif isinstance(fut, _WaitCancelFuture):
+                # _WaitCancelFuture must not be cancelled
+                pass
+            else:
+                try:
+                    fut.cancel()
+                except OSError as exc:
+                    if self._loop is not None:
+                        context = {
+                            'message': 'Cancelling a future failed',
+                            'exception': exc,
+                            'future': fut,
+                        }
+                        if fut._source_traceback:
+                            context['source_traceback'] = fut._source_traceback
+                        self._loop.call_exception_handler(context)
+
+        # Wait until all cancelled overlapped operations complete: don't exit
+        # with overlapped operations still running, to prevent a crash.  Log
+        # progress every second while operations are still pending.
+        msg_update = 1.0
+        start_time = time.monotonic()
+        next_msg = start_time + msg_update
+        while self._cache:
+            if next_msg <= time.monotonic():
+                logger.debug('%r is running after closing for %.1f seconds',
+                             self, time.monotonic() - start_time)
+                next_msg = time.monotonic() + msg_update
+
+            # handle a few events, or timeout
+            self._poll(msg_update)
+
+        self._results = []
+
+        _winapi.CloseHandle(self._iocp)
+        self._iocp = None
+
+    def __del__(self):
+        self.close()
+
+
+class _WindowsSubprocessTransport(base_subprocess.BaseSubprocessTransport):
+
+    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
+        self._proc = windows_utils.Popen(
+            args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
+            bufsize=bufsize, **kwargs)
+
+        def callback(f):
+            returncode = self._proc.poll()
+            self._process_exited(returncode)
+
+        f = self._loop._proactor.wait_for_handle(int(self._proc._handle))
+        f.add_done_callback(callback)
+
+
+SelectorEventLoop = _WindowsSelectorEventLoop
+
+
+class WindowsSelectorEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
+    _loop_factory = SelectorEventLoop
+
+
+class WindowsProactorEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
+    _loop_factory = ProactorEventLoop
+
+
+DefaultEventLoopPolicy = WindowsProactorEventLoopPolicy
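+
+# The proactor policy is the default (as of Python 3.8); an application that
+# needs the selector loop instead can opt in explicitly:
+#
+#     asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())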
diff --git a/linux-x64/clang/python3/lib/python3.9/asyncio/windows_utils.py b/linux-x64/clang/python3/lib/python3.9/asyncio/windows_utils.py
new file mode 100644
index 0000000..ef277fa
--- /dev/null
+++ b/linux-x64/clang/python3/lib/python3.9/asyncio/windows_utils.py
@@ -0,0 +1,173 @@
+"""Various Windows specific bits and pieces."""
+
+import sys
+
+if sys.platform != 'win32':  # pragma: no cover
+    raise ImportError('win32 only')
+
+import _winapi
+import itertools
+import msvcrt
+import os
+import subprocess
+import tempfile
+import warnings
+
+
+__all__ = 'pipe', 'Popen', 'PIPE', 'PipeHandle'
+
+
+# Constants/globals
+
+
+BUFSIZE = 8192
+PIPE = subprocess.PIPE
+STDOUT = subprocess.STDOUT
+_mmap_counter = itertools.count()
+
+
+# Replacement for os.pipe() using handles instead of fds
+
+
+def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
+    """Like os.pipe() but with overlapped support and using handles not fds."""
+    address = tempfile.mktemp(
+        prefix=r'\\.\pipe\python-pipe-{:d}-{:d}-'.format(
+            os.getpid(), next(_mmap_counter)))
+
+    if duplex:
+        openmode = _winapi.PIPE_ACCESS_DUPLEX
+        access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
+        obsize, ibsize = bufsize, bufsize
+    else:
+        openmode = _winapi.PIPE_ACCESS_INBOUND
+        access = _winapi.GENERIC_WRITE
+        obsize, ibsize = 0, bufsize
+
+    openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
+
+    if overlapped[0]:
+        openmode |= _winapi.FILE_FLAG_OVERLAPPED
+
+    if overlapped[1]:
+        flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED
+    else:
+        flags_and_attribs = 0
+
+    h1 = h2 = None
+    try:
+        h1 = _winapi.CreateNamedPipe(
+            address, openmode, _winapi.PIPE_WAIT,
+            1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
+
+        h2 = _winapi.CreateFile(
+            address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
+            flags_and_attribs, _winapi.NULL)
+
+        ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
+        ov.GetOverlappedResult(True)
+        return h1, h2
+    except:
+        if h1 is not None:
+            _winapi.CloseHandle(h1)
+        if h2 is not None:
+            _winapi.CloseHandle(h2)
+        raise
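+
+
+# Illustrative sketch, not part of the stock module: a non-duplex pipe whose
+# read side is overlapped and whose write side is synchronous, as used for a
+# child's stdout below:
+#
+#     rh, wh = pipe(overlapped=(True, False))
+#     _winapi.WriteFile(wh, b'hello')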
+
+
+# Wrapper for a pipe handle
+
+
+class PipeHandle:
+    """Wrapper for an overlapped pipe handle which is vaguely file-object like.
+
+    The IOCP event loop can use these instead of socket objects.
+    """
+    def __init__(self, handle):
+        self._handle = handle
+
+    def __repr__(self):
+        if self._handle is not None:
+            handle = f'handle={self._handle!r}'
+        else:
+            handle = 'closed'
+        return f'<{self.__class__.__name__} {handle}>'
+
+    @property
+    def handle(self):
+        return self._handle
+
+    def fileno(self):
+        if self._handle is None:
+            raise ValueError("I/O operation on closed pipe")
+        return self._handle
+
+    def close(self, *, CloseHandle=_winapi.CloseHandle):
+        if self._handle is not None:
+            CloseHandle(self._handle)
+            self._handle = None
+
+    def __del__(self, _warn=warnings.warn):
+        if self._handle is not None:
+            _warn(f"unclosed {self!r}", ResourceWarning, source=self)
+            self.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, t, v, tb):
+        self.close()
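+
+# PipeHandle is a context manager, so a raw handle can be scoped safely
+# (illustrative sketch; `raw_handle` and `do_overlapped_io` are assumptions):
+#
+#     with PipeHandle(raw_handle) as ph:
+#         do_overlapped_io(ph.fileno())  # fileno() returns the raw handle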
+
+
+# Replacement for subprocess.Popen using overlapped pipe handles
+
+
+class Popen(subprocess.Popen):
+    """Replacement for subprocess.Popen using overlapped pipe handles.
+
+    The stdin, stdout, stderr are None or instances of PipeHandle.
+    """
+    def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds):
+        assert not kwds.get('universal_newlines')
+        assert kwds.get('bufsize', 0) == 0
+        stdin_rfd = stdout_wfd = stderr_wfd = None
+        stdin_wh = stdout_rh = stderr_rh = None
+        if stdin == PIPE:
+            stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True)
+            stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY)
+        else:
+            stdin_rfd = stdin
+        if stdout == PIPE:
+            stdout_rh, stdout_wh = pipe(overlapped=(True, False))
+            stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0)
+        else:
+            stdout_wfd = stdout
+        if stderr == PIPE:
+            stderr_rh, stderr_wh = pipe(overlapped=(True, False))
+            stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0)
+        elif stderr == STDOUT:
+            stderr_wfd = stdout_wfd
+        else:
+            stderr_wfd = stderr
+        try:
+            super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd,
+                             stderr=stderr_wfd, **kwds)
+        except:
+            for h in (stdin_wh, stdout_rh, stderr_rh):
+                if h is not None:
+                    _winapi.CloseHandle(h)
+            raise
+        else:
+            if stdin_wh is not None:
+                self.stdin = PipeHandle(stdin_wh)
+            if stdout_rh is not None:
+                self.stdout = PipeHandle(stdout_rh)
+            if stderr_rh is not None:
+                self.stderr = PipeHandle(stderr_rh)
+        finally:
+            if stdin == PIPE:
+                os.close(stdin_rfd)
+            if stdout == PIPE:
+                os.close(stdout_wfd)
+            if stderr == PIPE:
+                os.close(stderr_wfd)
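+
+
+# Illustrative sketch, not part of the stock module: spawning a child whose
+# standard streams are overlapped PipeHandle objects usable by the IOCP
+# proactor rather than regular file objects (the command is an assumption):
+#
+#     p = Popen(['cmd', '/c', 'echo hi'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
+#     h = p.stdout.handle  # raw overlapped read handle for the proactor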