first commit
7
venv/lib/python3.10/site-packages/ipykernel/__init__.py
Normal file
@@ -0,0 +1,7 @@
from ._version import (
    __version__,
    kernel_protocol_version,
    kernel_protocol_version_info,
    version_info,
)
from .connect import *
5
venv/lib/python3.10/site-packages/ipykernel/__main__.py
Normal file
@@ -0,0 +1,5 @@
"""The cli entry point for ipykernel."""
if __name__ == "__main__":
    from ipykernel import kernelapp as app

    app.launch_new_instance()
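Note: this entry point is what runs when a frontend launches the kernel as a module. A minimal sketch of the invocation (the connection-file path is an illustrative placeholder, not something from this commit):

    # hypothetical launch, as a Jupyter frontend would do it:
    #   python -m ipykernel -f /tmp/kernel-example.json
    # kernelapp.launch_new_instance() then reads the connection file and
    # binds the shell/iopub/stdin/control/heartbeat sockets.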
25 binary files not shown.
157
venv/lib/python3.10/site-packages/ipykernel/_eventloop_macos.py
Normal file
@@ -0,0 +1,157 @@
"""Eventloop hook for OS X

Calls NSApp / CoreFoundation APIs via ctypes.
"""

# cribbed heavily from IPython.terminal.pt_inputhooks.osx
# obj-c boilerplate from appnope, used under BSD 2-clause

import ctypes
import ctypes.util
from threading import Event

objc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("objc"))  # type:ignore[arg-type]

void_p = ctypes.c_void_p

objc.objc_getClass.restype = void_p
objc.sel_registerName.restype = void_p
objc.objc_msgSend.restype = void_p
objc.objc_msgSend.argtypes = [void_p, void_p]

msg = objc.objc_msgSend


def _utf8(s):
    """ensure utf8 bytes"""
    if not isinstance(s, bytes):
        s = s.encode("utf8")
    return s


def n(name):
    """create a selector name (for ObjC methods)"""
    return objc.sel_registerName(_utf8(name))


def C(classname):
    """get an ObjC Class by name"""
    return objc.objc_getClass(_utf8(classname))


# end obj-c boilerplate from appnope

# CoreFoundation C-API calls we will use:
CoreFoundation = ctypes.cdll.LoadLibrary(
    ctypes.util.find_library("CoreFoundation")  # type:ignore[arg-type]
)

CFAbsoluteTimeGetCurrent = CoreFoundation.CFAbsoluteTimeGetCurrent
CFAbsoluteTimeGetCurrent.restype = ctypes.c_double

CFRunLoopGetCurrent = CoreFoundation.CFRunLoopGetCurrent
CFRunLoopGetCurrent.restype = void_p

CFRunLoopGetMain = CoreFoundation.CFRunLoopGetMain
CFRunLoopGetMain.restype = void_p

CFRunLoopStop = CoreFoundation.CFRunLoopStop
CFRunLoopStop.restype = None
CFRunLoopStop.argtypes = [void_p]

CFRunLoopTimerCreate = CoreFoundation.CFRunLoopTimerCreate
CFRunLoopTimerCreate.restype = void_p
CFRunLoopTimerCreate.argtypes = [
    void_p,  # allocator (NULL)
    ctypes.c_double,  # fireDate
    ctypes.c_double,  # interval
    ctypes.c_int,  # flags (0)
    ctypes.c_int,  # order (0)
    void_p,  # callout
    void_p,  # context
]

CFRunLoopAddTimer = CoreFoundation.CFRunLoopAddTimer
CFRunLoopAddTimer.restype = None
CFRunLoopAddTimer.argtypes = [void_p, void_p, void_p]

kCFRunLoopCommonModes = void_p.in_dll(CoreFoundation, "kCFRunLoopCommonModes")


def _NSApp():
    """Return the global NSApplication instance (NSApp)"""
    return msg(C("NSApplication"), n("sharedApplication"))


def _wake(NSApp):
    """Wake the Application"""
    event = msg(
        C("NSEvent"),
        n(
            "otherEventWithType:location:modifierFlags:"
            "timestamp:windowNumber:context:subtype:data1:data2:"
        ),
        15,  # Type
        0,  # location
        0,  # flags
        0,  # timestamp
        0,  # window
        None,  # context
        0,  # subtype
        0,  # data1
        0,  # data2
    )
    msg(NSApp, n("postEvent:atStart:"), void_p(event), True)


_triggered = Event()


def stop(timer=None, loop=None):
    """Callback to fire when there's input to be read"""
    _triggered.set()
    NSApp = _NSApp()
    # if NSApp is not running, stop CFRunLoop directly,
    # otherwise stop and wake NSApp
    if msg(NSApp, n("isRunning")):
        msg(NSApp, n("stop:"), NSApp)
        _wake(NSApp)
    else:
        CFRunLoopStop(CFRunLoopGetCurrent())


_c_callback_func_type = ctypes.CFUNCTYPE(None, void_p, void_p)
_c_stop_callback = _c_callback_func_type(stop)


def _stop_after(delay):
    """Register callback to stop eventloop after a delay"""
    timer = CFRunLoopTimerCreate(
        None,  # allocator
        CFAbsoluteTimeGetCurrent() + delay,  # fireDate
        0,  # interval
        0,  # flags
        0,  # order
        _c_stop_callback,
        None,
    )
    CFRunLoopAddTimer(
        CFRunLoopGetMain(),
        timer,
        kCFRunLoopCommonModes,
    )


def mainloop(duration=1):
    """run the Cocoa eventloop for the specified duration (seconds)"""

    _triggered.clear()
    NSApp = _NSApp()
    _stop_after(duration)
    msg(NSApp, n("run"))
    if not _triggered.is_set():
        # app closed without firing callback,
        # probably due to last window being closed.
        # Run the loop manually in this case,
        # since there may be events still to process (ipython/ipython#9734)
        CoreFoundation.CFRunLoopRun()
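Note: a rough picture of how these pieces cooperate, as a sketch (macOS only; assumes the ctypes bindings above loaded successfully): mainloop(duration) arms a one-shot CFRunLoop timer via _stop_after, then runs NSApp; when the timer fires, stop() either stops-and-wakes NSApp or halts the current CFRunLoop.

    # Sketch: pump Cocoa events in ~1-second slices, the way the kernel's
    # eventloop integration does between checks for pending messages.
    import sys

    if sys.platform == "darwin":
        from ipykernel._eventloop_macos import mainloop

        for _ in range(3):  # three ~1-second iterations
            mainloop(duration=1)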
20
venv/lib/python3.10/site-packages/ipykernel/_version.py
Normal file
@@ -0,0 +1,20 @@
"""
store the current version info of the server.
"""
import re
from typing import List

# Version string must appear intact for hatch versioning
__version__ = "6.29.2"

# Build up version_info tuple for backwards compatibility
pattern = r"(?P<major>\d+).(?P<minor>\d+).(?P<patch>\d+)(?P<rest>.*)"
match = re.match(pattern, __version__)
assert match is not None
parts: List[object] = [int(match[part]) for part in ["major", "minor", "patch"]]
if match["rest"]:
    parts.append(match["rest"])
version_info = tuple(parts)

kernel_protocol_version_info = (5, 3)
kernel_protocol_version = "{}.{}".format(*kernel_protocol_version_info)
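Note: a quick illustration of what the version regex above yields:

    import re

    pattern = r"(?P<major>\d+).(?P<minor>\d+).(?P<patch>\d+)(?P<rest>.*)"
    match = re.match(pattern, "6.29.2")
    assert match is not None
    parts = [int(match[p]) for p in ["major", "minor", "patch"]]  # [6, 29, 2]
    # any suffix lands in match["rest"] and is appended as a string,
    # e.g. "7.0.0rc1" -> (7, 0, 0, "rc1")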
4
venv/lib/python3.10/site-packages/ipykernel/comm/__init__.py
Normal file
@@ -0,0 +1,4 @@
__all__ = ["Comm", "CommManager"]

from .comm import Comm
from .manager import CommManager
3 binary files not shown.
100
venv/lib/python3.10/site-packages/ipykernel/comm/comm.py
Normal file
@@ -0,0 +1,100 @@
"""Base class for a Comm"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import uuid
from typing import Optional
from warnings import warn

import comm.base_comm
import traitlets.config
from traitlets import Bool, Bytes, Instance, Unicode, default

from ipykernel.jsonutil import json_clean
from ipykernel.kernelbase import Kernel


# this is the class that will be created if we do comm.create_comm
class BaseComm(comm.base_comm.BaseComm):  # type:ignore[misc]
    """The base class for comms."""

    kernel: Optional["Kernel"] = None

    def publish_msg(self, msg_type, data=None, metadata=None, buffers=None, **keys):
        """Helper for sending a comm message on IOPub"""
        if not Kernel.initialized():
            return

        data = {} if data is None else data
        metadata = {} if metadata is None else metadata
        content = json_clean(dict(data=data, comm_id=self.comm_id, **keys))

        if self.kernel is None:
            self.kernel = Kernel.instance()

        assert self.kernel.session is not None
        self.kernel.session.send(
            self.kernel.iopub_socket,
            msg_type,
            content,
            metadata=json_clean(metadata),
            parent=self.kernel.get_parent(),
            ident=self.topic,
            buffers=buffers,
        )


# but for backwards compatibility, we need to inherit from LoggingConfigurable
class Comm(BaseComm, traitlets.config.LoggingConfigurable):
    """Class for communicating between a Frontend and a Kernel"""

    kernel = Instance("ipykernel.kernelbase.Kernel", allow_none=True)  # type:ignore[assignment]
    comm_id = Unicode()
    primary = Bool(True, help="Am I the primary or secondary Comm?")

    target_name = Unicode("comm")
    target_module = Unicode(
        None,
        allow_none=True,
        help="""requirejs module from
        which to load comm target.""",
    )

    topic = Bytes()

    @default("kernel")
    def _default_kernel(self):
        if Kernel.initialized():
            return Kernel.instance()
        return None

    @default("comm_id")
    def _default_comm_id(self):
        return uuid.uuid4().hex

    def __init__(
        self, target_name="", data=None, metadata=None, buffers=None, show_warning=True, **kwargs
    ):
        """Initialize a comm."""
        if show_warning:
            warn(
                "The `ipykernel.comm.Comm` class has been deprecated. Please use the `comm` module instead. "
                "For creating comms, use the function `from comm import create_comm`.",
                DeprecationWarning,
                stacklevel=2,
            )

        # Handle differing arguments between base classes.
        had_kernel = "kernel" in kwargs
        kernel = kwargs.pop("kernel", None)
        if target_name:
            kwargs["target_name"] = target_name
        BaseComm.__init__(self, data=data, metadata=metadata, buffers=buffers, **kwargs)  # type:ignore[call-arg]
        # only re-add kernel if explicitly provided
        if had_kernel:
            kwargs["kernel"] = kernel
        traitlets.config.LoggingConfigurable.__init__(self, **kwargs)


__all__ = ["Comm"]
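Note: per the deprecation warning above, new code should create comms through the comm package rather than instantiating ipykernel.comm.Comm directly; a minimal sketch (target name and payloads are illustrative):

    # preferred API (no DeprecationWarning):
    from comm import create_comm

    c = create_comm(target_name="example_target", data={"hello": "world"})
    c.send({"value": 42})

    # legacy path, still supported but warns:
    from ipykernel.comm import Comm

    legacy = Comm(target_name="example_target", data={"hello": "world"})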
62
venv/lib/python3.10/site-packages/ipykernel/comm/manager.py
Normal file
@@ -0,0 +1,62 @@
"""Base class to manage comms"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import logging

import comm.base_comm
import traitlets
import traitlets.config

from .comm import Comm

logger = logging.getLogger("ipykernel.comm")


class CommManager(comm.base_comm.CommManager, traitlets.config.LoggingConfigurable):  # type:ignore[misc]
    """A comm manager."""

    kernel = traitlets.Instance("ipykernel.kernelbase.Kernel")
    comms = traitlets.Dict()
    targets = traitlets.Dict()

    def __init__(self, **kwargs):
        """Initialize the manager."""
        # CommManager doesn't take arguments, so we explicitly forward arguments
        comm.base_comm.CommManager.__init__(self)
        traitlets.config.LoggingConfigurable.__init__(self, **kwargs)

    def comm_open(self, stream, ident, msg):
        """Handler for comm_open messages"""
        # This is for backward compatibility: comm_open creates a new ipykernel.comm.Comm,
        # but we should let the base class create the comm with comm.create_comm in a major release
        content = msg["content"]
        comm_id = content["comm_id"]
        target_name = content["target_name"]
        f = self.targets.get(target_name, None)
        comm = Comm(
            comm_id=comm_id,
            primary=False,
            target_name=target_name,
            show_warning=False,
        )
        self.register_comm(comm)
        if f is None:
            logger.error("No such comm target registered: %s", target_name)
        else:
            try:
                f(comm, msg)
                return
            except Exception:
                logger.error("Exception opening comm with target: %s", target_name, exc_info=True)  # noqa: G201

        # Failure.
        try:
            comm.close()
        except Exception:
            logger.error(  # noqa: G201
                """Could not close comm during `comm_open` failure
                clean-up. The comm may not have been opened yet.""",
                exc_info=True,
            )
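Note: a sketch of how a registered target meets comm_open, assuming this runs inside a live kernel where get_ipython() is available (the target name is illustrative):

    def on_comm_opened(comm, open_msg):
        @comm.on_msg
        def _recv(msg):
            comm.send({"echo": msg["content"]["data"]})

    get_ipython().kernel.comm_manager.register_target("example_target", on_comm_opened)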
105
venv/lib/python3.10/site-packages/ipykernel/compiler.py
Normal file
@@ -0,0 +1,105 @@
"""Compiler helpers for the debugger."""
import os
import sys
import tempfile

from IPython.core.compilerop import CachingCompiler


def murmur2_x86(data, seed):
    """Get the murmur2 hash."""
    m = 0x5BD1E995
    data = [chr(d) for d in str.encode(data, "utf8")]
    length = len(data)
    h = seed ^ length
    rounded_end = length & 0xFFFFFFFC
    for i in range(0, rounded_end, 4):
        k = (
            (ord(data[i]) & 0xFF)
            | ((ord(data[i + 1]) & 0xFF) << 8)
            | ((ord(data[i + 2]) & 0xFF) << 16)
            | (ord(data[i + 3]) << 24)
        )
        k = (k * m) & 0xFFFFFFFF
        k ^= k >> 24
        k = (k * m) & 0xFFFFFFFF

        h = (h * m) & 0xFFFFFFFF
        h ^= k

    val = length & 0x03
    k = 0
    if val == 3:
        k = (ord(data[rounded_end + 2]) & 0xFF) << 16
    if val in [2, 3]:
        k |= (ord(data[rounded_end + 1]) & 0xFF) << 8
    if val in [1, 2, 3]:
        k |= ord(data[rounded_end]) & 0xFF
        h ^= k
        h = (h * m) & 0xFFFFFFFF

    h ^= h >> 13
    h = (h * m) & 0xFFFFFFFF
    h ^= h >> 15

    return h


convert_to_long_pathname = lambda filename: filename

if sys.platform == "win32":
    try:
        import ctypes
        from ctypes.wintypes import DWORD, LPCWSTR, LPWSTR, MAX_PATH

        _GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW
        _GetLongPathName.argtypes = [LPCWSTR, LPWSTR, DWORD]
        _GetLongPathName.restype = DWORD

        def _convert_to_long_pathname(filename):
            buf = ctypes.create_unicode_buffer(MAX_PATH)
            rv = _GetLongPathName(filename, buf, MAX_PATH)
            if rv != 0 and rv <= MAX_PATH:
                filename = buf.value
            return filename

        # test that it works so if there are any issues we fail just once here
        _convert_to_long_pathname(__file__)
    except Exception:
        pass
    else:
        convert_to_long_pathname = _convert_to_long_pathname


def get_tmp_directory():
    """Get a temp directory."""
    tmp_dir = convert_to_long_pathname(tempfile.gettempdir())
    pid = os.getpid()
    return tmp_dir + os.sep + "ipykernel_" + str(pid)


def get_tmp_hash_seed():
    """Get a temp hash seed."""
    return 0xC70F6907


def get_file_name(code):
    """Get a file name."""
    cell_name = os.environ.get("IPYKERNEL_CELL_NAME")
    if cell_name is None:
        name = murmur2_x86(code, get_tmp_hash_seed())
        cell_name = get_tmp_directory() + os.sep + str(name) + ".py"
    return cell_name


class XCachingCompiler(CachingCompiler):
    """A custom caching compiler."""

    def __init__(self, *args, **kwargs):
        """Initialize the compiler."""
        super().__init__(*args, **kwargs)
        self.log = None

    def get_code_name(self, raw_code, code, number):
        """Get the code name."""
        return get_file_name(raw_code)
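Note: the point of the murmur2 hash is a deterministic cell-to-filename mapping, so re-running identical source reuses the same pseudo-file and breakpoints stay valid; a sketch:

    from ipykernel.compiler import get_file_name, get_tmp_hash_seed, murmur2_x86

    code = "print('hello')"
    h1 = murmur2_x86(code, get_tmp_hash_seed())
    h2 = murmur2_x86(code, get_tmp_hash_seed())
    assert h1 == h2                  # same source, same 32-bit hash
    print(get_file_name(code))       # <tmpdir>/ipykernel_<pid>/<hash>.py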
140
venv/lib/python3.10/site-packages/ipykernel/connect.py
Normal file
@@ -0,0 +1,140 @@
"""Connection file-related utilities for the kernel
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import annotations

import json
import sys
from subprocess import PIPE, Popen
from typing import TYPE_CHECKING, Any

import jupyter_client
from jupyter_client import write_connection_file

if TYPE_CHECKING:
    from ipykernel.kernelapp import IPKernelApp


def get_connection_file(app: IPKernelApp | None = None) -> str:
    """Return the path to the connection file of an app

    Parameters
    ----------
    app : IPKernelApp instance [optional]
        If unspecified, the currently running app will be used
    """
    from traitlets.utils import filefind

    if app is None:
        from ipykernel.kernelapp import IPKernelApp

        if not IPKernelApp.initialized():
            msg = "app not specified, and not in a running Kernel"
            raise RuntimeError(msg)

        app = IPKernelApp.instance()
    return filefind(app.connection_file, [".", app.connection_dir])


def _find_connection_file(connection_file):
    """Return the absolute path for a connection file

    - If nothing specified, return current Kernel's connection file
    - Otherwise, call jupyter_client.find_connection_file
    """
    if connection_file is None:
        # get connection file from current kernel
        return get_connection_file()
    return jupyter_client.find_connection_file(connection_file)


def get_connection_info(
    connection_file: str | None = None, unpack: bool = False
) -> str | dict[str, Any]:
    """Return the connection information for the current Kernel.

    Parameters
    ----------
    connection_file : str [optional]
        The connection file to be used. Can be given by absolute path, or
        IPython will search in the security directory.
        If run from IPython,

        If unspecified, the connection file for the currently running
        IPython Kernel will be used, which is only allowed from inside a kernel.

    unpack : bool [default: False]
        if True, return the unpacked dict, otherwise just the string contents
        of the file.

    Returns
    -------
    The connection dictionary of the current kernel, as string or dict,
    depending on `unpack`.
    """
    cf = _find_connection_file(connection_file)

    with open(cf) as f:
        info_str = f.read()

    if unpack:
        info = json.loads(info_str)
        # ensure key is bytes:
        info["key"] = info.get("key", "").encode()
        return info  # type:ignore[no-any-return]

    return info_str


def connect_qtconsole(
    connection_file: str | None = None, argv: list[str] | None = None
) -> Popen[Any]:
    """Connect a qtconsole to the current kernel.

    This is useful for connecting a second qtconsole to a kernel, or to a
    local notebook.

    Parameters
    ----------
    connection_file : str [optional]
        The connection file to be used. Can be given by absolute path, or
        IPython will search in the security directory.
        If run from IPython,

        If unspecified, the connection file for the currently running
        IPython Kernel will be used, which is only allowed from inside a kernel.

    argv : list [optional]
        Any extra args to be passed to the console.

    Returns
    -------
    :class:`subprocess.Popen` instance running the qtconsole frontend
    """
    argv = [] if argv is None else argv

    cf = _find_connection_file(connection_file)

    cmd = ";".join(["from qtconsole import qtconsoleapp", "qtconsoleapp.main()"])

    kwargs: dict[str, Any] = {}
    # Launch the Qt console in a separate session & process group, so
    # interrupting the kernel doesn't kill it.
    kwargs["start_new_session"] = True

    return Popen(
        [sys.executable, "-c", cmd, "--existing", cf, *argv],
        stdout=PIPE,
        stderr=PIPE,
        close_fds=(sys.platform != "win32"),
        **kwargs,
    )


__all__ = [
    "write_connection_file",
    "get_connection_file",
    "get_connection_info",
    "connect_qtconsole",
]
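Note: a sketch of the typical call, valid only from inside a running kernel (field names follow the standard Jupyter connection-file format):

    from ipykernel.connect import get_connection_info

    info = get_connection_info(unpack=True)      # dict, info["key"] as bytes
    print(info["transport"], info["shell_port"])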
32
venv/lib/python3.10/site-packages/ipykernel/control.py
Normal file
@@ -0,0 +1,32 @@
"""A thread for a control channel."""
from threading import Thread

from tornado.ioloop import IOLoop

CONTROL_THREAD_NAME = "Control"


class ControlThread(Thread):
    """A thread for a control channel."""

    def __init__(self, **kwargs):
        """Initialize the thread."""
        Thread.__init__(self, name=CONTROL_THREAD_NAME, **kwargs)
        self.io_loop = IOLoop(make_current=False)
        self.pydev_do_not_trace = True
        self.is_pydev_daemon_thread = True

    def run(self):
        """Run the thread."""
        self.name = CONTROL_THREAD_NAME
        try:
            self.io_loop.start()
        finally:
            self.io_loop.close()

    def stop(self):
        """Stop the thread.

        This method is threadsafe.
        """
        self.io_loop.add_callback(self.io_loop.stop)
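Note: a usage sketch; the control channel gets its own IOLoop on a dedicated thread so control requests (shutdown, interrupt, debug) keep being served while the shell thread is busy:

    from ipykernel.control import ControlThread

    thread = ControlThread(daemon=True)
    thread.start()                           # runs the thread's private IOLoop
    thread.io_loop.add_callback(lambda: print("ran on control thread"))
    thread.stop()                            # threadsafe, per the docstring
    thread.join()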
84
venv/lib/python3.10/site-packages/ipykernel/datapub.py
Normal file
@@ -0,0 +1,84 @@
"""Publishing native (typically pickled) objects.
"""

import warnings

warnings.warn(
    "ipykernel.datapub is deprecated. It has moved to ipyparallel.datapub",
    DeprecationWarning,
    stacklevel=2,
)

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

from traitlets import Any, CBytes, Dict, Instance
from traitlets.config import Configurable

from ipykernel.jsonutil import json_clean

try:
    # available since ipyparallel 5.0.0
    from ipyparallel.serialize import serialize_object
except ImportError:
    # Deprecated since ipykernel 4.3.0
    from ipykernel.serialize import serialize_object

from jupyter_client.session import Session, extract_header


class ZMQDataPublisher(Configurable):
    """A zmq data publisher."""

    topic = CBytes(b"datapub")
    session = Instance(Session, allow_none=True)
    pub_socket = Any(allow_none=True)
    parent_header = Dict({})

    def set_parent(self, parent):
        """Set the parent for outbound messages."""
        self.parent_header = extract_header(parent)

    def publish_data(self, data):
        """publish a data_message on the IOPub channel

        Parameters
        ----------
        data : dict
            The data to be published. Think of it as a namespace.
        """
        session = self.session
        assert session is not None
        buffers = serialize_object(
            data,
            buffer_threshold=session.buffer_threshold,
            item_threshold=session.item_threshold,
        )
        content = json_clean(dict(keys=list(data.keys())))
        session.send(
            self.pub_socket,
            "data_message",
            content=content,
            parent=self.parent_header,
            buffers=buffers,
            ident=self.topic,
        )


def publish_data(data):
    """publish a data_message on the IOPub channel

    Parameters
    ----------
    data : dict
        The data to be published. Think of it as a namespace.
    """
    warnings.warn(
        "ipykernel.datapub is deprecated. It has moved to ipyparallel.datapub",
        DeprecationWarning,
        stacklevel=2,
    )

    from ipykernel.zmqshell import ZMQInteractiveShell

    ZMQInteractiveShell.instance().data_pub.publish_data(data)
746
venv/lib/python3.10/site-packages/ipykernel/debugger.py
Normal file
@@ -0,0 +1,746 @@
"""Debugger implementation for the IPython kernel."""
import os
import re
import sys
import typing as t
from pathlib import Path

import zmq
from IPython.core.getipython import get_ipython
from IPython.core.inputtransformer2 import leading_empty_lines
from tornado.locks import Event
from tornado.queues import Queue
from zmq.utils import jsonapi

try:
    from jupyter_client.jsonutil import json_default
except ImportError:
    from jupyter_client.jsonutil import date_default as json_default

from .compiler import get_file_name, get_tmp_directory, get_tmp_hash_seed

try:
    # This import is required to have the next ones working...
    from debugpy.server import api

    from _pydevd_bundle import pydevd_frame_utils  # isort: skip
    from _pydevd_bundle.pydevd_suspended_frames import (  # isort: skip
        SuspendedFramesManager,
        _FramesTracker,
    )

    _is_debugpy_available = True
except ImportError:
    _is_debugpy_available = False
except Exception as e:
    # We cannot import the module where the DebuggerInitializationError
    # is defined
    if e.__class__.__name__ == "DebuggerInitializationError":
        _is_debugpy_available = False
    else:
        raise e


# Required for backwards compatibility
ROUTING_ID = getattr(zmq, "ROUTING_ID", None) or zmq.IDENTITY


class _FakeCode:
    """Fake code class."""

    def __init__(self, co_filename, co_name):
        """Init."""
        self.co_filename = co_filename
        self.co_name = co_name


class _FakeFrame:
    """Fake frame class."""

    def __init__(self, f_code, f_globals, f_locals):
        """Init."""
        self.f_code = f_code
        self.f_globals = f_globals
        self.f_locals = f_locals
        self.f_back = None


class _DummyPyDB:
    """Fake PyDb class."""

    def __init__(self):
        """Init."""
        from _pydevd_bundle.pydevd_api import PyDevdAPI

        self.variable_presentation = PyDevdAPI.VariablePresentation()


class VariableExplorer:
    """A variable explorer."""

    def __init__(self):
        """Initialize the explorer."""
        self.suspended_frame_manager = SuspendedFramesManager()
        self.py_db = _DummyPyDB()
        self.tracker = _FramesTracker(self.suspended_frame_manager, self.py_db)
        self.frame = None

    def track(self):
        """Start tracking."""
        var = get_ipython().user_ns
        self.frame = _FakeFrame(_FakeCode("<module>", get_file_name("sys._getframe()")), var, var)
        self.tracker.track("thread1", pydevd_frame_utils.create_frames_list_from_frame(self.frame))

    def untrack_all(self):
        """Stop tracking."""
        self.tracker.untrack_all()

    def get_children_variables(self, variable_ref=None):
        """Get the child variables for a variable reference."""
        var_ref = variable_ref
        if not var_ref:
            var_ref = id(self.frame)
        variables = self.suspended_frame_manager.get_variable(var_ref)
        return [x.get_var_data() for x in variables.get_children_variables()]


class DebugpyMessageQueue:
    """A debugpy message queue."""

    HEADER = "Content-Length: "
    HEADER_LENGTH = 16
    SEPARATOR = "\r\n\r\n"
    SEPARATOR_LENGTH = 4

    def __init__(self, event_callback, log):
        """Init the queue."""
        self.tcp_buffer = ""
        self._reset_tcp_pos()
        self.event_callback = event_callback
        self.message_queue: Queue[t.Any] = Queue()
        self.log = log

    def _reset_tcp_pos(self):
        self.header_pos = -1
        self.separator_pos = -1
        self.message_size = 0
        self.message_pos = -1

    def _put_message(self, raw_msg):
        self.log.debug("QUEUE - _put_message:")
        msg = t.cast(t.Dict[str, t.Any], jsonapi.loads(raw_msg))
        if msg["type"] == "event":
            self.log.debug("QUEUE - received event:")
            self.log.debug(msg)
            self.event_callback(msg)
        else:
            self.log.debug("QUEUE - put message:")
            self.log.debug(msg)
            self.message_queue.put_nowait(msg)

    def put_tcp_frame(self, frame):
        """Put a tcp frame in the queue."""
        self.tcp_buffer += frame

        self.log.debug("QUEUE - received frame")
        while True:
            # Finds header
            if self.header_pos == -1:
                self.header_pos = self.tcp_buffer.find(DebugpyMessageQueue.HEADER)
            if self.header_pos == -1:
                return

            self.log.debug("QUEUE - found header at pos %i", self.header_pos)

            # Finds separator
            if self.separator_pos == -1:
                hint = self.header_pos + DebugpyMessageQueue.HEADER_LENGTH
                self.separator_pos = self.tcp_buffer.find(DebugpyMessageQueue.SEPARATOR, hint)
            if self.separator_pos == -1:
                return

            self.log.debug("QUEUE - found separator at pos %i", self.separator_pos)

            if self.message_pos == -1:
                size_pos = self.header_pos + DebugpyMessageQueue.HEADER_LENGTH
                self.message_pos = self.separator_pos + DebugpyMessageQueue.SEPARATOR_LENGTH
                self.message_size = int(self.tcp_buffer[size_pos : self.separator_pos])

            self.log.debug("QUEUE - found message at pos %i", self.message_pos)
            self.log.debug("QUEUE - message size is %i", self.message_size)

            if len(self.tcp_buffer) - self.message_pos < self.message_size:
                return

            self._put_message(
                self.tcp_buffer[self.message_pos : self.message_pos + self.message_size]
            )
            if len(self.tcp_buffer) - self.message_pos == self.message_size:
                self.log.debug("QUEUE - resetting tcp_buffer")
                self.tcp_buffer = ""
                self._reset_tcp_pos()
                return

            self.tcp_buffer = self.tcp_buffer[self.message_pos + self.message_size :]
            self.log.debug("QUEUE - slicing tcp_buffer: %s", self.tcp_buffer)
            self._reset_tcp_pos()

    async def get_message(self):
        """Get a message from the queue."""
        return await self.message_queue.get()


class DebugpyClient:
    """A client for debugpy."""

    def __init__(self, log, debugpy_stream, event_callback):
        """Initialize the client."""
        self.log = log
        self.debugpy_stream = debugpy_stream
        self.event_callback = event_callback
        self.message_queue = DebugpyMessageQueue(self._forward_event, self.log)
        self.debugpy_host = "127.0.0.1"
        self.debugpy_port = -1
        self.routing_id = None
        self.wait_for_attach = True
        self.init_event = Event()
        self.init_event_seq = -1

    def _get_endpoint(self):
        host, port = self.get_host_port()
        return "tcp://" + host + ":" + str(port)

    def _forward_event(self, msg):
        if msg["event"] == "initialized":
            self.init_event.set()
            self.init_event_seq = msg["seq"]
        self.event_callback(msg)

    def _send_request(self, msg):
        if self.routing_id is None:
            self.routing_id = self.debugpy_stream.socket.getsockopt(ROUTING_ID)
        content = jsonapi.dumps(
            msg,
            default=json_default,
            ensure_ascii=False,
            allow_nan=False,
        )
        content_length = str(len(content))
        buf = (DebugpyMessageQueue.HEADER + content_length + DebugpyMessageQueue.SEPARATOR).encode(
            "ascii"
        )
        buf += content
        self.log.debug("DEBUGPYCLIENT:")
        self.log.debug(self.routing_id)
        self.log.debug(buf)
        self.debugpy_stream.send_multipart((self.routing_id, buf))

    async def _wait_for_response(self):
        # Since events are never pushed to the message_queue
        # we can safely assume the next message in queue
        # will be an answer to the previous request
        return await self.message_queue.get_message()

    async def _handle_init_sequence(self):
        # 1] Waits for initialized event
        await self.init_event.wait()

        # 2] Sends configurationDone request
        configurationDone = {
            "type": "request",
            "seq": int(self.init_event_seq) + 1,
            "command": "configurationDone",
        }
        self._send_request(configurationDone)

        # 3] Waits for configurationDone response
        await self._wait_for_response()

        # 4] Waits for attachResponse and returns it
        return await self._wait_for_response()

    def get_host_port(self):
        """Get the host debugpy port."""
        if self.debugpy_port == -1:
            socket = self.debugpy_stream.socket
            socket.bind_to_random_port("tcp://" + self.debugpy_host)
            self.endpoint = socket.getsockopt(zmq.LAST_ENDPOINT).decode("utf-8")
            socket.unbind(self.endpoint)
            index = self.endpoint.rfind(":")
            self.debugpy_port = self.endpoint[index + 1 :]
        return self.debugpy_host, self.debugpy_port

    def connect_tcp_socket(self):
        """Connect to the tcp socket."""
        self.debugpy_stream.socket.connect(self._get_endpoint())
        self.routing_id = self.debugpy_stream.socket.getsockopt(ROUTING_ID)

    def disconnect_tcp_socket(self):
        """Disconnect from the tcp socket."""
        self.debugpy_stream.socket.disconnect(self._get_endpoint())
        self.routing_id = None
        self.init_event = Event()
        self.init_event_seq = -1
        self.wait_for_attach = True

    def receive_dap_frame(self, frame):
        """Receive a dap frame."""
        self.message_queue.put_tcp_frame(frame)

    async def send_dap_request(self, msg):
        """Send a dap request."""
        self._send_request(msg)
        if self.wait_for_attach and msg["command"] == "attach":
            rep = await self._handle_init_sequence()
            self.wait_for_attach = False
            return rep

        rep = await self._wait_for_response()
        self.log.debug("DEBUGPYCLIENT - returning:")
        self.log.debug(rep)
        return rep


class Debugger:
    """The debugger class."""

    # Requests that require that the debugger has started
    started_debug_msg_types = [
        "dumpCell",
        "setBreakpoints",
        "source",
        "stackTrace",
        "variables",
        "attach",
        "configurationDone",
    ]

    # Requests that can be handled even if the debugger is not running
    static_debug_msg_types = [
        "debugInfo",
        "inspectVariables",
        "richInspectVariables",
        "modules",
        "copyToGlobals",
    ]

    def __init__(
        self, log, debugpy_stream, event_callback, shell_socket, session, just_my_code=True
    ):
        """Initialize the debugger."""
        self.log = log
        self.debugpy_client = DebugpyClient(log, debugpy_stream, self._handle_event)
        self.shell_socket = shell_socket
        self.session = session
        self.is_started = False
        self.event_callback = event_callback
        self.just_my_code = just_my_code
        self.stopped_queue: Queue[t.Any] = Queue()

        self.started_debug_handlers = {}
        for msg_type in Debugger.started_debug_msg_types:
            self.started_debug_handlers[msg_type] = getattr(self, msg_type)

        self.static_debug_handlers = {}
        for msg_type in Debugger.static_debug_msg_types:
            self.static_debug_handlers[msg_type] = getattr(self, msg_type)

        self.breakpoint_list = {}
        self.stopped_threads = set()

        self.debugpy_initialized = False
        self._removed_cleanup = {}

        self.debugpy_host = "127.0.0.1"
        self.debugpy_port = 0
        self.endpoint = None

        self.variable_explorer = VariableExplorer()

    def _handle_event(self, msg):
        if msg["event"] == "stopped":
            if msg["body"]["allThreadsStopped"]:
                self.stopped_queue.put_nowait(msg)
                # Do not forward the event now, will be done in the handle_stopped_event
                return
            self.stopped_threads.add(msg["body"]["threadId"])
            self.event_callback(msg)
        elif msg["event"] == "continued":
            if msg["body"]["allThreadsContinued"]:
                self.stopped_threads = set()
            else:
                self.stopped_threads.remove(msg["body"]["threadId"])
            self.event_callback(msg)
        else:
            self.event_callback(msg)

    async def _forward_message(self, msg):
        return await self.debugpy_client.send_dap_request(msg)

    def _build_variables_response(self, request, variables):
        var_list = [var for var in variables if self.accept_variable(var["name"])]
        return {
            "seq": request["seq"],
            "type": "response",
            "request_seq": request["seq"],
            "success": True,
            "command": request["command"],
            "body": {"variables": var_list},
        }

    def _accept_stopped_thread(self, thread_name):
        # TODO: identify Thread-2, Thread-3 and Thread-4. These are NOT
        # Control, IOPub or Heartbeat threads
        forbid_list = ["IPythonHistorySavingThread", "Thread-2", "Thread-3", "Thread-4"]
        return thread_name not in forbid_list

    async def handle_stopped_event(self):
        """Handle a stopped event."""
        # Wait for a stopped event message in the stopped queue
        # This message is used for triggering the 'threads' request
        event = await self.stopped_queue.get()
        req = {"seq": event["seq"] + 1, "type": "request", "command": "threads"}
        rep = await self._forward_message(req)
        for thread in rep["body"]["threads"]:
            if self._accept_stopped_thread(thread["name"]):
                self.stopped_threads.add(thread["id"])
        self.event_callback(event)

    @property
    def tcp_client(self):
        return self.debugpy_client

    def start(self):
        """Start the debugger."""
        if not self.debugpy_initialized:
            tmp_dir = get_tmp_directory()
            if not Path(tmp_dir).exists():
                Path(tmp_dir).mkdir(parents=True)
            host, port = self.debugpy_client.get_host_port()
            code = "import debugpy;"
            code += 'debugpy.listen(("' + host + '",' + port + "))"
            content = {"code": code, "silent": True}
            self.session.send(
                self.shell_socket,
                "execute_request",
                content,
                None,
                (self.shell_socket.getsockopt(ROUTING_ID)),
            )

            ident, msg = self.session.recv(self.shell_socket, mode=0)
            self.debugpy_initialized = msg["content"]["status"] == "ok"

        # Don't remove leading empty lines when debugging so the breakpoints are correctly positioned
        cleanup_transforms = get_ipython().input_transformer_manager.cleanup_transforms
        if leading_empty_lines in cleanup_transforms:
            index = cleanup_transforms.index(leading_empty_lines)
            self._removed_cleanup[index] = cleanup_transforms.pop(index)

        self.debugpy_client.connect_tcp_socket()
        return self.debugpy_initialized

    def stop(self):
        """Stop the debugger."""
        self.debugpy_client.disconnect_tcp_socket()

        # Restore removed cleanup transformers
        cleanup_transforms = get_ipython().input_transformer_manager.cleanup_transforms
        for index in sorted(self._removed_cleanup):
            func = self._removed_cleanup.pop(index)
            cleanup_transforms.insert(index, func)

    async def dumpCell(self, message):
        """Handle a dump cell message."""
        code = message["arguments"]["code"]
        file_name = get_file_name(code)

        with open(file_name, "w", encoding="utf-8") as f:
            f.write(code)

        return {
            "type": "response",
            "request_seq": message["seq"],
            "success": True,
            "command": message["command"],
            "body": {"sourcePath": file_name},
        }

    async def setBreakpoints(self, message):
        """Handle a set breakpoints message."""
        source = message["arguments"]["source"]["path"]
        self.breakpoint_list[source] = message["arguments"]["breakpoints"]
        message_response = await self._forward_message(message)
        # debugpy can set breakpoints on different lines than the ones requested,
        # so we want to record the breakpoints that were actually added
        if message_response.get("success"):
            self.breakpoint_list[source] = [
                {"line": breakpoint["line"]}
                for breakpoint in message_response["body"]["breakpoints"]
            ]
        return message_response

    async def source(self, message):
        """Handle a source message."""
        reply = {"type": "response", "request_seq": message["seq"], "command": message["command"]}
        source_path = message["arguments"]["source"]["path"]
        if Path(source_path).is_file():
            with open(source_path, encoding="utf-8") as f:
                reply["success"] = True
                reply["body"] = {"content": f.read()}
        else:
            reply["success"] = False
            reply["message"] = "source unavailable"
            reply["body"] = {}

        return reply

    async def stackTrace(self, message):
        """Handle a stack trace message."""
        reply = await self._forward_message(message)
        # The stackFrames array can have the following content:
        # { frames from the notebook}
        # ...
        # { 'id': xxx, 'name': '<module>', ... } <= this is the first frame of the code from the notebook
        # { frames from ipykernel }
        # ...
        # {'id': yyy, 'name': '<module>', ... } <= this is the first frame of ipykernel code
        # or only the frames from the notebook.
        # We want to remove all the frames from ipykernel when they are present.
        try:
            sf_list = reply["body"]["stackFrames"]
            module_idx = len(sf_list) - next(
                i for i, v in enumerate(reversed(sf_list), 1) if v["name"] == "<module>" and i != 1
            )
            reply["body"]["stackFrames"] = reply["body"]["stackFrames"][: module_idx + 1]
        except StopIteration:
            pass
        return reply

    def accept_variable(self, variable_name):
        """Accept a variable by name."""
        forbid_list = [
            "__name__",
            "__doc__",
            "__package__",
            "__loader__",
            "__spec__",
            "__annotations__",
            "__builtins__",
            "__builtin__",
            "__display__",
            "get_ipython",
            "debugpy",
            "exit",
            "quit",
            "In",
            "Out",
            "_oh",
            "_dh",
            "_",
            "__",
            "___",
        ]
        cond = variable_name not in forbid_list
        cond = cond and not bool(re.search(r"^_\d", variable_name))
        cond = cond and variable_name[0:2] != "_i"
        return cond  # noqa: RET504

    async def variables(self, message):
        """Handle a variables message."""
        reply = {}
        if not self.stopped_threads:
            variables = self.variable_explorer.get_children_variables(
                message["arguments"]["variablesReference"]
            )
            return self._build_variables_response(message, variables)

        reply = await self._forward_message(message)
        # TODO : check start and count arguments work as expected in debugpy
        reply["body"]["variables"] = [
            var for var in reply["body"]["variables"] if self.accept_variable(var["name"])
        ]
        return reply

    async def attach(self, message):
        """Handle an attach message."""
        host, port = self.debugpy_client.get_host_port()
        message["arguments"]["connect"] = {"host": host, "port": port}
        message["arguments"]["logToFile"] = True
        # Experimental option to break in non-user code.
        # The ipykernel source is in the call stack, so the user
        # has to manipulate the step-over and step-into in a wise way.
        # Set debugOptions for breakpoints in python standard library source.
        if not self.just_my_code:
            message["arguments"]["debugOptions"] = ["DebugStdLib"]
        return await self._forward_message(message)

    async def configurationDone(self, message):
        """Handle a configuration done message."""
        return {
            "seq": message["seq"],
            "type": "response",
            "request_seq": message["seq"],
            "success": True,
            "command": message["command"],
        }

    async def debugInfo(self, message):
        """Handle a debug info message."""
        breakpoint_list = []
        for key, value in self.breakpoint_list.items():
            breakpoint_list.append({"source": key, "breakpoints": value})
        return {
            "type": "response",
            "request_seq": message["seq"],
            "success": True,
            "command": message["command"],
            "body": {
                "isStarted": self.is_started,
                "hashMethod": "Murmur2",
                "hashSeed": get_tmp_hash_seed(),
                "tmpFilePrefix": get_tmp_directory() + os.sep,
                "tmpFileSuffix": ".py",
                "breakpoints": breakpoint_list,
                "stoppedThreads": list(self.stopped_threads),
                "richRendering": True,
                "exceptionPaths": ["Python Exceptions"],
                "copyToGlobals": True,
            },
        }

    async def inspectVariables(self, message):
        """Handle an inspect variables message."""
        self.variable_explorer.untrack_all()
        # looks like the implementation of untrack_all in ptvsd
        # destroys objects we need in track. We have no choice but to
        # reinstantiate the object
        self.variable_explorer = VariableExplorer()
        self.variable_explorer.track()
        variables = self.variable_explorer.get_children_variables()
        return self._build_variables_response(message, variables)

    async def richInspectVariables(self, message):
        """Handle a rich inspect variables message."""
        reply = {
            "type": "response",
            "sequence_seq": message["seq"],
            "success": False,
            "command": message["command"],
        }

        var_name = message["arguments"]["variableName"]
        valid_name = str.isidentifier(var_name)
        if not valid_name:
            reply["body"] = {"data": {}, "metadata": {}}
            if var_name == "special variables" or var_name == "function variables":
                reply["success"] = True
            return reply

        repr_data = {}
        repr_metadata = {}
        if not self.stopped_threads:
            # The code did not hit a breakpoint, we use the interpreter
            # to get the rich representation of the variable
            result = get_ipython().user_expressions({var_name: var_name})[var_name]
            if result.get("status", "error") == "ok":
                repr_data = result.get("data", {})
                repr_metadata = result.get("metadata", {})
        else:
            # The code has stopped on a breakpoint, we use the setExpression
            # request to get the rich representation of the variable
            code = f"get_ipython().display_formatter.format({var_name})"
            frame_id = message["arguments"]["frameId"]
            seq = message["seq"]
            reply = await self._forward_message(
                {
                    "type": "request",
                    "command": "evaluate",
                    "seq": seq + 1,
                    "arguments": {"expression": code, "frameId": frame_id, "context": "clipboard"},
                }
            )
            if reply["success"]:
                repr_data, repr_metadata = eval(reply["body"]["result"], {}, {})

        body = {
            "data": repr_data,
            "metadata": {k: v for k, v in repr_metadata.items() if k in repr_data},
        }

        reply["body"] = body
        reply["success"] = True
        return reply

    async def copyToGlobals(self, message):
        dst_var_name = message["arguments"]["dstVariableName"]
        src_var_name = message["arguments"]["srcVariableName"]
        src_frame_id = message["arguments"]["srcFrameId"]

        expression = f"globals()['{dst_var_name}']"
        seq = message["seq"]
        return await self._forward_message(
            {
                "type": "request",
                "command": "setExpression",
                "seq": seq + 1,
                "arguments": {
                    "expression": expression,
                    "value": src_var_name,
                    "frameId": src_frame_id,
                },
            }
        )

    async def modules(self, message):
        """Handle a modules message."""
        modules = list(sys.modules.values())
        startModule = message.get("startModule", 0)
        moduleCount = message.get("moduleCount", len(modules))
        mods = []
        for i in range(startModule, moduleCount):
            module = modules[i]
            filename = getattr(getattr(module, "__spec__", None), "origin", None)
            if filename and filename.endswith(".py"):
                mods.append({"id": i, "name": module.__name__, "path": filename})

        return {"body": {"modules": mods, "totalModules": len(modules)}}

    async def process_request(self, message):
        """Process a request."""
        reply = {}

        if message["command"] == "initialize":
            if self.is_started:
                self.log.info("The debugger has already started")
            else:
                self.is_started = self.start()
                if self.is_started:
                    self.log.info("The debugger has started")
                else:
                    reply = {
                        "command": "initialize",
                        "request_seq": message["seq"],
                        "seq": 3,
                        "success": False,
                        "type": "response",
                    }

        handler = self.static_debug_handlers.get(message["command"], None)
        if handler is not None:
            reply = await handler(message)
        elif self.is_started:
            handler = self.started_debug_handlers.get(message["command"], None)
            if handler is not None:
                reply = await handler(message)
            else:
                reply = await self._forward_message(message)

        if message["command"] == "disconnect":
            self.stop()
            self.breakpoint_list = {}
            self.stopped_threads = set()
            self.is_started = False
            self.log.info("The debugger has stopped")

        return reply
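Note: the handlers above consume Debug Adapter Protocol requests carried inside Jupyter debug_request messages; a representative dumpCell exchange, with illustrative values:

    request = {
        "seq": 10,                          # example client sequence number
        "type": "request",
        "command": "dumpCell",
        "arguments": {"code": "x = 1\nprint(x)"},
    }
    # Debugger.process_request routes this to dumpCell(), which writes the
    # source to the murmur2-derived temp file and replies roughly:
    # {"type": "response", "request_seq": 10, "success": True,
    #  "command": "dumpCell", "body": {"sourcePath": ".../ipykernel_<pid>/<hash>.py"}}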
101
venv/lib/python3.10/site-packages/ipykernel/displayhook.py
Normal file
@@ -0,0 +1,101 @@
"""Replacements for sys.displayhook that publish over ZMQ."""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import annotations

import builtins
import sys
import typing as t

from IPython.core.displayhook import DisplayHook
from jupyter_client.session import Session, extract_header
from traitlets import Any, Dict, Instance

from ipykernel.jsonutil import encode_images, json_clean


class ZMQDisplayHook:
    """A simple displayhook that publishes the object's repr over a ZeroMQ
    socket."""

    topic = b"execute_result"

    def __init__(self, session, pub_socket):
        """Initialize the hook."""
        self.session = session
        self.pub_socket = pub_socket
        self.parent_header = {}

    def get_execution_count(self):
        """This method is replaced in kernelapp"""
        return 0

    def __call__(self, obj):
        """Handle a hook call."""
        if obj is None:
            return

        builtins._ = obj  # type:ignore[attr-defined]
        sys.stdout.flush()
        sys.stderr.flush()
        contents = {
            "execution_count": self.get_execution_count(),
            "data": {"text/plain": repr(obj)},
            "metadata": {},
        }
        self.session.send(
            self.pub_socket, "execute_result", contents, parent=self.parent_header, ident=self.topic
        )

    def set_parent(self, parent):
        """Set the parent header."""
        self.parent_header = extract_header(parent)


class ZMQShellDisplayHook(DisplayHook):
    """A displayhook subclass that publishes data using ZeroMQ. This is intended
    to work with an InteractiveShell instance. It sends a dict of different
    representations of the object."""

    topic = None

    session = Instance(Session, allow_none=True)
    pub_socket = Any(allow_none=True)
    parent_header = Dict({})
    msg: dict[str, t.Any] | None

    def set_parent(self, parent):
        """Set the parent for outbound messages."""
        self.parent_header = extract_header(parent)

    def start_displayhook(self):
        """Start the display hook."""
        if self.session:
            self.msg = self.session.msg(
                "execute_result",
                {
                    "data": {},
                    "metadata": {},
                },
                parent=self.parent_header,
            )

    def write_output_prompt(self):
        """Write the output prompt."""
        if self.msg:
            self.msg["content"]["execution_count"] = self.prompt_count

    def write_format_data(self, format_dict, md_dict=None):
        """Write format data to the message."""
        if self.msg:
            self.msg["content"]["data"] = json_clean(encode_images(format_dict))
            self.msg["content"]["metadata"] = md_dict

    def finish_displayhook(self):
        """Finish up all displayhook activities."""
        sys.stdout.flush()
        sys.stderr.flush()
        if self.msg and self.msg["content"]["data"] and self.session:
            self.session.send(self.pub_socket, self.msg, ident=self.topic)
        self.msg = None
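Note: a sketch of the plain hook's behavior, using a stand-in for the real Session object to show the message shape it publishes:

    from ipykernel.displayhook import ZMQDisplayHook

    class FakeSession:
        def send(self, socket, msg_type, content, parent=None, ident=None):
            print(msg_type, content)

    hook = ZMQDisplayHook(FakeSession(), pub_socket=None)  # stand-ins, not real objects
    hook.get_execution_count = lambda: 7                   # kernelapp swaps this in
    hook(42)  # prints: execute_result {'execution_count': 7,
              #         'data': {'text/plain': '42'}, 'metadata': {}}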
57
venv/lib/python3.10/site-packages/ipykernel/embed.py
Normal file
@@ -0,0 +1,57 @@
"""Simple function for embedding an IPython kernel
"""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------

import sys

from IPython.utils.frame import extract_module_locals

from .kernelapp import IPKernelApp

# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------


def embed_kernel(module=None, local_ns=None, **kwargs):
    """Embed and start an IPython kernel in a given scope.

    Parameters
    ----------
    module : ModuleType, optional
        The module to load into IPython globals (default: caller)
    local_ns : dict, optional
        The namespace to load into IPython user namespace (default: caller)
    kwargs : dict, optional
        Further keyword args are relayed to the IPKernelApp constructor,
        allowing configuration of the Kernel. Will only have an effect
        on the first embed_kernel call for a given process.

    """
    # get the app if it exists, or set it up if it doesn't
    if IPKernelApp.initialized():
        app = IPKernelApp.instance()
    else:
        app = IPKernelApp.instance(**kwargs)
        app.initialize([])
        # Undo unnecessary sys module mangling from init_sys_modules.
        # This would not be necessary if we could prevent it
        # in the first place by using a different InteractiveShell
        # subclass, as in the regular embed case.
        main = app.kernel.shell._orig_sys_modules_main_mod
        if main is not None:
            sys.modules[app.kernel.shell._orig_sys_modules_main_name] = main

    # load the calling scope if not given
    (caller_module, caller_locals) = extract_module_locals(1)
    if module is None:
        module = caller_module
    if local_ns is None:
        local_ns = caller_locals

    app.kernel.user_module = module
    app.kernel.user_ns = local_ns
    app.shell.set_completer_frame()  # type:ignore[union-attr]
    app.start()
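# Editor's example (not part of the upstream file): a typical use of
# embed_kernel() is to pause a script and expose its local namespace to a
# Jupyter client; variable names below are illustrative.
#
#   from ipykernel.embed import embed_kernel
#
#   def compute(data):
#       total = sum(data)
#       embed_kernel(local_ns=locals())   # connect with: jupyter console --existing
#       return total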
576
venv/lib/python3.10/site-packages/ipykernel/eventloops.py
Normal file
@@ -0,0 +1,576 @@
"""Event loop integration for the ZeroMQ-based kernels."""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import os
import platform
import sys
from functools import partial

import zmq
from packaging.version import Version as V
from traitlets.config.application import Application


def _use_appnope():
    """Should we use appnope for dealing with OS X app nap?

    Checks if we are on OS X 10.9 or greater.
    """
    return sys.platform == "darwin" and V(platform.mac_ver()[0]) >= V("10.9")


# mapping of keys to loop functions
loop_map = {
    "inline": None,
    "nbagg": None,
    "webagg": None,
    "notebook": None,
    "ipympl": None,
    "widget": None,
    None: None,
}


def register_integration(*toolkitnames):
    """Decorator to register an event loop to integrate with the IPython kernel

    The decorator takes names to register the event loop as for the %gui magic.
    You can provide alternative names for the same toolkit.

    The decorated function should take a single argument, the IPython kernel
    instance, arrange for the event loop to call ``kernel.do_one_iteration()``
    at least every ``kernel._poll_interval`` seconds, and start the event loop.

    :mod:`ipykernel.eventloops` provides and registers such functions
    for a few common event loops.
    """

    def decorator(func):
        """Integration registration decorator."""
        for name in toolkitnames:
            loop_map[name] = func

        func.exit_hook = lambda kernel: None  # noqa: ARG005

        def exit_decorator(exit_func):
            """@func.exit is now a decorator

            to register a function to be called on exit
            """
            func.exit_hook = exit_func
            return exit_func

        func.exit = exit_decorator
        return func

    return decorator
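# Editor's sketch (not part of the upstream file): how a hypothetical toolkit
# would use the decorator above. The contract is the one stated in the
# docstring: pump kernel.do_one_iteration() at least every
# kernel._poll_interval seconds, then start the toolkit's own loop.
#
#   @register_integration("mytoolkit")
#   def loop_mytoolkit(kernel):
#       ...  # schedule kernel.do_one_iteration() and start the loop
#
#   @loop_mytoolkit.exit
#   def loop_mytoolkit_exit(kernel):
#       ...  # stop the toolkit's loop when the kernel shuts down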


def _notify_stream_qt(kernel):
    import operator
    from functools import lru_cache

    from IPython.external.qt_for_kernel import QtCore

    try:
        from IPython.external.qt_for_kernel import enum_helper
    except ImportError:

        @lru_cache(None)
        def enum_helper(name):
            return operator.attrgetter(name.rpartition(".")[0])(sys.modules[QtCore.__package__])

    def process_stream_events():
        """fall back to main loop when there's a socket event"""
        # call flush to ensure that the stream doesn't lose events
        # due to our consuming of the edge-triggered FD
        # flush returns the number of events consumed.
        # if there were any, wake it up
        if kernel.shell_stream.flush(limit=1):
            kernel._qt_notifier.setEnabled(False)
            kernel.app.qt_event_loop.quit()

    if not hasattr(kernel, "_qt_notifier"):
        fd = kernel.shell_stream.getsockopt(zmq.FD)
        kernel._qt_notifier = QtCore.QSocketNotifier(
            fd, enum_helper("QtCore.QSocketNotifier.Type").Read, kernel.app.qt_event_loop
        )
        kernel._qt_notifier.activated.connect(process_stream_events)
    else:
        kernel._qt_notifier.setEnabled(True)

    # there may already be unprocessed events waiting.
    # these events will not wake zmq's edge-triggered FD
    # since edge-triggered notification only occurs on new i/o activity.
    # process all the waiting events immediately
    # so we start in a clean state ensuring that any new i/o events will notify.
    # schedule first call on the eventloop as soon as it's running,
    # so we don't block here processing events
    if not hasattr(kernel, "_qt_timer"):
        kernel._qt_timer = QtCore.QTimer(kernel.app)
        kernel._qt_timer.setSingleShot(True)
        kernel._qt_timer.timeout.connect(process_stream_events)
    kernel._qt_timer.start(0)


@register_integration("qt", "qt5", "qt6")
def loop_qt(kernel):
    """Event loop for all supported versions of Qt."""
    _notify_stream_qt(kernel)  # install hook to stop event loop.

    # Start the event loop.
    kernel.app._in_event_loop = True

    # `exec` blocks until there's ZMQ activity.
    el = kernel.app.qt_event_loop  # for brevity
    el.exec() if hasattr(el, "exec") else el.exec_()
    kernel.app._in_event_loop = False


# NOTE: To be removed in version 7
loop_qt5 = loop_qt


# exit and watch are the same for qt 4 and 5
@loop_qt.exit
def loop_qt_exit(kernel):
    kernel.app.exit()


def _loop_wx(app):
    """Inner-loop for running the Wx eventloop

    Pulled from guisupport.start_event_loop in IPython < 5.2,
    since IPython 5.2 only checks `get_ipython().active_eventloop` is defined,
    rather than if the eventloop is actually running.
    """
    app._in_event_loop = True
    app.MainLoop()
    app._in_event_loop = False


@register_integration("wx")
def loop_wx(kernel):
    """Start a kernel with wx event loop support."""

    import wx

    # Wx uses milliseconds
    poll_interval = int(1000 * kernel._poll_interval)

    def wake():
        """wake from wx"""
        if kernel.shell_stream.flush(limit=1):
            kernel.app.ExitMainLoop()
            return

    # We have to put the wx.Timer in a wx.Frame for it to fire properly.
    # We make the Frame hidden when we create it in the main app below.
    class TimerFrame(wx.Frame):  # type:ignore[misc]
        def __init__(self, func):
            wx.Frame.__init__(self, None, -1)
            self.timer = wx.Timer(self)
            # Units for the timer are in milliseconds
            self.timer.Start(poll_interval)
            self.Bind(wx.EVT_TIMER, self.on_timer)
            self.func = func

        def on_timer(self, event):
            self.func()

    # We need a custom wx.App to create our Frame subclass that has the
    # wx.Timer to defer back to the tornado event loop.
    class IPWxApp(wx.App):  # type:ignore[misc]
        def OnInit(self):
            self.frame = TimerFrame(wake)
            self.frame.Show(False)
            return True

    # The redirect=False here makes sure that wx doesn't replace
    # sys.stdout/stderr with its own classes.
    if not (getattr(kernel, "app", None) and isinstance(kernel.app, wx.App)):
        kernel.app = IPWxApp(redirect=False)

    # The import of wx on Linux sets the handler for signal.SIGINT
    # to 0. This is a bug in wx or gtk. We fix by just setting it
    # back to the Python default.
    import signal

    if not callable(signal.getsignal(signal.SIGINT)):
        signal.signal(signal.SIGINT, signal.default_int_handler)

    _loop_wx(kernel.app)


@loop_wx.exit
def loop_wx_exit(kernel):
    """Exit the wx loop."""
    import wx

    wx.Exit()


@register_integration("tk")
def loop_tk(kernel):
    """Start a kernel with the Tk event loop."""

    from tkinter import READABLE, Tk

    app = Tk()
    # Capability detection:
    # per https://docs.python.org/3/library/tkinter.html#file-handlers
    # file handlers are not available on Windows
    if hasattr(app, "createfilehandler"):
        # A basic wrapper for structural similarity with the Windows version
        class BasicAppWrapper:
            def __init__(self, app):
                self.app = app
                self.app.withdraw()

        def process_stream_events(stream, *a, **kw):
            """fall back to main loop when there's a socket event"""
            if stream.flush(limit=1):
                app.tk.deletefilehandler(stream.getsockopt(zmq.FD))
                app.quit()
                app.destroy()
                del kernel.app_wrapper

        # For Tkinter, we create a Tk object and call its withdraw method.
        kernel.app_wrapper = BasicAppWrapper(app)

        notifier = partial(process_stream_events, kernel.shell_stream)
        # seems to be needed for tk
        notifier.__name__ = "notifier"  # type:ignore[attr-defined]
        app.tk.createfilehandler(kernel.shell_stream.getsockopt(zmq.FD), READABLE, notifier)
        # schedule initial call after start
        app.after(0, notifier)

        app.mainloop()

    else:
        import asyncio

        import nest_asyncio

        nest_asyncio.apply()

        doi = kernel.do_one_iteration
        # Tk uses milliseconds
        poll_interval = int(1000 * kernel._poll_interval)

        class TimedAppWrapper:
            def __init__(self, app, func):
                self.app = app
                self.app.withdraw()
                self.func = func

            def on_timer(self):
                loop = asyncio.get_event_loop()
                try:
                    loop.run_until_complete(self.func())
                except Exception:
                    kernel.log.exception("Error in message handler")
                self.app.after(poll_interval, self.on_timer)

            def start(self):
                self.on_timer()  # Call it once to get things going.
                self.app.mainloop()

        kernel.app_wrapper = TimedAppWrapper(app, doi)
        kernel.app_wrapper.start()


@loop_tk.exit
def loop_tk_exit(kernel):
    """Exit the tk loop."""
    try:
        kernel.app_wrapper.app.destroy()
        del kernel.app_wrapper
    except (RuntimeError, AttributeError):
        pass


@register_integration("gtk")
def loop_gtk(kernel):
    """Start the kernel, coordinating with the GTK event loop"""
    from .gui.gtkembed import GTKEmbed

    gtk_kernel = GTKEmbed(kernel)
    gtk_kernel.start()
    kernel._gtk = gtk_kernel


@loop_gtk.exit
def loop_gtk_exit(kernel):
    """Exit the gtk loop."""
    kernel._gtk.stop()


@register_integration("gtk3")
def loop_gtk3(kernel):
    """Start the kernel, coordinating with the GTK event loop"""
    from .gui.gtk3embed import GTKEmbed

    gtk_kernel = GTKEmbed(kernel)
    gtk_kernel.start()
    kernel._gtk = gtk_kernel


@loop_gtk3.exit
def loop_gtk3_exit(kernel):
    """Exit the gtk3 loop."""
    kernel._gtk.stop()


@register_integration("osx")
def loop_cocoa(kernel):
    """Start the kernel, coordinating with the Cocoa CFRunLoop event loop
    via the matplotlib MacOSX backend.
    """
    from ._eventloop_macos import mainloop, stop

    real_excepthook = sys.excepthook

    def handle_int(etype, value, tb):
        """don't let KeyboardInterrupts look like crashes"""
        # wake the eventloop when we get a signal
        stop()
        if etype is KeyboardInterrupt:
            print("KeyboardInterrupt caught in CFRunLoop", file=sys.__stdout__)
        else:
            real_excepthook(etype, value, tb)

    while not kernel.shell.exit_now:
        try:
            # double nested try/except, to properly catch KeyboardInterrupt
            # due to pyzmq Issue #130
            try:
                # don't let interrupts during mainloop invoke crash_handler:
                sys.excepthook = handle_int
                mainloop(kernel._poll_interval)
                if kernel.shell_stream.flush(limit=1):
                    # events to process, return control to kernel
                    return
            except BaseException:
                raise
        except KeyboardInterrupt:
            # Ctrl-C shouldn't crash the kernel
            print("KeyboardInterrupt caught in kernel", file=sys.__stdout__)
        finally:
            # ensure excepthook is restored
            sys.excepthook = real_excepthook


@loop_cocoa.exit
def loop_cocoa_exit(kernel):
    """Exit the cocoa loop."""
    from ._eventloop_macos import stop

    stop()


@register_integration("asyncio")
def loop_asyncio(kernel):
    """Start a kernel with asyncio event loop support."""
    import asyncio

    loop = asyncio.get_event_loop()
    # loop is already running (e.g. tornado 5), nothing left to do
    if loop.is_running():
        return

    if loop.is_closed():
        # main loop is closed, create a new one
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    loop._should_close = False  # type:ignore[attr-defined]

    # pause eventloop when there's an event on a zmq socket
    def process_stream_events(stream):
        """fall back to main loop when there's a socket event"""
        if stream.flush(limit=1):
            loop.stop()

    notifier = partial(process_stream_events, kernel.shell_stream)
    loop.add_reader(kernel.shell_stream.getsockopt(zmq.FD), notifier)
    loop.call_soon(notifier)

    while True:
        error = None
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            continue
        except Exception as e:
            error = e
        if loop._should_close:  # type:ignore[attr-defined]
            loop.close()
        if error is not None:
            raise error
        break


@loop_asyncio.exit
def loop_asyncio_exit(kernel):
    """Exit hook for asyncio"""
    import asyncio

    loop = asyncio.get_event_loop()

    async def close_loop():
        if hasattr(loop, "shutdown_asyncgens"):
            yield loop.shutdown_asyncgens()
        loop._should_close = True  # type:ignore[attr-defined]
        loop.stop()

    if loop.is_running():
        close_loop()

    elif not loop.is_closed():
        loop.run_until_complete(close_loop)  # type:ignore[arg-type]
        loop.close()


def set_qt_api_env_from_gui(gui):
    """
    Sets the QT_API environment variable by trying to import PyQtx or PySidex.

    The user can generically request `qt` or a specific Qt version, e.g. `qt6`.
    For a generic Qt request, we let the mechanism in IPython choose the best
    available version by leaving the `QT_API` environment variable blank.

    For specific versions, we check to see whether the PyQt or PySide
    implementations are present and set `QT_API` accordingly to indicate to
    IPython which version we want. If neither implementation is present, we
    leave the environment variable set so IPython will generate a helpful error
    message.

    Notes
    -----
    - If the environment variable is already set, it will be used unchanged,
      regardless of what the user requested.
    """
    qt_api = os.environ.get("QT_API", None)

    from IPython.external.qt_loaders import (
        QT_API_PYQT5,
        QT_API_PYQT6,
        QT_API_PYSIDE2,
        QT_API_PYSIDE6,
        loaded_api,
    )

    loaded = loaded_api()

    qt_env2gui = {
        QT_API_PYSIDE2: "qt5",
        QT_API_PYQT5: "qt5",
        QT_API_PYSIDE6: "qt6",
        QT_API_PYQT6: "qt6",
    }
    if loaded is not None and gui != "qt" and qt_env2gui[loaded] != gui:
        print(f"Cannot switch Qt versions for this session; you must use {qt_env2gui[loaded]}.")
        return

    if qt_api is not None and gui != "qt":
        if qt_env2gui[qt_api] != gui:
            print(
                f'Request for "{gui}" will be ignored because `QT_API` '
                f'environment variable is set to "{qt_api}"'
            )
            return
    else:
        if gui == "qt5":
            try:
                import PyQt5

                os.environ["QT_API"] = "pyqt5"
            except ImportError:
                try:
                    import PySide2

                    os.environ["QT_API"] = "pyside2"
                except ImportError:
                    os.environ["QT_API"] = "pyqt5"
        elif gui == "qt6":
            try:
                import PyQt6

                os.environ["QT_API"] = "pyqt6"
            except ImportError:
                try:
                    import PySide6

                    os.environ["QT_API"] = "pyside6"
                except ImportError:
                    os.environ["QT_API"] = "pyqt6"
        elif gui == "qt":
            # Don't set QT_API; let IPython logic choose the version.
            if "QT_API" in os.environ:
                del os.environ["QT_API"]
        else:
            print(f'Unrecognized Qt version: {gui}. Should be "qt5", "qt6", or "qt".')
            return

    # Do the actual import now that the environment variable is set to make sure it works.
    try:
        from IPython.external.qt_for_kernel import QtCore, QtGui
    except Exception as e:
        # Clear the environment variable for the next attempt.
        if "QT_API" in os.environ:
            del os.environ["QT_API"]
        print(f"QT_API couldn't be set due to error {e}")
        return


def make_qt_app_for_kernel(gui, kernel):
    """Sets the `QT_API` environment variable if it isn't already set."""
    if hasattr(kernel, "app"):
        # Kernel is already running a Qt event loop, so there's no need to
        # create another app for it.
        return

    set_qt_api_env_from_gui(gui)

    # This import is guaranteed to work now:
    from IPython.external.qt_for_kernel import QtCore
    from IPython.lib.guisupport import get_app_qt4

    kernel.app = get_app_qt4([" "])
    kernel.app.qt_event_loop = QtCore.QEventLoop(kernel.app)


def enable_gui(gui, kernel=None):
    """Enable integration with a given GUI"""
    if gui not in loop_map:
        e = f"Invalid GUI request {gui!r}, valid ones are:{loop_map.keys()}"
        raise ValueError(e)
    if kernel is None:
        if Application.initialized():
            kernel = getattr(Application.instance(), "kernel", None)
        if kernel is None:
            msg = (
                "You didn't specify a kernel,"
                " and no IPython Application with a kernel appears to be running."
            )
            raise RuntimeError(msg)
    if gui is None:
        # User wants to turn off integration; clear any evidence if Qt was the last one.
        if hasattr(kernel, "app"):
            delattr(kernel, "app")
    else:
        if gui.startswith("qt"):
            # Prepare the kernel here so any exceptions are displayed in the client.
            make_qt_app_for_kernel(gui, kernel)

    loop = loop_map[gui]
    if (
        loop and kernel.eventloop is not None and kernel.eventloop is not loop  # type:ignore[unreachable]
    ):
        msg = "Cannot activate multiple GUI eventloops"  # type:ignore[unreachable]
        raise RuntimeError(msg)
    kernel.eventloop = loop
    # We set `eventloop`; the function the user chose is executed in `Kernel.enter_eventloop`, thus
    # any exceptions raised during the event loop will not be shown in the client.
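# Editor's example (not part of the upstream file): enable_gui() is the
# function behind the %gui magic, so inside a running kernel these are
# equivalent:
#
#   %gui qt
#
#   from ipykernel.eventloops import enable_gui
#   enable_gui("qt")    # integrate the Qt event loop
#   enable_gui(None)    # turn GUI integration off again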
15
venv/lib/python3.10/site-packages/ipykernel/gui/__init__.py
Normal file
@@ -0,0 +1,15 @@
"""GUI support for the IPython ZeroMQ kernel.

This package contains the various toolkit-dependent utilities we use to enable
coordination between the IPython kernel and the event loops of the various GUI
toolkits.
"""

# -----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team.
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file LICENSE, distributed as part of this
# software.
# -----------------------------------------------------------------------------
Binary file not shown.
Binary file not shown.
Binary file not shown.
98
venv/lib/python3.10/site-packages/ipykernel/gui/gtk3embed.py
Normal file
@@ -0,0 +1,98 @@
"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support.
"""
# -----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE, distributed as part of this software.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
# stdlib
import sys
import warnings

warnings.warn(
    "The Gtk3 event loop for ipykernel is deprecated", category=DeprecationWarning, stacklevel=2
)

# Third-party
import gi

gi.require_version("Gdk", "3.0")
gi.require_version("Gtk", "3.0")
from gi.repository import GObject, Gtk

# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------


class GTKEmbed:
    """A class to embed a kernel into the GTK main event loop."""

    def __init__(self, kernel):
        """Initialize the embed."""
        self.kernel = kernel
        # These two will later store the real gtk functions when we hijack them
        self.gtk_main = None
        self.gtk_main_quit = None

    def start(self):
        """Starts the GTK main event loop and sets our kernel startup routine."""
        # Register our function to initiate the kernel and start gtk
        GObject.idle_add(self._wire_kernel)
        Gtk.main()

    def _wire_kernel(self):
        """Initializes the kernel inside GTK.

        This is meant to run only once at startup, so it does its job and
        returns False to ensure it doesn't get run again by GTK.
        """
        self.gtk_main, self.gtk_main_quit = self._hijack_gtk()
        GObject.timeout_add(int(1000 * self.kernel._poll_interval), self.iterate_kernel)
        return False

    def iterate_kernel(self):
        """Run one iteration of the kernel and return True.

        GTK timer functions must return True to be called again, so we make the
        call to :meth:`do_one_iteration` and then return True for GTK.
        """
        self.kernel.do_one_iteration()
        return True

    def stop(self):
        """Stop the embed."""
        # FIXME: this one isn't getting called because we have no reliable
        # kernel shutdown. We need to fix that: once the kernel has a
        # shutdown mechanism, it can call this.
        if self.gtk_main_quit:
            self.gtk_main_quit()
        sys.exit()

    def _hijack_gtk(self):
        """Hijack a few key functions in GTK for IPython integration.

        Modifies pyGTK's main and main_quit with a dummy so user code does not
        block IPython. This allows us to use %run to run arbitrary pygtk
        scripts from a long-lived IPython session, and when they attempt to
        start or stop

        Returns
        -------
        The original functions that have been hijacked:
        - Gtk.main
        - Gtk.main_quit
        """

        def dummy(*args, **kw):
            """No-op."""

        # save and trap main and main_quit from gtk
        orig_main, Gtk.main = Gtk.main, dummy
        orig_main_quit, Gtk.main_quit = Gtk.main_quit, dummy
        return orig_main, orig_main_quit
95
venv/lib/python3.10/site-packages/ipykernel/gui/gtkembed.py
Normal file
@@ -0,0 +1,95 @@
"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support.
"""
# -----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE, distributed as part of this software.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
# stdlib
import sys
import warnings

warnings.warn(
    "The Gtk3 event loop for ipykernel is deprecated", category=DeprecationWarning, stacklevel=2
)

# Third-party
import gobject
import gtk

# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------


class GTKEmbed:
    """A class to embed a kernel into the GTK main event loop."""

    def __init__(self, kernel):
        """Initialize the embed."""
        self.kernel = kernel
        # These two will later store the real gtk functions when we hijack them
        self.gtk_main = None
        self.gtk_main_quit = None

    def start(self):
        """Starts the GTK main event loop and sets our kernel startup routine."""
        # Register our function to initiate the kernel and start gtk
        gobject.idle_add(self._wire_kernel)
        gtk.main()

    def _wire_kernel(self):
        """Initializes the kernel inside GTK.

        This is meant to run only once at startup, so it does its job and
        returns False to ensure it doesn't get run again by GTK.
        """
        self.gtk_main, self.gtk_main_quit = self._hijack_gtk()
        gobject.timeout_add(int(1000 * self.kernel._poll_interval), self.iterate_kernel)
        return False

    def iterate_kernel(self):
        """Run one iteration of the kernel and return True.

        GTK timer functions must return True to be called again, so we make the
        call to :meth:`do_one_iteration` and then return True for GTK.
        """
        self.kernel.do_one_iteration()
        return True

    def stop(self):
        """Stop the embed."""
        # FIXME: this one isn't getting called because we have no reliable
        # kernel shutdown. We need to fix that: once the kernel has a
        # shutdown mechanism, it can call this.
        if self.gtk_main_quit:
            self.gtk_main_quit()
        sys.exit()

    def _hijack_gtk(self):
        """Hijack a few key functions in GTK for IPython integration.

        Modifies pyGTK's main and main_quit with a dummy so user code does not
        block IPython. This allows us to use %run to run arbitrary pygtk
        scripts from a long-lived IPython session, and when they attempt to
        start or stop

        Returns
        -------
        The original functions that have been hijacked:
        - gtk.main
        - gtk.main_quit
        """

        def dummy(*args, **kw):
            """No-op."""

        # save and trap main and main_quit from gtk
        orig_main, gtk.main = gtk.main, dummy
        orig_main_quit, gtk.main_quit = gtk.main_quit, dummy
        return orig_main, orig_main_quit
125
venv/lib/python3.10/site-packages/ipykernel/heartbeat.py
Normal file
@@ -0,0 +1,125 @@
"""The client and server for a basic ping-pong style heartbeat.
"""

# -----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE, distributed as part of this software.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------

import errno
import socket
from pathlib import Path
from threading import Thread

import zmq
from jupyter_client.localinterfaces import localhost

# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------


class Heartbeat(Thread):
    """A simple ping-pong style heartbeat that runs in a thread."""

    def __init__(self, context, addr=None):
        """Initialize the heartbeat thread."""
        if addr is None:
            addr = ("tcp", localhost(), 0)
        Thread.__init__(self, name="Heartbeat")
        self.context = context
        self.transport, self.ip, self.port = addr
        self.original_port = self.port
        if self.original_port == 0:
            self.pick_port()
        self.addr = (self.ip, self.port)
        self.daemon = True
        self.pydev_do_not_trace = True
        self.is_pydev_daemon_thread = True
        self.name = "Heartbeat"

    def pick_port(self):
        """Pick a port for the heartbeat."""
        if self.transport == "tcp":
            s = socket.socket()
            # '*' means all interfaces to 0MQ, which is '' to socket.socket
            s.bind(("" if self.ip == "*" else self.ip, 0))
            self.port = s.getsockname()[1]
            s.close()
        elif self.transport == "ipc":
            self.port = 1
            while Path(f"{self.ip}-{self.port}").exists():
                self.port = self.port + 1
        else:
            raise ValueError("Unrecognized zmq transport: %s" % self.transport)
        return self.port

    def _try_bind_socket(self):
        c = ":" if self.transport == "tcp" else "-"
        return self.socket.bind(f"{self.transport}://{self.ip}" + c + str(self.port))

    def _bind_socket(self):
        try:
            win_in_use = errno.WSAEADDRINUSE  # type:ignore[attr-defined]
        except AttributeError:
            win_in_use = None

        # Try up to 100 times to bind a port when in conflict to avoid
        # infinite attempts in bad setups
        max_attempts = 1 if self.original_port else 100
        for attempt in range(max_attempts):
            try:
                self._try_bind_socket()
            except zmq.ZMQError as ze:
                if attempt == max_attempts - 1:
                    raise
                # Raise if we have any error not related to socket binding
                if ze.errno != errno.EADDRINUSE and ze.errno != win_in_use:
                    raise
                if self.original_port == 0:
                    self.pick_port()
                else:
                    raise
            else:
                return

    def run(self):
        """Run the heartbeat thread."""
        self.name = "Heartbeat"
        self.socket = self.context.socket(zmq.ROUTER)
        self.socket.linger = 1000
        try:
            self._bind_socket()
        except Exception:
            self.socket.close()
            raise

        while True:
            try:
                zmq.device(zmq.QUEUE, self.socket, self.socket)
            except zmq.ZMQError as e:
                if e.errno == errno.EINTR:
                    # signal interrupt, resume heartbeat
                    continue
                if e.errno == zmq.ETERM:
                    # context terminated, close socket and exit
                    try:
                        self.socket.close()
                    except zmq.ZMQError:
                        # suppress further errors during cleanup
                        # this shouldn't happen, though
                        pass
                    break
                if e.errno == zmq.ENOTSOCK:
                    # socket closed elsewhere, exit
                    break
                raise
            else:
                break
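# Editor's example (not part of the upstream file): because run() echoes every
# message through a queue device, any REQ socket can ping the heartbeat.
# Minimal sketch:
#
#   import zmq
#   from ipykernel.heartbeat import Heartbeat
#
#   ctx = zmq.Context()
#   hb = Heartbeat(ctx)                  # defaults pick a free TCP port
#   hb.start()
#   req = ctx.socket(zmq.REQ)
#   req.connect("tcp://%s:%s" % hb.addr)
#   req.send(b"ping")
#   assert req.recv() == b"ping"         # echoed while the kernel is alive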
@@ -0,0 +1,4 @@
from .blocking import BlockingInProcessKernelClient
from .channels import InProcessChannel, InProcessHBChannel
from .client import InProcessKernelClient
from .manager import InProcessKernelManager
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,108 @@
""" Implements a fully blocking kernel client.

Useful for test suites and blocking terminal interfaces.
"""
import sys

# -----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE, distributed as part of this software.
# -----------------------------------------------------------------------------
from queue import Empty, Queue

# IPython imports
from traitlets import Type

# Local imports
from .channels import InProcessChannel
from .client import InProcessKernelClient


class BlockingInProcessChannel(InProcessChannel):
    """A blocking in-process channel."""

    def __init__(self, *args, **kwds):
        """Initialize the channel."""
        super().__init__(*args, **kwds)
        self._in_queue: Queue[object] = Queue()

    def call_handlers(self, msg):
        """Call the handlers for a message."""
        self._in_queue.put(msg)

    def get_msg(self, block=True, timeout=None):
        """Gets a message if there is one that is ready."""
        if timeout is None:
            # Queue.get(timeout=None) has stupid uninterruptible
            # behavior, so wait for a week instead
            timeout = 604800
        return self._in_queue.get(block, timeout)

    def get_msgs(self):
        """Get all messages that are currently ready."""
        msgs = []
        while True:
            try:
                msgs.append(self.get_msg(block=False))
            except Empty:
                break
        return msgs

    def msg_ready(self):
        """Is there a message that has been received?"""
        return not self._in_queue.empty()


class BlockingInProcessStdInChannel(BlockingInProcessChannel):
    """A blocking in-process stdin channel."""

    def call_handlers(self, msg):
        """Overridden for the in-process channel.

        This method simply calls raw_input directly.
        """
        msg_type = msg["header"]["msg_type"]
        if msg_type == "input_request":
            _raw_input = self.client.kernel._sys_raw_input
            prompt = msg["content"]["prompt"]
            print(prompt, end="", file=sys.__stdout__)
            sys.__stdout__.flush()
            self.client.input(_raw_input())


class BlockingInProcessKernelClient(InProcessKernelClient):
    """A blocking in-process kernel client."""

    # The classes to use for the various channels.
    shell_channel_class = Type(BlockingInProcessChannel)  # type:ignore[arg-type]
    iopub_channel_class = Type(BlockingInProcessChannel)  # type:ignore[arg-type]
    stdin_channel_class = Type(BlockingInProcessStdInChannel)  # type:ignore[arg-type]

    def wait_for_ready(self):
        """Wait for kernel info reply on shell channel."""
        while True:
            self.kernel_info()
            try:
                msg = self.shell_channel.get_msg(block=True, timeout=1)
            except Empty:
                pass
            else:
                if msg["msg_type"] == "kernel_info_reply":
                    # Checking that IOPub is connected. If it is not connected, start over.
                    try:
                        self.iopub_channel.get_msg(block=True, timeout=0.2)
                    except Empty:
                        pass
                    else:
                        self._handle_kernel_info_reply(msg)
                        break

        # Flush IOPub channel
        while True:
            try:
                msg = self.iopub_channel.get_msg(block=True, timeout=0.2)
                print(msg["msg_type"])
            except Empty:
                break
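# Editor's example (not part of the upstream file): the blocking client is
# normally obtained from an InProcessKernelManager (defined later in this
# commit). A minimal round trip:
#
#   from ipykernel.inprocess.manager import InProcessKernelManager
#
#   km = InProcessKernelManager()
#   km.start_kernel()
#   kc = km.client()                 # a BlockingInProcessKernelClient
#   kc.start_channels()
#   kc.wait_for_ready()
#   msg_id = kc.execute("1 + 1")
#   reply = kc.get_shell_msg()       # blocks until the execute_reply arrives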
@@ -0,0 +1,109 @@
"""A kernel client for in-process kernels."""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

from typing import List

from jupyter_client.channelsabc import HBChannelABC

# -----------------------------------------------------------------------------
# Channel classes
# -----------------------------------------------------------------------------


class InProcessChannel:
    """Base class for in-process channels."""

    proxy_methods: List[object] = []

    def __init__(self, client=None):
        """Initialize the channel."""
        super().__init__()
        self.client = client
        self._is_alive = False

    def is_alive(self):
        """Test if the channel is alive."""
        return self._is_alive

    def start(self):
        """Start the channel."""
        self._is_alive = True

    def stop(self):
        """Stop the channel."""
        self._is_alive = False

    def call_handlers(self, msg):
        """This method is called in the main thread when a message arrives.

        Subclasses should override this method to handle incoming messages.
        """
        msg = "call_handlers must be defined in a subclass."
        raise NotImplementedError(msg)

    def flush(self, timeout=1.0):
        """Flush the channel."""

    def call_handlers_later(self, *args, **kwds):
        """Call the message handlers later.

        The default implementation just calls the handlers immediately, but this
        method exists so that GUI toolkits can defer calling the handlers until
        after the event loop has run, as expected by GUI frontends.
        """
        self.call_handlers(*args, **kwds)

    def process_events(self):
        """Process any pending GUI events.

        This method will never be called from a frontend without an event
        loop (e.g., a terminal frontend).
        """
        raise NotImplementedError


class InProcessHBChannel:
    """A dummy heartbeat channel interface for in-process kernels.

    Normally we use the heartbeat to check that the kernel process is alive.
    When the kernel is in-process, that doesn't make sense, but clients still
    expect this interface.
    """

    time_to_dead = 3.0

    def __init__(self, client=None):
        """Initialize the channel."""
        super().__init__()
        self.client = client
        self._is_alive = False
        self._pause = True

    def is_alive(self):
        """Test if the channel is alive."""
        return self._is_alive

    def start(self):
        """Start the channel."""
        self._is_alive = True

    def stop(self):
        """Stop the channel."""
        self._is_alive = False

    def pause(self):
        """Pause the channel."""
        self._pause = True

    def unpause(self):
        """Unpause the channel."""
        self._pause = False

    def is_beating(self):
        """Test if the channel is beating."""
        return not self._pause


HBChannelABC.register(InProcessHBChannel)
223
venv/lib/python3.10/site-packages/ipykernel/inprocess/client.py
Normal file
@@ -0,0 +1,223 @@
"""A client for in-process kernels."""

# -----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE, distributed as part of this software.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------

import asyncio

from jupyter_client.client import KernelClient
from jupyter_client.clientabc import KernelClientABC
from jupyter_core.utils import run_sync

# IPython imports
from traitlets import Instance, Type, default

# Local imports
from .channels import InProcessChannel, InProcessHBChannel

# -----------------------------------------------------------------------------
# Main kernel Client class
# -----------------------------------------------------------------------------


class InProcessKernelClient(KernelClient):
    """A client for an in-process kernel.

    This class implements the interface of
    `jupyter_client.clientabc.KernelClientABC` and allows
    (asynchronous) frontends to be used seamlessly with an in-process kernel.

    See `jupyter_client.client.KernelClient` for docstrings.
    """

    # The classes to use for the various channels.
    shell_channel_class = Type(InProcessChannel)  # type:ignore[arg-type]
    iopub_channel_class = Type(InProcessChannel)  # type:ignore[arg-type]
    stdin_channel_class = Type(InProcessChannel)  # type:ignore[arg-type]
    control_channel_class = Type(InProcessChannel)  # type:ignore[arg-type]
    hb_channel_class = Type(InProcessHBChannel)  # type:ignore[arg-type]

    kernel = Instance("ipykernel.inprocess.ipkernel.InProcessKernel", allow_none=True)

    # --------------------------------------------------------------------------
    # Channel management methods
    # --------------------------------------------------------------------------

    @default("blocking_class")
    def _default_blocking_class(self):
        from .blocking import BlockingInProcessKernelClient

        return BlockingInProcessKernelClient

    def get_connection_info(self):
        """Get the connection info for the client."""
        d = super().get_connection_info()
        d["kernel"] = self.kernel  # type:ignore[assignment]
        return d

    def start_channels(self, *args, **kwargs):
        """Start the channels on the client."""
        super().start_channels()
        if self.kernel:
            self.kernel.frontends.append(self)

    @property
    def shell_channel(self):
        if self._shell_channel is None:
            self._shell_channel = self.shell_channel_class(self)  # type:ignore[abstract,call-arg]
        return self._shell_channel

    @property
    def iopub_channel(self):
        if self._iopub_channel is None:
            self._iopub_channel = self.iopub_channel_class(self)  # type:ignore[abstract,call-arg]
        return self._iopub_channel

    @property
    def stdin_channel(self):
        if self._stdin_channel is None:
            self._stdin_channel = self.stdin_channel_class(self)  # type:ignore[abstract,call-arg]
        return self._stdin_channel

    @property
    def control_channel(self):
        if self._control_channel is None:
            self._control_channel = self.control_channel_class(self)  # type:ignore[abstract,call-arg]
        return self._control_channel

    @property
    def hb_channel(self):
        if self._hb_channel is None:
            self._hb_channel = self.hb_channel_class(self)  # type:ignore[abstract,call-arg]
        return self._hb_channel

    # Methods for sending specific messages
    # -------------------------------------

    def execute(
        self, code, silent=False, store_history=True, user_expressions=None, allow_stdin=None
    ):
        """Execute code on the client."""
        if allow_stdin is None:
            allow_stdin = self.allow_stdin
        content = dict(
            code=code,
            silent=silent,
            store_history=store_history,
            user_expressions=user_expressions or {},
            allow_stdin=allow_stdin,
        )
        msg = self.session.msg("execute_request", content)
        self._dispatch_to_kernel(msg)
        return msg["header"]["msg_id"]

    def complete(self, code, cursor_pos=None):
        """Get code completion."""
        if cursor_pos is None:
            cursor_pos = len(code)
        content = dict(code=code, cursor_pos=cursor_pos)
        msg = self.session.msg("complete_request", content)
        self._dispatch_to_kernel(msg)
        return msg["header"]["msg_id"]

    def inspect(self, code, cursor_pos=None, detail_level=0):
        """Get code inspection."""
        if cursor_pos is None:
            cursor_pos = len(code)
        content = dict(
            code=code,
            cursor_pos=cursor_pos,
            detail_level=detail_level,
        )
        msg = self.session.msg("inspect_request", content)
        self._dispatch_to_kernel(msg)
        return msg["header"]["msg_id"]

    def history(self, raw=True, output=False, hist_access_type="range", **kwds):
        """Get code history."""
        content = dict(raw=raw, output=output, hist_access_type=hist_access_type, **kwds)
        msg = self.session.msg("history_request", content)
        self._dispatch_to_kernel(msg)
        return msg["header"]["msg_id"]

    def shutdown(self, restart=False):
        """Handle shutdown."""
        # FIXME: What to do here?
        msg = "Cannot shutdown in-process kernel"
        raise NotImplementedError(msg)

    def kernel_info(self):
        """Request kernel info."""
        msg = self.session.msg("kernel_info_request")
        self._dispatch_to_kernel(msg)
        return msg["header"]["msg_id"]

    def comm_info(self, target_name=None):
        """Request a dictionary of valid comms and their targets."""
        content = {} if target_name is None else dict(target_name=target_name)
        msg = self.session.msg("comm_info_request", content)
        self._dispatch_to_kernel(msg)
        return msg["header"]["msg_id"]

    def input(self, string):
        """Handle kernel input."""
        if self.kernel is None:
            msg = "Cannot send input reply. No kernel exists."
            raise RuntimeError(msg)
        self.kernel.raw_input_str = string

    def is_complete(self, code):
        """Handle an is_complete request."""
        msg = self.session.msg("is_complete_request", {"code": code})
        self._dispatch_to_kernel(msg)
        return msg["header"]["msg_id"]

    def _dispatch_to_kernel(self, msg):
        """Send a message to the kernel and handle a reply."""
        kernel = self.kernel
        if kernel is None:
            msg = "Cannot send request. No kernel exists."
            raise RuntimeError(msg)

        stream = kernel.shell_stream
        self.session.send(stream, msg)
        msg_parts = stream.recv_multipart()
        if run_sync is not None:
            dispatch_shell = run_sync(kernel.dispatch_shell)
            dispatch_shell(msg_parts)
        else:
            loop = asyncio.get_event_loop()  # type:ignore[unreachable]
            loop.run_until_complete(kernel.dispatch_shell(msg_parts))
        idents, reply_msg = self.session.recv(stream, copy=False)
        self.shell_channel.call_handlers_later(reply_msg)

    def get_shell_msg(self, block=True, timeout=None):
        """Get a shell message."""
        return self.shell_channel.get_msg(block, timeout)

    def get_iopub_msg(self, block=True, timeout=None):
        """Get an iopub message."""
        return self.iopub_channel.get_msg(block, timeout)

    def get_stdin_msg(self, block=True, timeout=None):
        """Get a stdin message."""
        return self.stdin_channel.get_msg(block, timeout)

    def get_control_msg(self, block=True, timeout=None):
        """Get a control message."""
        return self.control_channel.get_msg(block, timeout)


# -----------------------------------------------------------------------------
# ABC Registration
# -----------------------------------------------------------------------------

KernelClientABC.register(InProcessKernelClient)
@@ -0,0 +1,8 @@
"""Shared constants.
"""

# Because inprocess communication is not networked, we can use a common Session
# key everywhere. This is not just the empty bytestring to avoid tripping
# certain security checks in the rest of Jupyter that assume that empty keys
# are insecure.
INPROCESS_KEY = b"inprocess"
@@ -0,0 +1,203 @@
"""An in-process kernel"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import logging
import sys
from contextlib import contextmanager

from IPython.core.interactiveshell import InteractiveShellABC
from traitlets import Any, Enum, Instance, List, Type, default

from ipykernel.ipkernel import IPythonKernel
from ipykernel.jsonutil import json_clean
from ipykernel.zmqshell import ZMQInteractiveShell

from ..iostream import BackgroundSocket, IOPubThread, OutStream
from .constants import INPROCESS_KEY
from .socket import DummySocket

# -----------------------------------------------------------------------------
# Main kernel class
# -----------------------------------------------------------------------------


class InProcessKernel(IPythonKernel):
    """An in-process kernel."""

    # -------------------------------------------------------------------------
    # InProcessKernel interface
    # -------------------------------------------------------------------------

    # The frontends connected to this kernel.
    frontends = List(Instance("ipykernel.inprocess.client.InProcessKernelClient", allow_none=True))

    # The GUI environment that the kernel is running under. This need not be
    # specified for the normal operation of the kernel, but is required for
    # IPython's GUI support (including pylab). The default is 'inline' because
    # it is safe under all GUI toolkits.
    gui = Enum(("tk", "gtk", "wx", "qt", "qt4", "inline"), default_value="inline")

    raw_input_str = Any()
    stdout = Any()
    stderr = Any()

    # -------------------------------------------------------------------------
    # Kernel interface
    # -------------------------------------------------------------------------

    shell_class = Type(allow_none=True)  # type:ignore[assignment]
    _underlying_iopub_socket = Instance(DummySocket, ())
    iopub_thread: IOPubThread = Instance(IOPubThread)  # type:ignore[assignment]

    shell_stream = Instance(DummySocket, ())  # type:ignore[arg-type]

    @default("iopub_thread")
    def _default_iopub_thread(self):
        thread = IOPubThread(self._underlying_iopub_socket)
        thread.start()
        return thread

    iopub_socket: BackgroundSocket = Instance(BackgroundSocket)  # type:ignore[assignment]

    @default("iopub_socket")
    def _default_iopub_socket(self):
        return self.iopub_thread.background_socket

    stdin_socket = Instance(DummySocket, ())  # type:ignore[assignment]

    def __init__(self, **traits):
        """Initialize the kernel."""
        super().__init__(**traits)

        self._underlying_iopub_socket.observe(self._io_dispatch, names=["message_sent"])
        if self.shell:
            self.shell.kernel = self

    async def execute_request(self, stream, ident, parent):
        """Override for temporary IO redirection."""
        with self._redirected_io():
            await super().execute_request(stream, ident, parent)

    def start(self):
        """Override registration of dispatchers for streams."""
        if self.shell:
            self.shell.exit_now = False

    def _abort_queues(self):
        """The in-process kernel doesn't abort requests."""

    async def _flush_control_queue(self):
        """No need to flush control queues for in-process"""

    def _input_request(self, prompt, ident, parent, password=False):
        # Flush output before making the request.
        self.raw_input_str = None
        sys.stderr.flush()
        sys.stdout.flush()

        # Send the input request.
        content = json_clean(dict(prompt=prompt, password=password))
        assert self.session is not None
        msg = self.session.msg("input_request", content, parent)
        for frontend in self.frontends:
            assert frontend is not None
            if frontend.session.session == parent["header"]["session"]:
                frontend.stdin_channel.call_handlers(msg)
                break
        else:
            logging.error("No frontend found for raw_input request")
            return ""

        # Await a response.
        while self.raw_input_str is None:
            frontend.stdin_channel.process_events()
        return self.raw_input_str  # type:ignore[unreachable]

    # -------------------------------------------------------------------------
    # Protected interface
    # -------------------------------------------------------------------------

    @contextmanager
    def _redirected_io(self):
        """Temporarily redirect IO to the kernel."""
        sys_stdout, sys_stderr = sys.stdout, sys.stderr
        try:
            sys.stdout, sys.stderr = self.stdout, self.stderr
            yield
        finally:
            sys.stdout, sys.stderr = sys_stdout, sys_stderr

    # ------ Trait change handlers --------------------------------------------

    def _io_dispatch(self, change):
        """Called when a message is sent to the IO socket."""
        assert self.iopub_socket.io_thread is not None
        assert self.session is not None
        ident, msg = self.session.recv(self.iopub_socket.io_thread.socket, copy=False)
        for frontend in self.frontends:
            assert frontend is not None
            frontend.iopub_channel.call_handlers(msg)

    # ------ Trait initializers -----------------------------------------------

    @default("log")
    def _default_log(self):
        return logging.getLogger(__name__)

    @default("session")
    def _default_session(self):
        from jupyter_client.session import Session

        return Session(parent=self, key=INPROCESS_KEY)

    @default("shell_class")
    def _default_shell_class(self):
        return InProcessInteractiveShell

    @default("stdout")
    def _default_stdout(self):
        return OutStream(self.session, self.iopub_thread, "stdout", watchfd=False)

    @default("stderr")
    def _default_stderr(self):
        return OutStream(self.session, self.iopub_thread, "stderr", watchfd=False)


# -----------------------------------------------------------------------------
# Interactive shell subclass
# -----------------------------------------------------------------------------


class InProcessInteractiveShell(ZMQInteractiveShell):
    """An in-process interactive shell."""

    kernel: InProcessKernel = Instance(
        "ipykernel.inprocess.ipkernel.InProcessKernel", allow_none=True
    )  # type:ignore[assignment]

    # -------------------------------------------------------------------------
    # InteractiveShell interface
    # -------------------------------------------------------------------------

    def enable_gui(self, gui=None):
        """Enable GUI integration for the kernel."""
        if not gui:
            gui = self.kernel.gui
        self.active_eventloop = gui

    def enable_matplotlib(self, gui=None):
        """Enable matplotlib integration for the kernel."""
        if not gui:
            gui = self.kernel.gui
        return super().enable_matplotlib(gui)

    def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
        """Activate pylab support at runtime."""
        if not gui:
            gui = self.kernel.gui
        return super().enable_pylab(gui, import_all, welcome_message)


InteractiveShellABC.register(InProcessInteractiveShell)
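# Editor's note (not part of the upstream file): InProcessKernel swaps its own
# OutStream objects in as sys.stdout/sys.stderr only for the duration of each
# execute_request (see _redirected_io above), so prints made between requests
# still reach the real terminal rather than the connected frontends.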
@@ -0,0 +1,92 @@
"""A kernel manager for in-process kernels."""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

from jupyter_client.manager import KernelManager
from jupyter_client.managerabc import KernelManagerABC
from jupyter_client.session import Session
from traitlets import DottedObjectName, Instance, default

from .constants import INPROCESS_KEY


class InProcessKernelManager(KernelManager):
    """A manager for an in-process kernel.

    This class implements the interface of
    `jupyter_client.kernelmanagerabc.KernelManagerABC` and allows
    (asynchronous) frontends to be used seamlessly with an in-process kernel.

    See `jupyter_client.kernelmanager.KernelManager` for docstrings.
    """

    # The kernel process with which the KernelManager is communicating.
    kernel = Instance("ipykernel.inprocess.ipkernel.InProcessKernel", allow_none=True)
    # the client class for KM.client() shortcut
    client_class = DottedObjectName("ipykernel.inprocess.BlockingInProcessKernelClient")

    @default("blocking_class")
    def _default_blocking_class(self):
        from .blocking import BlockingInProcessKernelClient

        return BlockingInProcessKernelClient

    @default("session")
    def _default_session(self):
        # don't sign in-process messages
        return Session(key=INPROCESS_KEY, parent=self)

    # --------------------------------------------------------------------------
    # Kernel management methods
    # --------------------------------------------------------------------------

    def start_kernel(self, **kwds):
        """Start the kernel."""
        from ipykernel.inprocess.ipkernel import InProcessKernel

        self.kernel = InProcessKernel(parent=self, session=self.session)

    def shutdown_kernel(self):
        """Shutdown the kernel."""
        if self.kernel:
            self.kernel.iopub_thread.stop()
        self._kill_kernel()

    def restart_kernel(self, now=False, **kwds):
        """Restart the kernel."""
        self.shutdown_kernel()
        self.start_kernel(**kwds)

    @property
    def has_kernel(self):
        return self.kernel is not None

    def _kill_kernel(self):
        self.kernel = None

    def interrupt_kernel(self):
        """Interrupt the kernel."""
        msg = "Cannot interrupt in-process kernel."
        raise NotImplementedError(msg)

    def signal_kernel(self, signum):
        """Send a signal to the kernel."""
        msg = "Cannot signal in-process kernel."
        raise NotImplementedError(msg)

    def is_alive(self):
        """Test if the kernel is alive."""
        return self.kernel is not None

    def client(self, **kwargs):
        """Get a client for the kernel."""
        kwargs["kernel"] = self.kernel
        return super().client(**kwargs)


# -----------------------------------------------------------------------------
# ABC Registration
# -----------------------------------------------------------------------------

KernelManagerABC.register(InProcessKernelManager)
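
For orientation, a hedged usage sketch of the manager above, driving an in-process kernel over the blocking client (exact channel setup can vary between ipykernel versions):

from ipykernel.inprocess.manager import InProcessKernelManager

km = InProcessKernelManager()
km.start_kernel()
kc = km.client()           # a BlockingInProcessKernelClient, per client_class
kc.start_channels()
kc.execute("x = 1 + 1")    # runs in this very process, no subprocess spawned
km.shutdown_kernel()
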
@@ -0,0 +1,41 @@
"""Defines a dummy socket implementing (part of) the zmq.Socket interface."""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

from queue import Queue

import zmq
from traitlets import HasTraits, Instance, Int

# -----------------------------------------------------------------------------
# Dummy socket class
# -----------------------------------------------------------------------------


class DummySocket(HasTraits):
    """A dummy socket implementing (part of) the zmq.Socket interface."""

    queue = Instance(Queue, ())
    message_sent = Int(0)  # Should be an Event
    context = Instance(zmq.Context)

    def _context_default(self):
        return zmq.Context()

    # -------------------------------------------------------------------------
    # Socket interface
    # -------------------------------------------------------------------------

    def recv_multipart(self, flags=0, copy=True, track=False):
        """Recv a multipart message."""
        return self.queue.get_nowait()

    def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
        """Send a multipart message."""
        msg_parts = list(map(zmq.Message, msg_parts))
        self.queue.put_nowait(msg_parts)
        self.message_sent += 1

    def flush(self, timeout=1.0):
        """no-op to comply with stream API"""
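
A quick round trip through DummySocket (a sketch; sends are queued in memory, nothing touches the network):

from ipykernel.inprocess.socket import DummySocket

sock = DummySocket()
sock.send_multipart([b"stream", b"hello"])  # queued locally
parts = sock.recv_multipart()               # zmq.Frame objects
print([bytes(p) for p in parts])            # [b'stream', b'hello']
print(sock.message_sent)                    # 1
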
769
venv/lib/python3.10/site-packages/ipykernel/iostream.py
Normal file
@@ -0,0 +1,769 @@
"""Wrappers for forwarding stdout/stderr over zmq"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import asyncio
|
||||
import atexit
|
||||
import contextvars
|
||||
import io
|
||||
import os
|
||||
import sys
|
||||
import threading
|
||||
import traceback
|
||||
import warnings
|
||||
from binascii import b2a_hex
|
||||
from collections import defaultdict, deque
|
||||
from io import StringIO, TextIOBase
|
||||
from threading import local
|
||||
from typing import Any, Callable, Deque, Dict, Optional
|
||||
|
||||
import zmq
|
||||
from jupyter_client.session import extract_header
|
||||
from tornado.ioloop import IOLoop
|
||||
from zmq.eventloop.zmqstream import ZMQStream
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Globals
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
MASTER = 0
|
||||
CHILD = 1
|
||||
|
||||
PIPE_BUFFER_SIZE = 1000
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# IO classes
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
|
||||
class IOPubThread:
|
||||
"""An object for sending IOPub messages in a background thread
|
||||
|
||||
Prevents a blocking main thread from delaying output from threads.
|
||||
|
||||
IOPubThread(pub_socket).background_socket is a Socket-API-providing object
|
||||
whose IO is always run in a thread.
|
||||
"""
|
||||
|
||||
def __init__(self, socket, pipe=False):
|
||||
"""Create IOPub thread
|
||||
|
||||
Parameters
|
||||
----------
|
||||
socket : zmq.PUB Socket
|
||||
the socket on which messages will be sent.
|
||||
pipe : bool
|
||||
Whether this process should listen for IOPub messages
|
||||
piped from subprocesses.
|
||||
"""
|
||||
self.socket = socket
|
||||
self._stopped = False
|
||||
self.background_socket = BackgroundSocket(self)
|
||||
self._master_pid = os.getpid()
|
||||
self._pipe_flag = pipe
|
||||
self.io_loop = IOLoop(make_current=False)
|
||||
if pipe:
|
||||
self._setup_pipe_in()
|
||||
self._local = threading.local()
|
||||
self._events: Deque[Callable[..., Any]] = deque()
|
||||
self._event_pipes: Dict[threading.Thread, Any] = {}
|
||||
self._event_pipe_gc_lock: threading.Lock = threading.Lock()
|
||||
self._event_pipe_gc_seconds: float = 10
|
||||
self._event_pipe_gc_task: Optional[asyncio.Task[Any]] = None
|
||||
self._setup_event_pipe()
|
||||
self.thread = threading.Thread(target=self._thread_main, name="IOPub")
|
||||
self.thread.daemon = True
|
||||
self.thread.pydev_do_not_trace = True # type:ignore[attr-defined]
|
||||
self.thread.is_pydev_daemon_thread = True # type:ignore[attr-defined]
|
||||
self.thread.name = "IOPub"
|
||||
|
||||
def _thread_main(self):
|
||||
"""The inner loop that's actually run in a thread"""
|
||||
|
||||
def _start_event_gc():
|
||||
self._event_pipe_gc_task = asyncio.ensure_future(self._run_event_pipe_gc())
|
||||
|
||||
self.io_loop.run_sync(_start_event_gc)
|
||||
|
||||
if not self._stopped:
|
||||
# avoid race if stop called before start thread gets here
|
||||
# probably only comes up in tests
|
||||
self.io_loop.start()
|
||||
|
||||
if self._event_pipe_gc_task is not None:
|
||||
# cancel gc task to avoid pending task warnings
|
||||
async def _cancel():
|
||||
self._event_pipe_gc_task.cancel() # type:ignore[union-attr]
|
||||
|
||||
if not self._stopped:
|
||||
self.io_loop.run_sync(_cancel)
|
||||
else:
|
||||
self._event_pipe_gc_task.cancel()
|
||||
|
||||
self.io_loop.close(all_fds=True)
|
||||
|
||||
def _setup_event_pipe(self):
|
||||
"""Create the PULL socket listening for events that should fire in this thread."""
|
||||
ctx = self.socket.context
|
||||
pipe_in = ctx.socket(zmq.PULL)
|
||||
pipe_in.linger = 0
|
||||
|
||||
_uuid = b2a_hex(os.urandom(16)).decode("ascii")
|
||||
iface = self._event_interface = "inproc://%s" % _uuid
|
||||
pipe_in.bind(iface)
|
||||
self._event_puller = ZMQStream(pipe_in, self.io_loop)
|
||||
self._event_puller.on_recv(self._handle_event)
|
||||
|
||||
async def _run_event_pipe_gc(self):
|
||||
"""Task to run event pipe gc continuously"""
|
||||
while True:
|
||||
await asyncio.sleep(self._event_pipe_gc_seconds)
|
||||
try:
|
||||
await self._event_pipe_gc()
|
||||
except Exception as e:
|
||||
print(f"Exception in IOPubThread._event_pipe_gc: {e}", file=sys.__stderr__)
|
||||
|
||||
async def _event_pipe_gc(self):
|
||||
"""run a single garbage collection on event pipes"""
|
||||
if not self._event_pipes:
|
||||
# don't acquire the lock if there's nothing to do
|
||||
return
|
||||
with self._event_pipe_gc_lock:
|
||||
for thread, socket in list(self._event_pipes.items()):
|
||||
if not thread.is_alive():
|
||||
socket.close()
|
||||
del self._event_pipes[thread]
|
||||
|
||||
@property
|
||||
def _event_pipe(self):
|
||||
"""thread-local event pipe for signaling events that should be processed in the thread"""
|
||||
try:
|
||||
event_pipe = self._local.event_pipe
|
||||
except AttributeError:
|
||||
# new thread, new event pipe
|
||||
ctx = self.socket.context
|
||||
event_pipe = ctx.socket(zmq.PUSH)
|
||||
event_pipe.linger = 0
|
||||
event_pipe.connect(self._event_interface)
|
||||
self._local.event_pipe = event_pipe
|
||||
# associate event pipes to their threads
|
||||
# so they can be closed explicitly
|
||||
# implicit close on __del__ throws a ResourceWarning
|
||||
with self._event_pipe_gc_lock:
|
||||
self._event_pipes[threading.current_thread()] = event_pipe
|
||||
return event_pipe
|
||||
|
||||
def _handle_event(self, msg):
|
||||
"""Handle an event on the event pipe
|
||||
|
||||
Content of the message is ignored.
|
||||
|
||||
Whenever *an* event arrives on the event stream,
|
||||
*all* waiting events are processed in order.
|
||||
"""
|
||||
# freeze event count so new writes don't extend the queue
|
||||
# while we are processing
|
||||
n_events = len(self._events)
|
||||
for _ in range(n_events):
|
||||
event_f = self._events.popleft()
|
||||
event_f()
|
||||
|
||||
def _setup_pipe_in(self):
|
||||
"""setup listening pipe for IOPub from forked subprocesses"""
|
||||
ctx = self.socket.context
|
||||
|
||||
# use UUID to authenticate pipe messages
|
||||
self._pipe_uuid = os.urandom(16)
|
||||
|
||||
pipe_in = ctx.socket(zmq.PULL)
|
||||
pipe_in.linger = 0
|
||||
|
||||
try:
|
||||
self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
|
||||
except zmq.ZMQError as e:
|
||||
warnings.warn(
|
||||
"Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e
|
||||
+ "\nsubprocess output will be unavailable.",
|
||||
stacklevel=2,
|
||||
)
|
||||
self._pipe_flag = False
|
||||
pipe_in.close()
|
||||
return
|
||||
self._pipe_in = ZMQStream(pipe_in, self.io_loop)
|
||||
self._pipe_in.on_recv(self._handle_pipe_msg)
|
||||
|
||||
def _handle_pipe_msg(self, msg):
|
||||
"""handle a pipe message from a subprocess"""
|
||||
if not self._pipe_flag or not self._is_master_process():
|
||||
return
|
||||
if msg[0] != self._pipe_uuid:
|
||||
print("Bad pipe message: %s", msg, file=sys.__stderr__)
|
||||
return
|
||||
self.send_multipart(msg[1:])
|
||||
|
||||
def _setup_pipe_out(self):
|
||||
# must be new context after fork
|
||||
ctx = zmq.Context()
|
||||
pipe_out = ctx.socket(zmq.PUSH)
|
||||
pipe_out.linger = 3000 # 3s timeout for pipe_out sends before discarding the message
|
||||
pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
|
||||
return ctx, pipe_out
|
||||
|
||||
def _is_master_process(self):
|
||||
return os.getpid() == self._master_pid
|
||||
|
||||
def _check_mp_mode(self):
|
||||
"""check for forks, and switch to zmq pipeline if necessary"""
|
||||
if not self._pipe_flag or self._is_master_process():
|
||||
return MASTER
|
||||
return CHILD
|
||||
|
||||
def start(self):
|
||||
"""Start the IOPub thread"""
|
||||
self.thread.name = "IOPub"
|
||||
self.thread.start()
|
||||
# make sure we don't prevent process exit
|
||||
# I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be.
|
||||
atexit.register(self.stop)
|
||||
|
||||
def stop(self):
|
||||
"""Stop the IOPub thread"""
|
||||
self._stopped = True
|
||||
if not self.thread.is_alive():
|
||||
return
|
||||
self.io_loop.add_callback(self.io_loop.stop)
|
||||
|
||||
self.thread.join(timeout=30)
|
||||
if self.thread.is_alive():
|
||||
# avoid infinite hang if stop fails
|
||||
msg = "IOPub thread did not terminate in 30 seconds"
|
||||
raise TimeoutError(msg)
|
||||
# close *all* event pipes, created in any thread
|
||||
# event pipes can only be used from other threads while self.thread.is_alive()
|
||||
# so after thread.join, this should be safe
|
||||
for _thread, event_pipe in self._event_pipes.items():
|
||||
event_pipe.close()
|
||||
|
||||
def close(self):
|
||||
"""Close the IOPub thread."""
|
||||
if self.closed:
|
||||
return
|
||||
self.socket.close()
|
||||
self.socket = None
|
||||
|
||||
@property
|
||||
def closed(self):
|
||||
return self.socket is None
|
||||
|
||||
def schedule(self, f):
|
||||
"""Schedule a function to be called in our IO thread.
|
||||
|
||||
If the thread is not running, call immediately.
|
||||
"""
|
||||
if self.thread.is_alive():
|
||||
self._events.append(f)
|
||||
# wake event thread (message content is ignored)
|
||||
self._event_pipe.send(b"")
|
||||
else:
|
||||
f()
|
||||
|
||||
def send_multipart(self, *args, **kwargs):
|
||||
"""send_multipart schedules actual zmq send in my thread.
|
||||
|
||||
If my thread isn't running (e.g. forked process), send immediately.
|
||||
"""
|
||||
self.schedule(lambda: self._really_send(*args, **kwargs))
|
||||
|
||||
def _really_send(self, msg, *args, **kwargs):
|
||||
"""The callback that actually sends messages"""
|
||||
if self.closed:
|
||||
return
|
||||
|
||||
mp_mode = self._check_mp_mode()
|
||||
|
||||
if mp_mode != CHILD:
|
||||
# we are master, do a regular send
|
||||
self.socket.send_multipart(msg, *args, **kwargs)
|
||||
else:
|
||||
# we are a child, pipe to master
|
||||
# new context/socket for every pipe-out
|
||||
# since forks don't teardown politely, use ctx.term to ensure send has completed
|
||||
ctx, pipe_out = self._setup_pipe_out()
|
||||
pipe_out.send_multipart([self._pipe_uuid, *msg], *args, **kwargs)
|
||||
pipe_out.close()
|
||||
ctx.term()
|
||||
|
||||
|
||||
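# --- editor sketch: the schedule-and-wake pattern used by IOPubThread above,
# reduced to the standard library (assumed names, not part of ipykernel).
# Callables land in a deque; a wake signal (standing in for the inproc zmq
# event pipe) tells the loop thread to drain them in order:
#
#     import threading
#     from collections import deque
#
#     class TinyScheduler:
#         def __init__(self):
#             self._events = deque()
#             self._wake = threading.Event()
#
#         def schedule(self, f):
#             self._events.append(f)
#             self._wake.set()
#
#         def run_once(self):
#             self._wake.wait()
#             self._wake.clear()
#             for _ in range(len(self._events)):  # freeze count, like _handle_event
#                 self._events.popleft()()
#
#     sched = TinyScheduler()
#     sched.schedule(lambda: print("ran in loop thread"))
#     sched.run_once()
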
class BackgroundSocket:
    """Wrapper around IOPub thread that provides zmq send[_multipart]"""

    io_thread = None

    def __init__(self, io_thread):
        """Initialize the socket."""
        self.io_thread = io_thread

    def __getattr__(self, attr):
        """Wrap socket attr access for backward-compatibility"""
        if attr.startswith("__") and attr.endswith("__"):
            # don't wrap magic methods
            super().__getattr__(attr)  # type:ignore[misc]
        assert self.io_thread is not None
        if hasattr(self.io_thread.socket, attr):
            warnings.warn(
                f"Accessing zmq Socket attribute {attr} on BackgroundSocket"
                f" is deprecated since ipykernel 4.3.0"
                f" use .io_thread.socket.{attr}",
                DeprecationWarning,
                stacklevel=2,
            )
            return getattr(self.io_thread.socket, attr)
        return super().__getattr__(attr)  # type:ignore[misc]

    def __setattr__(self, attr, value):
        """Set an attribute on the socket."""
        if attr == "io_thread" or (attr.startswith("__") and attr.endswith("__")):
            super().__setattr__(attr, value)
        else:
            warnings.warn(
                f"Setting zmq Socket attribute {attr} on BackgroundSocket"
                f" is deprecated since ipykernel 4.3.0"
                f" use .io_thread.socket.{attr}",
                DeprecationWarning,
                stacklevel=2,
            )
            assert self.io_thread is not None
            setattr(self.io_thread.socket, attr, value)

    def send(self, msg, *args, **kwargs):
        """Send a message to the socket."""
        return self.send_multipart([msg], *args, **kwargs)

    def send_multipart(self, *args, **kwargs):
        """Schedule send in IO thread"""
        assert self.io_thread is not None
        return self.io_thread.send_multipart(*args, **kwargs)

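# --- editor sketch: the delegation idea behind BackgroundSocket's
# __getattr__/__setattr__, in isolation (assumed names, not ipykernel API):
#
#     class Proxy:
#         def __init__(self, target):
#             object.__setattr__(self, "_target", target)
#
#         def __getattr__(self, attr):         # only fires on lookup misses
#             return getattr(self._target, attr)
#
#         def __setattr__(self, attr, value):  # fires on every assignment
#             setattr(self._target, attr, value)
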
class OutStream(TextIOBase):
    """A file like object that publishes the stream to a 0MQ PUB socket.

    Output is handed off to an IO Thread
    """

    # timeout for flush to avoid infinite hang
    # in case of misbehavior
    flush_timeout = 10
    # The time interval between automatic flushes, in seconds.
    flush_interval = 0.2
    topic = None
    encoding = "UTF-8"
    _exc: Optional[Any] = None

    def fileno(self):
        """
        Things like subprocess will peek and write to the fileno() of stderr/stdout.
        """
        if getattr(self, "_original_stdstream_copy", None) is not None:
            return self._original_stdstream_copy
        msg = "fileno"
        raise io.UnsupportedOperation(msg)

    def _watch_pipe_fd(self):
        """
        We've redirected standard streams 0 and 1 into a pipe.

        We need to watch in a thread and redirect them to the right places.

        1) the ZMQ channels to show in notebook interfaces,
        2) the original stdout/err, to capture errors in terminals.

        We cannot schedule this on the ioloop thread, as this might be blocking.
        """

        try:
            bts = os.read(self._fid, PIPE_BUFFER_SIZE)
            while bts and self._should_watch:
                self.write(bts.decode(errors="replace"))
                os.write(self._original_stdstream_copy, bts)
                bts = os.read(self._fid, PIPE_BUFFER_SIZE)
        except Exception:
            self._exc = sys.exc_info()

    def __init__(
        self,
        session,
        pub_thread,
        name,
        pipe=None,
        echo=None,
        *,
        watchfd=True,
        isatty=False,
    ):
        """
        Parameters
        ----------
        session : object
            the session object
        pub_thread : threading.Thread
            the publication thread
        name : str {'stderr', 'stdout'}
            the name of the standard stream to replace
        pipe : object
            the pipe object
        echo : bool
            whether to echo output
        watchfd : bool (default, True)
            Watch the file descriptor corresponding to the replaced stream.
            This is useful if you know some underlying code will write directly
            to the file descriptor by its number. It will spawn a watching thread,
            that will swap the given file descriptor for a pipe, read from the
            pipe, and insert this into the current Stream.
        isatty : bool (default, False)
            Indication of whether this stream has terminal capabilities (e.g. can handle colors)
        """
        if pipe is not None:
            warnings.warn(
                "pipe argument to OutStream is deprecated and ignored since ipykernel 4.2.3.",
                DeprecationWarning,
                stacklevel=2,
            )
        # This is necessary for compatibility with Python built-in streams
        self.session = session
        if not isinstance(pub_thread, IOPubThread):
            # Backward-compat: given socket, not thread. Wrap in a thread.
            warnings.warn(
                "Since IPykernel 4.3, OutStream should be created with "
                "IOPubThread, not %r" % pub_thread,
                DeprecationWarning,
                stacklevel=2,
            )
            pub_thread = IOPubThread(pub_thread)
            pub_thread.start()
        self.pub_thread = pub_thread
        self.name = name
        self.topic = b"stream." + name.encode()
        self._parent_header: contextvars.ContextVar[Dict[str, Any]] = contextvars.ContextVar(
            "parent_header"
        )
        self._parent_header.set({})
        self._thread_to_parent = {}
        self._thread_to_parent_header = {}
        self._parent_header_global = {}
        self._master_pid = os.getpid()
        self._flush_pending = False
        self._subprocess_flush_pending = False
        self._io_loop = pub_thread.io_loop
        self._buffer_lock = threading.RLock()
        self._buffers = defaultdict(StringIO)
        self.echo = None
        self._isatty = bool(isatty)
        self._should_watch = False
        self._local = local()

        if (
            watchfd
            and (
                (sys.platform.startswith("linux") or sys.platform.startswith("darwin"))
                # Pytest set its own capture. Don't redirect from within pytest.
                and ("PYTEST_CURRENT_TEST" not in os.environ)
            )
            # allow forcing watchfd (mainly for tests)
            or watchfd == "force"
        ):
            self._should_watch = True
            self._setup_stream_redirects(name)

        if echo:
            if hasattr(echo, "read") and hasattr(echo, "write"):
                # make sure we aren't trying to echo on the FD we're watching!
                # that would cause an infinite loop, always echoing on itself
                if self._should_watch:
                    try:
                        echo_fd = echo.fileno()
                    except Exception:
                        echo_fd = None

                    if echo_fd is not None and echo_fd == self._original_stdstream_fd:
                        # echo on the _copy_ we made during the redirect;
                        # this is the actual terminal FD now
                        echo = io.TextIOWrapper(
                            io.FileIO(
                                self._original_stdstream_copy,
                                "w",
                            )
                        )
                self.echo = echo
            else:
                msg = "echo argument must be a file-like object"
                raise ValueError(msg)

    @property
    def parent_header(self):
        try:
            # asyncio-specific
            return self._parent_header.get()
        except LookupError:
            try:
                # thread-specific
                identity = threading.current_thread().ident
                # retrieve the outermost (oldest ancestor,
                # discounting the kernel thread) thread identity
                while identity in self._thread_to_parent:
                    identity = self._thread_to_parent[identity]
                # use the header of the oldest ancestor
                return self._thread_to_parent_header[identity]
            except KeyError:
                # global (fallback)
                return self._parent_header_global

    @parent_header.setter
    def parent_header(self, value):
        self._parent_header_global = value
        return self._parent_header.set(value)

    def isatty(self):
        """Return a bool indicating whether this is an 'interactive' stream.

        Returns
        -------
        Boolean
        """
        return self._isatty

    def _setup_stream_redirects(self, name):
        pr, pw = os.pipe()
        fno = self._original_stdstream_fd = getattr(sys, name).fileno()
        self._original_stdstream_copy = os.dup(fno)
        os.dup2(pw, fno)

        self._fid = pr

        self._exc = None
        self.watch_fd_thread = threading.Thread(target=self._watch_pipe_fd)
        self.watch_fd_thread.daemon = True
        self.watch_fd_thread.start()

    def _is_master_process(self):
        return os.getpid() == self._master_pid

    def set_parent(self, parent):
        """Set the parent header."""
        self.parent_header = extract_header(parent)

    def close(self):
        """Close the stream."""
        if self._should_watch:
            self._should_watch = False
            # thread won't wake unless there's something to read
            # writing something after _should_watch will not be echoed
            os.write(self._original_stdstream_fd, b"\0")
            self.watch_fd_thread.join()
            # restore original FDs
            os.dup2(self._original_stdstream_copy, self._original_stdstream_fd)
            os.close(self._original_stdstream_copy)
        if self._exc:
            etype, value, tb = self._exc
            traceback.print_exception(etype, value, tb)
        self.pub_thread = None

    @property
    def closed(self):
        return self.pub_thread is None

    def _schedule_flush(self):
        """schedule a flush in the IO thread

        call this on write, to indicate that flush should be called soon.
        """
        if self._flush_pending:
            return
        self._flush_pending = True

        # add_timeout has to be handed to the io thread via event pipe
        def _schedule_in_thread():
            self._io_loop.call_later(self.flush_interval, self._flush)

        self.pub_thread.schedule(_schedule_in_thread)

    def flush(self):
        """trigger actual zmq send

        send will happen in the background thread
        """
        if (
            self.pub_thread
            and self.pub_thread.thread is not None
            and self.pub_thread.thread.is_alive()
            and self.pub_thread.thread.ident != threading.current_thread().ident
        ):
            # request flush on the background thread
            self.pub_thread.schedule(self._flush)
            # wait for flush to actually get through, if we can.
            evt = threading.Event()
            self.pub_thread.schedule(evt.set)
            # and give a timeout to avoid hanging forever
            if not evt.wait(self.flush_timeout):
                # write directly to __stderr__ instead of warning because
                # if this is happening sys.stderr may be the problem.
                print("IOStream.flush timed out", file=sys.__stderr__)
        else:
            self._flush()

    def _flush(self):
        """This is where the actual send happens.

        _flush should generally be called in the IO thread,
        unless the thread has been destroyed (e.g. forked subprocess).
        """
        self._flush_pending = False
        self._subprocess_flush_pending = False

        if self.echo is not None:
            try:
                self.echo.flush()
            except OSError as e:
                if self.echo is not sys.__stderr__:
                    print(f"Flush failed: {e}", file=sys.__stderr__)

        for parent, data in self._flush_buffers():
            if data:
                # FIXME: this disables Session's fork-safe check,
                # since pub_thread is itself fork-safe.
                # There should be a better way to do this.
                self.session.pid = os.getpid()
                content = {"name": self.name, "text": data}
                msg = self.session.msg("stream", content, parent=parent)

                # Each transform either returns a new
                # message or None. If None is returned,
                # the message has been 'used' and we return.
                for hook in self._hooks:
                    msg = hook(msg)
                    if msg is None:
                        return

                self.session.send(
                    self.pub_thread,
                    msg,
                    ident=self.topic,
                )

    def write(self, string: str) -> Optional[int]:  # type:ignore[override]
        """Write to current stream after encoding if necessary

        Returns
        -------
        len : int
            number of items from input parameter written to stream.
        """
        parent = self.parent_header

        if not isinstance(string, str):
            msg = f"write() argument must be str, not {type(string)}"  # type:ignore[unreachable]
            raise TypeError(msg)

        if self.echo is not None:
            try:
                self.echo.write(string)
            except OSError as e:
                if self.echo is not sys.__stderr__:
                    print(f"Write failed: {e}", file=sys.__stderr__)

        if self.pub_thread is None:
            msg = "I/O operation on closed file"
            raise ValueError(msg)

        is_child = not self._is_master_process()
        # only touch the buffer in the IO thread to avoid races
        with self._buffer_lock:
            self._buffers[frozenset(parent.items())].write(string)
        if is_child:
            # mp.Pool cannot be trusted to flush promptly (or ever),
            # and this helps.
            if self._subprocess_flush_pending:
                return None
            self._subprocess_flush_pending = True
            # We can not rely on self._io_loop.call_later from a subprocess
            self.pub_thread.schedule(self._flush)
        else:
            self._schedule_flush()

        return len(string)

    def writelines(self, sequence):
        """Write lines to the stream."""
        if self.pub_thread is None:
            msg = "I/O operation on closed file"
            raise ValueError(msg)
        for string in sequence:
            self.write(string)

    def writable(self):
        """Test whether the stream is writable."""
        return True

    def _flush_buffers(self):
        """clear the current buffer and return the current buffer data."""
        buffers = self._rotate_buffers()
        for frozen_parent, buffer in buffers.items():
            data = buffer.getvalue()
            buffer.close()
            yield dict(frozen_parent), data

    def _rotate_buffers(self):
        """Returns the current buffer and replaces it with an empty buffer."""
        with self._buffer_lock:
            old_buffers = self._buffers
            self._buffers = defaultdict(StringIO)
        return old_buffers

    @property
    def _hooks(self):
        if not hasattr(self._local, "hooks"):
            # create new list for a new thread
            self._local.hooks = []
        return self._local.hooks

    def register_hook(self, hook):
        """
        Registers a hook with the thread-local storage.

        Parameters
        ----------
        hook : Any callable object

        Returns
        -------
        Either a publishable message, or `None`.
        The hook callable must return a message from
        the __call__ method if it still requires the
        `session.send` method to be called after transformation.
        Returning `None` will halt that execution path, and
        session.send will not be called.
        """
        self._hooks.append(hook)

    def unregister_hook(self, hook):
        """
        Un-registers a hook with the thread-local storage.

        Parameters
        ----------
        hook : Any callable object which has previously been
            registered as a hook.

        Returns
        -------
        bool - `True` if the hook was removed, `False` if it wasn't found.
        """
        try:
            self._hooks.remove(hook)
            return True
        except ValueError:
            return False
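
A hedged sketch of the hook protocol described by register_hook above: each hook sees the prospective "stream" message and returns it (possibly modified) or None to suppress publication. The `stream` variable below is an assumed OutStream instance:

def drop_blank_output(msg):
    # msg is a Jupyter "stream" message dict; returning None suppresses it
    if not msg["content"]["text"].strip():
        return None
    return msg

stream.register_hook(drop_blank_output)    # whitespace-only writes vanish
# ... later ...
stream.unregister_hook(drop_blank_output)  # returns True if it was registered
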
819
venv/lib/python3.10/site-packages/ipykernel/ipkernel.py
Normal file
@@ -0,0 +1,819 @@
"""The IPython kernel implementation"""
|
||||
|
||||
import asyncio
|
||||
import builtins
|
||||
import gc
|
||||
import getpass
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import threading
|
||||
import typing as t
|
||||
from contextlib import contextmanager
|
||||
from functools import partial
|
||||
|
||||
import comm
|
||||
from IPython.core import release
|
||||
from IPython.utils.tokenutil import line_at_cursor, token_at_cursor
|
||||
from jupyter_client.session import extract_header
|
||||
from traitlets import Any, Bool, HasTraits, Instance, List, Type, observe, observe_compat
|
||||
from zmq.eventloop.zmqstream import ZMQStream
|
||||
|
||||
from .comm.comm import BaseComm
|
||||
from .comm.manager import CommManager
|
||||
from .compiler import XCachingCompiler
|
||||
from .debugger import Debugger, _is_debugpy_available
|
||||
from .eventloops import _use_appnope
|
||||
from .iostream import OutStream
|
||||
from .kernelbase import Kernel as KernelBase
|
||||
from .kernelbase import _accepts_parameters
|
||||
from .zmqshell import ZMQInteractiveShell
|
||||
|
||||
try:
|
||||
from IPython.core.interactiveshell import _asyncio_runner # type:ignore[attr-defined]
|
||||
except ImportError:
|
||||
_asyncio_runner = None # type:ignore[assignment]
|
||||
|
||||
try:
|
||||
from IPython.core.completer import provisionalcompleter as _provisionalcompleter
|
||||
from IPython.core.completer import rectify_completions as _rectify_completions
|
||||
|
||||
_use_experimental_60_completion = True
|
||||
except ImportError:
|
||||
_use_experimental_60_completion = False
|
||||
|
||||
|
||||
_EXPERIMENTAL_KEY_NAME = "_jupyter_types_experimental"
|
||||
|
||||
|
||||
def _create_comm(*args, **kwargs):
|
||||
"""Create a new Comm."""
|
||||
return BaseComm(*args, **kwargs)
|
||||
|
||||
|
||||
# there can only be one comm manager in a ipykernel process
|
||||
_comm_lock = threading.Lock()
|
||||
_comm_manager: t.Optional[CommManager] = None
|
||||
|
||||
|
||||
def _get_comm_manager(*args, **kwargs):
|
||||
"""Create a new CommManager."""
|
||||
global _comm_manager # noqa: PLW0603
|
||||
if _comm_manager is None:
|
||||
with _comm_lock:
|
||||
if _comm_manager is None:
|
||||
_comm_manager = CommManager(*args, **kwargs)
|
||||
return _comm_manager
|
||||
|
||||
|
||||
comm.create_comm = _create_comm
|
||||
comm.get_comm_manager = _get_comm_manager
|
||||
|
||||
|
||||
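# --- editor note: _get_comm_manager above is double-checked locking; the
# generic shape of the pattern (illustrative names, not ipykernel API):
#
#     _instance = None
#     _lock = threading.Lock()
#
#     def get_instance():
#         global _instance
#         if _instance is None:             # fast path, no lock taken
#             with _lock:
#                 if _instance is None:     # re-check while holding the lock
#                     _instance = object()  # expensive construction goes here
#         return _instance
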
class IPythonKernel(KernelBase):
    """The IPython Kernel class."""

    shell = Instance("IPython.core.interactiveshell.InteractiveShellABC", allow_none=True)
    shell_class = Type(ZMQInteractiveShell)

    use_experimental_completions = Bool(
        True,
        help="Set this flag to False to deactivate the use of experimental IPython completion APIs.",
    ).tag(config=True)

    debugpy_stream = Instance(ZMQStream, allow_none=True) if _is_debugpy_available else None

    user_module = Any()

    @observe("user_module")
    @observe_compat
    def _user_module_changed(self, change):
        if self.shell is not None:
            self.shell.user_module = change["new"]

    user_ns = Instance(dict, args=None, allow_none=True)

    @observe("user_ns")
    @observe_compat
    def _user_ns_changed(self, change):
        if self.shell is not None:
            self.shell.user_ns = change["new"]
            self.shell.init_user_ns()

    # A reference to the Python builtin 'raw_input' function.
    # (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
    _sys_raw_input = Any()
    _sys_eval_input = Any()

    def __init__(self, **kwargs):
        """Initialize the kernel."""
        super().__init__(**kwargs)

        # Initialize the Debugger
        if _is_debugpy_available:
            self.debugger = Debugger(
                self.log,
                self.debugpy_stream,
                self._publish_debug_event,
                self.debug_shell_socket,
                self.session,
                self.debug_just_my_code,
            )

        # Initialize the InteractiveShell subclass
        self.shell = self.shell_class.instance(
            parent=self,
            profile_dir=self.profile_dir,
            user_module=self.user_module,
            user_ns=self.user_ns,
            kernel=self,
            compiler_class=XCachingCompiler,
        )
        self.shell.displayhook.session = self.session  # type:ignore[attr-defined]

        jupyter_session_name = os.environ.get("JPY_SESSION_NAME")
        if jupyter_session_name:
            self.shell.user_ns["__session__"] = jupyter_session_name

        self.shell.displayhook.pub_socket = self.iopub_socket  # type:ignore[attr-defined]
        self.shell.displayhook.topic = self._topic("execute_result")  # type:ignore[attr-defined]
        self.shell.display_pub.session = self.session  # type:ignore[attr-defined]
        self.shell.display_pub.pub_socket = self.iopub_socket  # type:ignore[attr-defined]

        self.comm_manager = comm.get_comm_manager()

        assert isinstance(self.comm_manager, HasTraits)
        self.shell.configurables.append(self.comm_manager)  # type:ignore[arg-type]
        comm_msg_types = ["comm_open", "comm_msg", "comm_close"]
        for msg_type in comm_msg_types:
            self.shell_handlers[msg_type] = getattr(self.comm_manager, msg_type)

        if _use_appnope() and self._darwin_app_nap:
            # Disable app-nap as the kernel is not a gui but can have guis
            import appnope  # type:ignore[import-untyped]

            appnope.nope()

        self._new_threads_parent_header = {}
        self._initialize_thread_hooks()

        if hasattr(gc, "callbacks"):
            # while `gc.callbacks` exists since Python 3.3, pypy does not
            # implement it even as of 3.9.
            gc.callbacks.append(self._clean_thread_parent_frames)

    help_links = List(
        [
            {
                "text": "Python Reference",
                "url": "https://docs.python.org/%i.%i" % sys.version_info[:2],
            },
            {
                "text": "IPython Reference",
                "url": "https://ipython.org/documentation.html",
            },
            {
                "text": "NumPy Reference",
                "url": "https://docs.scipy.org/doc/numpy/reference/",
            },
            {
                "text": "SciPy Reference",
                "url": "https://docs.scipy.org/doc/scipy/reference/",
            },
            {
                "text": "Matplotlib Reference",
                "url": "https://matplotlib.org/contents.html",
            },
            {
                "text": "SymPy Reference",
                "url": "http://docs.sympy.org/latest/index.html",
            },
            {
                "text": "pandas Reference",
                "url": "https://pandas.pydata.org/pandas-docs/stable/",
            },
        ]
    ).tag(config=True)

    # Kernel info fields
    implementation = "ipython"
    implementation_version = release.version
    language_info = {
        "name": "python",
        "version": sys.version.split()[0],
        "mimetype": "text/x-python",
        "codemirror_mode": {"name": "ipython", "version": sys.version_info[0]},
        "pygments_lexer": "ipython%d" % 3,
        "nbconvert_exporter": "python",
        "file_extension": ".py",
    }

    def dispatch_debugpy(self, msg):
        if _is_debugpy_available:
            # The first frame is the socket id, we can drop it
            frame = msg[1].bytes.decode("utf-8")
            self.log.debug("Debugpy received: %s", frame)
            self.debugger.tcp_client.receive_dap_frame(frame)

    @property
    def banner(self):
        if self.shell:
            return self.shell.banner
        return None

    async def poll_stopped_queue(self):
        """Poll the stopped queue."""
        while True:
            await self.debugger.handle_stopped_event()

    def start(self):
        """Start the kernel."""
        if self.shell:
            self.shell.exit_now = False
        if self.debugpy_stream is None:
            self.log.warning("debugpy_stream undefined, debugging will not be enabled")
        else:
            self.debugpy_stream.on_recv(self.dispatch_debugpy, copy=False)
        super().start()
        if self.debugpy_stream:
            asyncio.run_coroutine_threadsafe(
                self.poll_stopped_queue(), self.control_thread.io_loop.asyncio_loop
            )

    def set_parent(self, ident, parent, channel="shell"):
        """Overridden from parent to tell the display hook and output streams
        about the parent message.
        """
        super().set_parent(ident, parent, channel)
        if channel == "shell" and self.shell:
            self.shell.set_parent(parent)

    def init_metadata(self, parent):
        """Initialize metadata.

        Run at the beginning of each execution request.
        """
        md = super().init_metadata(parent)
        # FIXME: remove deprecated ipyparallel-specific code
        # This is required for ipyparallel < 5.0
        md.update(
            {
                "dependencies_met": True,
                "engine": self.ident,
            }
        )
        return md

    def finish_metadata(self, parent, metadata, reply_content):
        """Finish populating metadata.

        Run after completing an execution request.
        """
        # FIXME: remove deprecated ipyparallel-specific code
        # This is required by ipyparallel < 5.0
        metadata["status"] = reply_content["status"]
        if reply_content["status"] == "error" and reply_content["ename"] == "UnmetDependency":
            metadata["dependencies_met"] = False

        return metadata

    def _forward_input(self, allow_stdin=False):
        """Forward raw_input and getpass to the current frontend.

        via input_request
        """
        self._allow_stdin = allow_stdin

        self._sys_raw_input = builtins.input
        builtins.input = self.raw_input

        self._save_getpass = getpass.getpass
        getpass.getpass = self.getpass

    def _restore_input(self):
        """Restore raw_input, getpass"""
        builtins.input = self._sys_raw_input

        getpass.getpass = self._save_getpass

    @property
    def execution_count(self):
        if self.shell:
            return self.shell.execution_count
        return None

    @execution_count.setter
    def execution_count(self, value):
        # Ignore the incrementing done by KernelBase, in favour of our shell's
        # execution counter.
        pass

    @contextmanager
    def _cancel_on_sigint(self, future):
        """ContextManager for capturing SIGINT and cancelling a future

        SIGINT raises in the event loop when running async code,
        but we want it to halt a coroutine.

        Ideally, it would raise KeyboardInterrupt,
        but this turns it into a CancelledError.
        At least it gets a decent traceback to the user.
        """
        sigint_future: asyncio.Future[int] = asyncio.Future()

        # whichever future finishes first,
        # cancel the other one
        def cancel_unless_done(f, _ignored):
            if f.cancelled() or f.done():
                return
            f.cancel()

        # when sigint finishes,
        # abort the coroutine with CancelledError
        sigint_future.add_done_callback(partial(cancel_unless_done, future))
        # when the main future finishes,
        # stop watching for SIGINT events
        future.add_done_callback(partial(cancel_unless_done, sigint_future))

        def handle_sigint(*args):
            def set_sigint_result():
                if sigint_future.cancelled() or sigint_future.done():
                    return
                sigint_future.set_result(1)

            # use add_callback for thread safety
            self.io_loop.add_callback(set_sigint_result)

        # set the custom sigint handler during this context
        save_sigint = signal.signal(signal.SIGINT, handle_sigint)
        try:
            yield
        finally:
            # restore the previous sigint handler
            signal.signal(signal.SIGINT, save_sigint)
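    # --- editor sketch: the idea behind _cancel_on_sigint, reduced to plain
    # asyncio (assumed names, not ipykernel API). On Unix, a running loop can
    # swap SIGINT for task cancellation directly:
    #
    #     async def main():
    #         loop = asyncio.get_running_loop()
    #         task = asyncio.ensure_future(asyncio.sleep(60))
    #         loop.add_signal_handler(signal.SIGINT, task.cancel)
    #         try:
    #             await task
    #         except asyncio.CancelledError:
    #             print("interrupted: task cancelled, loop still alive")
    #         finally:
    #             loop.remove_signal_handler(signal.SIGINT)
    #
    #     asyncio.run(main())  # press Ctrl-C while it sleeps
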
    async def execute_request(self, stream, ident, parent):
        """Override for cell output - cell reconciliation."""
        parent_header = extract_header(parent)
        self._associate_new_top_level_threads_with(parent_header)
        await super().execute_request(stream, ident, parent)

    async def do_execute(
        self,
        code,
        silent,
        store_history=True,
        user_expressions=None,
        allow_stdin=False,
        *,
        cell_meta=None,
        cell_id=None,
    ):
        """Handle code execution."""
        shell = self.shell  # we'll need this a lot here
        assert shell is not None

        self._forward_input(allow_stdin)

        reply_content: t.Dict[str, t.Any] = {}
        if hasattr(shell, "run_cell_async") and hasattr(shell, "should_run_async"):
            run_cell = shell.run_cell_async
            should_run_async = shell.should_run_async
            accepts_params = _accepts_parameters(run_cell, ["cell_id"])
        else:
            should_run_async = lambda cell: False  # noqa: ARG005
            # older IPython,
            # use blocking run_cell and wrap it in coroutine

            async def run_cell(*args, **kwargs):
                return shell.run_cell(*args, **kwargs)

            accepts_params = _accepts_parameters(shell.run_cell, ["cell_id"])
        try:
            # default case: runner is asyncio and asyncio is already running
            # TODO: this should check every case for "are we inside the runner",
            # not just asyncio
            preprocessing_exc_tuple = None
            try:
                transformed_cell = shell.transform_cell(code)
            except Exception:
                transformed_cell = code
                preprocessing_exc_tuple = sys.exc_info()

            if (
                _asyncio_runner  # type:ignore[truthy-bool]
                and shell.loop_runner is _asyncio_runner
                and asyncio.get_event_loop().is_running()
                and should_run_async(
                    code,
                    transformed_cell=transformed_cell,
                    preprocessing_exc_tuple=preprocessing_exc_tuple,
                )
            ):
                if accepts_params["cell_id"]:
                    coro = run_cell(
                        code,
                        store_history=store_history,
                        silent=silent,
                        transformed_cell=transformed_cell,
                        preprocessing_exc_tuple=preprocessing_exc_tuple,
                        cell_id=cell_id,
                    )
                else:
                    coro = run_cell(
                        code,
                        store_history=store_history,
                        silent=silent,
                        transformed_cell=transformed_cell,
                        preprocessing_exc_tuple=preprocessing_exc_tuple,
                    )

                coro_future = asyncio.ensure_future(coro)

                with self._cancel_on_sigint(coro_future):
                    res = None
                    try:
                        res = await coro_future
                    finally:
                        shell.events.trigger("post_execute")
                        if not silent:
                            shell.events.trigger("post_run_cell", res)
            else:
                # runner isn't already running,
                # make synchronous call,
                # letting shell dispatch to loop runners
                if accepts_params["cell_id"]:
                    res = shell.run_cell(
                        code,
                        store_history=store_history,
                        silent=silent,
                        cell_id=cell_id,
                    )
                else:
                    res = shell.run_cell(code, store_history=store_history, silent=silent)
        finally:
            self._restore_input()

        err = res.error_before_exec if res.error_before_exec is not None else res.error_in_exec

        if res.success:
            reply_content["status"] = "ok"
        else:
            reply_content["status"] = "error"

            reply_content.update(
                {
                    "traceback": shell._last_traceback or [],
                    "ename": str(type(err).__name__),
                    "evalue": str(err),
                }
            )

            # FIXME: deprecated piece for ipyparallel (remove in 5.0):
            e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method="execute")
            reply_content["engine_info"] = e_info

        # Return the execution counter so clients can display prompts
        reply_content["execution_count"] = shell.execution_count - 1

        if "traceback" in reply_content:
            self.log.info(
                "Exception in execute request:\n%s",
                "\n".join(reply_content["traceback"]),
            )

        # At this point, we can tell whether the main code execution succeeded
        # or not. If it did, we proceed to evaluate user_expressions
        if reply_content["status"] == "ok":
            reply_content["user_expressions"] = shell.user_expressions(user_expressions or {})
        else:
            # If there was an error, don't even try to compute expressions
            reply_content["user_expressions"] = {}

        # Payloads should be retrieved regardless of outcome, so we can both
        # recover partial output (that could have been generated early in a
        # block, before an error) and always clear the payload system.
        reply_content["payload"] = shell.payload_manager.read_payload()
        # Be aggressive about clearing the payload because we don't want
        # it to sit in memory until the next execute_request comes in.
        shell.payload_manager.clear_payload()

        return reply_content
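    # --- editor note: for orientation, a successful reply assembled by
    # do_execute above looks roughly like this (fields per the Jupyter
    # messaging spec; values illustrative):
    #
    #     {
    #         "status": "ok",
    #         "execution_count": 3,     # prompt number shown by the frontend
    #         "user_expressions": {},   # evaluated only when status == "ok"
    #         "payload": [],            # page(), set_next_input(), ...
    #     }
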
    def do_complete(self, code, cursor_pos):
        """Handle code completion."""
        if _use_experimental_60_completion and self.use_experimental_completions:
            return self._experimental_do_complete(code, cursor_pos)

        # FIXME: IPython completers currently assume single line,
        # but completion messages give multi-line context
        # For now, extract line from cell, based on cursor_pos:
        if cursor_pos is None:
            cursor_pos = len(code)
        line, offset = line_at_cursor(code, cursor_pos)
        line_cursor = cursor_pos - offset
        assert self.shell is not None
        txt, matches = self.shell.complete("", line, line_cursor)
        return {
            "matches": matches,
            "cursor_end": cursor_pos,
            "cursor_start": cursor_pos - len(txt),
            "metadata": {},
            "status": "ok",
        }

    async def do_debug_request(self, msg):
        """Handle a debug request."""
        if _is_debugpy_available:
            return await self.debugger.process_request(msg)
        return None

    def _experimental_do_complete(self, code, cursor_pos):
        """
        Experimental completions from IPython, using Jedi.
        """
        if cursor_pos is None:
            cursor_pos = len(code)
        with _provisionalcompleter():
            assert self.shell is not None
            raw_completions = self.shell.Completer.completions(code, cursor_pos)
            completions = list(_rectify_completions(code, raw_completions))

            comps = []
            for comp in completions:
                comps.append(
                    dict(
                        start=comp.start,
                        end=comp.end,
                        text=comp.text,
                        type=comp.type,
                        signature=comp.signature,
                    )
                )

        if completions:
            s = completions[0].start
            e = completions[0].end
            matches = [c.text for c in completions]
        else:
            s = cursor_pos
            e = cursor_pos
            matches = []

        return {
            "matches": matches,
            "cursor_end": e,
            "cursor_start": s,
            "metadata": {_EXPERIMENTAL_KEY_NAME: comps},
            "status": "ok",
        }

    def do_inspect(self, code, cursor_pos, detail_level=0, omit_sections=()):
        """Handle code inspection."""
        name = token_at_cursor(code, cursor_pos)

        reply_content: t.Dict[str, t.Any] = {"status": "ok"}
        reply_content["data"] = {}
        reply_content["metadata"] = {}
        assert self.shell is not None
        try:
            if release.version_info >= (8,):
                # `omit_sections` keyword will be available in IPython 8, see
                # https://github.com/ipython/ipython/pull/13343
                bundle = self.shell.object_inspect_mime(
                    name,
                    detail_level=detail_level,
                    omit_sections=omit_sections,
                )
            else:
                bundle = self.shell.object_inspect_mime(name, detail_level=detail_level)
            reply_content["data"].update(bundle)
            if not self.shell.enable_html_pager:
                reply_content["data"].pop("text/html")
            reply_content["found"] = True
        except KeyError:
            reply_content["found"] = False

        return reply_content

    def do_history(
        self,
        hist_access_type,
        output,
        raw,
        session=0,
        start=0,
        stop=None,
        n=None,
        pattern=None,
        unique=False,
    ):
        """Handle code history."""
        assert self.shell is not None
        if hist_access_type == "tail":
            hist = self.shell.history_manager.get_tail(
                n, raw=raw, output=output, include_latest=True
            )

        elif hist_access_type == "range":
            hist = self.shell.history_manager.get_range(
                session, start, stop, raw=raw, output=output
            )

        elif hist_access_type == "search":
            hist = self.shell.history_manager.search(
                pattern, raw=raw, output=output, n=n, unique=unique
            )
        else:
            hist = []

        return {
            "status": "ok",
            "history": list(hist),
        }

    def do_shutdown(self, restart):
        """Handle kernel shutdown."""
        if self.shell:
            self.shell.exit_now = True
        return dict(status="ok", restart=restart)

    def do_is_complete(self, code):
        """Handle an is_complete request."""
        transformer_manager = getattr(self.shell, "input_transformer_manager", None)
        if transformer_manager is None:
            # input_splitter attribute is deprecated
            assert self.shell is not None
            transformer_manager = self.shell.input_splitter
        status, indent_spaces = transformer_manager.check_complete(code)
        r = {"status": status}
        if status == "incomplete":
            r["indent"] = " " * indent_spaces
        return r
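    # --- editor sketch: what do_is_complete reports for typical inputs, via
    # IPython's TransformerManager (the same check_complete call as above):
    #
    #     from IPython.core.inputtransformer2 import TransformerManager
    #     tm = TransformerManager()
    #     tm.check_complete("for i in range(3):")  # ('incomplete', 4) -> indent "    "
    #     tm.check_complete("1 + 1")               # ('complete', None)
    #     tm.check_complete("1 +")                 # ('invalid', None)
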
    def do_apply(self, content, bufs, msg_id, reply_metadata):
        """Handle an apply request."""
        try:
            from ipyparallel.serialize import serialize_object, unpack_apply_message
        except ImportError:
            from .serialize import serialize_object, unpack_apply_message

        shell = self.shell
        assert shell is not None
        try:
            working = shell.user_ns

            prefix = "_" + str(msg_id).replace("-", "") + "_"
            f, args, kwargs = unpack_apply_message(bufs, working, copy=False)

            fname = getattr(f, "__name__", "f")

            fname = prefix + "f"
            argname = prefix + "args"
            kwargname = prefix + "kwargs"
            resultname = prefix + "result"

            ns = {fname: f, argname: args, kwargname: kwargs, resultname: None}
            # print ns
            working.update(ns)
            code = f"{resultname} = {fname}(*{argname},**{kwargname})"
            try:
                exec(code, shell.user_global_ns, shell.user_ns)
                result = working.get(resultname)
            finally:
                for key in ns:
                    working.pop(key)

            assert self.session is not None
            result_buf = serialize_object(
                result,
                buffer_threshold=self.session.buffer_threshold,
                item_threshold=self.session.item_threshold,
            )

        except BaseException as e:
            # invoke IPython traceback formatting
            shell.showtraceback()
            reply_content = {
                "traceback": shell._last_traceback or [],
                "ename": str(type(e).__name__),
                "evalue": str(e),
            }
            # FIXME: deprecated piece for ipyparallel (remove in 5.0):
            e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method="apply")
            reply_content["engine_info"] = e_info

            self.send_response(
                self.iopub_socket,
                "error",
                reply_content,
                ident=self._topic("error"),
            )
            self.log.info("Exception in apply request:\n%s", "\n".join(reply_content["traceback"]))
            result_buf = []
            reply_content["status"] = "error"
        else:
            reply_content = {"status": "ok"}

        return reply_content, result_buf

    def do_clear(self):
        """Clear the kernel."""
        if self.shell:
            self.shell.reset(False)
        return dict(status="ok")

    def _associate_new_top_level_threads_with(self, parent_header):
        """Store the parent header to associate it with new top-level threads"""
        self._new_threads_parent_header = parent_header

    def _initialize_thread_hooks(self):
        """Store thread hierarchy and thread-parent_header associations."""
        stdout = self._stdout
        stderr = self._stderr
        kernel_thread_ident = threading.get_ident()
        kernel = self
        _threading_Thread_run = threading.Thread.run
        _threading_Thread__init__ = threading.Thread.__init__

        def run_closure(self: threading.Thread):
            """Wrap `threading.Thread.run` to intercept thread identity.

            This is needed because there is no "start" hook yet, but there
            might be one in the future: https://bugs.python.org/issue14073

            This is a no-op if the `self._stdout` and `self._stderr` are not
            sub-classes of `OutStream`.
            """

            try:
                parent = self._ipykernel_parent_thread_ident  # type:ignore[attr-defined]
            except AttributeError:
                return
            for stream in [stdout, stderr]:
                if isinstance(stream, OutStream):
                    if parent == kernel_thread_ident:
                        stream._thread_to_parent_header[
                            self.ident
                        ] = kernel._new_threads_parent_header
                    else:
                        stream._thread_to_parent[self.ident] = parent
            _threading_Thread_run(self)

        def init_closure(self: threading.Thread, *args, **kwargs):
            _threading_Thread__init__(self, *args, **kwargs)
            self._ipykernel_parent_thread_ident = threading.get_ident()  # type:ignore[attr-defined]

        threading.Thread.__init__ = init_closure  # type:ignore[method-assign]
        threading.Thread.run = run_closure  # type:ignore[method-assign]

    def _clean_thread_parent_frames(
        self, phase: t.Literal["start", "stop"], info: t.Dict[str, t.Any]
    ):
        """Clean parent frames of threads which are no longer running.
        This is meant to be invoked by the garbage collector callback hook.

        The implementation enumerates the threads because there is no "exit" hook yet,
        but there might be one in the future: https://bugs.python.org/issue14073

        This is a no-op if the `self._stdout` and `self._stderr` are not
        sub-classes of `OutStream`.
        """
        # Only run before the garbage collector starts
        if phase != "start":
            return
        active_threads = {thread.ident for thread in threading.enumerate()}
        for stream in [self._stdout, self._stderr]:
            if isinstance(stream, OutStream):
                thread_to_parent_header = stream._thread_to_parent_header
                for identity in list(thread_to_parent_header.keys()):
                    if identity not in active_threads:
                        try:
                            del thread_to_parent_header[identity]
                        except KeyError:
                            pass
                thread_to_parent = stream._thread_to_parent
                for identity in list(thread_to_parent.keys()):
                    if identity not in active_threads:
                        try:
                            del thread_to_parent[identity]
                        except KeyError:
                            pass
|
||||
# This exists only for backwards compatibility - use IPythonKernel instead
|
||||
|
||||
|
||||
class Kernel(IPythonKernel):
|
||||
"""DEPRECATED. An alias for the IPython kernel class."""
|
||||
|
||||
def __init__(self, *args, **kwargs): # pragma: no cover
|
||||
"""DEPRECATED."""
|
||||
import warnings
|
||||
|
||||
warnings.warn(
|
||||
"Kernel is a deprecated alias of ipykernel.ipkernel.IPythonKernel",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
super().__init__(*args, **kwargs)
|
||||
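The `do_apply` path above works by staging the function and its arguments into the user namespace under collision-resistant names and `exec`-ing a one-line call, cleaning the names up afterwards. A minimal standalone sketch of that technique (all names here are illustrative stand-ins, not part of ipykernel):

# Sketch of the namespace-staging pattern used by do_apply above.
# `msg_id`, `f`, `args`, `kwargs` stand in for the unpacked apply request.
msg_id = "abc-123"
user_ns = {}

def f(x, y=1):
    return x + y

args, kwargs = (2,), {"y": 3}

prefix = "_" + msg_id.replace("-", "") + "_"
fname, argname, kwargname, resultname = (
    prefix + "f", prefix + "args", prefix + "kwargs", prefix + "result"
)
ns = {fname: f, argname: args, kwargname: kwargs, resultname: None}
user_ns.update(ns)
try:
    exec(f"{resultname} = {fname}(*{argname},**{kwargname})", user_ns, user_ns)
    result = user_ns.get(resultname)
finally:
    # always remove the staged names, even if the call raised
    for key in ns:
        user_ns.pop(key)

assert result == 5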
163
venv/lib/python3.10/site-packages/ipykernel/jsonutil.py
Normal file
@@ -0,0 +1,163 @@
"""Utilities to manipulate JSON objects."""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import math
import numbers
import re
import types
from binascii import b2a_base64
from datetime import date, datetime

from jupyter_client._version import version_info as jupyter_client_version

next_attr_name = "__next__"

# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------

# timestamp formats
ISO8601 = "%Y-%m-%dT%H:%M:%S.%f"
ISO8601_PAT = re.compile(
    r"^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})(\.\d{1,6})?Z?([\+\-]\d{2}:?\d{2})?$"
)

# holy crap, strptime is not threadsafe.
# Calling it once at import seems to help.
datetime.strptime("1", "%d")

# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------


# constants for identifying png/jpeg data
PNG = b"\x89PNG\r\n\x1a\n"
# front of PNG base64-encoded
PNG64 = b"iVBORw0KG"
JPEG = b"\xff\xd8"
# front of JPEG base64-encoded
JPEG64 = b"/9"
# constants for identifying gif data
GIF_64 = b"R0lGODdh"
GIF89_64 = b"R0lGODlh"
# front of PDF base64-encoded
PDF64 = b"JVBER"

JUPYTER_CLIENT_MAJOR_VERSION = jupyter_client_version[0]


def encode_images(format_dict):
    """b64-encodes images in a displaypub format dict

    Perhaps this should be handled in json_clean itself?

    Parameters
    ----------
    format_dict : dict
        A dictionary of display data keyed by mime-type

    Returns
    -------
    format_dict : dict
        A copy of the same dictionary,
        but binary image data ('image/png', 'image/jpeg' or 'application/pdf')
        is base64-encoded.

    """

    # no need for handling of ambiguous bytestrings on Python 3,
    # where bytes objects always represent binary data and thus
    # base64-encoded.
    return format_dict


def json_clean(obj):  # pragma: no cover
    """Deprecated, this is a no-op for jupyter-client>=7.

    Clean an object to ensure it's safe to encode in JSON.

    Atomic, immutable objects are returned unmodified. Sets and tuples are
    converted to lists, lists are copied and dicts are also copied.

    Note: dicts whose keys could cause collisions upon encoding (such as a dict
    with both the number 1 and the string '1' as keys) will cause a ValueError
    to be raised.

    Parameters
    ----------
    obj : any python object

    Returns
    -------
    out : object
        A version of the input which will not cause an encoding error when
        encoded as JSON. Note that this function does not *encode* its inputs,
        it simply sanitizes it so that there will be no encoding errors later.

    """
    if int(JUPYTER_CLIENT_MAJOR_VERSION) >= 7:
        return obj

    # types that are 'atomic' and ok in json as-is.
    atomic_ok = (str, type(None))

    # containers that we need to convert into lists
    container_to_list = (tuple, set, types.GeneratorType)

    # Since bools are a subtype of Integrals, which are a subtype of Reals,
    # we have to check them in that order.

    if isinstance(obj, bool):
        return obj

    if isinstance(obj, numbers.Integral):
        # cast int to int, in case subclasses override __str__ (e.g. boost enum, #4598)
        return int(obj)

    if isinstance(obj, numbers.Real):
        # cast out-of-range floats to their reprs
        if math.isnan(obj) or math.isinf(obj):
            return repr(obj)
        return float(obj)

    if isinstance(obj, atomic_ok):
        return obj

    if isinstance(obj, bytes):
        # unambiguous binary data is base64-encoded
        # (this probably should have happened upstream)
        return b2a_base64(obj).decode("ascii")

    if isinstance(obj, container_to_list) or (
        hasattr(obj, "__iter__") and hasattr(obj, next_attr_name)
    ):
        obj = list(obj)

    if isinstance(obj, list):
        return [json_clean(x) for x in obj]

    if isinstance(obj, dict):
        # First, validate that the dict won't lose data in conversion due to
        # key collisions after stringification. This can happen with keys like
        # True and 'true' or 1 and '1', which collide in JSON.
        nkeys = len(obj)
        nkeys_collapsed = len(set(map(str, obj)))
        if nkeys != nkeys_collapsed:
            msg = (
                "dict cannot be safely converted to JSON: "
                "key collision would lead to dropped values"
            )
            raise ValueError(msg)
        # If all OK, proceed by making the new dict that will be json-safe
        out = {}
        for k, v in obj.items():
            out[str(k)] = json_clean(v)
        return out
    if isinstance(obj, (datetime, date)):
        return obj.strftime(ISO8601)

    # we don't understand it, it's probably an unserializable object
    raise ValueError("Can't clean for JSON: %r" % obj)
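A short illustration of the two-regime behavior of `json_clean` above. On jupyter-client >= 7 it is a pure no-op; the comments describe the legacy path for older clients (the sample values are made up):

# Illustrative use of json_clean; with jupyter-client >= 7 installed it
# simply returns its input unchanged.
from ipykernel.jsonutil import json_clean

print(json_clean((1, 2, 3)))     # legacy path converts tuples to lists
print(json_clean(float("nan")))  # legacy path turns NaN/inf into their reprs

try:
    # on the legacy path these keys collide once stringified -> ValueError
    json_clean({1: "a", "1": "b"})
except ValueError as e:
    print(e)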
755
venv/lib/python3.10/site-packages/ipykernel/kernelapp.py
Normal file
@@ -0,0 +1,755 @@
"""An Application for launching a kernel"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import annotations

import atexit
import errno
import logging
import os
import signal
import sys
import traceback
import typing as t
from functools import partial
from io import FileIO, TextIOWrapper
from logging import StreamHandler
from pathlib import Path

import zmq
from IPython.core.application import (  # type:ignore[attr-defined]
    BaseIPythonApplication,
    base_aliases,
    base_flags,
    catch_config_error,
)
from IPython.core.profiledir import ProfileDir
from IPython.core.shellapp import InteractiveShellApp, shell_aliases, shell_flags
from jupyter_client.connect import ConnectionFileMixin
from jupyter_client.session import Session, session_aliases, session_flags
from jupyter_core.paths import jupyter_runtime_dir
from tornado import ioloop
from traitlets.traitlets import (
    Any,
    Bool,
    Dict,
    DottedObjectName,
    Instance,
    Integer,
    Type,
    Unicode,
    default,
)
from traitlets.utils import filefind
from traitlets.utils.importstring import import_item
from zmq.eventloop.zmqstream import ZMQStream

from .connect import get_connection_info, write_connection_file

# local imports
from .control import ControlThread
from .heartbeat import Heartbeat
from .iostream import IOPubThread
from .ipkernel import IPythonKernel
from .parentpoller import ParentPollerUnix, ParentPollerWindows
from .zmqshell import ZMQInteractiveShell

# -----------------------------------------------------------------------------
# Flags and Aliases
# -----------------------------------------------------------------------------

kernel_aliases = dict(base_aliases)
kernel_aliases.update(
    {
        "ip": "IPKernelApp.ip",
        "hb": "IPKernelApp.hb_port",
        "shell": "IPKernelApp.shell_port",
        "iopub": "IPKernelApp.iopub_port",
        "stdin": "IPKernelApp.stdin_port",
        "control": "IPKernelApp.control_port",
        "f": "IPKernelApp.connection_file",
        "transport": "IPKernelApp.transport",
    }
)

kernel_flags = dict(base_flags)
kernel_flags.update(
    {
        "no-stdout": ({"IPKernelApp": {"no_stdout": True}}, "redirect stdout to the null device"),
        "no-stderr": ({"IPKernelApp": {"no_stderr": True}}, "redirect stderr to the null device"),
        "pylab": (
            {"IPKernelApp": {"pylab": "auto"}},
            """Pre-load matplotlib and numpy for interactive use with
        the default matplotlib backend.""",
        ),
        "trio-loop": (
            {"InteractiveShell": {"trio_loop": False}},
            "Enable Trio as main event loop.",
        ),
    }
)

# inherit flags&aliases for any IPython shell apps
kernel_aliases.update(shell_aliases)
kernel_flags.update(shell_flags)

# inherit flags&aliases for Sessions
kernel_aliases.update(session_aliases)
kernel_flags.update(session_flags)

_ctrl_c_message = """\
NOTE: When using the `ipython kernel` entry point, Ctrl-C will not work.

To exit, you will have to explicitly quit this process, by either sending
"quit" from a client, or using Ctrl-\\ in UNIX-like environments.

To read more about this, see https://github.com/ipython/ipython/issues/2049

"""

# -----------------------------------------------------------------------------
# Application class for starting an IPython Kernel
# -----------------------------------------------------------------------------


class IPKernelApp(BaseIPythonApplication, InteractiveShellApp, ConnectionFileMixin):
    """The IPYKernel application class."""

    name = "ipython-kernel"
    aliases = Dict(kernel_aliases)  # type:ignore[assignment]
    flags = Dict(kernel_flags)  # type:ignore[assignment]
    classes = [IPythonKernel, ZMQInteractiveShell, ProfileDir, Session]
    # the kernel class, as an importstring
    kernel_class = Type(
        "ipykernel.ipkernel.IPythonKernel",
        klass="ipykernel.kernelbase.Kernel",
        help="""The Kernel subclass to be used.

    This should allow easy reuse of the IPKernelApp entry point
    to configure and launch kernels other than IPython's own.
    """,
    ).tag(config=True)
    kernel = Any()
    poller = Any()  # don't restrict this even though current pollers are all Threads
    heartbeat = Instance(Heartbeat, allow_none=True)

    context: zmq.Context[t.Any] | None = Any()  # type:ignore[assignment]
    shell_socket = Any()
    control_socket = Any()
    debugpy_socket = Any()
    debug_shell_socket = Any()
    stdin_socket = Any()
    iopub_socket = Any()
    iopub_thread = Any()
    control_thread = Any()

    _ports = Dict()

    subcommands = {
        "install": (
            "ipykernel.kernelspec.InstallIPythonKernelSpecApp",
            "Install the IPython kernel",
        ),
    }

    # connection info:
    connection_dir = Unicode()

    @default("connection_dir")
    def _default_connection_dir(self):
        return jupyter_runtime_dir()

    @property
    def abs_connection_file(self):
        if Path(self.connection_file).name == self.connection_file and self.connection_dir:
            return str(Path(str(self.connection_dir)) / self.connection_file)
        return self.connection_file

    # streams, etc.
    no_stdout = Bool(False, help="redirect stdout to the null device").tag(config=True)
    no_stderr = Bool(False, help="redirect stderr to the null device").tag(config=True)
    trio_loop = Bool(False, help="Set main event loop.").tag(config=True)
    quiet = Bool(True, help="Only send stdout/stderr to output stream").tag(config=True)
    outstream_class = DottedObjectName(
        "ipykernel.iostream.OutStream",
        help="The importstring for the OutStream factory",
        allow_none=True,
    ).tag(config=True)
    displayhook_class = DottedObjectName(
        "ipykernel.displayhook.ZMQDisplayHook", help="The importstring for the DisplayHook factory"
    ).tag(config=True)

    capture_fd_output = Bool(
        True,
        help="""Attempt to capture and forward low-level output, e.g. produced by Extension libraries.
    """,
    ).tag(config=True)

    # polling
    parent_handle = Integer(
        int(os.environ.get("JPY_PARENT_PID") or 0),
        help="""kill this process if its parent dies. On Windows, the argument
        specifies the HANDLE of the parent process, otherwise it is simply boolean.
        """,
    ).tag(config=True)
    interrupt = Integer(
        int(os.environ.get("JPY_INTERRUPT_EVENT") or 0),
        help="""ONLY USED ON WINDOWS
        Interrupt this process when the parent is signaled.
        """,
    ).tag(config=True)

    def init_crash_handler(self):
        """Initialize the crash handler."""
        sys.excepthook = self.excepthook

    def excepthook(self, etype, evalue, tb):
        """Handle an exception."""
        # write uncaught traceback to 'real' stderr, not zmq-forwarder
        traceback.print_exception(etype, evalue, tb, file=sys.__stderr__)

    def init_poller(self):
        """Initialize the poller."""
        if sys.platform == "win32":
            if self.interrupt or self.parent_handle:
                self.poller = ParentPollerWindows(self.interrupt, self.parent_handle)
        elif self.parent_handle and self.parent_handle != 1:
            # PID 1 (init) is special and will never go away,
            # only be reassigned.
            # Parent polling doesn't work if ppid == 1 to start with.
            self.poller = ParentPollerUnix()

    def _try_bind_socket(self, s, port):
        iface = f"{self.transport}://{self.ip}"
        if self.transport == "tcp":
            if port <= 0:
                port = s.bind_to_random_port(iface)
            else:
                s.bind("tcp://%s:%i" % (self.ip, port))
        elif self.transport == "ipc":
            if port <= 0:
                port = 1
                path = "%s-%i" % (self.ip, port)
                while Path(path).exists():
                    port = port + 1
                    path = "%s-%i" % (self.ip, port)
            else:
                path = "%s-%i" % (self.ip, port)
            s.bind("ipc://%s" % path)
        return port

    def _bind_socket(self, s, port):
        try:
            win_in_use = errno.WSAEADDRINUSE  # type:ignore[attr-defined]
        except AttributeError:
            win_in_use = None

        # Try up to 100 times to bind a port when in conflict to avoid
        # infinite attempts in bad setups
        max_attempts = 1 if port else 100
        for attempt in range(max_attempts):
            try:
                return self._try_bind_socket(s, port)
            except zmq.ZMQError as ze:
                # Raise if we have any error not related to socket binding
                if ze.errno != errno.EADDRINUSE and ze.errno != win_in_use:
                    raise
                if attempt == max_attempts - 1:
                    raise
        return None

    def write_connection_file(self):
        """write connection info to JSON file"""
        cf = self.abs_connection_file
        connection_info = dict(
            ip=self.ip,
            key=self.session.key,
            transport=self.transport,
            shell_port=self.shell_port,
            stdin_port=self.stdin_port,
            hb_port=self.hb_port,
            iopub_port=self.iopub_port,
            control_port=self.control_port,
        )
        if Path(cf).exists():
            # If the file exists, merge our info into it. For example, if the
            # original file had port number 0, we update with the actual port
            # used.
            existing_connection_info = get_connection_info(cf, unpack=True)
            assert isinstance(existing_connection_info, dict)
            connection_info = dict(existing_connection_info, **connection_info)
            if connection_info == existing_connection_info:
                self.log.debug("Connection file %s with current information already exists", cf)
                return

        self.log.debug("Writing connection file: %s", cf)

        write_connection_file(cf, **connection_info)

    def cleanup_connection_file(self):
        """Clean up our connection file."""
        cf = self.abs_connection_file
        self.log.debug("Cleaning up connection file: %s", cf)
        try:
            Path(cf).unlink()
        except OSError:
            pass

        self.cleanup_ipc_files()

    def init_connection_file(self):
        """Initialize our connection file."""
        if not self.connection_file:
            self.connection_file = "kernel-%s.json" % os.getpid()
        try:
            self.connection_file = filefind(self.connection_file, [".", self.connection_dir])
        except OSError:
            self.log.debug("Connection file not found: %s", self.connection_file)
            # This means I own it, and I'll create it in this directory:
            Path(self.abs_connection_file).parent.mkdir(mode=0o700, exist_ok=True, parents=True)
            # Also, I will clean it up:
            atexit.register(self.cleanup_connection_file)
            return
        try:
            self.load_connection_file()
        except Exception:
            self.log.error(  # noqa: G201
                "Failed to load connection file: %r", self.connection_file, exc_info=True
            )
            self.exit(1)

    def init_sockets(self):
        """Create a context, a session, and the kernel sockets."""
        self.log.info("Starting the kernel at pid: %i", os.getpid())
        assert self.context is None, "init_sockets cannot be called twice!"
        self.context = context = zmq.Context()
        atexit.register(self.close)

        self.shell_socket = context.socket(zmq.ROUTER)
        self.shell_socket.linger = 1000
        self.shell_port = self._bind_socket(self.shell_socket, self.shell_port)
        self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port)

        self.stdin_socket = context.socket(zmq.ROUTER)
        self.stdin_socket.linger = 1000
        self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port)
        self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port)

        if hasattr(zmq, "ROUTER_HANDOVER"):
            # set router-handover to workaround zeromq reconnect problems
            # in certain rare circumstances
            # see ipython/ipykernel#270 and zeromq/libzmq#2892
            self.shell_socket.router_handover = self.stdin_socket.router_handover = 1

        self.init_control(context)
        self.init_iopub(context)

    def init_control(self, context):
        """Initialize the control channel."""
        self.control_socket = context.socket(zmq.ROUTER)
        self.control_socket.linger = 1000
        self.control_port = self._bind_socket(self.control_socket, self.control_port)
        self.log.debug("control ROUTER Channel on port: %i" % self.control_port)

        self.debugpy_socket = context.socket(zmq.STREAM)
        self.debugpy_socket.linger = 1000

        self.debug_shell_socket = context.socket(zmq.DEALER)
        self.debug_shell_socket.linger = 1000
        if self.shell_socket.getsockopt(zmq.LAST_ENDPOINT):
            self.debug_shell_socket.connect(self.shell_socket.getsockopt(zmq.LAST_ENDPOINT))

        if hasattr(zmq, "ROUTER_HANDOVER"):
            # set router-handover to workaround zeromq reconnect problems
            # in certain rare circumstances
            # see ipython/ipykernel#270 and zeromq/libzmq#2892
            self.control_socket.router_handover = 1

        self.control_thread = ControlThread(daemon=True)

    def init_iopub(self, context):
        """Initialize the iopub channel."""
        self.iopub_socket = context.socket(zmq.PUB)
        self.iopub_socket.linger = 1000
        self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port)
        self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port)
        self.configure_tornado_logger()
        self.iopub_thread = IOPubThread(self.iopub_socket, pipe=True)
        self.iopub_thread.start()
        # backward-compat: wrap iopub socket API in background thread
        self.iopub_socket = self.iopub_thread.background_socket

    def init_heartbeat(self):
        """start the heart beating"""
        # heartbeat doesn't share context, because it mustn't be blocked
        # by the GIL, which is accessed by libzmq when freeing zero-copy messages
        hb_ctx = zmq.Context()
        self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port))
        self.hb_port = self.heartbeat.port
        self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port)
        self.heartbeat.start()

    def close(self):
        """Close zmq sockets in an orderly fashion"""
        # un-capture IO before we start closing channels
        self.reset_io()
        self.log.info("Cleaning up sockets")
        if self.heartbeat:
            self.log.debug("Closing heartbeat channel")
            self.heartbeat.context.term()
        if self.iopub_thread:
            self.log.debug("Closing iopub channel")
            self.iopub_thread.stop()
            self.iopub_thread.close()
        if self.control_thread and self.control_thread.is_alive():
            self.log.debug("Closing control thread")
            self.control_thread.stop()
            self.control_thread.join()

        if self.debugpy_socket and not self.debugpy_socket.closed:
            self.debugpy_socket.close()
        if self.debug_shell_socket and not self.debug_shell_socket.closed:
            self.debug_shell_socket.close()

        for channel in ("shell", "control", "stdin"):
            self.log.debug("Closing %s channel", channel)
            socket = getattr(self, channel + "_socket", None)
            if socket and not socket.closed:
                socket.close()
        self.log.debug("Terminating zmq context")
        if self.context:
            self.context.term()
        self.log.debug("Terminated zmq context")

    def log_connection_info(self):
        """display connection info, and store ports"""
        basename = Path(self.connection_file).name
        if (
            basename == self.connection_file
            or str(Path(self.connection_file).parent) == self.connection_dir
        ):
            # use shortname
            tail = basename
        else:
            tail = self.connection_file
        lines = [
            "To connect another client to this kernel, use:",
            "    --existing %s" % tail,
        ]
        # log connection info
        # info-level, so often not shown.
        # frontends should use the %connect_info magic
        # to see the connection info
        for line in lines:
            self.log.info(line)
        # also raw print to the terminal if no parent_handle (`ipython kernel`)
        # unless log-level is CRITICAL (--quiet)
        if not self.parent_handle and int(self.log_level) < logging.CRITICAL:  # type:ignore[call-overload]
            print(_ctrl_c_message, file=sys.__stdout__)
            for line in lines:
                print(line, file=sys.__stdout__)

        self._ports = dict(
            shell=self.shell_port,
            iopub=self.iopub_port,
            stdin=self.stdin_port,
            hb=self.hb_port,
            control=self.control_port,
        )

    def init_blackhole(self):
        """redirects stdout/stderr to devnull if necessary"""
        if self.no_stdout or self.no_stderr:
            blackhole = open(os.devnull, "w")  # noqa: SIM115
            if self.no_stdout:
                sys.stdout = sys.__stdout__ = blackhole  # type:ignore[misc]
            if self.no_stderr:
                sys.stderr = sys.__stderr__ = blackhole  # type:ignore[misc]

    def init_io(self):
        """Redirect input streams and set a display hook."""
        if self.outstream_class:
            outstream_factory = import_item(str(self.outstream_class))
            if sys.stdout is not None:
                sys.stdout.flush()

            e_stdout = None if self.quiet else sys.__stdout__
            e_stderr = None if self.quiet else sys.__stderr__

            if not self.capture_fd_output:
                outstream_factory = partial(outstream_factory, watchfd=False)

            sys.stdout = outstream_factory(self.session, self.iopub_thread, "stdout", echo=e_stdout)
            if sys.stderr is not None:
                sys.stderr.flush()
            sys.stderr = outstream_factory(self.session, self.iopub_thread, "stderr", echo=e_stderr)
            if hasattr(sys.stderr, "_original_stdstream_copy"):
                for handler in self.log.handlers:
                    if isinstance(handler, StreamHandler) and (handler.stream.buffer.fileno() == 2):
                        self.log.debug("Seeing logger to stderr, rerouting to raw filedescriptor.")

                        handler.stream = TextIOWrapper(
                            FileIO(
                                sys.stderr._original_stdstream_copy,
                                "w",
                            )
                        )
        if self.displayhook_class:
            displayhook_factory = import_item(str(self.displayhook_class))
            self.displayhook = displayhook_factory(self.session, self.iopub_socket)
            sys.displayhook = self.displayhook

        self.patch_io()

    def reset_io(self):
        """restore original io

        restores state after init_io
        """
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
        sys.displayhook = sys.__displayhook__

    def patch_io(self):
        """Patch important libraries that can't handle sys.stdout forwarding"""
        try:
            import faulthandler
        except ImportError:
            pass
        else:
            # Warning: this is a monkeypatch of `faulthandler.enable`, watch for possible
            # updates to the upstream API and update accordingly (up-to-date as of Python 3.5):
            # https://docs.python.org/3/library/faulthandler.html#faulthandler.enable

            # change default file to __stderr__ from forwarded stderr
            faulthandler_enable = faulthandler.enable

            def enable(file=sys.__stderr__, all_threads=True, **kwargs):
                return faulthandler_enable(file=file, all_threads=all_threads, **kwargs)

            faulthandler.enable = enable

            if hasattr(faulthandler, "register"):
                faulthandler_register = faulthandler.register

                def register(signum, file=sys.__stderr__, all_threads=True, chain=False, **kwargs):
                    return faulthandler_register(
                        signum, file=file, all_threads=all_threads, chain=chain, **kwargs
                    )

                faulthandler.register = register

    def init_signal(self):
        """Initialize the signal handler."""
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    def init_kernel(self):
        """Create the Kernel object itself"""
        shell_stream = ZMQStream(self.shell_socket)
        control_stream = ZMQStream(self.control_socket, self.control_thread.io_loop)
        debugpy_stream = ZMQStream(self.debugpy_socket, self.control_thread.io_loop)
        self.control_thread.start()
        kernel_factory = self.kernel_class.instance  # type:ignore[attr-defined]

        kernel = kernel_factory(
            parent=self,
            session=self.session,
            control_stream=control_stream,
            debugpy_stream=debugpy_stream,
            debug_shell_socket=self.debug_shell_socket,
            shell_stream=shell_stream,
            control_thread=self.control_thread,
            iopub_thread=self.iopub_thread,
            iopub_socket=self.iopub_socket,
            stdin_socket=self.stdin_socket,
            log=self.log,
            profile_dir=self.profile_dir,
            user_ns=self.user_ns,
        )
        kernel.record_ports({name + "_port": port for name, port in self._ports.items()})
        self.kernel = kernel

        # Allow the displayhook to get the execution count
        self.displayhook.get_execution_count = lambda: kernel.execution_count

    def init_gui_pylab(self):
        """Enable GUI event loop integration, taking pylab into account."""

        # Register inline backend as default
        # this is higher priority than matplotlibrc,
        # but lower priority than anything else (mpl.use() for instance).
        # This only affects matplotlib >= 1.5
        if not os.environ.get("MPLBACKEND"):
            os.environ["MPLBACKEND"] = "module://matplotlib_inline.backend_inline"

        # Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab`
        # to ensure that any exception is printed straight to stderr.
        # Normally _showtraceback associates the reply with an execution,
        # which means frontends will never draw it, as this exception
        # is not associated with any execute request.

        shell = self.shell
        assert shell is not None
        _showtraceback = shell._showtraceback
        try:
            # replace error-sending traceback with stderr
            def print_tb(etype, evalue, stb):
                print("GUI event loop or pylab initialization failed", file=sys.stderr)
                assert shell is not None
                print(shell.InteractiveTB.stb2text(stb), file=sys.stderr)

            shell._showtraceback = print_tb
            InteractiveShellApp.init_gui_pylab(self)
        finally:
            shell._showtraceback = _showtraceback

    def init_shell(self):
        """Initialize the shell channel."""
        self.shell = getattr(self.kernel, "shell", None)
        if self.shell:
            self.shell.configurables.append(self)

    def configure_tornado_logger(self):
        """Configure the tornado logging.Logger.

        Must set up the tornado logger or else tornado will call
        basicConfig for the root logger which makes the root logger
        go to the real sys.stderr instead of the capture streams.
        This function mimics the setup of logging.basicConfig.
        """
        logger = logging.getLogger("tornado")
        handler = logging.StreamHandler()
        formatter = logging.Formatter(logging.BASIC_FORMAT)
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    def _init_asyncio_patch(self):
        """set default asyncio policy to be compatible with tornado

        Tornado 6 (at least) is not compatible with the default
        asyncio implementation on Windows

        Pick the older SelectorEventLoopPolicy on Windows
        if the known-incompatible default policy is in use.

        Support for Proactor via a background thread is available in tornado 6.1,
        but it is still preferable to run the Selector in the main thread
        instead of the background.

        do this as early as possible to make it a low priority and overridable

        ref: https://github.com/tornadoweb/tornado/issues/2608

        FIXME: if/when tornado supports the defaults in asyncio without threads,
        remove and bump tornado requirement for py38.
        Most likely, this will mean a new Python version
        where asyncio.ProactorEventLoop supports add_reader and friends.

        """
        if sys.platform.startswith("win") and sys.version_info >= (3, 8):
            import asyncio

            try:
                from asyncio import WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy
            except ImportError:
                pass
                # not affected
            else:
                if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
                    # WindowsProactorEventLoopPolicy is not compatible with tornado 6
                    # fallback to the pre-3.8 default of Selector
                    asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())

    def init_pdb(self):
        """Replace pdb with IPython's version that is interruptible.

        With the non-interruptible version, stopping pdb() locks up the kernel in a
        non-recoverable state.
        """
        import pdb

        from IPython.core import debugger

        if hasattr(debugger, "InterruptiblePdb"):
            # Only available in newer IPython releases:
            debugger.Pdb = debugger.InterruptiblePdb  # type:ignore[misc]
            pdb.Pdb = debugger.Pdb  # type:ignore[assignment,misc]
            pdb.set_trace = debugger.set_trace

    @catch_config_error
    def initialize(self, argv=None):
        """Initialize the application."""
        self._init_asyncio_patch()
        super().initialize(argv)
        if self.subapp is not None:
            return

        self.init_pdb()
        self.init_blackhole()
        self.init_connection_file()
        self.init_poller()
        self.init_sockets()
        self.init_heartbeat()
        # writing/displaying connection info must be *after* init_sockets/heartbeat
        self.write_connection_file()
        # Log connection info after writing connection file, so that the connection
        # file is definitely available at the time someone reads the log.
        self.log_connection_info()
        self.init_io()
        try:
            self.init_signal()
        except Exception:
            # Catch exception when initializing signal fails, eg when running the
            # kernel on a separate thread
            if int(self.log_level) < logging.CRITICAL:  # type:ignore[call-overload]
                self.log.error("Unable to initialize signal:", exc_info=True)  # noqa: G201
        self.init_kernel()
        # shell init steps
        self.init_path()
        self.init_shell()
        if self.shell:
            self.init_gui_pylab()
            self.init_extensions()
            self.init_code()
        # flush stdout/stderr, so that anything written to these streams during
        # initialization do not get associated with the first execution request
        sys.stdout.flush()
        sys.stderr.flush()

    def start(self):
        """Start the application."""
        if self.subapp is not None:
            return self.subapp.start()
        if self.poller is not None:
            self.poller.start()
        self.kernel.start()
        self.io_loop = ioloop.IOLoop.current()
        if self.trio_loop:
            from ipykernel.trio_runner import TrioRunner

            tr = TrioRunner()
            tr.initialize(self.kernel, self.io_loop)
            try:
                tr.run()
            except KeyboardInterrupt:
                pass
        else:
            try:
                self.io_loop.start()
            except KeyboardInterrupt:
                pass


launch_new_instance = IPKernelApp.launch_instance


def main():  # pragma: no cover
    """Run an IPKernel as an application"""
    app = IPKernelApp.instance()
    app.initialize()
    app.start()


if __name__ == "__main__":
    main()
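Besides the `main()` entry point above, the app can be driven programmatically. A hedged sketch (the connection-file path is a made-up example; in practice `initialize()` can also be called with no arguments to let the app pick a path under the Jupyter runtime dir):

# Illustrative programmatic launch of IPKernelApp, equivalent in spirit to
# `python -m ipykernel_launcher -f /tmp/kernel-demo.json`.
from ipykernel.kernelapp import IPKernelApp

app = IPKernelApp.instance()
app.initialize(["-f", "/tmp/kernel-demo.json"])  # hypothetical path
app.start()  # blocks in the tornado IOLoop until the kernel shuts down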
1407
venv/lib/python3.10/site-packages/ipykernel/kernelbase.py
Normal file
File diff suppressed because it is too large
263
venv/lib/python3.10/site-packages/ipykernel/kernelspec.py
Normal file
@@ -0,0 +1,263 @@
"""The IPython kernel spec for Jupyter"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

from __future__ import annotations

import errno
import json
import os
import shutil
import stat
import sys
import tempfile
from pathlib import Path
from typing import Any

from jupyter_client.kernelspec import KernelSpecManager
from traitlets import Unicode

try:
    from .debugger import _is_debugpy_available
except ImportError:
    _is_debugpy_available = False

pjoin = os.path.join

KERNEL_NAME = "python%i" % sys.version_info[0]

# path to kernelspec resources
RESOURCES = pjoin(Path(__file__).parent, "resources")


def make_ipkernel_cmd(
    mod: str = "ipykernel_launcher",
    executable: str | None = None,
    extra_arguments: list[str] | None = None,
) -> list[str]:
    """Build Popen command list for launching an IPython kernel.

    Parameters
    ----------
    mod : str, optional (default 'ipykernel')
        A string of an IPython module whose __main__ starts an IPython kernel
    executable : str, optional (default sys.executable)
        The Python executable to use for the kernel process.
    extra_arguments : list, optional
        A list of extra arguments to pass when executing the launch code.

    Returns
    -------
    A Popen command list
    """
    if executable is None:
        executable = sys.executable
    extra_arguments = extra_arguments or []
    arguments = [executable, "-m", mod, "-f", "{connection_file}"]
    arguments.extend(extra_arguments)

    return arguments


def get_kernel_dict(extra_arguments: list[str] | None = None) -> dict[str, Any]:
    """Construct dict for kernel.json"""
    return {
        "argv": make_ipkernel_cmd(extra_arguments=extra_arguments),
        "display_name": "Python %i (ipykernel)" % sys.version_info[0],
        "language": "python",
        "metadata": {"debugger": True},
    }


def write_kernel_spec(
    path: Path | str | None = None,
    overrides: dict[str, Any] | None = None,
    extra_arguments: list[str] | None = None,
) -> str:
    """Write a kernel spec directory to `path`

    If `path` is not specified, a temporary directory is created.
    If `overrides` is given, the kernelspec JSON is updated before writing.

    The path to the kernelspec is always returned.
    """
    if path is None:
        path = Path(tempfile.mkdtemp(suffix="_kernels")) / KERNEL_NAME

    # stage resources
    shutil.copytree(RESOURCES, path)

    # ensure path is writable
    mask = Path(path).stat().st_mode
    if not mask & stat.S_IWUSR:
        Path(path).chmod(mask | stat.S_IWUSR)

    # write kernel.json
    kernel_dict = get_kernel_dict(extra_arguments)

    if overrides:
        kernel_dict.update(overrides)
    with open(pjoin(path, "kernel.json"), "w") as f:
        json.dump(kernel_dict, f, indent=1)

    return str(path)


def install(
    kernel_spec_manager: KernelSpecManager | None = None,
    user: bool = False,
    kernel_name: str = KERNEL_NAME,
    display_name: str | None = None,
    prefix: str | None = None,
    profile: str | None = None,
    env: dict[str, str] | None = None,
) -> str:
    """Install the IPython kernelspec for Jupyter

    Parameters
    ----------
    kernel_spec_manager : KernelSpecManager [optional]
        A KernelSpecManager to use for installation.
        If none provided, a default instance will be created.
    user : bool [default: False]
        Whether to do a user-only install, or system-wide.
    kernel_name : str, optional
        Specify a name for the kernelspec.
        This is needed for having multiple IPython kernels for different environments.
    display_name : str, optional
        Specify the display name for the kernelspec
    profile : str, optional
        Specify a custom profile to be loaded by the kernel.
    prefix : str, optional
        Specify an install prefix for the kernelspec.
        This is needed to install into a non-default location, such as a conda/virtual-env.
    env : dict, optional
        A dictionary of extra environment variables for the kernel.
        These will be added to the current environment variables before the
        kernel is started

    Returns
    -------
    The path where the kernelspec was installed.
    """
    if kernel_spec_manager is None:
        kernel_spec_manager = KernelSpecManager()

    if (kernel_name != KERNEL_NAME) and (display_name is None):
        # kernel_name is specified and display_name is not
        # default display_name to kernel_name
        display_name = kernel_name
    overrides: dict[str, Any] = {}
    if display_name:
        overrides["display_name"] = display_name
    if profile:
        extra_arguments = ["--profile", profile]
        if not display_name:
            # add the profile to the default display name
            overrides["display_name"] = "Python %i [profile=%s]" % (sys.version_info[0], profile)
    else:
        extra_arguments = None
    if env:
        overrides["env"] = env
    path = write_kernel_spec(overrides=overrides, extra_arguments=extra_arguments)
    dest = kernel_spec_manager.install_kernel_spec(
        path, kernel_name=kernel_name, user=user, prefix=prefix
    )
    # cleanup afterward
    shutil.rmtree(path)
    return dest


# Entrypoint

from traitlets.config import Application


class InstallIPythonKernelSpecApp(Application):
    """Dummy app wrapping argparse"""

    name = Unicode("ipython-kernel-install")

    def initialize(self, argv: list[str] | None = None) -> None:
        """Initialize the app."""
        if argv is None:
            argv = sys.argv[1:]
        self.argv = argv

    def start(self) -> None:
        """Start the app."""
        import argparse

        parser = argparse.ArgumentParser(
            prog=self.name, description="Install the IPython kernel spec."
        )
        parser.add_argument(
            "--user",
            action="store_true",
            help="Install for the current user instead of system-wide",
        )
        parser.add_argument(
            "--name",
            type=str,
            default=KERNEL_NAME,
            help="Specify a name for the kernelspec."
            " This is needed to have multiple IPython kernels at the same time.",
        )
        parser.add_argument(
            "--display-name",
            type=str,
            help="Specify the display name for the kernelspec."
            " This is helpful when you have multiple IPython kernels.",
        )
        parser.add_argument(
            "--profile",
            type=str,
            help="Specify an IPython profile to load. "
            "This can be used to create custom versions of the kernel.",
        )
        parser.add_argument(
            "--prefix",
            type=str,
            help="Specify an install prefix for the kernelspec."
            " This is needed to install into a non-default location, such as a conda/virtual-env.",
        )
        parser.add_argument(
            "--sys-prefix",
            action="store_const",
            const=sys.prefix,
            dest="prefix",
            help="Install to Python's sys.prefix."
            " Shorthand for --prefix='%s'. For use in conda/virtual-envs." % sys.prefix,
        )
        parser.add_argument(
            "--env",
            action="append",
            nargs=2,
            metavar=("ENV", "VALUE"),
            help="Set environment variables for the kernel.",
        )
        opts = parser.parse_args(self.argv)
        if opts.env:
            opts.env = dict(opts.env)
        try:
            dest = install(
                user=opts.user,
                kernel_name=opts.name,
                profile=opts.profile,
                prefix=opts.prefix,
                display_name=opts.display_name,
                env=opts.env,
            )
        except OSError as e:
            if e.errno == errno.EACCES:
                print(e, file=sys.stderr)
                if opts.user:
                    print("Perhaps you want `sudo` or `--user`?", file=sys.stderr)
                self.exit(1)
            raise
        print(f"Installed kernelspec {opts.name} in {dest}")


if __name__ == "__main__":
    InstallIPythonKernelSpecApp.launch_instance()
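For reference, a minimal programmatic use of the `install()` helper defined above; the kernel name and display name here are made-up example values:

# Illustrative call of kernelspec.install(); writes a kernel.json via
# write_kernel_spec() and registers it with the KernelSpecManager.
from ipykernel.kernelspec import install

dest = install(user=True, kernel_name="demo-env", display_name="Python (demo)")
print("kernelspec installed in", dest)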
29
venv/lib/python3.10/site-packages/ipykernel/log.py
Normal file
@@ -0,0 +1,29 @@
"""A PUB log handler."""
import warnings

from zmq.log.handlers import PUBHandler

warnings.warn(
    "ipykernel.log is deprecated. It has moved to ipyparallel.engine.log",
    DeprecationWarning,
    stacklevel=2,
)


class EnginePUBHandler(PUBHandler):
    """A simple PUBHandler subclass that sets root_topic"""

    engine = None

    def __init__(self, engine, *args, **kwargs):
        """Initialize the handler."""
        PUBHandler.__init__(self, *args, **kwargs)
        self.engine = engine

    @property  # type:ignore[misc]
    def root_topic(self):
        """this is a property, in case the handler is created
        before the engine gets registered with an id"""
        if isinstance(getattr(self.engine, "id", None), int):
            return "engine.%i" % self.engine.id  # type:ignore[union-attr]
        return "engine"
121
venv/lib/python3.10/site-packages/ipykernel/parentpoller.py
Normal file
@@ -0,0 +1,121 @@
"""A parent poller for unix."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

try:
    import ctypes
except ImportError:
    ctypes = None  # type:ignore[assignment]
import os
import platform
import signal
import time
import warnings
from _thread import interrupt_main  # Py 3
from threading import Thread

from traitlets.log import get_logger


class ParentPollerUnix(Thread):
    """A Unix-specific daemon thread that terminates the program immediately
    when the parent process no longer exists.
    """

    def __init__(self):
        """Initialize the poller."""
        super().__init__()
        self.daemon = True

    def run(self):
        """Run the poller."""
        # We cannot use os.waitpid because it works only for child processes.
        from errno import EINTR

        while True:
            try:
                if os.getppid() == 1:
                    get_logger().warning("Parent appears to have exited, shutting down.")
                    os._exit(1)
                time.sleep(1.0)
            except OSError as e:
                if e.errno == EINTR:
                    continue
                raise


class ParentPollerWindows(Thread):
    """A Windows-specific daemon thread that listens for a special event that
    signals an interrupt and, optionally, terminates the program immediately
    when the parent process no longer exists.
    """

    def __init__(self, interrupt_handle=None, parent_handle=None):
        """Create the poller. At least one of the optional parameters must be
        provided.

        Parameters
        ----------
        interrupt_handle : HANDLE (int), optional
            If provided, the program will generate a Ctrl+C event when this
            handle is signaled.
        parent_handle : HANDLE (int), optional
            If provided, the program will terminate immediately when this
            handle is signaled.
        """
        assert interrupt_handle or parent_handle
        super().__init__()
        if ctypes is None:
            msg = "ParentPollerWindows requires ctypes"  # type:ignore[unreachable]
            raise ImportError(msg)
        self.daemon = True
        self.interrupt_handle = interrupt_handle
        self.parent_handle = parent_handle

    def run(self):
        """Run the poll loop. This method never returns."""
        try:
            from _winapi import INFINITE, WAIT_OBJECT_0  # type:ignore[attr-defined]
        except ImportError:
            from _subprocess import INFINITE, WAIT_OBJECT_0

        # Build the list of handles to listen on.
        handles = []
        if self.interrupt_handle:
            handles.append(self.interrupt_handle)
        if self.parent_handle:
            handles.append(self.parent_handle)
        arch = platform.architecture()[0]
        c_int = ctypes.c_int64 if arch.startswith("64") else ctypes.c_int

        # Listen forever.
        while True:
            result = ctypes.windll.kernel32.WaitForMultipleObjects(  # type:ignore[attr-defined]
                len(handles),  # nCount
                (c_int * len(handles))(*handles),  # lpHandles
                False,  # bWaitAll
                INFINITE,
            )  # dwMilliseconds

            if WAIT_OBJECT_0 <= result < len(handles):
                handle = handles[result - WAIT_OBJECT_0]

                if handle == self.interrupt_handle:
                    # check if signal handler is callable
                    # to avoid 'int not callable' error (Python issue #23395)
                    if callable(signal.getsignal(signal.SIGINT)):
                        interrupt_main()

                elif handle == self.parent_handle:
                    get_logger().warning("Parent appears to have exited, shutting down.")
                    os._exit(1)
            elif result < 0:
                # wait failed, just give up and stop polling.
                warnings.warn(
                    """Parent poll failed. If the frontend dies,
                the kernel may be left running. Please let us know
                about your system (bitness, Python, etc.) at
                ipython-dev@scipy.org""",
                    stacklevel=2,
                )
                return
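How the kernel actually uses the Unix poller (compare `init_poller` in kernelapp.py above): it simply starts the daemon thread and forgets about it. A minimal sketch:

# Illustrative wiring of ParentPollerUnix: once started, the daemon thread
# checks os.getppid() once per second and calls os._exit(1) if the kernel
# has been reparented to init (i.e. its original parent died).
from ipykernel.parentpoller import ParentPollerUnix

poller = ParentPollerUnix()
poller.start()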
483
venv/lib/python3.10/site-packages/ipykernel/pickleutil.py
Normal file
@@ -0,0 +1,483 @@
|
||||
"""Pickle related utilities. Perhaps this should be called 'can'."""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
import typing
|
||||
import warnings
|
||||
|
||||
warnings.warn(
|
||||
"ipykernel.pickleutil is deprecated. It has moved to ipyparallel.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
import copy
|
||||
import pickle
|
||||
import sys
|
||||
from types import FunctionType
|
||||
|
||||
# This registers a hook when it's imported
|
||||
from ipyparallel.serialize import codeutil
|
||||
from traitlets.log import get_logger
|
||||
from traitlets.utils.importstring import import_item
|
||||
|
||||
buffer = memoryview
|
||||
class_type = type
|
||||
|
||||
PICKLE_PROTOCOL = pickle.DEFAULT_PROTOCOL
|
||||
|
||||
|
||||
def _get_cell_type(a=None):
|
||||
"""the type of a closure cell doesn't seem to be importable,
|
||||
so just create one
|
||||
"""
|
||||
|
||||
def inner():
|
||||
return a
|
||||
|
||||
return type(inner.__closure__[0]) # type:ignore[index]
|
||||
|
||||
|
||||
cell_type = _get_cell_type()
|
||||
|
||||
# -------------------------------------------------------------------------------
|
||||
# Functions
|
||||
# -------------------------------------------------------------------------------
|
||||
|
||||
|
||||
def interactive(f):
|
||||
"""decorator for making functions appear as interactively defined.
|
||||
This results in the function being linked to the user_ns as globals()
|
||||
instead of the module globals().
|
||||
"""
|
||||
|
||||
# build new FunctionType, so it can have the right globals
|
||||
# interactive functions never have closures, that's kind of the point
|
||||
if isinstance(f, FunctionType):
|
||||
mainmod = __import__("__main__")
|
||||
f = FunctionType(
|
||||
f.__code__,
|
||||
mainmod.__dict__,
|
||||
f.__name__,
|
||||
f.__defaults__,
|
||||
)
|
||||
# associate with __main__ for uncanning
|
||||
f.__module__ = "__main__"
|
||||
return f
|
||||
|
||||
|
||||
def use_dill():
|
||||
"""use dill to expand serialization support
|
||||
|
||||
adds support for object methods and closures to serialization.
|
||||
"""
|
||||
# import dill causes most of the magic
|
||||
import dill
|
||||
|
||||
# dill doesn't work with cPickle,
|
||||
# tell the two relevant modules to use plain pickle
|
||||
|
||||
global pickle # noqa: PLW0603
|
||||
pickle = dill
|
||||
|
||||
try:
|
||||
from ipykernel import serialize
|
||||
except ImportError:
|
||||
pass
|
||||
else:
|
||||
serialize.pickle = dill # type:ignore[attr-defined]
|
||||
|
||||
# disable special function handling, let dill take care of it
|
||||
can_map.pop(FunctionType, None)
|
||||
|
||||
|
||||
def use_cloudpickle():
|
||||
"""use cloudpickle to expand serialization support
|
||||
|
||||
adds support for object methods and closures to serialization.
|
||||
"""
|
||||
import cloudpickle
|
||||
|
||||
global pickle # noqa: PLW0603
|
||||
pickle = cloudpickle
|
||||
|
||||
try:
|
||||
from ipykernel import serialize
|
||||
except ImportError:
|
||||
pass
|
||||
else:
|
||||
serialize.pickle = cloudpickle # type:ignore[attr-defined]
|
||||
|
||||
# disable special function handling, let cloudpickle take care of it
|
||||
can_map.pop(FunctionType, None)
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------------
|
||||
# Classes
|
||||
# -------------------------------------------------------------------------------
|
||||
|
||||
|
||||
class CannedObject:
|
||||
"""A canned object."""
|
||||
|
||||
def __init__(self, obj, keys=None, hook=None):
|
||||
"""can an object for safe pickling
|
||||
|
||||
Parameters
|
||||
----------
|
||||
obj
|
||||
The object to be canned
|
||||
keys : list (optional)
|
||||
list of attribute names that will be explicitly canned / uncanned
|
||||
hook : callable (optional)
|
||||
An optional extra callable,
|
||||
which can do additional processing of the uncanned object.
|
||||
|
||||
Notes
|
||||
-----
|
||||
large data may be offloaded into the buffers list,
|
||||
used for zero-copy transfers.
|
||||
"""
|
||||
self.keys = keys or []
|
||||
self.obj = copy.copy(obj)
|
||||
self.hook = can(hook)
|
||||
for key in keys:
|
||||
setattr(self.obj, key, can(getattr(obj, key)))
|
||||
|
||||
self.buffers = []
|
||||
|
||||
def get_object(self, g=None):
|
||||
"""Get an object."""
|
||||
if g is None:
|
||||
g = {}
|
||||
obj = self.obj
|
||||
for key in self.keys:
|
||||
setattr(obj, key, uncan(getattr(obj, key), g))
|
||||
|
||||
if self.hook:
|
||||
self.hook = uncan(self.hook, g)
|
||||
self.hook(obj, g)
|
||||
return self.obj
|
||||
|
||||
|
||||
class Reference(CannedObject):
|
||||
"""object for wrapping a remote reference by name."""
|
||||
|
||||
def __init__(self, name):
|
||||
"""Initialize the reference."""
|
||||
if not isinstance(name, str):
|
||||
raise TypeError("illegal name: %r" % name)
|
||||
self.name = name
|
||||
self.buffers = []
|
||||
|
||||
def __repr__(self):
|
||||
"""Get the string repr of the reference."""
|
||||
return "<Reference: %r>" % self.name
|
||||
|
||||
def get_object(self, g=None):
|
||||
"""Get an object in the reference."""
|
||||
if g is None:
|
||||
g = {}
|
||||
|
||||
return eval(self.name, g)
|
||||


class CannedCell(CannedObject):
    """Can a closure cell"""

    def __init__(self, cell):
        """Initialize the canned cell."""
        self.cell_contents = can(cell.cell_contents)

    def get_object(self, g=None):
        """Get an object in the cell."""
        cell_contents = uncan(self.cell_contents, g)

        def inner():
            """Inner function."""
            return cell_contents

        return inner.__closure__[0]  # type:ignore[index]


class CannedFunction(CannedObject):
    """Can a function."""

    def __init__(self, f):
        """Initialize the can"""
        self._check_type(f)
        self.code = f.__code__
        self.defaults: typing.Optional[typing.List[typing.Any]]
        if f.__defaults__:
            self.defaults = [can(fd) for fd in f.__defaults__]
        else:
            self.defaults = None

        self.closure: typing.Any
        closure = f.__closure__
        if closure:
            self.closure = tuple(can(cell) for cell in closure)
        else:
            self.closure = None

        self.module = f.__module__ or "__main__"
        self.__name__ = f.__name__
        self.buffers = []

    def _check_type(self, obj):
        assert isinstance(obj, FunctionType), "Not a function type"

    def get_object(self, g=None):
        """Get an object out of the can."""
        # try to load function back into its module:
        if not self.module.startswith("__"):
            __import__(self.module)
            g = sys.modules[self.module].__dict__

        if g is None:
            g = {}
        defaults = tuple(uncan(cfd, g) for cfd in self.defaults) if self.defaults else None
        closure = tuple(uncan(cell, g) for cell in self.closure) if self.closure else None
        return FunctionType(self.code, g, self.__name__, defaults, closure)
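
Canning a function captures its code object, canned defaults, and canned closure cells; uncanning rebuilds it with FunctionType against the supplied (or module) globals. A round-trip sketch using can/uncan defined later in this file:

def scale(x, factor=2):
    return x * factor

canned = can(scale)             # -> CannedFunction
restored = uncan(canned, g={})
assert restored(3) == 6         # defaults survived the round trip
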


class CannedClass(CannedObject):
    """A canned class object."""

    def __init__(self, cls):
        """Initialize the can."""
        self._check_type(cls)
        self.name = cls.__name__
        self.old_style = not isinstance(cls, type)
        self._canned_dict = {}
        for k, v in cls.__dict__.items():
            if k not in ("__weakref__", "__dict__"):
                self._canned_dict[k] = can(v)
        mro = [] if self.old_style else cls.mro()

        self.parents = [can(c) for c in mro[1:]]
        self.buffers = []

    def _check_type(self, obj):
        assert isinstance(obj, class_type), "Not a class type"

    def get_object(self, g=None):
        """Get an object from the can."""
        parents = tuple(uncan(p, g) for p in self.parents)
        return type(self.name, parents, uncan_dict(self._canned_dict, g=g))


class CannedArray(CannedObject):
    """A canned numpy array."""

    def __init__(self, obj):
        """Initialize the can."""
        from numpy import ascontiguousarray

        self.shape = obj.shape
        self.dtype = obj.dtype.descr if obj.dtype.fields else obj.dtype.str
        self.pickled = False
        if sum(obj.shape) == 0:
            self.pickled = True
        elif obj.dtype == "O":
            # can't handle object dtype with buffer approach
            self.pickled = True
        elif obj.dtype.fields and any(dt == "O" for dt, sz in obj.dtype.fields.values()):
            self.pickled = True
        if self.pickled:
            # just pickle it
            self.buffers = [pickle.dumps(obj, PICKLE_PROTOCOL)]
        else:
            # ensure contiguous
            obj = ascontiguousarray(obj, dtype=None)
            self.buffers = [buffer(obj)]

    def get_object(self, g=None):
        """Get the object."""
        from numpy import frombuffer

        data = self.buffers[0]
        if self.pickled:
            # we just pickled it
            return pickle.loads(data)
        return frombuffer(data, dtype=self.dtype).reshape(self.shape)


class CannedBytes(CannedObject):
    """A canned bytes object."""

    @staticmethod
    def wrap(buf: typing.Union[memoryview, bytes, typing.SupportsBytes]) -> bytes:
        """Cast a buffer or memoryview object to bytes"""
        if isinstance(buf, memoryview):
            return buf.tobytes()
        if not isinstance(buf, bytes):
            return bytes(buf)
        return buf

    def __init__(self, obj):
        """Initialize the can."""
        self.buffers = [obj]

    def get_object(self, g=None):
        """Get the canned object."""
        data = self.buffers[0]
        return self.wrap(data)


class CannedBuffer(CannedBytes):
    """A canned buffer."""

    wrap = buffer  # type:ignore[assignment]


class CannedMemoryView(CannedBytes):
    """A canned memory view."""

    wrap = memoryview  # type:ignore[assignment]


# -------------------------------------------------------------------------------
# Functions
# -------------------------------------------------------------------------------


def _import_mapping(mapping, original=None):
    """import any string-keys in a type mapping"""
    log = get_logger()
    log.debug("Importing canning map")
    for key, _ in list(mapping.items()):
        if isinstance(key, str):
            try:
                cls = import_item(key)
            except Exception:
                if original and key not in original:
                    # only message on user-added classes
                    log.error("canning class not importable: %r", key, exc_info=True)  # noqa: G201
                mapping.pop(key)
            else:
                mapping[cls] = mapping.pop(key)


def istype(obj, check):
    """like isinstance(obj, check), but strict

    This won't catch subclasses.
    """
    if isinstance(check, tuple):
        return any(type(obj) is cls for cls in check)
    return type(obj) is check
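
Unlike isinstance, istype deliberately rejects subclasses, so a canner registered for list is never applied to a list subclass it may not understand. For example (sequence_types is defined a few lines below):

class MyList(list):
    pass

assert isinstance(MyList(), list)      # subclass passes isinstance
assert not istype(MyList(), list)      # but fails the strict check
assert istype((1, 2), sequence_types)  # tuples match against each member
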


def can(obj):
    """prepare an object for pickling"""
    import_needed = False

    for cls, canner in can_map.items():
        if isinstance(cls, str):
            import_needed = True
            break
        if istype(obj, cls):
            return canner(obj)

    if import_needed:
        # perform can_map imports, then try again
        # this will usually only happen once
        _import_mapping(can_map, _original_can_map)
        return can(obj)

    return obj


def can_class(obj):
    """Can a class object."""
    if isinstance(obj, class_type) and obj.__module__ == "__main__":
        return CannedClass(obj)
    return obj


def can_dict(obj):
    """can the *values* of a dict"""
    if istype(obj, dict):
        newobj = {}
        for k, v in obj.items():
            newobj[k] = can(v)
        return newobj
    return obj


sequence_types = (list, tuple, set)


def can_sequence(obj):
    """can the elements of a sequence"""
    if istype(obj, sequence_types):
        t = type(obj)
        return t([can(i) for i in obj])
    return obj


def uncan(obj, g=None):
    """invert canning"""

    import_needed = False
    for cls, uncanner in uncan_map.items():
        if isinstance(cls, str):
            import_needed = True
            break
        if isinstance(obj, cls):
            return uncanner(obj, g)

    if import_needed:
        # perform uncan_map imports, then try again
        # this will usually only happen once
        _import_mapping(uncan_map, _original_uncan_map)
        return uncan(obj, g)

    return obj
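
can/uncan walk the two maps: can() replaces registered types with Canned stand-ins (strict istype match on the keys), uncan() reverses anything that is a CannedObject instance, and everything else passes through untouched. A standalone sketch:

canned = can(b"payload")            # -> CannedBytes; raw bytes land in .buffers
assert canned.buffers == [b"payload"]
assert uncan(canned) == b"payload"  # uncan_map dispatches on CannedObject

assert can(3.14) == 3.14            # types without a canner pass through
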


def uncan_dict(obj, g=None):
    """Uncan a dict object."""
    if istype(obj, dict):
        newobj = {}
        for k, v in obj.items():
            newobj[k] = uncan(v, g)
        return newobj
    return obj


def uncan_sequence(obj, g=None):
    """Uncan a sequence."""
    if istype(obj, sequence_types):
        t = type(obj)
        return t([uncan(i, g) for i in obj])
    return obj


# -------------------------------------------------------------------------------
# API dictionaries
# -------------------------------------------------------------------------------

# These dicts can be extended for custom serialization of new objects

can_map = {
    "numpy.ndarray": CannedArray,
    FunctionType: CannedFunction,
    bytes: CannedBytes,
    memoryview: CannedMemoryView,
    cell_type: CannedCell,
    class_type: can_class,
}
if buffer is not memoryview:
    can_map[buffer] = CannedBuffer

uncan_map: typing.Dict[type, typing.Any] = {
    CannedObject: lambda obj, g: obj.get_object(g),
    dict: uncan_dict,
}

# for use in _import_mapping:
_original_can_map = can_map.copy()
_original_uncan_map = uncan_map.copy()
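
These maps are the extension point. A sketch of registering a canner for a hypothetical Point type (both names are illustrative, not part of ipykernel):

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

class CannedPoint(CannedObject):
    def __init__(self, obj):
        self.xy = (obj.x, obj.y)  # keep only picklable state
        self.buffers = []

    def get_object(self, g=None):
        return Point(*self.xy)

can_map[Point] = CannedPoint  # can(Point(1, 2)) now returns a CannedPoint
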
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,15 @@
"""A matplotlib backend for publishing figures via display_data"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import warnings

from matplotlib_inline.backend_inline import *  # type:ignore[import-untyped] # analysis: ignore

warnings.warn(
    "`ipykernel.pylab.backend_inline` is deprecated, directly "
    "use `matplotlib_inline.backend_inline`",
    DeprecationWarning,
    stacklevel=2,
)
14
venv/lib/python3.10/site-packages/ipykernel/pylab/config.py
Normal file
@@ -0,0 +1,14 @@
"""Configurable for configuring the IPython inline backend

This module does not import anything from matplotlib.
"""

import warnings

from matplotlib_inline.config import *  # type:ignore[import-untyped] # analysis: ignore

warnings.warn(
    "`ipykernel.pylab.config` is deprecated, directly use `matplotlib_inline.config`",
    DeprecationWarning,
    stacklevel=2,
)
Binary file not shown.
After Width: | Height: | Size: 1.1 KiB |
Binary file not shown.
After Width: | Height: | Size: 2.1 KiB |
@@ -0,0 +1,265 @@
[SVG source not reproduced: python-logo-only.svg, 265 lines — the Python logo drawn as two gradient-filled paths (blues #5a9fd4/#306998 and yellows #ffd43b/#ffe873), with Inkscape/RDF metadata and linear/radial gradient definitions.]
After Width: | Height: | Size: 9.4 KiB |
204
venv/lib/python3.10/site-packages/ipykernel/serialize.py
Normal file
@@ -0,0 +1,204 @@
"""serialization utilities for apply messages"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import warnings

warnings.warn(
    "ipykernel.serialize is deprecated. It has moved to ipyparallel.serialize",
    DeprecationWarning,
    stacklevel=2,
)

import pickle
from itertools import chain

try:
    # available since ipyparallel 5.0.0
    from ipyparallel.serialize.canning import (
        CannedObject,
        can,
        can_sequence,
        istype,
        sequence_types,
        uncan,
        uncan_sequence,
    )
    from ipyparallel.serialize.serialize import PICKLE_PROTOCOL
except ImportError:
    # Deprecated since ipykernel 4.3.0
    from ipykernel.pickleutil import (
        PICKLE_PROTOCOL,
        CannedObject,
        can,
        can_sequence,
        istype,
        sequence_types,
        uncan,
        uncan_sequence,
    )

from jupyter_client.session import MAX_BYTES, MAX_ITEMS

# -----------------------------------------------------------------------------
# Serialization Functions
# -----------------------------------------------------------------------------


def _extract_buffers(obj, threshold=MAX_BYTES):
    """extract buffers larger than a certain threshold"""
    buffers = []
    if isinstance(obj, CannedObject) and obj.buffers:
        for i, buf in enumerate(obj.buffers):
            if len(buf) > threshold:
                # buffer larger than threshold, prevent pickling
                obj.buffers[i] = None
                buffers.append(buf)
            # buffer too small for separate send, coerce to bytes
            # because pickling buffer objects just results in broken pointers
            elif isinstance(buf, memoryview):
                obj.buffers[i] = buf.tobytes()
    return buffers


def _restore_buffers(obj, buffers):
    """restore buffers extracted by _extract_buffers"""
    if isinstance(obj, CannedObject) and obj.buffers:
        for i, buf in enumerate(obj.buffers):
            if buf is None:
                obj.buffers[i] = buffers.pop(0)


def serialize_object(obj, buffer_threshold=MAX_BYTES, item_threshold=MAX_ITEMS):
    """Serialize an object into a list of sendable buffers.

    Parameters
    ----------
    obj : object
        The object to be serialized
    buffer_threshold : int
        The threshold (in bytes) for pulling out data buffers
        to avoid pickling them.
    item_threshold : int
        The maximum number of items over which canning will iterate.
        Containers (lists, dicts) larger than this will be pickled without
        introspection.

    Returns
    -------
    [bufs] : list of buffers representing the serialized object.
    """
    buffers = []
    if istype(obj, sequence_types) and len(obj) < item_threshold:
        cobj = can_sequence(obj)
        for c in cobj:
            buffers.extend(_extract_buffers(c, buffer_threshold))
    elif istype(obj, dict) and len(obj) < item_threshold:
        cobj = {}
        for k in sorted(obj):
            c = can(obj[k])
            buffers.extend(_extract_buffers(c, buffer_threshold))
            cobj[k] = c
    else:
        cobj = can(obj)
        buffers.extend(_extract_buffers(cobj, buffer_threshold))

    buffers.insert(0, pickle.dumps(cobj, PICKLE_PROTOCOL))
    return buffers


def deserialize_object(buffers, g=None):
    """reconstruct an object serialized by serialize_object from data buffers.

    Parameters
    ----------
    buffers : list of buffers/bytes
    g : globals to be used when uncanning

    Returns
    -------
    (newobj, bufs) : unpacked object, and the list of remaining unused buffers.
    """
    bufs = list(buffers)
    pobj = bufs.pop(0)
    canned = pickle.loads(pobj)
    if istype(canned, sequence_types) and len(canned) < MAX_ITEMS:
        for c in canned:
            _restore_buffers(c, bufs)
        newobj = uncan_sequence(canned, g)
    elif istype(canned, dict) and len(canned) < MAX_ITEMS:
        newobj = {}
        for k in sorted(canned):
            c = canned[k]
            _restore_buffers(c, bufs)
            newobj[k] = uncan(c, g)
    else:
        _restore_buffers(canned, bufs)
        newobj = uncan(canned, g)

    return newobj, bufs
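
A round-trip sketch (assumes numpy is available; the module itself warns that it now lives in ipyparallel.serialize): an array well above the byte threshold travels as a raw buffer beside a small pickled header, instead of inside the pickle.

import numpy as np
from ipykernel.serialize import deserialize_object, serialize_object

a = np.arange(1_000_000)    # large enough to exceed the MAX_BYTES threshold
bufs = serialize_object(a)  # [pickled CannedArray, raw data buffer]
restored, leftover = deserialize_object(bufs)
assert (restored == a).all() and leftover == []
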


def pack_apply_message(f, args, kwargs, buffer_threshold=MAX_BYTES, item_threshold=MAX_ITEMS):
    """pack up a function, args, and kwargs to be sent over the wire

    Each element of args/kwargs will be canned for special treatment,
    but inspection will not go any deeper than that.

    Any object whose data is larger than `threshold` will not have its data copied
    (only numpy arrays and bytes/buffers support zero-copy)

    Message will be a list of bytes/buffers of the format:

    [ cf, pinfo, <arg_bufs>, <kwarg_bufs> ]

    With length at least two + len(args) + len(kwargs)
    """

    arg_bufs = list(
        chain.from_iterable(serialize_object(arg, buffer_threshold, item_threshold) for arg in args)
    )

    kw_keys = sorted(kwargs.keys())
    kwarg_bufs = list(
        chain.from_iterable(
            serialize_object(kwargs[key], buffer_threshold, item_threshold) for key in kw_keys
        )
    )

    info = dict(nargs=len(args), narg_bufs=len(arg_bufs), kw_keys=kw_keys)

    msg = [pickle.dumps(can(f), PICKLE_PROTOCOL)]
    msg.append(pickle.dumps(info, PICKLE_PROTOCOL))
    msg.extend(arg_bufs)
    msg.extend(kwarg_bufs)

    return msg


def unpack_apply_message(bufs, g=None, copy=True):
    """unpack f,args,kwargs from buffers packed by pack_apply_message()

    Returns: original f,args,kwargs
    """
    bufs = list(bufs)  # allow us to pop
    assert len(bufs) >= 2, "not enough buffers!"
    pf = bufs.pop(0)
    f = uncan(pickle.loads(pf), g)
    pinfo = bufs.pop(0)
    info = pickle.loads(pinfo)
    arg_bufs, kwarg_bufs = bufs[: info["narg_bufs"]], bufs[info["narg_bufs"] :]

    args_list = []
    for _ in range(info["nargs"]):
        arg, arg_bufs = deserialize_object(arg_bufs, g)
        args_list.append(arg)
    args = tuple(args_list)
    assert not arg_bufs, "Shouldn't be any arg bufs left over"

    kwargs = {}
    for key in info["kw_keys"]:
        kwarg, kwarg_bufs = deserialize_object(kwarg_bufs, g)
        kwargs[key] = kwarg
    assert not kwarg_bufs, "Shouldn't be any kwarg bufs left over"

    return f, args, kwargs
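
End to end, the apply path is: can the function, serialize each argument, concatenate the buffer lists, and reverse it all on the other side. A same-process sketch (assumes ipyparallel is installed, whose canning module registers a pickle reducer for code objects):

from ipykernel.serialize import pack_apply_message, unpack_apply_message

def add(x, y=1):
    return x + y

msg = pack_apply_message(add, (2,), {"y": 3})  # list of bytes/buffers
f, args, kwargs = unpack_apply_message(msg)
assert f(*args, **kwargs) == 5
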
71
venv/lib/python3.10/site-packages/ipykernel/trio_runner.py
Normal file
@@ -0,0 +1,71 @@
"""A trio loop runner."""
import builtins
import logging
import signal
import threading
import traceback
import warnings

import trio


class TrioRunner:
    """A trio loop runner."""

    def __init__(self):
        """Initialize the runner."""
        self._cell_cancel_scope = None
        self._trio_token = None

    def initialize(self, kernel, io_loop):
        """Initialize the runner."""
        kernel.shell.set_trio_runner(self)
        kernel.shell.run_line_magic("autoawait", "trio")
        kernel.shell.magics_manager.magics["line"]["autoawait"] = lambda _: warnings.warn(
            "Autoawait isn't allowed in Trio background loop mode.", stacklevel=2
        )
        self._interrupted = False
        bg_thread = threading.Thread(target=io_loop.start, daemon=True, name="TornadoBackground")
        bg_thread.start()

    def interrupt(self, signum, frame):
        """Interrupt the runner."""
        if self._cell_cancel_scope:
            self._cell_cancel_scope.cancel()
        else:
            msg = "Kernel interrupted but no cell is running"
            raise Exception(msg)

    def run(self):
        """Run the loop."""
        old_sig = signal.signal(signal.SIGINT, self.interrupt)

        def log_nursery_exc(exc):
            exc = "\n".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
            logging.error("An exception occurred in a global nursery task.\n%s", exc)

        async def trio_main():
            """Run the main loop."""
            self._trio_token = trio.lowlevel.current_trio_token()
            async with trio.open_nursery() as nursery:
                # TODO This hack prevents the nursery from cancelling all child
                # tasks when an uncaught exception occurs, but it's ugly.
                nursery._add_exc = log_nursery_exc
                builtins.GLOBAL_NURSERY = nursery  # type:ignore[attr-defined]
                await trio.sleep_forever()

        trio.run(trio_main)
        signal.signal(signal.SIGINT, old_sig)

    def __call__(self, async_fn):
        """Handle a function call."""

        async def loc(coro):
            """A thread runner context."""
            self._cell_cancel_scope = trio.CancelScope()
            with self._cell_cancel_scope:
                return await coro
            self._cell_cancel_scope = None  # type:ignore[unreachable]
            return None

        return trio.from_thread.run(loc, async_fn, trio_token=self._trio_token)
663
venv/lib/python3.10/site-packages/ipykernel/zmqshell.py
Normal file
@@ -0,0 +1,663 @@
"""A ZMQ-based subclass of InteractiveShell.

This code is meant to ease the refactoring of the base InteractiveShell into
something with a cleaner architecture for 2-process use, without actually
breaking InteractiveShell itself. So we're doing something a bit ugly, where
we subclass and override what we want to fix. Once this is working well, we
can go back to the base class and refactor the code for a cleaner inheritance
implementation that doesn't rely on so much monkeypatching.

But this lets us maintain a fully working IPython as we develop the new
machinery. This should thus be thought of as scaffolding.
"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import os
import sys
import warnings
from pathlib import Path
from threading import local

from IPython.core import page, payloadpage
from IPython.core.autocall import ZMQExitAutocall
from IPython.core.displaypub import DisplayPublisher
from IPython.core.error import UsageError
from IPython.core.interactiveshell import InteractiveShell, InteractiveShellABC
from IPython.core.magic import Magics, line_magic, magics_class
from IPython.core.magics import CodeMagics, MacroToEdit  # type:ignore[attr-defined]
from IPython.core.usage import default_banner
from IPython.display import Javascript, display
from IPython.utils import openpy
from IPython.utils.process import arg_split, system  # type:ignore[attr-defined]
from jupyter_client.session import Session, extract_header
from jupyter_core.paths import jupyter_runtime_dir
from traitlets import Any, CBool, CBytes, Dict, Instance, Type, default, observe

from ipykernel import connect_qtconsole, get_connection_file, get_connection_info
from ipykernel.displayhook import ZMQShellDisplayHook
from ipykernel.jsonutil import encode_images, json_clean

# -----------------------------------------------------------------------------
# Functions and classes
# -----------------------------------------------------------------------------


class ZMQDisplayPublisher(DisplayPublisher):
    """A display publisher that publishes data using a ZeroMQ PUB socket."""

    session = Instance(Session, allow_none=True)
    pub_socket = Any(allow_none=True)
    parent_header = Dict({})
    topic = CBytes(b"display_data")

    # thread_local:
    # An attribute used to ensure the correct output message
    # is processed. See ipykernel Issue 113 for a discussion.
    _thread_local = Any()

    def set_parent(self, parent):
        """Set the parent for outbound messages."""
        self.parent_header = extract_header(parent)

    def _flush_streams(self):
        """flush IO Streams prior to display"""
        sys.stdout.flush()
        sys.stderr.flush()

    @default("_thread_local")
    def _default_thread_local(self):
        """Initialize our thread local storage"""
        return local()

    @property
    def _hooks(self):
        if not hasattr(self._thread_local, "hooks"):
            # create new list for a new thread
            self._thread_local.hooks = []
        return self._thread_local.hooks

    def publish(
        self,
        data,
        metadata=None,
        transient=None,
        update=False,
    ):
        """Publish a display-data message

        Parameters
        ----------
        data : dict
            A mime-bundle dict, keyed by mime-type.
        metadata : dict, optional
            Metadata associated with the data.
        transient : dict, optional, keyword-only
            Transient data that may only be relevant during a live display,
            such as display_id.
            Transient data should not be persisted to documents.
        update : bool, optional, keyword-only
            If True, send an update_display_data message instead of display_data.
        """
        self._flush_streams()
        if metadata is None:
            metadata = {}
        if transient is None:
            transient = {}
        self._validate_data(data, metadata)
        content = {}
        content["data"] = encode_images(data)
        content["metadata"] = metadata
        content["transient"] = transient

        msg_type = "update_display_data" if update else "display_data"

        # Use 2-stage process to send a message,
        # in order to put it through the transform
        # hooks before potentially sending.
        assert self.session is not None
        msg = self.session.msg(msg_type, json_clean(content), parent=self.parent_header)

        # Each transform either returns a new
        # message or None. If None is returned,
        # the message has been 'used' and we return.
        for hook in self._hooks:
            msg = hook(msg)
            if msg is None:
                return  # type:ignore[unreachable]

        self.session.send(
            self.pub_socket,
            msg,
            ident=self.topic,
        )
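
Inside a running kernel this publisher is installed as shell.display_pub, so kernel-side code can emit a mime bundle directly. A sketch that is only meaningful in a live kernel session:

from IPython import get_ipython

ip = get_ipython()
ip.display_pub.publish(
    data={"text/plain": "hello", "text/html": "<b>hello</b>"},
    metadata={},
)
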

    def clear_output(self, wait=False):
        """Clear output associated with the current execution (cell).

        Parameters
        ----------
        wait : bool (default: False)
            If True, the output will not be cleared immediately,
            instead waiting for the next display before clearing.
            This reduces bounce during repeated clear & display loops.

        """
        content = dict(wait=wait)
        self._flush_streams()
        assert self.session is not None
        msg = self.session.msg("clear_output", json_clean(content), parent=self.parent_header)

        # see publish() for details on how this works
        for hook in self._hooks:
            msg = hook(msg)
            if msg is None:
                return  # type:ignore[unreachable]

        self.session.send(
            self.pub_socket,
            msg,
            ident=self.topic,
        )

    def register_hook(self, hook):
        """
        Registers a hook with the thread-local storage.

        Parameters
        ----------
        hook : Any callable object

        Returns
        -------
        Either a publishable message, or `None`.
        The DisplayHook objects must return a message from
        the __call__ method if they still require the
        `session.send` method to be called after transformation.
        Returning `None` will halt that execution path, and
        session.send will not be called.
        """
        self._hooks.append(hook)

    def unregister_hook(self, hook):
        """
        Un-registers a hook with the thread-local storage.

        Parameters
        ----------
        hook : Any callable object which has previously been
            registered as a hook.

        Returns
        -------
        bool - `True` if the hook was removed, `False` if it wasn't
            found.
        """
        try:
            self._hooks.remove(hook)
            return True
        except ValueError:
            return False
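
A hook sees every outgoing display message on its thread and may rewrite it, or swallow it by returning None. A sketch of a pass-through hook that tags message metadata (again, only meaningful inside a live kernel):

from IPython import get_ipython

ip = get_ipython()

def tag_hook(msg):
    # annotate outgoing display messages; returning None would drop them
    msg["content"].setdefault("metadata", {})["tagged"] = True
    return msg

ip.display_pub.register_hook(tag_hook)
try:
    ip.display_pub.publish(data={"text/plain": "tagged output"})
finally:
    ip.display_pub.unregister_hook(tag_hook)
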


@magics_class
class KernelMagics(Magics):
    """Kernel magics."""

    # ------------------------------------------------------------------------
    # Magic overrides
    # ------------------------------------------------------------------------
    # Once the base class stops inheriting from magic, this code needs to be
    # moved into a separate machinery as well. For now, at least isolate here
    # the magics which this class needs to implement differently from the base
    # class, or that are unique to it.

    @line_magic
    def edit(self, parameter_s="", last_call=None):
        """Bring up an editor and execute the resulting code.

        Usage:
          %edit [options] [args]

        %edit runs an external text editor. You will need to set the command for
        this editor via the ``TerminalInteractiveShell.editor`` option in your
        configuration file before it will work.

        This command allows you to conveniently edit multi-line code right in
        your IPython session.

        If called without arguments, %edit opens up an empty editor with a
        temporary file and will execute the contents of this file when you
        close it (don't forget to save it!).

        Options:

        -n <number>
          Open the editor at a specified line number. By default, the IPython
          editor hook uses the unix syntax 'editor +N filename', but you can
          configure this by providing your own modified hook if your favorite
          editor supports line-number specifications with a different syntax.

        -p
          Call the editor with the same data as the previous time it was used,
          regardless of how long ago (in your current session) it was.

        -r
          Use 'raw' input. This option only applies to input taken from the
          user's history. By default, the 'processed' history is used, so that
          magics are loaded in their transformed version to valid Python. If
          this option is given, the raw input as typed at the command line is
          used instead. When you exit the editor, it will be executed by
          IPython's own processor.

        Arguments:

        If arguments are given, the following possibilities exist:

        - The arguments are numbers or pairs of colon-separated numbers (like
          1 4:8 9). These are interpreted as lines of previous input to be
          loaded into the editor. The syntax is the same as the %macro command.

        - If the argument doesn't start with a number, it is evaluated as a
          variable and its contents loaded into the editor. You can thus edit
          any string which contains python code (including the result of
          previous edits).

        - If the argument is the name of an object (other than a string),
          IPython will try to locate the file where it was defined and open the
          editor at the point where it is defined. You can use ``%edit function``
          to load an editor exactly at the point where 'function' is defined,
          edit it and have the file be executed automatically.

          If the object is a macro (see %macro for details), this opens up your
          specified editor with a temporary file containing the macro's data.
          Upon exit, the macro is reloaded with the contents of the file.

          Note: opening at an exact line is only supported under Unix, and some
          editors (like kedit and gedit up to Gnome 2.8) do not understand the
          '+NUMBER' parameter necessary for this feature. Good editors like
          (X)Emacs, vi, jed, pico and joe all do.

        - If the argument is not found as a variable, IPython will look for a
          file with that name (adding .py if necessary) and load it into the
          editor. It will execute its contents with execfile() when you exit,
          loading any code in the file into your interactive namespace.

        Unlike in the terminal, this is designed to use a GUI editor, and we do
        not know when it has closed. So the file you edit will not be
        automatically executed or printed.

        Note that %edit is also available through the alias %ed.
        """
        last_call = last_call or ["", ""]
        opts, args = self.parse_options(parameter_s, "prn:")

        try:
            filename, lineno, _ = CodeMagics._find_edit_target(self.shell, args, opts, last_call)
        except MacroToEdit:
            # TODO: Implement macro editing over 2 processes.
            print("Macro editing not yet implemented in 2-process model.")
            return

        # Make sure we send to the client an absolute path, in case the working
        # directory of client and kernel don't match
        filename = str(Path(filename).resolve())

        payload = {"source": "edit_magic", "filename": filename, "line_number": lineno}
        assert self.shell is not None
        self.shell.payload_manager.write_payload(payload)

    # A few magics that are adapted to the specifics of using pexpect and a
    # remote terminal

    @line_magic
    def clear(self, arg_s):
        """Clear the terminal."""
        assert self.shell is not None
        if os.name == "posix":
            self.shell.system("clear")
        else:
            self.shell.system("cls")

    if os.name == "nt":
        # This is the usual name in windows
        cls = line_magic("cls")(clear)

    # Terminal pagers won't work over pexpect, but we do have our own pager

    @line_magic
    def less(self, arg_s):
        """Show a file through the pager.

        Files ending in .py are syntax-highlighted."""
        if not arg_s:
            msg = "Missing filename."
            raise UsageError(msg)

        if arg_s.endswith(".py"):
            assert self.shell is not None
            cont = self.shell.pycolorize(openpy.read_py_file(arg_s, skip_encoding_cookie=False))
        else:
            with open(arg_s) as fid:
                cont = fid.read()
        page.page(cont)

    more = line_magic("more")(less)

    # Man calls a pager, so we also need to redefine it
    if os.name == "posix":

        @line_magic
        def man(self, arg_s):
            """Find the man page for the given command and display in pager."""
            assert self.shell is not None
            page.page(self.shell.getoutput("man %s | col -b" % arg_s, split=False))

    @line_magic
    def connect_info(self, arg_s):
        """Print information for connecting other clients to this kernel

        It will print the contents of this session's connection file, as well as
        shortcuts for local clients.

        In the simplest case, when called from the most recently launched kernel,
        secondary clients can be connected, simply with:

        $> jupyter <app> --existing

        """

        try:
            connection_file = get_connection_file()
            info = get_connection_info(unpack=False)
        except Exception as e:
            warnings.warn("Could not get connection info: %r" % e, stacklevel=2)
            return

        # if it's in the default dir, truncate to basename
        if jupyter_runtime_dir() == str(Path(connection_file).parent):
            connection_file = Path(connection_file).name

        assert isinstance(info, str)
        print(info + "\n")
        print(
            f"Paste the above JSON into a file, and connect with:\n"
            f"    $> jupyter <app> --existing <file>\n"
            f"or, if you are local, you can connect with just:\n"
            f"    $> jupyter <app> --existing {connection_file}\n"
            f"or even just:\n"
            f"    $> jupyter <app> --existing\n"
            f"if this is the most recent Jupyter kernel you have started."
        )

    @line_magic
    def qtconsole(self, arg_s):
        """Open a qtconsole connected to this kernel.

        Useful for connecting a qtconsole to running notebooks, for better
        debugging.
        """

        # %qtconsole should imply bind_kernel for engines:
        # FIXME: move to ipyparallel Kernel subclass
        if "ipyparallel" in sys.modules:
            from ipyparallel import bind_kernel

            bind_kernel()

        try:
            connect_qtconsole(argv=arg_split(arg_s, os.name == "posix"))
        except Exception as e:
            warnings.warn("Could not start qtconsole: %r" % e, stacklevel=2)
            return

    @line_magic
    def autosave(self, arg_s):
        """Set the autosave interval in the notebook (in seconds).

        The default value is 120, or two minutes.
        ``%autosave 0`` will disable autosave.

        This magic only has an effect when called from the notebook interface.
        It has no effect when called in a startup file.
        """

        try:
            interval = int(arg_s)
        except ValueError as e:
            raise UsageError("%%autosave requires an integer, got %r" % arg_s) from e

        # javascript wants milliseconds
        milliseconds = 1000 * interval
        display(
            Javascript("IPython.notebook.set_autosave_interval(%i)" % milliseconds),
            include=["application/javascript"],
        )
        if interval:
            print("Autosaving every %i seconds" % interval)
        else:
            print("Autosave disabled")


class ZMQInteractiveShell(InteractiveShell):
    """A subclass of InteractiveShell for ZMQ."""

    displayhook_class = Type(ZMQShellDisplayHook)
    display_pub_class = Type(ZMQDisplayPublisher)
    data_pub_class = Any()  # type:ignore[assignment]
    kernel = Any()
    parent_header = Any()

    @default("banner1")
    def _default_banner1(self):
        return default_banner

    # Override the traitlet in the parent class, because there's no point using
    # readline for the kernel. Can be removed when the readline code is moved
    # to the terminal frontend.
    readline_use = CBool(False)
    # autoindent has no meaning in a zmqshell, and attempting to enable it
    # will print a warning in the absence of readline.
    autoindent = CBool(False)

    exiter = Instance(ZMQExitAutocall)

    @default("exiter")
    def _default_exiter(self):
        return ZMQExitAutocall(self)

    @observe("exit_now")
    def _update_exit_now(self, change):
        """stop eventloop when exit_now fires"""
        if change["new"]:
            if hasattr(self.kernel, "io_loop"):
                loop = self.kernel.io_loop
                loop.call_later(0.1, loop.stop)
            if self.kernel.eventloop:
                exit_hook = getattr(self.kernel.eventloop, "exit_hook", None)
                if exit_hook:
                    exit_hook(self.kernel)

    keepkernel_on_exit = None

    # Over ZeroMQ, GUI control isn't done with PyOS_InputHook as there is no
    # interactive input being read; we provide event loop support in ipkernel
    def enable_gui(self, gui):
        """Enable a given GUI eventloop."""
        from .eventloops import enable_gui as real_enable_gui

        try:
            real_enable_gui(gui)
            self.active_eventloop = gui
        except ValueError as e:
            raise UsageError("%s" % e) from e

    def init_environment(self):
        """Configure the user's environment."""
        env = os.environ
        # These two ensure 'ls' produces nice coloring on BSD-derived systems
        env["TERM"] = "xterm-color"
        env["CLICOLOR"] = "1"
        # These two add terminal color in tools that support it.
        env["FORCE_COLOR"] = "1"
        env["CLICOLOR_FORCE"] = "1"
        # Since normal pagers don't work at all (over pexpect we don't have
        # single-key control of the subprocess), try to disable paging in
        # subprocesses as much as possible.
        env["PAGER"] = "cat"
        env["GIT_PAGER"] = "cat"

    def init_hooks(self):
        """Initialize hooks."""
        super().init_hooks()
        self.set_hook("show_in_pager", page.as_hook(payloadpage.page), 99)

    def init_data_pub(self):
        """Delay datapub init until request, for deprecation warnings"""

    @property
    def data_pub(self):
        if not hasattr(self, "_data_pub"):
            warnings.warn(
                "InteractiveShell.data_pub is deprecated outside IPython parallel.",
                DeprecationWarning,
                stacklevel=2,
            )

            self._data_pub = self.data_pub_class(parent=self)  # type:ignore[has-type]
            self._data_pub.session = self.display_pub.session  # type:ignore[attr-defined]
            self._data_pub.pub_socket = self.display_pub.pub_socket  # type:ignore[attr-defined]
        return self._data_pub

    @data_pub.setter
    def data_pub(self, pub):
        self._data_pub = pub

    def ask_exit(self):
        """Engage the exit actions."""
        self.exit_now = not self.keepkernel_on_exit
        payload = dict(
            source="ask_exit",
            keepkernel=self.keepkernel_on_exit,
        )
        self.payload_manager.write_payload(payload)  # type:ignore[union-attr]

    def run_cell(self, *args, **kwargs):
        """Run a cell."""
        self._last_traceback = None
        return super().run_cell(*args, **kwargs)

    def _showtraceback(self, etype, evalue, stb):
        # try to preserve ordering of tracebacks and print statements
        sys.stdout.flush()
        sys.stderr.flush()

        exc_content = {
            "traceback": stb,
            "ename": str(etype.__name__),
            "evalue": str(evalue),
        }

        dh = self.displayhook
        # Send exception info over pub socket for other clients than the caller
        # to pick up
        topic = None
        if dh.topic:  # type:ignore[attr-defined]
            topic = dh.topic.replace(b"execute_result", b"error")  # type:ignore[attr-defined]

        dh.session.send(  # type:ignore[attr-defined]
            dh.pub_socket,  # type:ignore[attr-defined]
            "error",
            json_clean(exc_content),
            dh.parent_header,  # type:ignore[attr-defined]
            ident=topic,
        )

        # FIXME - Once we rely on Python 3, the traceback is stored on the
        # exception object, so we shouldn't need to store it here.
        self._last_traceback = stb

    def set_next_input(self, text, replace=False):
        """Send the specified text to the frontend to be presented at the next
        input cell."""
        payload = dict(
            source="set_next_input",
            text=text,
            replace=replace,
        )
        self.payload_manager.write_payload(payload)  # type:ignore[union-attr]

    def set_parent(self, parent):
        """Set the parent header for associating output with its triggering input"""
        self.parent_header = parent
        self.displayhook.set_parent(parent)  # type:ignore[attr-defined]
        self.display_pub.set_parent(parent)  # type:ignore[attr-defined]
        if hasattr(self, "_data_pub"):
            self.data_pub.set_parent(parent)
        try:
            sys.stdout.set_parent(parent)  # type:ignore[attr-defined]
        except AttributeError:
            pass
        try:
            sys.stderr.set_parent(parent)  # type:ignore[attr-defined]
        except AttributeError:
            pass

    def get_parent(self):
        """Get the parent header."""
        return self.parent_header

    def init_magics(self):
        """Initialize magics."""
        super().init_magics()
        self.register_magics(KernelMagics)
        self.magics_manager.register_alias("ed", "edit")  # type:ignore[union-attr]

    def init_virtualenv(self):
        """Initialize virtual environment."""
        # Overridden not to do virtualenv detection, because it's probably
        # not appropriate in a kernel. To use a kernel in a virtualenv, install
        # it inside the virtualenv.
        # https://ipython.readthedocs.io/en/latest/install/kernel_install.html

    def system_piped(self, cmd):
        """Call the given cmd in a subprocess, piping stdout/err

        Parameters
        ----------
        cmd : str
            Command to execute (cannot end in '&', as background processes are
            not supported). Should not be a command that expects input
            other than simple text.
        """
        if cmd.rstrip().endswith("&"):
            # this is *far* from a rigorous test
            # We do not support backgrounding processes because we either use
            # pexpect or pipes to read from. Users can always just call
            # os.system() or use ip.system=ip.system_raw
            # if they really want a background process.
            msg = "Background processes not supported."
            raise OSError(msg)

        # we explicitly do NOT return the subprocess status code, because
        # a non-None value would trigger :func:`sys.displayhook` calls.
        # Instead, we store the exit_code in user_ns.
        # Also, protect system call from UNC paths on Windows here too
        # as is done in InteractiveShell.system_raw
        if sys.platform == "win32":
            cmd = self.var_expand(cmd, depth=1)
            from IPython.utils._process_win32 import AvoidUNCPath

            with AvoidUNCPath() as path:
                if path is not None:
                    cmd = f"pushd {path} &&{cmd}"
                self.user_ns["_exit_code"] = system(cmd)
        else:
            self.user_ns["_exit_code"] = system(self.var_expand(cmd, depth=1))

    # Ensure new system_piped implementation is used
    system = system_piped


InteractiveShellABC.register(ZMQInteractiveShell)