[Python-modules-commits] [jupyter-client] 01/09: Import jupyter-client_4.2.2.orig.tar.gz

Julien Cristau jcristau at moszumanska.debian.org
Fri Apr 8 10:37:53 UTC 2016


This is an automated email from the git hooks/post-receive script.

jcristau pushed a commit to branch master
in repository jupyter-client.

commit 40e1acb3ecf4c8be857491a1ad1d36acabb50c43
Author: Julien Cristau <julien.cristau at logilab.fr>
Date:   Fri Apr 8 12:11:19 2016 +0200

    Import jupyter-client_4.2.2.orig.tar.gz
---
 PKG-INFO                                   |  2 +-
 docs/api/kernelspec.rst                    |  2 +
 docs/changelog.rst                         | 32 +++++++++++
 docs/messaging.rst                         | 77 +++++++++------------------
 docs/wrapperkernels.rst                    |  5 +-
 jupyter_client/_version.py                 |  2 +-
 jupyter_client/blocking/client.py          | 48 +++++++++++++++--
 jupyter_client/channels.py                 |  4 +-
 jupyter_client/client.py                   |  9 +++-
 jupyter_client/connect.py                  | 39 +++++++++++---
 jupyter_client/consoleapp.py               | 19 ++++---
 jupyter_client/kernelspec.py               | 85 +++++++++++++++++++++++++-----
 jupyter_client/kernelspecapp.py            | 70 ++++++++++++++++++++++--
 jupyter_client/launcher.py                 | 36 ++++++++-----
 jupyter_client/localinterfaces.py          |  7 ++-
 jupyter_client/manager.py                  | 14 +++--
 jupyter_client/multikernelmanager.py       |  7 +--
 jupyter_client/session.py                  | 18 +++++--
 jupyter_client/tests/signalkernel.py       | 70 ++++++++++++++++++++++++
 jupyter_client/tests/test_connect.py       | 24 ++++++++-
 jupyter_client/tests/test_kernelmanager.py | 80 +++++++++++++++++++++++++++-
 jupyter_client/tests/test_kernelspec.py    | 44 +++++++++++-----
 jupyter_client/tests/utils.py              | 56 ++++++++++++++++++++
 23 files changed, 615 insertions(+), 135 deletions(-)

diff --git a/PKG-INFO b/PKG-INFO
index 63d8d52..ec859df 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: jupyter_client
-Version: 4.1.1
+Version: 4.2.2
 Summary: Jupyter protocol implementation and client libraries
 Home-page: http://jupyter.org
 Author: Jupyter Development Team
diff --git a/docs/api/kernelspec.rst b/docs/api/kernelspec.rst
index fcb0946..c2164ea 100644
--- a/docs/api/kernelspec.rst
+++ b/docs/api/kernelspec.rst
@@ -35,6 +35,8 @@ kernelspec - discovering kernels
 
    .. automethod:: find_kernel_specs
 
+   .. automethod:: get_all_specs
+
    .. automethod:: get_kernel_spec
 
    .. automethod:: install_kernel_spec
diff --git a/docs/changelog.rst b/docs/changelog.rst
index 8b38439..48e9652 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -4,6 +4,38 @@
 Changes in Jupyter Client
 =========================
 
+4.2
+===
+
+4.2.2
+-----
+
+`4.2.2 on GitHub <https://github.com/jupyter/jupyter_client/milestones/4.2.2>`__
+
+- Another fix for the :func:`start_new_kernel` issue in 4.2.1 affecting slow-starting kernels.
+
+
+4.2.1
+-----
+
+`4.2.1 on GitHub <https://github.com/jupyter/jupyter_client/milestones/4.2.1>`__
+
+- Fix regression in 4.2 causing :func:`start_new_kernel`
+  to fail while waiting for kernels to become available.
+
+
+4.2.0
+-----
+
+`4.2.0 on GitHub <https://github.com/jupyter/jupyter_client/milestones/4.2>`__
+
+- added :command:`jupyter kernelspec remove` for removing kernelspecs
+- allow specifying the environment for kernel processes via the ``env`` argument
+- added ``name`` field to connection files identifying the kernelspec name,
+  so that consumers of connection files (alternate frontends) can identify the kernelspec in use
+- added :meth:`KernelSpecManager.get_all_specs` for getting all kernelspecs more efficiently
+- various improvements to error messages and documentation
+
 4.1
 ===
 
diff --git a/docs/messaging.rst b/docs/messaging.rst
index 1a3d23c..6725666 100644
--- a/docs/messaging.rst
+++ b/docs/messaging.rst
@@ -75,6 +75,9 @@ kernel has four sockets that serve the following functions:
 4. **Control**: This channel is identical to Shell, but operates on a separate socket,
    to allow important messages to avoid queueing behind execution requests (e.g. shutdown or abort).
 
+5. **Heartbeat**: This socket allows for simple bytestring messages to be sent
+    between the frontend and the kernel to ensure that they are still connected.
+
 The actual format of the messages allowed on each of these channels is
 specified below.  Messages are dicts of dicts with string keys and values that
 are reasonably representable in JSON.  Our current implementation uses JSON
@@ -104,9 +107,9 @@ A message is defined by the following four-dictionary structure::
       # same kernel simultaneously, so that frontends can label the various
       # messages in a meaningful way.
       'header' : {
-                    'msg_id' : uuid,
+                    'msg_id' : str, # typically UUID, must be unique per message
                     'username' : str,
-                    'session' : uuid,
+                    'session' : str, # typically UUID, should be unique per session
                     # ISO 8601 timestamp for when the message is created
                     'date': str,
                     # All recognized message type strings are listed below.
@@ -181,7 +184,7 @@ which is the topic for IOPub subscribers, e.g. ``execute_result``, ``display_dat
     In most cases, the IOPub topics are irrelevant and completely ignored,
     because frontends just subscribe to all topics.
     The convention used in the IPython kernel is to use the msg_type as the topic,
-    and possibly extra information about the message, e.g. ``execute_result`` or ``stream.stdout``
+    and possibly extra information about the message, e.g. ``kernel.{u-u-i-d}.execute_result`` or ``stream.stdout``
 
 After the delimiter is the `HMAC`_ signature of the message, used for authentication.
 If authentication is disabled, this should be an empty string.
@@ -223,11 +226,12 @@ The default and most common serialization is JSON, but msgpack and pickle
 are common alternatives.
 
 After the serialized dicts are zero to many raw data buffers,
-which can be used by message types that support binary data (mainly apply and data_pub).
+which can be used by message types that support binary data,
+such as custom comm messages and extensions to the protocol.
 
 
-Python functional API
-=====================
+Python API
+==========
 
 As messages are dicts, they map naturally to a ``func(**kw)`` call form.  We
 should develop, at a few key points, functional forms of all the requests that
@@ -241,7 +245,7 @@ messages upon deserialization to the following form for convenience::
       'header' : dict,
       # The msg's unique identifier and type are always stored in the header,
       # but the Python implementation copies them to the top level.
-      'msg_id' : uuid,
+      'msg_id' : str,
       'msg_type' : str,
       'parent_header' : dict,
       'content' : dict,
@@ -361,7 +365,7 @@ Message type: ``execute_reply``::
       # One of: 'ok' OR 'error' OR 'abort'
       'status' : str,
 
-      # The global kernel counter that increases by one with each request that 
+      # The global kernel counter that increases by one with each request that
       # stores history.  This will typically be used by clients to display
       # prompt numbers to the user.  If the request did not store history, this will
       # be the current value of the counter in the kernel.
@@ -777,7 +781,7 @@ Message type: ``kernel_info_reply``::
 
         # Information about the language of code for the kernel
         'language_info': {
-            # Name of the programming language in which kernel is implemented.
+            # Name of the programming language that the kernel implements.
             # Kernel included in IPython returns 'python'.
             'name': str,
 
@@ -858,7 +862,8 @@ multiple cases:
 
 The client sends a shutdown request to the kernel, and once it receives the
 reply message (which is otherwise empty), it can assume that the kernel has
-completed shutdown safely.
+completed shutdown safely.  The request can be sent on either the `control` or
+`shell` channel.
 
 Upon their own shutdown, client applications will typically execute a last
 minute sanity check and forcefully terminate any kernel that is still alive, to
@@ -867,13 +872,13 @@ avoid leaving stray processes in the user's machine.
 Message type: ``shutdown_request``::
 
     content = {
-        'restart' : bool # whether the shutdown is final, or precedes a restart
+        'restart' : bool # False if final shutdown, or True if shutdown precedes a restart
     }
 
 Message type: ``shutdown_reply``::
 
     content = {
-        'restart' : bool # whether the shutdown is final, or precedes a restart
+        'restart' : bool # False if final shutdown, or True if shutdown precedes a restart
     }
 
 .. Note::
@@ -926,7 +931,8 @@ Message type: ``display_data``::
     content = {
 
         # Who created the data
-        'source' : str,
+        # Used in V4. Removed in V5.
+        # 'source' : str,
 
         # The data dict contains key/value pairs, where the keys are MIME
         # types and the values are the raw data of the representation in that
@@ -962,42 +968,6 @@ of images::
     not double-serialized as a JSON string.
 
 
-Raw Data Publication
---------------------
-
-``display_data`` lets you publish *representations* of data, such as images and html.
-This ``data_pub`` message lets you publish *actual raw data*, sent via message buffers.
-
-data_pub messages are constructed via the :func:`IPython.lib.datapub.publish_data` function:
-
-.. sourcecode:: python
-
-    from IPython.kernel.zmq.datapub import publish_data
-    ns = dict(x=my_array)
-    publish_data(ns)
-
-
-Message type: ``data_pub``::
-
-    content = {
-        # the keys of the data dict, after it has been unserialized
-        'keys' : ['a', 'b']
-    }
-    # the namespace dict will be serialized in the message buffers,
-    # which will have a length of at least one
-    buffers = [b'pdict', ...]
-
-
-The interpretation of a sequence of data_pub messages for a given parent request should be
-to update a single namespace with subsequent results.
-
-.. note::
-
-    No frontends directly handle data_pub messages at this time.
-    It is currently only used by the client/engines in :mod:`IPython.parallel`,
-    where engines may publish *data* to the Client,
-    of which the Client can then publish *representations* via ``display_data``
-    to various frontends.
 
 Code inputs
 -----------
@@ -1105,7 +1075,7 @@ Message type: ``clear_output``::
     content = {
 
         # Wait to clear the output until new output is available.  Clears the
-        # existing output immediately before the new output is displayed.  
+        # existing output immediately before the new output is displayed.
         # Useful for creating simple animations with minimal flickering.
         'wait' : bool,
     }
@@ -1201,7 +1171,9 @@ Opening a Comm produces a ``comm_open`` message, to be sent to the other side::
     {
       'comm_id' : 'u-u-i-d',
       'target_name' : 'my_comm',
-      'data' : {}
+      'data' : {},
+      # Optional, the target module
+      'target_module': 'my_module',
     }
 
 Every Comm has an ID and a target name.
@@ -1214,6 +1186,9 @@ The ``data`` key is always a dict and can be any extra JSON information used in
 If the ``target_name`` key is not found on the receiving side,
 then it should immediately reply with a ``comm_close`` message to avoid an inconsistent state.
 
+The optional ``target_module`` is used to select a module that is responsible
+for handling the ``target_name``.
+
 Comm Messages
 -------------
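
A sketch of a ``comm_open`` content dict carrying the optional
``target_module`` key described above; the ids and names here are
placeholders, not part of the protocol:

    # Hypothetical comm_open payload; 'my_comm' and 'my_module' are
    # illustrative names chosen by the frontend/kernel pair.
    comm_open_content = {
        'comm_id': 'u-u-i-d',          # unique id shared by both sides
        'target_name': 'my_comm',      # handler to look up on the receiving side
        'target_module': 'my_module',  # optional: module responsible for target_name
        'data': {},                    # any extra JSON-able information
    }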
 
diff --git a/docs/wrapperkernels.rst b/docs/wrapperkernels.rst
index 8a70b90..886506b 100644
--- a/docs/wrapperkernels.rst
+++ b/docs/wrapperkernels.rst
@@ -36,7 +36,8 @@ following methods and attributes:
 
      Language information for :ref:`msging_kernel_info` replies, in a dictionary.
      This should contain the key ``mimetype`` with the mimetype of code in the
-     target language (e.g. ``'text/x-python'``), and ``file_extension`` (e.g.
+     target language (e.g. ``'text/x-python'``), the ``name`` of the language
+     being implemented (e.g. ``'python'``), and ``file_extension`` (e.g.
      ``'py'``).
      It may also contain keys ``codemirror_mode`` and ``pygments_lexer`` if they
      need to differ from :attr:`language`.
@@ -65,7 +66,7 @@ following methods and attributes:
 To launch your kernel, add this at the end of your module::
 
     if __name__ == '__main__':
-        from ipyernel.kernelapp import IPKernelApp
+        from ipykernel.kernelapp import IPKernelApp
         IPKernelApp.launch_instance(kernel_class=MyKernel)
 
 Now create a `JSON kernel spec file <http://jupyter-client.readthedocs.org/en/latest/kernels.html#kernel-specs>`_ and install it using ``jupyter kernelspec install </path/to/kernel>``. Place your kernel module anywhere Python can import it (try current directory for testing). Finally, you can run your kernel using ``jupyter console --kernel <mykernelname>``. Note that ``<mykernelname>`` in the below example is ``echo``. 
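
A minimal sketch of such a kernel spec written from Python; the module name
``echokernel`` and the display name are assumptions for the ``echo`` example
above, not names defined by jupyter_client:

    import json
    import os

    spec = {
        # argv launches the kernel; {connection_file} is replaced with the
        # path to the connection file when the kernel is started.
        "argv": ["python", "-m", "echokernel", "-f", "{connection_file}"],
        "display_name": "Echo",
        "language": "echo",
    }
    if not os.path.isdir("echo"):
        os.mkdir("echo")
    with open(os.path.join("echo", "kernel.json"), "w") as f:
        json.dump(spec, f, indent=2)
    # afterwards: jupyter kernelspec install ./echo --user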
diff --git a/jupyter_client/_version.py b/jupyter_client/_version.py
index 3816cc1..c6ae7dd 100644
--- a/jupyter_client/_version.py
+++ b/jupyter_client/_version.py
@@ -1,4 +1,4 @@
-version_info = (4, 1, 1)
+version_info = (4, 2, 2)
 __version__ = '.'.join(map(str, version_info))
 
 protocol_version_info = (5, 0)
diff --git a/jupyter_client/blocking/client.py b/jupyter_client/blocking/client.py
index 67d6f62..37e83db 100644
--- a/jupyter_client/blocking/client.py
+++ b/jupyter_client/blocking/client.py
@@ -9,20 +9,58 @@ try:
     from queue import Empty  # Python 3
 except ImportError:
     from Queue import Empty  # Python 2
+import time
 
 from traitlets import Type
 from jupyter_client.channels import HBChannel
 from jupyter_client.client import KernelClient
 from .channels import ZMQSocketChannel
 
+
 class BlockingKernelClient(KernelClient):
-    def wait_for_ready(self):
+    """A BlockingKernelClient """
+    
+    def wait_for_ready(self, timeout=None):
+        """Waits for a response when a client is blocked
+        
+        - Sets future time for timeout
+        - Blocks on shell channel until a message is received
+        - Exit if the kernel has died
+        - If client times out before receiving a message from the kernel, send RuntimeError
+        - Flush the IOPub channel
+        """
+        if timeout is None:
+            abs_timeout = float('inf')
+        else:
+            abs_timeout = time.time() + timeout
+
+        from ..manager import KernelManager
+        if not isinstance(self.parent, KernelManager):
+            # This Client was not created by a KernelManager,
+            # so wait for kernel to become responsive to heartbeats
+            # before checking for kernel_info reply
+            while not self.is_alive():
+                if time.time() > abs_timeout:
+                    raise RuntimeError("Kernel didn't respond to heartbeats in %d seconds and timed out" % timeout)
+                time.sleep(0.2)
+
         # Wait for kernel info reply on shell channel
         while True:
-            msg = self.shell_channel.get_msg(block=True)
-            if msg['msg_type'] == 'kernel_info_reply':
-                self._handle_kernel_info_reply(msg)
-                break
+            try:
+                msg = self.shell_channel.get_msg(block=True, timeout=1)
+            except Empty:
+                pass
+            else:
+                if msg['msg_type'] == 'kernel_info_reply':
+                    self._handle_kernel_info_reply(msg)
+                    break
+
+            if not self.is_alive():
+                raise RuntimeError('Kernel died before replying to kernel_info')
+
+            # Give up if the absolute deadline has passed
+            if time.time() > abs_timeout:
+                raise RuntimeError("Kernel didn't respond in %d seconds" % timeout)
 
         # Flush IOPub channel
         while True:
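
A minimal sketch of how the timeout-aware ``wait_for_ready`` above can be
used with a manager-created client; it mirrors what ``start_new_kernel`` in
manager.py does later in this commit, and the kernel name is illustrative:

    from jupyter_client.manager import KernelManager

    km = KernelManager(kernel_name='python')   # kernel name is illustrative
    km.start_kernel()
    kc = km.client()
    kc.start_channels()
    try:
        # Raises RuntimeError if no kernel_info reply arrives within 60
        # seconds, or if the kernel dies first.
        kc.wait_for_ready(timeout=60)
    except RuntimeError:
        kc.stop_channels()
        km.shutdown_kernel()
        raise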
diff --git a/jupyter_client/channels.py b/jupyter_client/channels.py
index 0a40646..d7b0836 100644
--- a/jupyter_client/channels.py
+++ b/jupyter_client/channels.py
@@ -72,8 +72,10 @@ class HBChannel(Thread):
         self.address = address
         atexit.register(self._notice_exit)
 
+        # running is False until `.start()` is called
         self._running = False
-        self._pause = True
+        # don't start paused
+        self._pause = False
         self.poller = zmq.Poller()
 
     def _notice_exit(self):
diff --git a/jupyter_client/client.py b/jupyter_client/client.py
index 62a5439..6b47f7d 100644
--- a/jupyter_client/client.py
+++ b/jupyter_client/client.py
@@ -181,9 +181,14 @@ class KernelClient(ConnectionFileMixin):
 
     def is_alive(self):
         """Is the kernel process still running?"""
+        from .manager import KernelManager
+        if isinstance(self.parent, KernelManager):
+            # This KernelClient was created by a KernelManager,
+            # we can ask the parent KernelManager:
+            return self.parent.is_alive()
         if self._hb_channel is not None:
-            # We didn't start the kernel with this KernelManager so we
-            # use the heartbeat.
+            # We don't have access to the KernelManager,
+            # so we use the heartbeat.
             return self._hb_channel.is_beating()
         else:
             # no heartbeat and not local, we can't tell if it's running,
diff --git a/jupyter_client/connect.py b/jupyter_client/connect.py
index 09238dc..f30bff3 100644
--- a/jupyter_client/connect.py
+++ b/jupyter_client/connect.py
@@ -33,7 +33,7 @@ from jupyter_core.paths import jupyter_data_dir, jupyter_runtime_dir
 
 def write_connection_file(fname=None, shell_port=0, iopub_port=0, stdin_port=0, hb_port=0,
                          control_port=0, ip='', key=b'', transport='tcp',
-                         signature_scheme='hmac-sha256',
+                         signature_scheme='hmac-sha256', kernel_name=''
                          ):
     """Generates a JSON config file, including the selection of random ports.
 
@@ -72,6 +72,8 @@ def write_connection_file(fname=None, shell_port=0, iopub_port=0, stdin_port=0,
         Currently, 'hmac' is the only supported digest scheme,
         and 'sha256' is the default hash function.
 
+    kernel_name : str, optional
+        The name of the kernel this connection file is for.
     """
     if not ip:
         ip = localhost()
@@ -93,7 +95,7 @@ def write_connection_file(fname=None, shell_port=0, iopub_port=0, stdin_port=0,
             sock = socket.socket()
             # struct.pack('ii', (0,0)) is 8 null bytes
             sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, b'\0' * 8)
-            sock.bind(('', 0))
+            sock.bind((ip, 0))
             ports.append(sock)
         for i, sock in enumerate(ports):
             port = sock.getsockname()[1]
@@ -127,6 +129,7 @@ def write_connection_file(fname=None, shell_port=0, iopub_port=0, stdin_port=0,
     cfg['key'] = bytes_to_str(key)
     cfg['transport'] = transport
     cfg['signature_scheme'] = signature_scheme
+    cfg['kernel_name'] = kernel_name
 
     with open(fname, 'w') as f:
         f.write(json.dumps(cfg, indent=2))
@@ -320,9 +323,22 @@ class ConnectionFileMixin(LoggingConfigurable):
     # Connection and ipc file management
     #--------------------------------------------------------------------------
 
-    def get_connection_info(self):
-        """return the connection info as a dict"""
-        return dict(
+    def get_connection_info(self, session=False):
+        """Return the connection info as a dict
+
+        Parameters
+        ----------
+        session : bool [default: False]
+            If True, our session object will be included in the connection info.
+            If False (default), the configuration parameters of our session object will be included,
+            rather than the session object itself.
+
+        Returns
+        -------
+        connect_info : dict
+            dictionary of connection information.
+        """
+        info = dict(
             transport=self.transport,
             ip=self.ip,
             shell_port=self.shell_port,
@@ -330,9 +346,17 @@ class ConnectionFileMixin(LoggingConfigurable):
             stdin_port=self.stdin_port,
             hb_port=self.hb_port,
             control_port=self.control_port,
-            signature_scheme=self.session.signature_scheme,
-            key=self.session.key,
         )
+        if session:
+            # add session
+            info['session'] = self.session
+        else:
+            # add session info
+            info.update(dict(
+                signature_scheme=self.session.signature_scheme,
+                key=self.session.key,
+            ))
+        return info
 
     # factory for blocking clients
     blocking_class = Type(klass=object, default_value='jupyter_client.BlockingKernelClient')
@@ -379,6 +403,7 @@ class ConnectionFileMixin(LoggingConfigurable):
             shell_port=self.shell_port, hb_port=self.hb_port,
             control_port=self.control_port,
             signature_scheme=self.session.signature_scheme,
+            kernel_name=self.kernel_name
         )
         # write_connection_file also sets default ports:
         for name in port_names:
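
A quick sketch of the connect.py changes above, assuming the function's
existing ``(fname, cfg)`` return value; the kernel name is illustrative:

    import json
    from jupyter_client.connect import write_connection_file

    # Ports default to 0, asking write_connection_file to pick free ones;
    # the new kernel_name argument is recorded in the file for frontends.
    fname, cfg = write_connection_file(kernel_name='echo')
    with open(fname) as f:
        print(json.load(f)['kernel_name'])   # -> 'echo'

    # On a ConnectionFileMixin subclass (e.g. a KernelManager instance km),
    # the live Session object can now be requested instead of its parameters:
    # km.get_connection_info(session=True)['session']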
diff --git a/jupyter_client/consoleapp.py b/jupyter_client/consoleapp.py
index dc2bb2c..2c57840 100644
--- a/jupyter_client/consoleapp.py
+++ b/jupyter_client/consoleapp.py
@@ -24,6 +24,7 @@ from traitlets import (
 from jupyter_core.application import base_flags, base_aliases
 
 from .blocking import BlockingKernelClient
+from .restarter import KernelRestarter
 from . import KernelManager, tunnel_to_kernel, find_connection_file, connect
 from .kernelspec import NoSuchKernel
 from .session import Session
@@ -47,11 +48,15 @@ app_flags = {
 }
 app_flags.update(boolean_flag(
     'confirm-exit', 'JupyterConsoleApp.confirm_exit',
-    """Set to display confirmation dialog on exit. You can always use 'exit' or 'quit',
-       to force a direct exit without any confirmation.
+    """Set to display confirmation dialog on exit. You can always use 'exit' or
+       'quit', to force a direct exit without any confirmation. This can also
+       be set in the config file by setting
+       `c.JupyterConsoleApp.confirm_exit`.
     """,
     """Don't prompt the user when exiting. This will terminate the kernel
        if it is owned by the frontend, and leave it alive if it is external.
+       This can also be set in the config file by setting
+       `c.JupyterConsoleApp.confirm_exit`.
     """
 ))
 flags.update(app_flags)
@@ -80,7 +85,7 @@ aliases.update(app_aliases)
 # Classes
 #-----------------------------------------------------------------------------
 
-classes = [KernelManager, Session]
+classes = [KernelManager, KernelRestarter, Session]
 
 class JupyterConsoleApp(ConnectionFileMixin):
     name = 'jupyter-console-mixin'
@@ -230,11 +235,11 @@ class JupyterConsoleApp(ConnectionFileMixin):
         self.shell_port, self.iopub_port, self.stdin_port, self.hb_port = newports
         
         cf = self.connection_file
-        base,ext = os.path.splitext(cf)
-        base = os.path.basename(base)
-        self.connection_file = os.path.basename(base)+'-ssh'+ext
+        root, ext = os.path.splitext(cf)
+        self.connection_file = root + '-ssh' + ext
+        self.write_connection_file() # write the new connection file
         self.log.info("To connect another client via this tunnel, use:")
-        self.log.info("--existing %s" % self.connection_file)
+        self.log.info("--existing %s" % os.path.basename(self.connection_file))
     
     def _new_connection_file(self):
         cf = ''
diff --git a/jupyter_client/kernelspec.py b/jupyter_client/kernelspec.py
index 5232834..0065366 100644
--- a/jupyter_client/kernelspec.py
+++ b/jupyter_client/kernelspec.py
@@ -12,7 +12,7 @@ import warnings
 pjoin = os.path.join
 
 from ipython_genutils.py3compat import PY3
-from traitlets import HasTraits, List, Unicode, Dict, Set
+from traitlets import HasTraits, List, Unicode, Dict, Set, Bool, Type
 from traitlets.config import LoggingConfigurable
 
 from jupyter_core.paths import jupyter_data_dir, jupyter_path, SYSTEM_JUPYTER_PATH
@@ -72,7 +72,23 @@ class NoSuchKernel(KeyError):
     def __init__(self, name):
         self.name = name
 
+    def __str__(self):
+        return "No such kernel named {}".format(self.name)
+
 class KernelSpecManager(LoggingConfigurable):
+
+    kernel_spec_class = Type(KernelSpec, config=True,
+        help="""The kernel spec class.  This is configurable to allow
+        subclassing of the KernelSpecManager for customized behavior.
+        """
+    )
+
+    ensure_native_kernel = Bool(True, config=True,
+        help="""If there is no Python kernelspec registered and the IPython
+        kernel is available, ensure it is added to the spec list.
+        """
+    )
+
     data_dir = Unicode()
     def _data_dir_default(self):
         return jupyter_data_dir()
@@ -116,7 +132,7 @@ class KernelSpecManager(LoggingConfigurable):
                     self.log.debug("Found kernel %s in %s", kname, kernel_dir)
                     d[kname] = spec
 
-        if NATIVE_KERNEL_NAME not in d:
+        if self.ensure_native_kernel and NATIVE_KERNEL_NAME not in d:
             try:
                 from ipykernel.kernelspec import RESOURCES
                 self.log.debug("Native kernel (%s) available from %s",
@@ -131,6 +147,22 @@ class KernelSpecManager(LoggingConfigurable):
         return d
         # TODO: Caching?
 
+    def _get_kernel_spec_by_name(self, kernel_name, resource_dir):
+        """ Returns a :class:`KernelSpec` instance for a given kernel_name
+        and resource_dir.
+        """
+        if kernel_name == NATIVE_KERNEL_NAME:
+            try:
+                from ipykernel.kernelspec import RESOURCES, get_kernel_dict
+            except ImportError:
+                # It should be impossible to reach this, but let's play it safe
+                pass
+            else:
+                if resource_dir == RESOURCES:
+                    return self.kernel_spec_class(resource_dir=resource_dir, **get_kernel_dict())
+
+        return self.kernel_spec_class.from_resource_dir(resource_dir)
+
     def get_kernel_spec(self, kernel_name):
         """Returns a :class:`KernelSpec` instance for the given kernel_name.
 
@@ -142,17 +174,45 @@ class KernelSpecManager(LoggingConfigurable):
         except KeyError:
             raise NoSuchKernel(kernel_name)
 
-        if kernel_name == NATIVE_KERNEL_NAME:
-            try:
-                from ipykernel.kernelspec import RESOURCES, get_kernel_dict
-            except ImportError:
-                # It should be impossible to reach this, but let's play it safe
-                pass
-            else:
-                if resource_dir == RESOURCES:
-                    return KernelSpec(resource_dir=resource_dir, **get_kernel_dict())
+        return self._get_kernel_spec_by_name(kernel_name, resource_dir)
 
-        return KernelSpec.from_resource_dir(resource_dir)
+    def get_all_specs(self):
+        """Returns a dict mapping kernel names to kernelspecs.
+
+        Returns a dict of the form::
+
+            {
+              'kernel_name': {
+                'resource_dir': '/path/to/kernel_name',
+                'spec': {"the spec itself": ...}
+              },
+              ...
+            }
+        """
+        d = self.find_kernel_specs()
+        return {kname: {
+                "resource_dir": d[kname],
+                "spec": self._get_kernel_spec_by_name(kname, d[kname]).to_dict()
+                } for kname in d}
+
+    def remove_kernel_spec(self, name):
+        """Remove a kernel spec directory by name.
+        
+        Returns the path that was deleted.
+        """
+        save_native = self.ensure_native_kernel
+        try:
+            self.ensure_native_kernel = False
+            specs = self.find_kernel_specs()
+        finally:
+            self.ensure_native_kernel = save_native
+        spec_dir = specs[name]
+        self.log.debug("Removing %s", spec_dir)
+        if os.path.islink(spec_dir):
+            os.remove(spec_dir)
+        else:
+            shutil.rmtree(spec_dir)
+        return spec_dir
 
     def _get_destination_dir(self, kernel_name, user=False, prefix=None):
         if user:
@@ -178,6 +238,7 @@ class KernelSpecManager(LoggingConfigurable):
         PREFIX/share/jupyter/kernels/KERNEL_NAME. This can be sys.prefix
         for installation inside virtual or conda envs.
         """
+        source_dir = source_dir.rstrip('/\\')
         if not kernel_name:
             kernel_name = os.path.basename(source_dir)
         kernel_name = kernel_name.lower()
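
A sketch of the new KernelSpecManager methods introduced above; the name
passed to ``remove_kernel_spec`` is a placeholder:

    from jupyter_client.kernelspec import KernelSpecManager

    ksm = KernelSpecManager()

    # get_all_specs returns every installed spec in one call, more
    # efficiently than calling get_kernel_spec() once per name.
    for name, info in ksm.get_all_specs().items():
        print(name, info['resource_dir'], info['spec']['display_name'])

    # remove_kernel_spec deletes the spec directory (or symlink) and
    # returns the removed path; unknown names raise KeyError.
    # ksm.remove_kernel_spec('my_old_kernel')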
diff --git a/jupyter_client/kernelspecapp.py b/jupyter_client/kernelspecapp.py
index 0efd7e5..c43647e 100644
--- a/jupyter_client/kernelspecapp.py
+++ b/jupyter_client/kernelspecapp.py
@@ -13,11 +13,17 @@ from traitlets.config.application import Application
 from jupyter_core.application import (
     JupyterApp, base_flags, base_aliases
 )
-from traitlets import Instance, Dict, Unicode, Bool
+from traitlets import Instance, Dict, Unicode, Bool, List
 
 from . import __version__
 from .kernelspec import KernelSpecManager
 
+try:
+    raw_input
+except NameError:
+    # py3
+    raw_input = input
+
 class ListKernelSpecs(JupyterApp):
     version = __version__
     description = """List installed kernel specifications."""
@@ -35,10 +41,7 @@ class ListKernelSpecs(JupyterApp):
 
     def start(self):
         paths = self.kernel_spec_manager.find_kernel_specs()
-        specs = {kname: {
-                "resources_dir": paths[kname],
-                "spec": self.kernel_spec_manager.get_kernel_spec(kname).to_dict() 
-            } for kname in paths}
+        specs = self.kernel_spec_manager.get_all_specs()
         if not self.json_output:
             if not specs:
                 print("No kernels available")
@@ -148,6 +151,61 @@ class InstallKernelSpec(JupyterApp):
                 self.exit(1)
             raise
 
+class RemoveKernelSpec(JupyterApp):
+    version = __version__
+    description = """Remove one or more Jupyter kernelspecs by name."""
+    examples = """jupyter kernelspec remove python2 [my_kernel ...]"""
+    
+    force = Bool(False, config=True,
+        help="""Force removal, don't prompt for confirmation."""
+    )
+    spec_names = List(Unicode())
+    
+    kernel_spec_manager = Instance(KernelSpecManager)
+    def _kernel_spec_manager_default(self):
+        return KernelSpecManager(data_dir=self.data_dir, parent=self)
+    
+    flags = {
+        'f': ({'RemoveKernelSpec': {'force': True}}, force.get_metadata('help')),
+    }
+    flags.update(JupyterApp.flags)
+    
+    def parse_command_line(self, argv):
+        super(RemoveKernelSpec, self).parse_command_line(argv)
+        # accept positional args as kernelspec names
+        if self.extra_args:
+            self.spec_names = sorted(set(self.extra_args)) # remove duplicates
+        else:
+            self.exit("No kernelspec specified.")
+    
+    def start(self):
+        self.kernel_spec_manager.ensure_native_kernel = False
+        spec_paths = self.kernel_spec_manager.find_kernel_specs()
+        missing = set(self.spec_names).difference(set(spec_paths))
+        if missing:
+            self.exit("Couldn't find kernel spec(s): %s" % ', '.join(missing))
+        
+        if not self.force:
+            print("Kernel specs to remove:")
+            for name in self.spec_names:
+                print("  %s\t%s" % (name.ljust(20), spec_paths[name]))
+            answer = raw_input("Remove %i kernel specs [y/N]: " % len(self.spec_names))
+            if not answer.lower().startswith('y'):
+                return
+        
+        for kernel_name in self.spec_names:
+            try:
+                path = self.kernel_spec_manager.remove_kernel_spec(kernel_name)
+            except OSError as e:
+                if e.errno == errno.EACCES:
+                    print(e, file=sys.stderr)
+                    print("Perhaps you want sudo?", file=sys.stderr)
+                    self.exit(1)
+                else:
+                    raise
+            self.log.info("Removed %s", path)
+
+
 class InstallNativeKernelSpec(JupyterApp):
     version = __version__
     description = """[DEPRECATED] Install the IPython kernel spec directory for this Python."""
@@ -192,6 +250,8 @@ class KernelSpecApp(Application):
     subcommands = Dict({
         'list': (ListKernelSpecs, ListKernelSpecs.description.splitlines()[0]),
         'install': (InstallKernelSpec, InstallKernelSpec.description.splitlines()[0]),
+        'uninstall': (RemoveKernelSpec, "Alias for remove"),
+        'remove': (RemoveKernelSpec, RemoveKernelSpec.description.splitlines()[0]),
         'install-self': (InstallNativeKernelSpec, InstallNativeKernelSpec.description.splitlines()[0]),
     })
 
diff --git a/jupyter_client/launcher.py b/jupyter_client/launcher.py
index c0dda3f..7ec1104 100644
--- a/jupyter_client/launcher.py
+++ b/jupyter_client/launcher.py
@@ -8,7 +8,8 @@ import sys
 from subprocess import Popen, PIPE
 
 from ipython_genutils.encoding import getdefaultencoding
-from ipython_genutils.py3compat import cast_bytes_py2
+from ipython_genutils.py3compat import cast_bytes_py2, PY3
+from traitlets.log import get_logger
 
 
 def launch_kernel(cmd, stdin=None, stdout=None, stderr=None, env=None,
@@ -105,22 +106,33 @@ def launch_kernel(cmd, stdin=None, stdout=None, stderr=None, env=None,
                                      DUPLICATE_SAME_ACCESS)
             env['JPY_PARENT_PID'] = str(int(handle))
 
-        proc = Popen(cmd, **kwargs)
-
-        # Attach the interrupt event to the Popen objet so it can be used later.
-        proc.win32_interrupt_event = interrupt_event
-
     else:
-        if independent:
-            kwargs['preexec_fn'] = lambda: os.setsid()
+        # Create a new session.
+        # This makes it easier to interrupt the kernel,
+        # because we want to interrupt the whole process group.
+        # We don't use setpgrp, which is known to cause problems for kernels starting
+        # certain interactive subprocesses, such as bash -i.
+        if PY3:
+            kwargs['start_new_session'] = True
         else:
-            # Create a new process group. This makes it easier to
-            # interrupt the kernel, because we want to interrupt the
-            # children of the kernel process also.
-            kwargs['preexec_fn'] = lambda: os.setpgrp()
+            kwargs['preexec_fn'] = lambda: os.setsid()
+        if not independent:
             env['JPY_PARENT_PID'] = str(os.getpid())
 
+    try:
         proc = Popen(cmd, **kwargs)
+    except Exception as exc:
+        msg = (
+            "Failed to run command:\n{}\n"
+            "with kwargs:\n{!r}\n"
+        )
+        msg = msg.format(cmd, kwargs)
+        get_logger().error(msg)
+        raise
+
+    if sys.platform == 'win32':
+        # Attach the interrupt event to the Popen object so it can be used later.
+        proc.win32_interrupt_event = interrupt_event
 
     # Clean up pipes created to work around Popen bug.
     if redirect_in:
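
A rough, POSIX-only sketch of the session handling above: Python 3 passes
``start_new_session`` to Popen directly, while Python 2 falls back to
``os.setsid`` in a ``preexec_fn``; the child command is illustrative:

    import os
    import subprocess
    import sys

    cmd = [sys.executable, '-c', 'import time; time.sleep(1)']
    if sys.version_info[0] >= 3:
        proc = subprocess.Popen(cmd, start_new_session=True)
    else:
        proc = subprocess.Popen(cmd, preexec_fn=os.setsid)
    # The child now leads its own session/process group, so signalling its
    # group does not also interrupt this parent process.
    proc.wait()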
diff --git a/jupyter_client/localinterfaces.py b/jupyter_client/localinterfaces.py
index 488650b..b0ffdb0 100644
--- a/jupyter_client/localinterfaces.py
+++ b/jupyter_client/localinterfaces.py
@@ -6,6 +6,7 @@
 import os
 import re
 import socket
+import subprocess
 from subprocess import Popen, PIPE
 
 from warnings import warn
@@ -30,7 +31,11 @@ def _uniq_stable(elems):
 
 def _get_output(cmd):
     """Get output of a command, raising IOError if it fails"""
-    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
+    startupinfo = None
+    if os.name == 'nt':
+        startupinfo = subprocess.STARTUPINFO()
+        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+    p = Popen(cmd, stdout=PIPE, stderr=PIPE, startupinfo=startupinfo)
     stdout, stderr = p.communicate()
     if p.returncode:
         raise IOError("Failed to run %s: %s" % (cmd, stderr.decode('utf8', 'replace')))
diff --git a/jupyter_client/manager.py b/jupyter_client/manager.py
index ca66553..042d1c9 100644
--- a/jupyter_client/manager.py
+++ b/jupyter_client/manager.py
@@ -110,7 +110,7 @@ class KernelManager(ConnectionFileMixin):
 
     _restarter = Any()
 
-    autorestart = Bool(False, config=True,
+    autorestart = Bool(True, config=True,
         help="""Should we autorestart the kernel if it dies."""
     )
 
@@ -147,10 +147,9 @@ class KernelManager(ConnectionFileMixin):
     def client(self, **kwargs):
         """Create a client configured to connect to our kernel"""
         kw = {}
-        kw.update(self.get_connection_info())
+        kw.update(self.get_connection_info(session=True))
         kw.update(dict(
             connection_file=self.connection_file,
-            session=self.session,
             parent=self,
         ))
 
@@ -229,7 +228,7 @@ class KernelManager(ConnectionFileMixin):
         # build the Popen cmd
         extra_arguments = kw.pop('extra_arguments', [])
         kernel_cmd = self.format_kernel_cmd(extra_arguments=extra_arguments)
-        env = os.environ.copy()
+        env = kw.pop('env', os.environ).copy()
         # Don't allow PYTHONEXECUTABLE to be passed to kernel process.
         # If set, it can bork all the things.
         env.pop('PYTHONEXECUTABLE', None)
@@ -430,7 +429,12 @@ def start_new_kernel(startup_timeout=60, kernel_name='python', **kwargs):
     km.start_kernel(**kwargs)
     kc = km.client()
     kc.start_channels()
-    kc.wait_for_ready()
+    try:
+        kc.wait_for_ready(timeout=startup_timeout)
+    except RuntimeError:
+        kc.stop_channels()
+        km.shutdown_kernel()
+        raise
 
     return km, kc
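
A minimal sketch combining the two manager.py changes above: the new ``env``
keyword is forwarded to the kernel process, and a kernel that is not ready
within ``startup_timeout`` is shut down instead of being left running; the
environment variable is illustrative:

    import os
    from jupyter_client.manager import start_new_kernel

    env = os.environ.copy()
    env['EXAMPLE_FLAG'] = '1'   # passed through to the kernel process
    km, kc = start_new_kernel(startup_timeout=60, env=env)
    try:
        kc.execute("print('hello')")
    finally:
        kc.stop_channels()
        km.shutdown_kernel()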
 
diff --git a/jupyter_client/multikernelmanager.py b/jupyter_client/multikernelmanager.py
index 802279b..46967d9 100644
--- a/jupyter_client/multikernelmanager.py
+++ b/jupyter_client/multikernelmanager.py
@@ -99,12 +99,13 @@ class MultiKernelManager(LoggingConfigurable):
         # kernel_manager_factory is the constructor for the KernelManager
         # subclass we are using. It can be configured as any Configurable,
         # including things like its transport and ip.
+        constructor_kwargs = {}
         if self.kernel_spec_manager:
-            kwargs['kernel_spec_manager'] = self.kernel_spec_manager
+            constructor_kwargs['kernel_spec_manager'] = self.kernel_spec_manager
         km = self.kernel_manager_factory(connection_file=os.path.join(
                     self.connection_dir, "kernel-%s.json" % kernel_id),
-                    parent=self, autorestart=True, log=self.log, kernel_name=kernel_name,
-                    **kwargs
+                    parent=self, log=self.log, kernel_name=kernel_name,
+                    **constructor_kwargs
         )
         km.start_kernel(**kwargs)
         self._kernels[kernel_id] = km
diff --git a/jupyter_client/session.py b/jupyter_client/session.py
index 11b0798..1dd2be1 100644
--- a/jupyter_client/session.py
+++ b/jupyter_client/session.py
@@ -268,7 +268,13 @@ class Session(Configurable):
 
     """
 
-    debug=Bool(False, config=True, help="""Debug output in the Session""")
+    debug = Bool(False, config=True, help="""Debug output in the Session""")
+    
+    check_pid = Bool(True, config=True,
+        help="""Whether to check PID to protect against calls after fork.
+        
+        This check can be disabled if fork-safety is handled elsewhere.
+        """)
 
     packer = DottedObjectName('json',config=True,
             help="""The name of the packer for serializing messages.
@@ -448,6 +454,8 @@ class Session(Configurable):
         self.session
         self.pid = os.getpid()
         self._new_auth()
+        if not self.key:
+            get_logger().warning("Message signing is disabled.  This is insecure and not recommended!")
 
     @property
     def msg_id(self):
@@ -654,8 +662,8 @@ class Session(Configurable):
         else:
             msg = self.msg(msg_or_type, content=content, parent=parent,
                            header=header, metadata=metadata)
-        if not os.getpid() == self.pid:
-            get_logger().warn("WARNING: attempted to send message from fork\n%s",
+        if self.check_pid and not os.getpid() == self.pid:
+            get_logger().warning("WARNING: attempted to send message from fork\n%s",
... 390 lines suppressed ...

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/python-modules/packages/jupyter-client.git



More information about the Python-modules-commits mailing list