diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index f799bc226..65b020f1d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -36,6 +36,7 @@ jobs:
testing:
name: 'install + test-suite'
+ timeout-minutes: 10
runs-on: ubuntu-latest
steps:
diff --git a/piker/__init__.py b/piker/__init__.py
index d08c2dbce..6ebeec3df 100644
--- a/piker/__init__.py
+++ b/piker/__init__.py
@@ -1,5 +1,5 @@
# piker: trading gear for hackers.
-# Copyright 2020-eternity Tyler Goodlet (in stewardship for piker0)
+# Copyright 2020-eternity Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -14,11 +14,11 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
-"""
+'''
piker: trading gear for hackers.
-"""
-from ._daemon import open_piker_runtime
+'''
+from .service import open_piker_runtime
from .data.feed import open_feed
__all__ = [
diff --git a/piker/_daemon.py b/piker/_daemon.py
deleted file mode 100644
index 8983ecccf..000000000
--- a/piker/_daemon.py
+++ /dev/null
@@ -1,758 +0,0 @@
-# piker: trading gear for hackers
-# Copyright (C) Tyler Goodlet (in stewardship for piker0)
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-"""
-Structured, daemon tree service management.
-
-"""
-from __future__ import annotations
-import os
-from typing import (
- Optional,
- Callable,
- Any,
- ClassVar,
-)
-from contextlib import (
- asynccontextmanager as acm,
-)
-from collections import defaultdict
-
-import tractor
-import trio
-from trio_typing import TaskStatus
-
-from .log import (
- get_logger,
- get_console_log,
-)
-from .brokers import get_brokermod
-
-from pprint import pformat
-from functools import partial
-
-
-log = get_logger(__name__)
-
-_root_dname = 'pikerd'
-
-_default_registry_host: str = '127.0.0.1'
-_default_registry_port: int = 6116
-_default_reg_addr: tuple[str, int] = (
- _default_registry_host,
- _default_registry_port,
-)
-
-
-# NOTE: this value is set as an actor-global once the first endpoint
-# who is capable, spawns a `pikerd` service tree.
-_registry: Registry | None = None
-
-
-class Registry:
- addr: None | tuple[str, int] = None
-
- # TODO: table of uids to sockaddrs
- peers: dict[
- tuple[str, str],
- tuple[str, int],
- ] = {}
-
-
-_tractor_kwargs: dict[str, Any] = {}
-
-
-@acm
-async def open_registry(
- addr: None | tuple[str, int] = None,
- ensure_exists: bool = True,
-
-) -> tuple[str, int]:
-
- global _tractor_kwargs
- actor = tractor.current_actor()
- uid = actor.uid
- if (
- Registry.addr is not None
- and addr
- ):
- raise RuntimeError(
- f'`{uid}` registry addr already bound @ {_registry.sockaddr}'
- )
-
- was_set: bool = False
-
- if (
- not tractor.is_root_process()
- and Registry.addr is None
- ):
- Registry.addr = actor._arb_addr
-
- if (
- ensure_exists
- and Registry.addr is None
- ):
- raise RuntimeError(
- f"`{uid}` registry should already exist bug doesn't?"
- )
-
- if (
- Registry.addr is None
- ):
- was_set = True
- Registry.addr = addr or _default_reg_addr
-
- _tractor_kwargs['arbiter_addr'] = Registry.addr
-
- try:
- yield Registry.addr
- finally:
- # XXX: always clear the global addr if we set it so that the
- # next (set of) calls will apply whatever new one is passed
- # in.
- if was_set:
- Registry.addr = None
-
-
-def get_tractor_runtime_kwargs() -> dict[str, Any]:
- '''
- Deliver ``tractor`` related runtime variables in a `dict`.
-
- '''
- return _tractor_kwargs
-
-
-_root_modules = [
- __name__,
- 'piker.clearing._ems',
- 'piker.clearing._client',
- 'piker.data._sampling',
-]
-
-
-# TODO: factor this into a ``tractor.highlevel`` extension
-# pack for the library.
-class Services:
-
- actor_n: tractor._supervise.ActorNursery
- service_n: trio.Nursery
- debug_mode: bool # tractor sub-actor debug mode flag
- service_tasks: dict[
- str,
- tuple[
- trio.CancelScope,
- tractor.Portal,
- trio.Event,
- ]
- ] = {}
- locks = defaultdict(trio.Lock)
-
- @classmethod
- async def start_service_task(
- self,
- name: str,
- portal: tractor.Portal,
- target: Callable,
- **kwargs,
-
- ) -> (trio.CancelScope, tractor.Context):
- '''
- Open a context in a service sub-actor, add to a stack
- that gets unwound at ``pikerd`` teardown.
-
- This allows for allocating long-running sub-services in our main
- daemon and explicitly controlling their lifetimes.
-
- '''
- async def open_context_in_task(
- task_status: TaskStatus[
- tuple[
- trio.CancelScope,
- trio.Event,
- Any,
- ]
- ] = trio.TASK_STATUS_IGNORED,
-
- ) -> Any:
-
- with trio.CancelScope() as cs:
- async with portal.open_context(
- target,
- **kwargs,
-
- ) as (ctx, first):
-
- # unblock once the remote context has started
- complete = trio.Event()
- task_status.started((cs, complete, first))
- log.info(
- f'`pikerd` service {name} started with value {first}'
- )
- try:
- # wait on any context's return value
- # and any final portal result from the
- # sub-actor.
- ctx_res = await ctx.result()
-
- # NOTE: blocks indefinitely until cancelled
- # either by error from the target context
- # function or by being cancelled here by the
- # surrounding cancel scope.
- return (await portal.result(), ctx_res)
-
- finally:
- await portal.cancel_actor()
- complete.set()
- self.service_tasks.pop(name)
-
- cs, complete, first = await self.service_n.start(open_context_in_task)
-
- # store the cancel scope and portal for later cancellation or
- # retstart if needed.
- self.service_tasks[name] = (cs, portal, complete)
-
- return cs, first
-
- @classmethod
- async def cancel_service(
- self,
- name: str,
-
- ) -> Any:
- '''
- Cancel the service task and actor for the given ``name``.
-
- '''
- log.info(f'Cancelling `pikerd` service {name}')
- cs, portal, complete = self.service_tasks[name]
- cs.cancel()
- await complete.wait()
- assert name not in self.service_tasks, \
- f'Serice task for {name} not terminated?'
-
-
-@acm
-async def open_piker_runtime(
- name: str,
- enable_modules: list[str] = [],
- loglevel: Optional[str] = None,
-
- # XXX NOTE XXX: you should pretty much never want debug mode
- # for data daemons when running in production.
- debug_mode: bool = False,
-
- registry_addr: None | tuple[str, int] = None,
-
- # TODO: once we have `rsyscall` support we will read a config
- # and spawn the service tree distributed per that.
- start_method: str = 'trio',
-
- **tractor_kwargs,
-
-) -> tuple[
- tractor.Actor,
- tuple[str, int],
-]:
- '''
- Start a piker actor who's runtime will automatically sync with
- existing piker actors on the local link based on configuration.
-
- Can be called from a subactor or any program that needs to start
- a root actor.
-
- '''
- try:
- # check for existing runtime
- actor = tractor.current_actor().uid
-
- except tractor._exceptions.NoRuntime:
-
- registry_addr = registry_addr or _default_reg_addr
-
- async with (
- tractor.open_root_actor(
-
- # passed through to ``open_root_actor``
- arbiter_addr=registry_addr,
- name=name,
- loglevel=loglevel,
- debug_mode=debug_mode,
- start_method=start_method,
-
- # TODO: eventually we should be able to avoid
- # having the root have more then permissions to
- # spawn other specialized daemons I think?
- enable_modules=enable_modules,
-
- **tractor_kwargs,
- ) as _,
-
- open_registry(registry_addr, ensure_exists=False) as addr,
- ):
- yield (
- tractor.current_actor(),
- addr,
- )
- else:
- async with open_registry(registry_addr) as addr:
- yield (
- actor,
- addr,
- )
-
-
-@acm
-async def open_pikerd(
-
- loglevel: str | None = None,
-
- # XXX: you should pretty much never want debug mode
- # for data daemons when running in production.
- debug_mode: bool = False,
- registry_addr: None | tuple[str, int] = None,
-
- # db init flags
- tsdb: bool = False,
- es: bool = False,
-
-) -> Services:
- '''
- Start a root piker daemon who's lifetime extends indefinitely until
- cancelled.
-
- A root actor nursery is created which can be used to create and keep
- alive underling services (see below).
-
- '''
-
- async with (
- open_piker_runtime(
-
- name=_root_dname,
- # TODO: eventually we should be able to avoid
- # having the root have more then permissions to
- # spawn other specialized daemons I think?
- enable_modules=_root_modules,
-
- loglevel=loglevel,
- debug_mode=debug_mode,
- registry_addr=registry_addr,
-
- ) as (root_actor, reg_addr),
- tractor.open_nursery() as actor_nursery,
- trio.open_nursery() as service_nursery,
- ):
- assert root_actor.accept_addr == reg_addr
-
- if tsdb:
- from piker.data._ahab import start_ahab
- from piker.data.marketstore import start_marketstore
-
- log.info('Spawning `marketstore` supervisor')
- ctn_ready, config, (cid, pid) = await service_nursery.start(
- start_ahab,
- 'marketstored',
- start_marketstore,
-
- )
- log.info(
- f'`marketstored` up!\n'
- f'pid: {pid}\n'
- f'container id: {cid[:12]}\n'
- f'config: {pformat(config)}'
- )
-
- if es:
- from piker.data._ahab import start_ahab
- from piker.data.elastic import start_elasticsearch
-
- log.info('Spawning `elasticsearch` supervisor')
- ctn_ready, config, (cid, pid) = await service_nursery.start(
- partial(
- start_ahab,
- 'elasticsearch',
- start_elasticsearch,
- start_timeout=240.0 # high cause ci
- )
- )
-
- log.info(
- f'`elasticsearch` up!\n'
- f'pid: {pid}\n'
- f'container id: {cid[:12]}\n'
- f'config: {pformat(config)}'
- )
-
- # assign globally for future daemon/task creation
- Services.actor_n = actor_nursery
- Services.service_n = service_nursery
- Services.debug_mode = debug_mode
-
-
- try:
- yield Services
-
- finally:
- # TODO: is this more clever/efficient?
- # if 'samplerd' in Services.service_tasks:
- # await Services.cancel_service('samplerd')
- service_nursery.cancel_scope.cancel()
-
-
-@acm
-async def maybe_open_runtime(
- loglevel: Optional[str] = None,
- **kwargs,
-
-) -> None:
- '''
- Start the ``tractor`` runtime (a root actor) if none exists.
-
- '''
- name = kwargs.pop('name')
-
- if not tractor.current_actor(err_on_no_runtime=False):
- async with open_piker_runtime(
- name,
- loglevel=loglevel,
- **kwargs,
- ) as (_, addr):
- yield addr,
- else:
- async with open_registry() as addr:
- yield addr
-
-
-@acm
-async def maybe_open_pikerd(
- loglevel: Optional[str] = None,
- registry_addr: None | tuple = None,
- tsdb: bool = False,
- es: bool = False,
-
- **kwargs,
-
-) -> tractor._portal.Portal | ClassVar[Services]:
- '''
- If no ``pikerd`` daemon-root-actor can be found start it and
- yield up (we should probably figure out returning a portal to self
- though).
-
- '''
- if loglevel:
- get_console_log(loglevel)
-
- # subtle, we must have the runtime up here or portal lookup will fail
- query_name = kwargs.pop('name', f'piker_query_{os.getpid()}')
-
- # TODO: if we need to make the query part faster we could not init
- # an actor runtime and instead just hit the socket?
- # from tractor._ipc import _connect_chan, Channel
- # async with _connect_chan(host, port) as chan:
- # async with open_portal(chan) as arb_portal:
- # yield arb_portal
-
- async with (
- open_piker_runtime(
- name=query_name,
- registry_addr=registry_addr,
- loglevel=loglevel,
- **kwargs,
- ) as _,
- tractor.find_actor(
- _root_dname,
- arbiter_sockaddr=registry_addr,
- ) as portal
- ):
- # connect to any existing daemon presuming
- # its registry socket was selected.
- if (
- portal is not None
- ):
- yield portal
- return
-
- # presume pikerd role since no daemon could be found at
- # configured address
- async with open_pikerd(
- loglevel=loglevel,
- debug_mode=kwargs.get('debug_mode', False),
- registry_addr=registry_addr,
- tsdb=tsdb,
- es=es,
-
- ) as service_manager:
- # in the case where we're starting up the
- # tractor-piker runtime stack in **this** process
- # we return no portal to self.
- assert service_manager
- yield service_manager
-
-
-# `brokerd` enabled modules
-# NOTE: keeping this list as small as possible is part of our caps-sec
-# model and should be treated with utmost care!
-_data_mods = [
- 'piker.brokers.core',
- 'piker.brokers.data',
- 'piker.data',
- 'piker.data.feed',
- 'piker.data._sampling'
-]
-
-
-@acm
-async def find_service(
- service_name: str,
-) -> tractor.Portal | None:
-
- async with open_registry() as reg_addr:
- log.info(f'Scanning for service `{service_name}`')
- # attach to existing daemon by name if possible
- async with tractor.find_actor(
- service_name,
- arbiter_sockaddr=reg_addr,
- ) as maybe_portal:
- yield maybe_portal
-
-
-async def check_for_service(
- service_name: str,
-
-) -> None | tuple[str, int]:
- '''
- Service daemon "liveness" predicate.
-
- '''
- async with open_registry(ensure_exists=False) as reg_addr:
- async with tractor.query_actor(
- service_name,
- arbiter_sockaddr=reg_addr,
- ) as sockaddr:
- return sockaddr
-
-
-@acm
-async def maybe_spawn_daemon(
-
- service_name: str,
- service_task_target: Callable,
- spawn_args: dict[str, Any],
- loglevel: Optional[str] = None,
-
- singleton: bool = False,
- **kwargs,
-
-) -> tractor.Portal:
- '''
- If no ``service_name`` daemon-actor can be found,
- spawn one in a local subactor and return a portal to it.
-
- If this function is called from a non-pikerd actor, the
- spawned service will persist as long as pikerd does or
- it is requested to be cancelled.
-
- This can be seen as a service starting api for remote-actor
- clients.
-
- '''
- if loglevel:
- get_console_log(loglevel)
-
- # serialize access to this section to avoid
- # 2 or more tasks racing to create a daemon
- lock = Services.locks[service_name]
- await lock.acquire()
-
- async with find_service(service_name) as portal:
- if portal is not None:
- lock.release()
- yield portal
- return
-
- log.warning(f"Couldn't find any existing {service_name}")
-
- # TODO: really shouldn't the actor spawning be part of the service
- # starting method `Services.start_service()` ?
-
- # ask root ``pikerd`` daemon to spawn the daemon we need if
- # pikerd is not live we now become the root of the
- # process tree
- async with maybe_open_pikerd(
-
- loglevel=loglevel,
- **kwargs,
-
- ) as pikerd_portal:
-
- # we are the root and thus are `pikerd`
- # so spawn the target service directly by calling
- # the provided target routine.
- # XXX: this assumes that the target is well formed and will
- # do the right things to setup both a sub-actor **and** call
- # the ``_Services`` api from above to start the top level
- # service task for that actor.
- started: bool
- if pikerd_portal is None:
- started = await service_task_target(**spawn_args)
-
- else:
- # tell the remote `pikerd` to start the target,
- # the target can't return a non-serializable value
- # since it is expected that service startingn is
- # non-blocking and the target task will persist running
- # on `pikerd` after the client requesting it's start
- # disconnects.
- started = await pikerd_portal.run(
- service_task_target,
- **spawn_args,
- )
-
- if started:
- log.info(f'Service {service_name} started!')
-
- async with tractor.wait_for_actor(service_name) as portal:
- lock.release()
- yield portal
- await portal.cancel_actor()
-
-
-async def spawn_brokerd(
-
- brokername: str,
- loglevel: Optional[str] = None,
- **tractor_kwargs,
-
-) -> bool:
-
- log.info(f'Spawning {brokername} broker daemon')
-
- brokermod = get_brokermod(brokername)
- dname = f'brokerd.{brokername}'
-
- extra_tractor_kwargs = getattr(brokermod, '_spawn_kwargs', {})
- tractor_kwargs.update(extra_tractor_kwargs)
-
- # ask `pikerd` to spawn a new sub-actor and manage it under its
- # actor nursery
- modpath = brokermod.__name__
- broker_enable = [modpath]
- for submodname in getattr(
- brokermod,
- '__enable_modules__',
- [],
- ):
- subpath = f'{modpath}.{submodname}'
- broker_enable.append(subpath)
-
- portal = await Services.actor_n.start_actor(
- dname,
- enable_modules=_data_mods + broker_enable,
- loglevel=loglevel,
- debug_mode=Services.debug_mode,
- **tractor_kwargs
- )
-
- # non-blocking setup of brokerd service nursery
- from .data import _setup_persistent_brokerd
-
- await Services.start_service_task(
- dname,
- portal,
- _setup_persistent_brokerd,
- brokername=brokername,
- )
- return True
-
-
-@acm
-async def maybe_spawn_brokerd(
-
- brokername: str,
- loglevel: Optional[str] = None,
- **kwargs,
-
-) -> tractor.Portal:
- '''
- Helper to spawn a brokerd service *from* a client
- who wishes to use the sub-actor-daemon.
-
- '''
- async with maybe_spawn_daemon(
-
- f'brokerd.{brokername}',
- service_task_target=spawn_brokerd,
- spawn_args={'brokername': brokername, 'loglevel': loglevel},
- loglevel=loglevel,
- **kwargs,
-
- ) as portal:
- yield portal
-
-
-async def spawn_emsd(
-
- loglevel: Optional[str] = None,
- **extra_tractor_kwargs
-
-) -> bool:
- """
- Start the clearing engine under ``pikerd``.
-
- """
- log.info('Spawning emsd')
-
- portal = await Services.actor_n.start_actor(
- 'emsd',
- enable_modules=[
- 'piker.clearing._ems',
- 'piker.clearing._client',
- ],
- loglevel=loglevel,
- debug_mode=Services.debug_mode, # set by pikerd flag
- **extra_tractor_kwargs
- )
-
- # non-blocking setup of clearing service
- from .clearing._ems import _setup_persistent_emsd
-
- await Services.start_service_task(
- 'emsd',
- portal,
- _setup_persistent_emsd,
- )
- return True
-
-
-@acm
-async def maybe_open_emsd(
-
- brokername: str,
- loglevel: Optional[str] = None,
- **kwargs,
-
-) -> tractor._portal.Portal: # noqa
-
- async with maybe_spawn_daemon(
-
- 'emsd',
- service_task_target=spawn_emsd,
- spawn_args={'loglevel': loglevel},
- loglevel=loglevel,
- **kwargs,
-
- ) as portal:
- yield portal
diff --git a/piker/brokers/cli.py b/piker/brokers/cli.py
index 0d84384d6..f86c679eb 100644
--- a/piker/brokers/cli.py
+++ b/piker/brokers/cli.py
@@ -29,8 +29,15 @@
from ..cli import cli
from .. import watchlists as wl
from ..log import get_console_log, colorize_json, get_logger
-from .._daemon import maybe_spawn_brokerd, maybe_open_pikerd
-from ..brokers import core, get_brokermod, data
+from ..service import (
+ maybe_spawn_brokerd,
+ maybe_open_pikerd,
+)
+from ..brokers import (
+ core,
+ get_brokermod,
+ data,
+)
log = get_logger('cli')
DEFAULT_BROKER = 'questrade'
@@ -60,6 +67,7 @@ def get_method(client, meth_name: str):
print_ok('found!.')
return method
+
async def run_method(client, meth_name: str, **kwargs):
method = get_method(client, meth_name)
print('running...', end='', flush=True)
@@ -67,19 +75,20 @@ async def run_method(client, meth_name: str, **kwargs):
print_ok(f'done! result: {type(result)}')
return result
+
async def run_test(broker_name: str):
brokermod = get_brokermod(broker_name)
total = 0
passed = 0
failed = 0
- print(f'getting client...', end='', flush=True)
+ print('getting client...', end='', flush=True)
if not hasattr(brokermod, 'get_client'):
print_error('fail! no \'get_client\' context manager found.')
return
async with brokermod.get_client(is_brokercheck=True) as client:
- print_ok(f'done! inside client context.')
+ print_ok('done! inside client context.')
# check for methods present on brokermod
method_list = [
@@ -130,7 +139,6 @@ async def run_test(broker_name: str):
total += 1
-
# check for methods present con brokermod.Client and their
# results
@@ -180,7 +188,6 @@ async def bcheck_main():
trio.run(run_test, broker)
-
@cli.command()
@click.option('--keys', '-k', multiple=True,
help='Return results only for these keys')
@@ -335,8 +342,6 @@ def contracts(ctx, loglevel, broker, symbol, ids):
brokermod = get_brokermod(broker)
get_console_log(loglevel)
-
-
contracts = trio.run(partial(core.contracts, brokermod, symbol))
if not ids:
# just print out expiry dates which can be used with
diff --git a/piker/brokers/core.py b/piker/brokers/core.py
index af5da3a1e..3e9e1614a 100644
--- a/piker/brokers/core.py
+++ b/piker/brokers/core.py
@@ -28,7 +28,7 @@
from ..log import get_logger
from . import get_brokermod
-from .._daemon import maybe_spawn_brokerd
+from ..service import maybe_spawn_brokerd
from .._cacheables import open_cached_client
diff --git a/piker/brokers/ib/_util.py b/piker/brokers/ib/_util.py
index c7a499091..d6491ee73 100644
--- a/piker/brokers/ib/_util.py
+++ b/piker/brokers/ib/_util.py
@@ -177,8 +177,11 @@ def i3ipc_xdotool_manual_click_hack() -> None:
)
# re-activate and focus original window
- subprocess.call([
- 'xdotool',
- 'windowactivate', '--sync', str(orig_win_id),
- 'click', '--window', str(orig_win_id), '1',
- ])
+ try:
+ subprocess.call([
+ 'xdotool',
+ 'windowactivate', '--sync', str(orig_win_id),
+ 'click', '--window', str(orig_win_id), '1',
+ ])
+ except subprocess.TimeoutExpired:
+ log.exception('xdotool timed out?')
diff --git a/piker/brokers/kraken/feed.py b/piker/brokers/kraken/feed.py
index b4a2e666f..a737aaadb 100644
--- a/piker/brokers/kraken/feed.py
+++ b/piker/brokers/kraken/feed.py
@@ -453,7 +453,7 @@ async def open_symbol_search(
score_cutoff=50,
)
# repack in dict form
- await stream.send(
- {item[0]['altname']: item[0]
- for item in matches}
- )
+ await stream.send({
+ pair[0].altname: pair[0]
+ for pair in matches
+ })
diff --git a/piker/clearing/_client.py b/piker/clearing/_client.py
index 0a40b5480..7d03406a2 100644
--- a/piker/clearing/_client.py
+++ b/piker/clearing/_client.py
@@ -29,8 +29,11 @@
from ..log import get_logger
from ..data.types import Struct
-from .._daemon import maybe_open_emsd
-from ._messages import Order, Cancel
+from ..service import maybe_open_emsd
+from ._messages import (
+ Order,
+ Cancel,
+)
from ..brokers import get_brokermod
if TYPE_CHECKING:
diff --git a/piker/cli/__init__.py b/piker/cli/__init__.py
index 9b6f225ce..63b8321af 100644
--- a/piker/cli/__init__.py
+++ b/piker/cli/__init__.py
@@ -19,16 +19,18 @@
'''
import os
-from pprint import pformat
-from functools import partial
import click
import trio
import tractor
-from ..log import get_console_log, get_logger, colorize_json
+from ..log import (
+ get_console_log,
+ get_logger,
+ colorize_json,
+)
from ..brokers import get_brokermod
-from .._daemon import (
+from ..service import (
_default_registry_host,
_default_registry_port,
)
@@ -68,7 +70,7 @@ def pikerd(
'''
- from .._daemon import open_pikerd
+ from ..service import open_pikerd
log = get_console_log(loglevel)
if pdb:
@@ -171,7 +173,7 @@ def cli(
@click.pass_obj
def services(config, tl, ports):
- from .._daemon import (
+ from ..service import (
open_piker_runtime,
_default_registry_port,
_default_registry_host,
@@ -204,8 +206,8 @@ async def list_services():
def _load_clis() -> None:
- from ..data import marketstore # noqa
- from ..data import elastic
+ from ..service import marketstore # noqa
+ from ..service import elastic
from ..data import cli # noqa
from ..brokers import cli # noqa
from ..ui import cli # noqa
diff --git a/piker/config.py b/piker/config.py
index 3ae6a6652..397342e34 100644
--- a/piker/config.py
+++ b/piker/config.py
@@ -15,7 +15,7 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
-Broker configuration mgmt.
+Platform configuration (files) mgmt.
"""
import platform
@@ -26,17 +26,25 @@
import shutil
from typing import Optional
from pathlib import Path
+
from bidict import bidict
import toml
-from piker.testing import TEST_CONFIG_DIR_PATH
+
from .log import get_logger
log = get_logger('broker-config')
-# taken from ``click`` since apparently they have some
+# XXX NOTE: taken from ``click`` since apparently they have some
# super weirdness with sigint and sudo..no clue
-def get_app_dir(app_name, roaming=True, force_posix=False):
+# we're probably going to slowly just modify it to our own version over
+# time..
+def get_app_dir(
+ app_name: str,
+ roaming: bool = True,
+ force_posix: bool = False,
+
+) -> str:
r"""Returns the config folder for the application. The default behavior
is to return whatever is most appropriate for the operating system.
@@ -75,14 +83,30 @@ def get_app_dir(app_name, roaming=True, force_posix=False):
def _posixify(name):
return "-".join(name.split()).lower()
- # TODO: This is a hacky way to a) determine we're testing
- # and b) creating a test dir. We should aim to set a variable
- # within the tractor runtimes and store testing config data
- # outside of the users filesystem
+ # NOTE: for testing with `pytest` we leverage the `tmp_dir`
+ # fixture to generate (and clean up) a test-request-specific
+ # directory for isolated configuration files such that,
+ # - multiple tests can run (possibly in parallel) without data races
+ # on the config state,
+ # - we don't need to ever worry about leaking configs into the
+ # system thus avoiding needing to manage config cleanup fixtures or
+ # other bothers (since obviously `tmp_dir` cleans up after itself).
+ #
+ # In order to "pass down" the test dir path to all (sub-)actors in
+ # the actor tree we preload the root actor's runtime vars state (an
+ # internal mechanism for inheriting state down an actor tree in
+ # `tractor`) with the testing dir and check for it whenever we
+ # detect `pytest` is being used (which it isn't under normal
+ # operation).
if "pytest" in sys.modules:
- app_name = os.path.join(app_name, TEST_CONFIG_DIR_PATH)
+ import tractor
+ actor = tractor.current_actor(err_on_no_runtime=False)
+ if actor: # runtime is up
+ rvs = tractor._state._runtime_vars
+ testdirpath = Path(rvs['piker_vars']['piker_test_dir'])
+ assert testdirpath.exists(), 'piker test harness might be borked!?'
+ app_name = str(testdirpath)
- # if WIN:
if platform.system() == 'Windows':
key = "APPDATA" if roaming else "LOCALAPPDATA"
folder = os.environ.get(key)
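
As a quick orientation for the new test-isolation flow: the test harness preloads the root actor's runtime vars (via the new `tractor_runtime_overrides` kwarg on `open_piker_runtime()`, added in `piker/service/_actor_runtime.py` below) and `get_app_dir()` then resolves the config dir from them. A minimal sketch, with illustrative actor name and dir path:

```python
import trio
from piker.service import open_piker_runtime

async def main(test_dir: str) -> None:
    # preload the root actor's runtime vars so every (sub-)actor,
    # and thus `config.get_app_dir()`, resolves its config dir to
    # the isolated per-test path.
    async with open_piker_runtime(
        'test_harness',  # illustrative name
        tractor_runtime_overrides={'piker_test_dir': test_dir},
    ) as (actor, addr):
        ...

# under pytest this would be a `tmp_dir`-style fixture path
trio.run(main, '/tmp/piker-test-xyz')
```
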
diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py
index adaed041b..48a11f402 100644
--- a/piker/data/_pathops.py
+++ b/piker/data/_pathops.py
@@ -295,7 +295,7 @@ def slice_from_time(
arr: np.ndarray,
start_t: float,
stop_t: float,
- step: int | None = None,
+ step: float, # sampler period step-diff
) -> slice:
'''
@@ -324,12 +324,6 @@ def slice_from_time(
# end of the input array.
read_i_max = arr.shape[0]
- # TODO: require this is always passed in?
- if step is None:
- step = round(t_last - times[-2])
- if step == 0:
- step = 1
-
# compute (presumed) uniform-time-step index offsets
i_start_t = floor(start_t)
read_i_start = floor(((i_start_t - t_first) // step)) - 1
@@ -395,7 +389,7 @@ def slice_from_time(
# f'diff: {t_diff}\n'
# f'REMAPPED START i: {read_i_start} -> {new_read_i_start}\n'
# )
- read_i_start = new_read_i_start - 1
+ read_i_start = new_read_i_start
t_iv_stop = times[read_i_stop - 1]
if (
@@ -412,7 +406,7 @@ def slice_from_time(
times[read_i_start:],
# times,
i_stop_t,
- side='left',
+ side='right',
)
if (
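
To make the uniform-time-step offset math in `slice_from_time()` concrete, here is a tiny worked sketch (illustrative values only) of the initial start-index estimate computed before any `searchsorted` remapping:

```python
from math import floor

times = [100.0, 101.0, 102.0, 103.0, 104.0]  # 1s-step time index
t_first = times[0]
step = 1.0      # sampler period, now a required arg
start_t = 102.3

i_start_t = floor(start_t)                               # -> 102
read_i_start = floor((i_start_t - t_first) // step) - 1  # -> 1

# the estimate deliberately undershoots by one slot:
assert times[read_i_start + 1] <= start_t
```
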
diff --git a/piker/data/_sampling.py b/piker/data/_sampling.py
index a5df96cca..84dce08e2 100644
--- a/piker/data/_sampling.py
+++ b/piker/data/_sampling.py
@@ -42,7 +42,7 @@
get_logger,
get_console_log,
)
-from .._daemon import maybe_spawn_daemon
+from ..service import maybe_spawn_daemon
if TYPE_CHECKING:
from ._sharedmem import (
@@ -68,8 +68,8 @@ class Sampler:
This non-instantiated type is meant to be a singleton within
a `samplerd` actor-service spawned once by the user wishing to
- time-step sample real-time quote feeds, see
- ``._daemon.maybe_open_samplerd()`` and the below
+ time-step-sample (real-time) quote feeds, see
+ ``.service.maybe_open_samplerd()`` and the below
``register_with_sampler()``.
'''
@@ -87,7 +87,6 @@ class Sampler:
# holds all the ``tractor.Context`` remote subscriptions for
# a particular sample period increment event: all subscribers are
# notified on a step.
- # subscribers: dict[int, list[tractor.MsgStream]] = {}
subscribers: defaultdict[
float,
list[
@@ -240,8 +239,11 @@ async def broadcast(
subscribers for a given sample period.
'''
+ pair: list[float, set]
pair = self.subscribers[period_s]
+ last_ts: float
+ subs: set
last_ts, subs = pair
task = trio.lowlevel.current_task()
@@ -253,25 +255,35 @@ async def broadcast(
# f'consumers: {subs}'
)
borked: set[tractor.MsgStream] = set()
- for stream in subs:
+ sent: set[tractor.MsgStream] = set()
+ while True:
try:
- await stream.send({
- 'index': time_stamp or last_ts,
- 'period': period_s,
- })
- except (
- trio.BrokenResourceError,
- trio.ClosedResourceError
- ):
- log.error(
- f'{stream._ctx.chan.uid} dropped connection'
- )
- borked.add(stream)
+ for stream in (subs - sent):
+ try:
+ await stream.send({
+ 'index': time_stamp or last_ts,
+ 'period': period_s,
+ })
+ sent.add(stream)
+
+ except (
+ trio.BrokenResourceError,
+ trio.ClosedResourceError
+ ):
+ log.error(
+ f'{stream._ctx.chan.uid} dropped connection'
+ )
+ borked.add(stream)
+ else:
+ break
+ except RuntimeError:
+ log.warning(f'Client subs {subs} changed while broadcasting')
+ continue
for stream in borked:
try:
subs.remove(stream)
- except ValueError:
+ except KeyError:
log.warning(
f'{stream._ctx.chan.uid} sub already removed!?'
)
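
Distilled (dropping the IPC-error bookkeeping above), the new resend-tracking retry pattern is a sketch like:

```python
async def fanout(subs: set, msg: dict) -> None:
    # if the subscriber set mutates while we iterate/await
    # (surfacing as a RuntimeError), restart the sweep but skip
    # peers which already received this round's msg.
    sent: set = set()
    while True:
        try:
            for stream in (subs - sent):
                await stream.send(msg)
                sent.add(stream)
        except RuntimeError:
            continue
        else:
            break
```
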
@@ -379,7 +391,7 @@ async def spawn_samplerd(
update and increment count write and stream broadcasting.
'''
- from piker._daemon import Services
+ from piker.service import Services
dname = 'samplerd'
log.info(f'Spawning `{dname}`')
@@ -419,7 +431,7 @@ async def maybe_open_samplerd(
loglevel: str | None = None,
**kwargs,
-) -> tractor._portal.Portal: # noqa
+) -> tractor.Portal: # noqa
'''
Client-side helper to maybe startup the ``samplerd`` service
under the ``pikerd`` tree.
@@ -609,6 +621,14 @@ async def sample_and_broadcast(
fqsn = f'{broker_symbol}.{brokername}'
lags: int = 0
+ # TODO: speed up this loop in an AOT compiled lang (like
+ # rust or nim or zig) and/or instead of doing a fan out to
+ # TCP sockets here, we add a shm-style tick queue which
+ # readers can pull from instead of placing the burden of
+ # broadcast solely on this `brokerd` actor. see issues:
+ # - https://github.com/pikers/piker/issues/98
+ # - https://github.com/pikers/piker/issues/107
+
for (stream, tick_throttle) in subs.copy():
try:
with trio.move_on_after(0.2) as cs:
@@ -738,9 +758,6 @@ def frame_ticks(
ticks_by_type[ttype].append(tick)
-# TODO: a less naive throttler, here's some snippets:
-# token bucket by njs:
-# https://gist.github.com/njsmith/7ea44ec07e901cb78ebe1dd8dd846cb9
async def uniform_rate_send(
rate: float,
@@ -750,8 +767,22 @@ async def uniform_rate_send(
task_status: TaskStatus = trio.TASK_STATUS_IGNORED,
) -> None:
+ '''
+ Throttle a real-time (presumably tick event) stream to a uniform
+ transmission rate, normally for the purposes of throttling a data
+ flow being consumed by a graphics rendering actor which itself is limited
+ by a fixed maximum display rate.
- # try not to error-out on overruns of the subscribed (chart) client
+ Though this function isn't documented as (nor was intentionally written
+ to be) a token-bucket style algo, it effectively operates as one (we
+ think?).
+
+ TODO: a less naive throttler, here's some snippets:
+ token bucket by njs:
+ https://gist.github.com/njsmith/7ea44ec07e901cb78ebe1dd8dd846cb9
+
+ '''
+ # try not to error-out on overruns of the subscribed client
stream._ctx._backpressure = True
# TODO: compute the approx overhead latency per cycle
@@ -848,6 +879,16 @@ async def uniform_rate_send(
# rate timing exactly lul
try:
await stream.send({sym: first_quote})
+ except tractor.RemoteActorError as rme:
+ if rme.type is not tractor._exceptions.StreamOverrun:
+ raise
+ ctx = stream._ctx
+ chan = ctx.chan
+ log.warning(
+ 'Throttled quote-stream overrun!\n'
+ f'{sym}:{ctx.cid}@{chan.uid}'
+ )
+
except (
# NOTE: any of these can be raised by ``tractor``'s IPC
# transport-layer and we want to be highly resilient
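
Per the TODO kept in the new docstring, a less naive throttler would be an explicit token bucket; a generic sketch follows (not piker's current logic; the `TokenBucket` name and `burst` knob are ours):

```python
import trio

class TokenBucket:
    '''
    Classic token-bucket rate limiter: tokens accrue at `rate`
    per second up to a `burst` ceiling; each send costs one.

    '''
    def __init__(self, rate: float, burst: int = 1):
        self.rate = rate
        self.burst = burst
        self.tokens = float(burst)
        self._last = trio.current_time()

    async def acquire(self) -> None:
        while True:
            now = trio.current_time()
            self.tokens = min(
                self.burst,
                self.tokens + (now - self._last) * self.rate,
            )
            self._last = now
            if self.tokens >= 1:
                self.tokens -= 1
                return
            # sleep just long enough for one token to accrue
            await trio.sleep((1 - self.tokens) / self.rate)

# usage inside a trio task:
#   bucket = TokenBucket(rate=60)  # ~60 sends/sec
#   await bucket.acquire()
#   await stream.send(quote)
```
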
diff --git a/piker/data/cli.py b/piker/data/cli.py
index 554048a46..6984d9ff6 100644
--- a/piker/data/cli.py
+++ b/piker/data/cli.py
@@ -18,31 +18,22 @@
marketstore cli.
"""
-from functools import partial
-from pprint import pformat
-
-from anyio_marketstore import open_marketstore_client
import trio
import tractor
import click
-import numpy as np
-from .marketstore import (
- get_client,
+from ..service.marketstore import (
+ # get_client,
# stream_quotes,
ingest_quote_stream,
# _url,
- _tick_tbk_ids,
- mk_tbk,
+ # _tick_tbk_ids,
+ # mk_tbk,
)
from ..cli import cli
from .. import watchlists as wl
-from ..log import get_logger
-from ._sharedmem import (
- maybe_open_shm_array,
-)
-from ._source import (
- base_iohlc_dtype,
+from ..log import (
+ get_logger,
)
@@ -89,16 +80,16 @@ async def main():
# async def main():
# nonlocal names
# async with get_client(url) as client:
-#
+#
# if not names:
# names = await client.list_symbols()
-#
+#
# # default is to wipe db entirely.
# answer = input(
# "This will entirely wipe you local marketstore db @ "
# f"{url} of the following symbols:\n {pformat(names)}"
# "\n\nDelete [N/y]?\n")
-#
+#
# if answer == 'y':
# for sym in names:
# # tbk = _tick_tbk.format(sym)
@@ -107,21 +98,17 @@ async def main():
# await client.destroy(mk_tbk(tbk))
# else:
# print("Nothing deleted.")
-#
+#
# tractor.run(main)
@cli.command()
@click.option(
- '--tl',
- is_flag=True,
- help='Enable tractor logging')
-@click.option(
- '--host',
+ '--tsdb_host',
default='localhost'
)
@click.option(
- '--port',
+ '--tsdb_port',
default=5993
)
@click.argument('symbols', nargs=-1)
@@ -137,18 +124,93 @@ def storesh(
Start an IPython shell ready to query the local marketstore db.
'''
- from piker.data.marketstore import tsdb_history_update
- from piker._daemon import open_piker_runtime
+ from piker.data.marketstore import open_tsdb_client
+ from piker.service import open_piker_runtime
async def main():
nonlocal symbols
async with open_piker_runtime(
'storesh',
- enable_modules=['piker.data._ahab'],
+ enable_modules=['piker.service._ahab'],
):
symbol = symbols[0]
- await tsdb_history_update(symbol)
+
+ async with open_tsdb_client(symbol):
+ # TODO: ask if user wants to write history for detected
+ # available shm buffers?
+ from tractor.trionics import ipython_embed
+ await ipython_embed()
+
+ trio.run(main)
+
+
+@cli.command()
+@click.option(
+ '--host',
+ default='localhost'
+)
+@click.option(
+ '--port',
+ default=5993
+)
+@click.option(
+ '--delete',
+ '-d',
+ is_flag=True,
+ help='Delete history (1 Min) for symbol(s)',
+)
+@click.argument('symbols', nargs=-1)
+@click.pass_obj
+def storage(
+ config,
+ host,
+ port,
+ symbols: list[str],
+ delete: bool,
+
+):
+ '''
+ Manage the local marketstore db, e.g. delete time series history for the given symbol(s).
+
+ '''
+ from piker.service.marketstore import open_tsdb_client
+ from piker.service import open_piker_runtime
+
+ async def main():
+ nonlocal symbols
+
+ async with open_piker_runtime(
+ 'tsdb_storage',
+ enable_modules=['piker.service._ahab'],
+ ):
+ symbol = symbols[0]
+ async with open_tsdb_client(symbol) as storage:
+ if delete:
+ for fqsn in symbols:
+ syms = await storage.client.list_symbols()
+
+ resp60s = await storage.delete_ts(fqsn, 60)
+
+ msgish = resp60s.ListFields()[0][1]
+ if 'error' in str(msgish):
+
+ # TODO: MEGA LOL, apparently the symbols don't
+ # flush out until you refresh something or other
+ # (maybe the WALFILE)... #lelandorlulzone, classic
+ # alpaca(Rtm) design here ..
+ # well, if we ever can make this work we
+ # probably want to dogsplain the real reason
+ # for the delete errurz..llululu
+ if fqsn not in syms:
+ log.error(f'Pair {fqsn} dne in DB')
+
+ log.error(f'Deletion error: {fqsn}\n{msgish}')
+
+ resp1s = await storage.delete_ts(fqsn, 1)
+ msgish = resp1s.ListFields()[0][1]
+ if 'error' in str(msgish):
+ log.error(f'Deletion error: {fqsn}\n{msgish}')
trio.run(main)
@@ -182,7 +244,7 @@ def ingest(config, name, test_file, tl):
async def entry_point():
async with tractor.open_nursery() as n:
- for provider, symbols in grouped_syms.items():
+ for provider, symbols in grouped_syms.items():
await n.run_in_actor(
ingest_quote_stream,
name='ingest_marketstore',
diff --git a/piker/data/feed.py b/piker/data/feed.py
index 906f4bb4f..7efd5eb32 100644
--- a/piker/data/feed.py
+++ b/piker/data/feed.py
@@ -58,7 +58,7 @@
get_logger,
get_console_log,
)
-from .._daemon import (
+from ..service import (
maybe_spawn_brokerd,
check_for_service,
)
@@ -86,7 +86,7 @@
)
if TYPE_CHECKING:
- from .marketstore import Storage
+ from ..service.marketstore import Storage
log = get_logger(__name__)
@@ -865,7 +865,7 @@ async def manage_history(
):
log.info('Found existing `marketstored`')
- from . import marketstore
+ from ..service import marketstore
async with (
marketstore.open_storage_client(fqsn)as storage,
):
@@ -1589,6 +1589,9 @@ async def open_feed(
(brokermod, bfqsns),
) in zip(ctxs, providers.items()):
+ # NOTE: do it asap to avoid overruns during multi-feed setup?
+ ctx._backpressure = backpressure
+
for fqsn, flume_msg in flumes_msg_dict.items():
flume = Flume.from_msg(flume_msg)
assert flume.symbol.fqsn == fqsn
diff --git a/piker/log.py b/piker/log.py
index 804e09dc6..a36beec02 100644
--- a/piker/log.py
+++ b/piker/log.py
@@ -21,7 +21,11 @@
import json
import tractor
-from pygments import highlight, lexers, formatters
+from pygments import (
+ highlight,
+ lexers,
+ formatters,
+)
# Makes it so we only see the full module name when using ``__name__``
# without the extra "piker." prefix.
@@ -32,26 +36,48 @@ def get_logger(
name: str = None,
) -> logging.Logger:
- '''Return the package log or a sub-log for `name` if provided.
+ '''
+ Return the package log or a sub-log for `name` if provided.
+
'''
return tractor.log.get_logger(name=name, _root_name=_proj_name)
-def get_console_log(level: str = None, name: str = None) -> logging.Logger:
- '''Get the package logger and enable a handler which writes to stderr.
+def get_console_log(
+ level: str | None = None,
+ name: str | None = None,
+
+) -> logging.Logger:
+ '''
+ Get the package logger and enable a handler which writes to stderr.
Yeah yeah, i know we can use ``DictConfig``. You do it...
+
'''
return tractor.log.get_console_log(
- level, name=name, _root_name=_proj_name) # our root logger
+ level,
+ name=name,
+ _root_name=_proj_name,
+ ) # our root logger
-def colorize_json(data, style='algol_nu'):
- """Colorize json output using ``pygments``.
- """
- formatted_json = json.dumps(data, sort_keys=True, indent=4)
+def colorize_json(
+ data: dict,
+ style='algol_nu',
+):
+ '''
+ Colorize json output using ``pygments``.
+
+ '''
+ formatted_json = json.dumps(
+ data,
+ sort_keys=True,
+ indent=4,
+ )
return highlight(
- formatted_json, lexers.JsonLexer(),
+ formatted_json,
+ lexers.JsonLexer(),
+
# likeable styles: algol_nu, tango, monokai
formatters.TerminalTrueColorFormatter(style=style)
)
diff --git a/piker/pp.py b/piker/pp.py
index 38ff15664..a01bdc4e7 100644
--- a/piker/pp.py
+++ b/piker/pp.py
@@ -199,8 +199,16 @@ def to_pretoml(self) -> tuple[str, dict]:
sym_info = s.broker_info[broker]
d['asset_type'] = sym_info['asset_type']
- d['price_tick_size'] = sym_info['price_tick_size']
- d['lot_tick_size'] = sym_info['lot_tick_size']
+ d['price_tick_size'] = (
+ sym_info.get('price_tick_size')
+ or
+ s.tick_size
+ )
+ d['lot_tick_size'] = (
+ sym_info.get('lot_tick_size')
+ or
+ s.lot_tick_size
+ )
if self.expiry is None:
d.pop('expiry', None)
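
One subtlety worth noting with the `.get() or fallback` pattern above: the fallback also wins when the key is present but falsy (e.g. a zero tick size), not only when it's missing or `None`:

```python
sym_info = {'price_tick_size': 0}  # present but falsy
tick = sym_info.get('price_tick_size') or 0.01
assert tick == 0.01  # the 0 entry is overridden too
```
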
diff --git a/piker/service/__init__.py b/piker/service/__init__.py
new file mode 100644
index 000000000..3b9767cdc
--- /dev/null
+++ b/piker/service/__init__.py
@@ -0,0 +1,60 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+"""
+Actor-runtime service orchestration machinery.
+
+"""
+from __future__ import annotations
+
+from ._mngr import Services
+from ._registry import ( # noqa
+ _tractor_kwargs,
+ _default_reg_addr,
+ _default_registry_host,
+ _default_registry_port,
+ open_registry,
+ find_service,
+ check_for_service,
+)
+from ._daemon import ( # noqa
+ maybe_spawn_daemon,
+ spawn_brokerd,
+ maybe_spawn_brokerd,
+ spawn_emsd,
+ maybe_open_emsd,
+)
+from ._actor_runtime import (
+ open_piker_runtime,
+ maybe_open_pikerd,
+ open_pikerd,
+ get_tractor_runtime_kwargs,
+)
+
+
+__all__ = [
+ 'check_for_service',
+ 'Services',
+ 'maybe_spawn_daemon',
+ 'spawn_brokerd',
+ 'maybe_spawn_brokerd',
+ 'spawn_emsd',
+ 'maybe_open_emsd',
+ 'open_piker_runtime',
+ 'maybe_open_pikerd',
+ 'open_pikerd',
+ 'get_tractor_runtime_kwargs',
+]
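
Downstream callers now pull all service-layer machinery from this package root; the old `piker._daemon` imports become, e.g.:

```python
from piker.service import (
    open_piker_runtime,
    maybe_spawn_brokerd,
    check_for_service,
)
```
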
diff --git a/piker/service/_actor_runtime.py b/piker/service/_actor_runtime.py
new file mode 100644
index 000000000..b92ad221d
--- /dev/null
+++ b/piker/service/_actor_runtime.py
@@ -0,0 +1,347 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+"""
+``tractor`` wrapping + default config to bootstrap the `pikerd`.
+
+"""
+from __future__ import annotations
+from pprint import pformat
+from functools import partial
+import os
+from typing import (
+ Optional,
+ Any,
+ ClassVar,
+)
+from contextlib import (
+ asynccontextmanager as acm,
+)
+
+import tractor
+import trio
+
+from ..log import (
+ get_logger,
+ get_console_log,
+)
+from ._mngr import (
+ Services,
+)
+from ._registry import ( # noqa
+ _tractor_kwargs,
+ _default_reg_addr,
+ open_registry,
+)
+
+log = get_logger(__name__)
+
+
+def get_tractor_runtime_kwargs() -> dict[str, Any]:
+ '''
+ Deliver ``tractor`` related runtime variables in a `dict`.
+
+ '''
+ return _tractor_kwargs
+
+
+@acm
+async def open_piker_runtime(
+ name: str,
+ enable_modules: list[str] = [],
+ loglevel: Optional[str] = None,
+
+ # XXX NOTE XXX: you should pretty much never want debug mode
+ # for data daemons when running in production.
+ debug_mode: bool = False,
+
+ registry_addr: None | tuple[str, int] = None,
+
+ # TODO: once we have `rsyscall` support we will read a config
+ # and spawn the service tree distributed per that.
+ start_method: str = 'trio',
+
+ tractor_runtime_overrides: dict | None = None,
+ **tractor_kwargs,
+
+) -> tuple[
+ tractor.Actor,
+ tuple[str, int],
+]:
+ '''
+ Start a piker actor whose runtime will automatically sync with
+ existing piker actors on the local link based on configuration.
+
+ Can be called from a subactor or any program that needs to start
+ a root actor.
+
+ '''
+ try:
+ # check for existing runtime
+ actor = tractor.current_actor().uid
+
+ except tractor._exceptions.NoRuntime:
+ tractor._state._runtime_vars[
+ 'piker_vars'] = tractor_runtime_overrides
+
+ registry_addr = registry_addr or _default_reg_addr
+
+ async with (
+ tractor.open_root_actor(
+
+ # passed through to ``open_root_actor``
+ arbiter_addr=registry_addr,
+ name=name,
+ loglevel=loglevel,
+ debug_mode=debug_mode,
+ start_method=start_method,
+
+ # TODO: eventually we should be able to avoid
+ # having the root have more then permissions to
+ # spawn other specialized daemons I think?
+ enable_modules=enable_modules,
+
+ **tractor_kwargs,
+ ) as _,
+
+ open_registry(registry_addr, ensure_exists=False) as addr,
+ ):
+ yield (
+ tractor.current_actor(),
+ addr,
+ )
+ else:
+ async with open_registry(registry_addr) as addr:
+ yield (
+ actor,
+ addr,
+ )
+
+
+_root_dname = 'pikerd'
+_root_modules = [
+ __name__,
+ 'piker.service._daemon',
+ 'piker.clearing._ems',
+ 'piker.clearing._client',
+ 'piker.data._sampling',
+]
+
+
+@acm
+async def open_pikerd(
+
+ loglevel: str | None = None,
+
+ # XXX: you should pretty much never want debug mode
+ # for data daemons when running in production.
+ debug_mode: bool = False,
+ registry_addr: None | tuple[str, int] = None,
+
+ # db init flags
+ tsdb: bool = False,
+ es: bool = False,
+ drop_root_perms_for_ahab: bool = True,
+
+ **kwargs,
+
+) -> Services:
+ '''
+ Start a root piker daemon with an indefinite lifetime.
+
+ A root actor nursery is created which can be used to create and keep
+ alive underlying services (see below).
+
+ '''
+ async with (
+ open_piker_runtime(
+
+ name=_root_dname,
+ # TODO: eventually we should be able to avoid
+ # having the root have more then permissions to
+ # spawn other specialized daemons I think?
+ enable_modules=_root_modules,
+ loglevel=loglevel,
+ debug_mode=debug_mode,
+ registry_addr=registry_addr,
+
+ **kwargs,
+
+ ) as (root_actor, reg_addr),
+ tractor.open_nursery() as actor_nursery,
+ trio.open_nursery() as service_nursery,
+ ):
+ if root_actor.accept_addr != reg_addr:
+ raise RuntimeError(f'Daemon failed to bind on {reg_addr}!?')
+
+ # assign globally for future daemon/task creation
+ Services.actor_n = actor_nursery
+ Services.service_n = service_nursery
+ Services.debug_mode = debug_mode
+
+ if tsdb:
+ from ._ahab import start_ahab
+ from .marketstore import start_marketstore
+
+ log.info('Spawning `marketstore` supervisor')
+ ctn_ready, config, (cid, pid) = await service_nursery.start(
+ partial(
+ start_ahab,
+ 'marketstored',
+ start_marketstore,
+ loglevel=loglevel,
+ drop_root_perms=drop_root_perms_for_ahab,
+ )
+
+ )
+ log.info(
+ f'`marketstored` up!\n'
+ f'pid: {pid}\n'
+ f'container id: {cid[:12]}\n'
+ f'config: {pformat(config)}'
+ )
+
+ if es:
+ from ._ahab import start_ahab
+ from .elastic import start_elasticsearch
+
+ log.info('Spawning `elasticsearch` supervisor')
+ ctn_ready, config, (cid, pid) = await service_nursery.start(
+ partial(
+ start_ahab,
+ 'elasticsearch',
+ start_elasticsearch,
+ loglevel=loglevel,
+ drop_root_perms=drop_root_perms_for_ahab,
+ )
+ )
+
+ log.info(
+ f'`elasticsearch` up!\n'
+ f'pid: {pid}\n'
+ f'container id: {cid[:12]}\n'
+ f'config: {pformat(config)}'
+ )
+
+ try:
+ yield Services
+
+ finally:
+ # TODO: is this more clever/efficient?
+ # if 'samplerd' in Services.service_tasks:
+ # await Services.cancel_service('samplerd')
+ service_nursery.cancel_scope.cancel()
+
+
+# TODO: do we even need this?
+# @acm
+# async def maybe_open_runtime(
+# loglevel: Optional[str] = None,
+# **kwargs,
+
+# ) -> None:
+# '''
+# Start the ``tractor`` runtime (a root actor) if none exists.
+
+# '''
+# name = kwargs.pop('name')
+
+# if not tractor.current_actor(err_on_no_runtime=False):
+# async with open_piker_runtime(
+# name,
+# loglevel=loglevel,
+# **kwargs,
+# ) as (_, addr):
+# yield addr,
+# else:
+# async with open_registry() as addr:
+# yield addr
+
+
+@acm
+async def maybe_open_pikerd(
+ loglevel: Optional[str] = None,
+ registry_addr: None | tuple = None,
+ tsdb: bool = False,
+ es: bool = False,
+ drop_root_perms_for_ahab: bool = True,
+
+ **kwargs,
+
+) -> tractor._portal.Portal | ClassVar[Services]:
+ '''
+ If no ``pikerd`` daemon-root-actor can be found, start it and
+ yield up (we should probably figure out returning a portal to self
+ though).
+
+ '''
+ if loglevel:
+ get_console_log(loglevel)
+
+ # subtle, we must have the runtime up here or portal lookup will fail
+ query_name = kwargs.pop(
+ 'name',
+ f'piker_query_{os.getpid()}',
+ )
+
+ # TODO: if we need to make the query part faster we could not init
+ # an actor runtime and instead just hit the socket?
+ # from tractor._ipc import _connect_chan, Channel
+ # async with _connect_chan(host, port) as chan:
+ # async with open_portal(chan) as arb_portal:
+ # yield arb_portal
+
+ async with (
+ open_piker_runtime(
+ name=query_name,
+ registry_addr=registry_addr,
+ loglevel=loglevel,
+ **kwargs,
+ ) as _,
+
+ tractor.find_actor(
+ _root_dname,
+ arbiter_sockaddr=registry_addr,
+ ) as portal
+ ):
+ # connect to any existing daemon presuming
+ # its registry socket was selected.
+ if (
+ portal is not None
+ ):
+ yield portal
+ return
+
+ # presume pikerd role since no daemon could be found at
+ # configured address
+ async with open_pikerd(
+ loglevel=loglevel,
+ registry_addr=registry_addr,
+
+ # ahabd (docker super) specific controls
+ tsdb=tsdb,
+ es=es,
+ drop_root_perms_for_ahab=drop_root_perms_for_ahab,
+
+ # passthrough to ``tractor`` init
+ **kwargs,
+
+ ) as service_manager:
+ # in the case where we're starting up the
+ # tractor-piker runtime stack in **this** process
+ # we return no portal to self.
+ assert service_manager
+ yield service_manager
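
For orientation, the client-side contract sketched out: a `tractor.Portal` is yielded when an existing `pikerd` is found, otherwise the in-process `Services` type is. A minimal (illustrative) usage:

```python
import tractor
import trio
from piker.service import maybe_open_pikerd

async def main() -> None:
    async with maybe_open_pikerd(loglevel='info') as portal_or_services:
        if isinstance(portal_or_services, tractor.Portal):
            print('attached to an existing `pikerd`')
        else:
            print('no daemon found; we became the root `pikerd`')

trio.run(main)
```
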
diff --git a/piker/data/_ahab.py b/piker/service/_ahab.py
similarity index 65%
rename from piker/data/_ahab.py
rename to piker/service/_ahab.py
index 39a5b46a1..7c3133e11 100644
--- a/piker/data/_ahab.py
+++ b/piker/service/_ahab.py
@@ -15,9 +15,12 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
-Supervisor for docker with included specific-image service helpers.
+Supervisor for ``docker`` with included async and SC wrapping
+to ensure a cancellable container lifetime system.
'''
+from collections import ChainMap
+from functools import partial
import os
import time
from typing import (
@@ -45,7 +48,10 @@
ReadTimeout,
)
-from ..log import get_logger, get_console_log
+from ..log import (
+ get_logger,
+ get_console_log,
+)
from .. import config
log = get_logger(__name__)
@@ -124,10 +130,19 @@ def __init__(
async def process_logs_until(
self,
+ log_msg_key: str,
+
# this is a predicate func for matching log msgs emitted by the
# underlying containerized app
patt_matcher: Callable[[str], bool],
- bp_on_msg: bool = False,
+
+ # XXX WARNING XXX: do not touch this sleep value unless
+ # you know what you are doing! the value is critical to
+ # making sure the caller code inside the startup context
+ # does not time out BEFORE we receive a match on the
+ # ``patt_matcher()`` predicate above.
+ checkpoint_period: float = 0.001,
+
) -> bool:
'''
Attempt to capture container log messages and relay through our
@@ -137,12 +152,14 @@ async def process_logs_until(
seen_so_far = self.seen_so_far
while True:
try:
logs = self.cntr.logs()
except (
docker.errors.NotFound,
docker.errors.APIError
):
+ log.exception('Failed to parse logs?')
return False
entries = logs.decode().split('\n')
@@ -155,25 +172,23 @@ async def process_logs_until(
entry = entry.strip()
try:
record = json.loads(entry)
-
- if 'msg' in record:
- msg = record['msg']
- elif 'message' in record:
- msg = record['message']
- else:
- raise KeyError(f'Unexpected log format\n{record}')
-
+ msg = record[log_msg_key]
level = record['level']
except json.JSONDecodeError:
msg = entry
level = 'error'
- if msg and entry not in seen_so_far:
- seen_so_far.add(entry)
- if bp_on_msg:
- await tractor.breakpoint()
+ # TODO: do we need a more general mechanism
+ # for these kinda of "log record entries"?
+ # if 'Error' in entry:
+ # raise RuntimeError(entry)
+ if (
+ msg
+ and entry not in seen_so_far
+ ):
+ seen_so_far.add(entry)
getattr(log, level.lower(), log.error)(f'{msg}')
if level == 'fatal':
@@ -183,10 +198,15 @@ async def process_logs_until(
return True
# do a checkpoint so we don't block if cancelled B)
- await trio.sleep(0.1)
+ await trio.sleep(checkpoint_period)
return False
+ @property
+ def cuid(self) -> str:
+ fqcn: str = self.cntr.attrs['Config']['Image']
+ return f'{fqcn}[{self.cntr.short_id}]'
+
def try_signal(
self,
signal: str = 'SIGINT',
@@ -222,17 +242,23 @@ def hard_kill(self, start: float) -> None:
async def cancel(
self,
- stop_msg: str,
+ log_msg_key: str,
+ stop_predicate: Callable[[str], bool],
+
hard_kill: bool = False,
) -> None:
+ '''
+ Attempt to cancel this container gracefully, fail over to
+ a hard kill on timeout.
+ '''
cid = self.cntr.id
# first try a graceful cancel
log.cancel(
- f'SIGINT cancelling container: {cid}\n'
- f'waiting on stop msg: "{stop_msg}"'
+ f'SIGINT cancelling container: {self.cuid}\n'
+ 'waiting on stop predicate...'
)
self.try_signal('SIGINT')
@@ -243,7 +269,10 @@ async def cancel(
log.cancel('polling for CNTR logs...')
try:
- await self.process_logs_until(stop_msg)
+ await self.process_logs_until(
+ log_msg_key,
+ stop_predicate,
+ )
except ApplicationLogError:
hard_kill = True
else:
@@ -301,12 +330,16 @@ async def cancel(
async def open_ahabd(
ctx: tractor.Context,
endpoint: str, # ns-pointer str-msg-type
- start_timeout: float = 1.0,
+ loglevel: str | None = 'cancel',
**kwargs,
) -> None:
- get_console_log('info', name=__name__)
+
+ log = get_console_log(
+ loglevel,
+ name=__name__,
+ )
async with open_docker() as client:
@@ -317,42 +350,110 @@ async def open_ahabd(
(
dcntr,
cntr_config,
- start_lambda,
- stop_lambda,
+ start_pred,
+ stop_pred,
) = ep_func(client)
cntr = Container(dcntr)
- with trio.move_on_after(start_timeout):
- found = await cntr.process_logs_until(start_lambda)
+ conf: ChainMap[str, Any] = ChainMap(
- if not found and dcntr not in client.containers.list():
- for entry in cntr.seen_so_far:
- log.info(entry)
+ # container specific
+ cntr_config,
- raise RuntimeError(
- f'Failed to start {dcntr.id} check logs deats'
- )
+ # defaults
+ {
+ # startup time limit which is the max the supervisor
+ # will wait for the container to be registered in
+ # ``client.containers.list()``
+ 'startup_timeout': 1.0,
- await ctx.started((
- cntr.cntr.id,
- os.getpid(),
- cntr_config,
- ))
+ # how fast to poll for the startup predicate by sleeping
+ # this amount incrementally, thus yielding to the
+ # ``trio`` scheduler during sync polling execution.
+ 'startup_query_period': 0.001,
+
+ # str-key value expected to contain log message body-contents
+ # when read using:
+ # ``json.loads(entry for entry in DockerContainer.logs())``
+ 'log_msg_key': 'msg',
+
+
+ # startup sync func, like `Nursery.started()`
+ 'started_afunc': None,
+ },
+ )
try:
+ with trio.move_on_after(conf['startup_timeout']) as cs:
+ async with trio.open_nursery() as tn:
+ tn.start_soon(
+ partial(
+ cntr.process_logs_until,
+ log_msg_key=conf['log_msg_key'],
+ patt_matcher=start_pred,
+ checkpoint_period=conf['startup_query_period'],
+ )
+ )
+
+ # optional blocking routine
+ started = conf['started_afunc']
+ if started:
+ await started()
+
+ # poll for container startup or timeout
+ while not cs.cancel_called:
+ if dcntr in client.containers.list():
+ break
+
+ await trio.sleep(conf['startup_query_period'])
+
+ # sync with remote caller actor-task but allow log
+ # processing to continue running in bg.
+ await ctx.started((
+ cntr.cntr.id,
+ os.getpid(),
+ cntr_config,
+ ))
+
+ # XXX: if we timeout on finding the "startup msg" we
+ # expect then we want to FOR SURE raise an error
+ # upwards!
+ if cs.cancelled_caught:
+ # if dcntr not in client.containers.list():
+ for entry in cntr.seen_so_far:
+ log.info(entry)
+
+ raise DockerNotStarted(
+ f'Failed to start container: {cntr.cuid}\n'
+ f'due to timeout={conf["startup_timeout"]}s\n\n'
+ "check ur container's logs!"
+ )
+
# TODO: we might eventually want a proxy-style msg-prot here
# to allow remote control of containers without needing
# callers to have root perms?
await trio.sleep_forever()
finally:
- await cntr.cancel(stop_lambda)
+ # TODO: ensure loglevel can be set and teardown logs are
+ # reported if possible on error or cancel..
+ # XXX WARNING: currently shielding here can result in hangs
+ # on ctl-c from user.. ideally we can avoid a cancel getting
+ # consumed and not propagating whilst still doing teardown
+ # logging..
+ with trio.CancelScope(shield=True):
+ await cntr.cancel(
+ log_msg_key=conf['log_msg_key'],
+ stop_predicate=stop_pred,
+ )
async def start_ahab(
service_name: str,
endpoint: Callable[docker.DockerClient, DockerContainer],
- start_timeout: float = 1.0,
+ loglevel: str | None = 'cancel',
+ drop_root_perms: bool = True,
+
task_status: TaskStatus[
tuple[
trio.Event,
@@ -373,13 +474,12 @@ async def start_ahab(
'''
cn_ready = trio.Event()
try:
- async with tractor.open_nursery(
- loglevel='runtime',
- ) as tn:
+ async with tractor.open_nursery() as an:
- portal = await tn.start_actor(
+ portal = await an.start_actor(
service_name,
- enable_modules=[__name__]
+ enable_modules=[__name__],
+ loglevel=loglevel,
)
# TODO: we have issues with this on teardown
@@ -389,7 +489,10 @@ async def start_ahab(
# de-escalate root perms to the original user
# after the docker supervisor actor is spawned.
- if config._parent_user:
+ if (
+ drop_root_perms
+ and config._parent_user
+ ):
import pwd
os.setuid(
pwd.getpwnam(
@@ -400,7 +503,7 @@ async def start_ahab(
async with portal.open_context(
open_ahabd,
endpoint=str(NamespacePath.from_ref(endpoint)),
- start_timeout=start_timeout
+ loglevel='cancel',
) as (ctx, first):
cid, pid, cntr_config = first
diff --git a/piker/service/_daemon.py b/piker/service/_daemon.py
new file mode 100644
index 000000000..45d6cb81e
--- /dev/null
+++ b/piker/service/_daemon.py
@@ -0,0 +1,271 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+"""
+Daemon-actor spawning "endpoint-hooks".
+
+"""
+from __future__ import annotations
+from typing import (
+ Optional,
+ Callable,
+ Any,
+)
+from contextlib import (
+ asynccontextmanager as acm,
+)
+
+import tractor
+
+from ..log import (
+ get_logger,
+ get_console_log,
+)
+from ..brokers import get_brokermod
+from ._mngr import (
+ Services,
+)
+from ._actor_runtime import maybe_open_pikerd
+from ._registry import find_service
+
+log = get_logger(__name__)
+
+# `brokerd` enabled modules
+# NOTE: keeping this list as small as possible is part of our caps-sec
+# model and should be treated with utmost care!
+_data_mods = [
+ 'piker.brokers.core',
+ 'piker.brokers.data',
+ 'piker.data',
+ 'piker.data.feed',
+ 'piker.data._sampling'
+]
+
+
+@acm
+async def maybe_spawn_daemon(
+
+ service_name: str,
+ service_task_target: Callable,
+ spawn_args: dict[str, Any],
+ loglevel: Optional[str] = None,
+
+ singleton: bool = False,
+ **kwargs,
+
+) -> tractor.Portal:
+ '''
+ If no ``service_name`` daemon-actor can be found,
+ spawn one in a local subactor and return a portal to it.
+
+ If this function is called from a non-pikerd actor, the
+ spawned service will persist as long as pikerd does or
+ it is requested to be cancelled.
+
+ This can be seen as a service-starting API for remote-actor
+ clients.
+
+ '''
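+ # e.g. (sketch) this is how ``maybe_spawn_brokerd()`` below
+ # drives this for a (hypothetical) 'kraken' backend:
+ #
+ # async with maybe_spawn_daemon(
+ # 'brokerd.kraken',
+ # service_task_target=spawn_brokerd,
+ # spawn_args={'brokername': 'kraken', 'loglevel': None},
+ # ) as portal:
+ # ...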
+ if loglevel:
+ get_console_log(loglevel)
+
+ # serialize access to this section to avoid
+ # 2 or more tasks racing to create a daemon
+ lock = Services.locks[service_name]
+ await lock.acquire()
+
+ async with find_service(service_name) as portal:
+ if portal is not None:
+ lock.release()
+ yield portal
+ return
+
+ log.warning(f"Couldn't find any existing {service_name}")
+
+ # TODO: really shouldn't the actor spawning be part of the service
+ # starting method `Services.start_service()` ?
+
+ # ask root ``pikerd`` daemon to spawn the daemon we need if
+ # pikerd is not live we now become the root of the
+ # process tree
+ async with maybe_open_pikerd(
+
+ loglevel=loglevel,
+ **kwargs,
+
+ ) as pikerd_portal:
+
+ # we are the root and thus are `pikerd`
+ # so spawn the target service directly by calling
+ # the provided target routine.
+ # XXX: this assumes that the target is well formed and will
+ # do the right things to setup both a sub-actor **and** call
+ # the ``_Services`` api from above to start the top level
+ # service task for that actor.
+ started: bool
+ if pikerd_portal is None:
+ started = await service_task_target(**spawn_args)
+
+ else:
+ # tell the remote `pikerd` to start the target,
+ # the target can't return a non-serializable value
+ # since it is expected that service starting is
+ # non-blocking and the target task will persist running
+ # on `pikerd` after the client requesting its start
+ # disconnects.
+ started = await pikerd_portal.run(
+ service_task_target,
+ **spawn_args,
+ )
+
+ if started:
+ log.info(f'Service {service_name} started!')
+
+ async with tractor.wait_for_actor(service_name) as portal:
+ lock.release()
+ yield portal
+ await portal.cancel_actor()
+
+
+async def spawn_brokerd(
+
+ brokername: str,
+ loglevel: Optional[str] = None,
+ **tractor_kwargs,
+
+) -> bool:
+
+ log.info(f'Spawning {brokername} broker daemon')
+
+ brokermod = get_brokermod(brokername)
+ dname = f'brokerd.{brokername}'
+
+ extra_tractor_kwargs = getattr(brokermod, '_spawn_kwargs', {})
+ tractor_kwargs.update(extra_tractor_kwargs)
+
+ # ask `pikerd` to spawn a new sub-actor and manage it under its
+ # actor nursery
+ modpath = brokermod.__name__
+ broker_enable = [modpath]
+ for submodname in getattr(
+ brokermod,
+ '__enable_modules__',
+ [],
+ ):
+ subpath = f'{modpath}.{submodname}'
+ broker_enable.append(subpath)
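+ # e.g. (sketch) for a hypothetical 'kraken' backend exposing
+ # ``__enable_modules__ = ['api']`` this yields:
+ # ['piker.brokers.kraken', 'piker.brokers.kraken.api']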
+
+ portal = await Services.actor_n.start_actor(
+ dname,
+ enable_modules=_data_mods + broker_enable,
+ loglevel=loglevel,
+ debug_mode=Services.debug_mode,
+ **tractor_kwargs
+ )
+
+ # non-blocking setup of brokerd service nursery
+ from ..data import _setup_persistent_brokerd
+
+ await Services.start_service_task(
+ dname,
+ portal,
+ _setup_persistent_brokerd,
+ brokername=brokername,
+ )
+ return True
+
+
+@acm
+async def maybe_spawn_brokerd(
+
+ brokername: str,
+ loglevel: Optional[str] = None,
+ **kwargs,
+
+) -> tractor.Portal:
+ '''
+ Helper to spawn a brokerd service *from* a client
+ who wishes to use the sub-actor-daemon.
+
+ '''
+ async with maybe_spawn_daemon(
+
+ f'brokerd.{brokername}',
+ service_task_target=spawn_brokerd,
+ spawn_args={
+ 'brokername': brokername,
+ 'loglevel': loglevel,
+ },
+ loglevel=loglevel,
+ **kwargs,
+
+ ) as portal:
+ yield portal
+
+
+async def spawn_emsd(
+
+ loglevel: Optional[str] = None,
+ **extra_tractor_kwargs
+
+) -> bool:
+ """
+ Start the clearing engine under ``pikerd``.
+
+ """
+ log.info('Spawning emsd')
+
+ portal = await Services.actor_n.start_actor(
+ 'emsd',
+ enable_modules=[
+ 'piker.clearing._ems',
+ 'piker.clearing._client',
+ ],
+ loglevel=loglevel,
+ debug_mode=Services.debug_mode, # set by pikerd flag
+ **extra_tractor_kwargs
+ )
+
+ # non-blocking setup of clearing service
+ from ..clearing._ems import _setup_persistent_emsd
+
+ await Services.start_service_task(
+ 'emsd',
+ portal,
+ _setup_persistent_emsd,
+ )
+ return True
+
+
+@acm
+async def maybe_open_emsd(
+
+ brokername: str,
+ loglevel: Optional[str] = None,
+ **kwargs,
+
+) -> tractor._portal.Portal: # noqa
+
+ async with maybe_spawn_daemon(
+
+ 'emsd',
+ service_task_target=spawn_emsd,
+ spawn_args={'loglevel': loglevel},
+ loglevel=loglevel,
+ **kwargs,
+
+ ) as portal:
+ yield portal
diff --git a/piker/service/_mngr.py b/piker/service/_mngr.py
new file mode 100644
index 000000000..04f396af6
--- /dev/null
+++ b/piker/service/_mngr.py
@@ -0,0 +1,136 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+"""
+Daemon-service management API.
+
+"""
+from collections import defaultdict
+from typing import (
+ Callable,
+ Any,
+)
+
+import trio
+from trio_typing import TaskStatus
+import tractor
+
+from ..log import (
+ get_logger,
+)
+
+log = get_logger(__name__)
+
+
+# TODO: factor this into a ``tractor.highlevel`` extension
+# pack for the library.
+class Services:
+
+ actor_n: tractor._supervise.ActorNursery
+ service_n: trio.Nursery
+ debug_mode: bool # tractor sub-actor debug mode flag
+ service_tasks: dict[
+ str,
+ tuple[
+ trio.CancelScope,
+ tractor.Portal,
+ trio.Event,
+ ]
+ ] = {}
+ locks = defaultdict(trio.Lock)
+
+ @classmethod
+ async def start_service_task(
+ self,
+ name: str,
+ portal: tractor.Portal,
+ target: Callable,
+ **kwargs,
+
+ ) -> tuple[trio.CancelScope, tractor.Context]:
+ '''
+ Open a context in a service sub-actor, add to a stack
+ that gets unwound at ``pikerd`` teardown.
+
+ This allows for allocating long-running sub-services in our main
+ daemon and explicitly controlling their lifetimes.
+
+ '''
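+ # e.g. (sketch) as driven by ``spawn_brokerd()`` in
+ # ``._daemon``:
+ #
+ # await Services.start_service_task(
+ # dname,
+ # portal,
+ # _setup_persistent_brokerd,
+ # brokername=brokername,
+ # )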
+ async def open_context_in_task(
+ task_status: TaskStatus[
+ tuple[
+ trio.CancelScope,
+ trio.Event,
+ Any,
+ ]
+ ] = trio.TASK_STATUS_IGNORED,
+
+ ) -> Any:
+
+ with trio.CancelScope() as cs:
+ async with portal.open_context(
+ target,
+ **kwargs,
+
+ ) as (ctx, first):
+
+ # unblock once the remote context has started
+ complete = trio.Event()
+ task_status.started((cs, complete, first))
+ log.info(
+ f'`pikerd` service {name} started with value {first}'
+ )
+ try:
+ # wait on any context's return value
+ # and any final portal result from the
+ # sub-actor.
+ ctx_res = await ctx.result()
+
+ # NOTE: blocks indefinitely until cancelled
+ # either by error from the target context
+ # function or by being cancelled here by the
+ # surrounding cancel scope.
+ return (await portal.result(), ctx_res)
+
+ finally:
+ await portal.cancel_actor()
+ complete.set()
+ self.service_tasks.pop(name)
+
+ cs, complete, first = await self.service_n.start(open_context_in_task)
+
+ # store the cancel scope and portal for later cancellation or
+ # restart if needed.
+ self.service_tasks[name] = (cs, portal, complete)
+
+ return cs, first
+
+ @classmethod
+ async def cancel_service(
+ self,
+ name: str,
+
+ ) -> Any:
+ '''
+ Cancel the service task and actor for the given ``name``.
+
+ '''
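+ # e.g. (sketch) tear down both the service task and its
+ # sub-actor:
+ # await Services.cancel_service('brokerd.kraken')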
+ log.info(f'Cancelling `pikerd` service {name}')
+ cs, portal, complete = self.service_tasks[name]
+ cs.cancel()
+ await complete.wait()
+ assert name not in self.service_tasks, \
+ f'Service task for {name} not terminated?'
diff --git a/piker/service/_registry.py b/piker/service/_registry.py
new file mode 100644
index 000000000..f487e2a46
--- /dev/null
+++ b/piker/service/_registry.py
@@ -0,0 +1,144 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+"""
+Inter-actor "discovery" (protocol) layer.
+
+"""
+from __future__ import annotations
+from contextlib import (
+ asynccontextmanager as acm,
+)
+from typing import (
+ Any,
+)
+
+import tractor
+
+
+from ..log import (
+ get_logger,
+)
+
+log = get_logger(__name__)
+
+_default_registry_host: str = '127.0.0.1'
+_default_registry_port: int = 6116
+_default_reg_addr: tuple[str, int] = (
+ _default_registry_host,
+ _default_registry_port,
+)
+
+
+# NOTE: this value is set as an actor-global once the first endpoint
+# who is capable, spawns a `pikerd` service tree.
+_registry: Registry | None = None
+
+
+class Registry:
+ addr: None | tuple[str, int] = None
+
+ # TODO: table of uids to sockaddrs
+ peers: dict[
+ tuple[str, str],
+ tuple[str, int],
+ ] = {}
+
+
+_tractor_kwargs: dict[str, Any] = {}
+
+
+@acm
+async def open_registry(
+ addr: None | tuple[str, int] = None,
+ ensure_exists: bool = True,
+
+) -> tuple[str, int]:
+
+ global _tractor_kwargs
+ actor = tractor.current_actor()
+ uid = actor.uid
+ if (
+ Registry.addr is not None
+ and addr
+ ):
+ raise RuntimeError(
+ f'`{uid}` registry addr already bound @ {Registry.addr}'
+ )
+
+ was_set: bool = False
+
+ if (
+ not tractor.is_root_process()
+ and Registry.addr is None
+ ):
+ Registry.addr = actor._arb_addr
+
+ if (
+ ensure_exists
+ and Registry.addr is None
+ ):
+ raise RuntimeError(
+ f"`{uid}` registry should already exist bug doesn't?"
+ )
+
+ if (
+ Registry.addr is None
+ ):
+ was_set = True
+ Registry.addr = addr or _default_reg_addr
+
+ _tractor_kwargs['arbiter_addr'] = Registry.addr
+
+ try:
+ yield Registry.addr
+ finally:
+ # XXX: always clear the global addr if we set it so that the
+ # next (set of) calls will apply whatever new one is passed
+ # in.
+ if was_set:
+ Registry.addr = None
+
+
+@acm
+async def find_service(
+ service_name: str,
+) -> tractor.Portal | None:
+
+ async with open_registry() as reg_addr:
+ log.info(f'Scanning for service `{service_name}`')
+ # attach to existing daemon by name if possible
+ async with tractor.find_actor(
+ service_name,
+ arbiter_sockaddr=reg_addr,
+ ) as maybe_portal:
+ yield maybe_portal
+
+
+async def check_for_service(
+ service_name: str,
+
+) -> None | tuple[str, int]:
+ '''
+ Service daemon "liveness" predicate.
+
+ '''
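+ # e.g. (sketch) a client-side liveness check:
+ #
+ # sockaddr = await check_for_service('pikerd')
+ # if sockaddr is None:
+ # # no `pikerd` was found on the registry addr
+ # ...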
+ async with open_registry(ensure_exists=False) as reg_addr:
+ async with tractor.query_actor(
+ service_name,
+ arbiter_sockaddr=reg_addr,
+ ) as sockaddr:
+ return sockaddr
diff --git a/piker/data/elastic.py b/piker/service/elastic.py
similarity index 68%
rename from piker/data/elastic.py
rename to piker/service/elastic.py
index 43c6afd08..31221d570 100644
--- a/piker/data/elastic.py
+++ b/piker/service/elastic.py
@@ -15,17 +15,11 @@
# along with this program. If not, see .
from __future__ import annotations
-from contextlib import asynccontextmanager as acm
-from pprint import pformat
from typing import (
Any,
TYPE_CHECKING,
)
-import pyqtgraph as pg
-import numpy as np
-import tractor
-
if TYPE_CHECKING:
import docker
@@ -46,6 +40,9 @@
_config = {
'port': 19200,
'log_level': 'debug',
+
+ # hardcoded to our image version
+ 'version': '7.17.4',
}
@@ -65,14 +62,14 @@ def start_elasticsearch(
-itd \
--rm \
--network=host \
- --mount type=bind,source="$(pwd)"/elastic,target=/usr/share/elasticsearch/data \
+ --mount type=bind,source="$(pwd)"/elastic,\
+ target=/usr/share/elasticsearch/data \
--env "elastic_username=elastic" \
--env "elastic_password=password" \
--env "xpack.security.enabled=false" \
elastic
'''
- import docker
get_console_log('info', name=__name__)
dcntr: DockerContainer = client.containers.run(
@@ -83,27 +80,49 @@ def start_elasticsearch(
remove=True
)
- async def start_matcher(msg: str):
+ async def health_query(msg: str | None = None):
+ if (
+ msg
+ and _config['version'] in msg
+ ):
+ return True
+
try:
health = (await asks.get(
- f'http://localhost:19200/_cat/health',
+ 'http://localhost:19200/_cat/health',
params={'format': 'json'}
)).json()
+ log.info(
+ 'ElasticSearch cntr health:\n'
+ f'{health}'
+ )
except OSError:
- log.error('couldnt reach elastic container')
+ log.exception("couldn't reach elastic container")
return False
log.info(health)
return health[0]['status'] == 'green'
- async def stop_matcher(msg: str):
+ async def chk_for_closed_msg(msg: str):
return msg == 'closed'
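+ # NOTE: the 4-tuple returned below is the ``open_ahabd()``
+ # endpoint contract:
+ # (dcntr, per-cntr supervisor config, start pred, stop pred)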
return (
dcntr,
- {},
+ {
+ # apparently we're REALLY tolerant of startup latency
+ # for CI XD
+ 'startup_timeout': 240.0,
+
+ # XXX: decrease http poll period bc docker
+ # is shite at handling fast poll rates..
+ 'startup_query_period': 0.1,
+
+ 'log_msg_key': 'message',
+
+ # 'started_afunc': health_query,
+ },
# expected startup and stop msgs
- start_matcher,
- stop_matcher,
+ health_query,
+ chk_for_closed_msg,
)
diff --git a/piker/data/marketstore.py b/piker/service/marketstore.py
similarity index 97%
rename from piker/data/marketstore.py
rename to piker/service/marketstore.py
index 190667d63..5c4f90db6 100644
--- a/piker/data/marketstore.py
+++ b/piker/service/marketstore.py
@@ -26,7 +26,6 @@
from __future__ import annotations
from contextlib import asynccontextmanager as acm
from datetime import datetime
-from pprint import pformat
from typing import (
Any,
Optional,
@@ -55,7 +54,7 @@
import docker
from ._ahab import DockerContainer
-from .feed import maybe_open_feed
+from ..data.feed import maybe_open_feed
from ..log import get_logger, get_console_log
from .._profile import Profiler
@@ -63,11 +62,12 @@
log = get_logger(__name__)
-# container level config
+# ahabd-supervisor and container level config
_config = {
'grpc_listen_port': 5995,
'ws_listen_port': 5993,
'log_level': 'debug',
+ 'startup_timeout': 2,
}
_yaml_config = '''
@@ -135,7 +135,7 @@ def start_marketstore(
# create dirs when dne
if not os.path.isdir(config._config_dir):
- Path(config._config_dir).mkdir(parents=True, exist_ok=True)
+ Path(config._config_dir).mkdir(parents=True, exist_ok=True)
if not os.path.isdir(mktsdir):
os.mkdir(mktsdir)
@@ -185,7 +185,11 @@ def start_marketstore(
config_dir_mnt,
data_dir_mnt,
],
+
+ # XXX: this must be set to allow backgrounding/non-blocking
+ # usage interaction with the container's process.
detach=True,
+
# stop_signal='SIGINT',
init=True,
# remove=True,
@@ -324,7 +328,7 @@ def quote_to_marketstore_structarray(
@acm
async def get_client(
host: str = 'localhost',
- port: int = 5995
+ port: int = _config['grpc_listen_port'],
) -> MarketstoreClient:
'''
@@ -510,7 +514,6 @@ async def delete_ts(
client = self.client
syms = await client.list_symbols()
- print(syms)
if key not in syms:
raise KeyError(f'`{key}` table key not found in\n{syms}?')
@@ -627,10 +630,10 @@ async def open_storage_client(
yield Storage(client)
-async def tsdb_history_update(
- fqsn: Optional[str] = None,
-
-) -> list[str]:
+@acm
+async def open_tsdb_client(
+ fqsn: str,
+) -> Storage:
# TODO: real-time dedicated task for ensuring
# history consistency between the tsdb, shm and real-time feed..
@@ -659,7 +662,7 @@ async def tsdb_history_update(
# - https://github.com/pikers/piker/issues/98
#
profiler = Profiler(
- disabled=False, # not pg_profile_enabled(),
+ disabled=True, # not pg_profile_enabled(),
delayed=False,
)
@@ -700,14 +703,10 @@ async def tsdb_history_update(
# profiler('Finished db arrays diffs')
- syms = await storage.client.list_symbols()
- log.info(f'Existing tsdb symbol set:\n{pformat(syms)}')
- profiler(f'listed symbols {syms}')
-
- # TODO: ask if user wants to write history for detected
- # available shm buffers?
- from tractor.trionics import ipython_embed
- await ipython_embed()
+ syms = await storage.client.list_symbols()
+ # log.info(f'Existing tsdb symbol set:\n{pformat(syms)}')
+ # profiler(f'listed symbols {syms}')
+ yield storage
# for array in [to_append, to_prepend]:
# if array is None:
diff --git a/piker/testing/__init__.py b/piker/testing/__init__.py
deleted file mode 100644
index 5e3ac93ac..000000000
--- a/piker/testing/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-TEST_CONFIG_DIR_PATH = '_testing'
diff --git a/piker/ui/_annotate.py b/piker/ui/_annotate.py
index 4bad2f66b..f3eeeb074 100644
--- a/piker/ui/_annotate.py
+++ b/piker/ui/_annotate.py
@@ -18,7 +18,7 @@
Annotations for ur faces.
"""
-from typing import Callable, Optional
+from typing import Callable
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QPointF, QRectF
@@ -105,7 +105,7 @@ def __init__(
get_level: Callable[..., float],
size: float = 20,
keep_in_view: bool = True,
- on_paint: Optional[Callable] = None,
+ on_paint: Callable | None = None,
) -> None:
diff --git a/piker/ui/_app.py b/piker/ui/_app.py
index 3be073e79..9978dbe38 100644
--- a/piker/ui/_app.py
+++ b/piker/ui/_app.py
@@ -24,7 +24,7 @@
from PyQt5.QtCore import QEvent
import trio
-from .._daemon import maybe_spawn_brokerd
+from ..service import maybe_spawn_brokerd
from . import _event
from ._exec import run_qtractor
from ..data.feed import install_brokerd_search
diff --git a/piker/ui/_axes.py b/piker/ui/_axes.py
index b6fb92819..62214f60b 100644
--- a/piker/ui/_axes.py
+++ b/piker/ui/_axes.py
@@ -1,5 +1,5 @@
# piker: trading gear for hackers
-# Copyright (C) Tyler Goodlet (in stewardship for piker0)
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -20,7 +20,7 @@
"""
from __future__ import annotations
from functools import lru_cache
-from typing import Optional, Callable
+from typing import Callable
from math import floor
import numpy as np
@@ -60,7 +60,8 @@ def __init__(
**kwargs
)
- # XXX: pretty sure this makes things slower
+ # XXX: pretty sure this makes things slower!
+ # no idea why given we only move labels for the most part?
# self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
self.pi = plotitem
@@ -190,7 +191,7 @@ def __init__(
*args,
min_tick: int = 2,
title: str = '',
- formatter: Optional[Callable[[float], str]] = None,
+ formatter: Callable[[float], str] | None = None,
**kwargs
) -> None:
@@ -202,8 +203,8 @@ def __init__(
def set_title(
self,
title: str,
- view: Optional[ChartView] = None,
- color: Optional[str] = None,
+ view: ChartView | None = None,
+ color: str | None = None,
) -> Label:
'''
@@ -303,8 +304,9 @@ def _indexes_to_timestrs(
viz = chart._vizs[chart.name]
shm = viz.shm
array = shm.array
- times = array['time']
- i_0, i_l = times[0], times[-1]
+ ifield = viz.index_field
+ index = array[ifield]
+ i_0, i_l = index[0], index[-1]
# edge cases
if (
@@ -316,11 +318,13 @@ def _indexes_to_timestrs(
(indexes[0] > i_0
and indexes[-1] > i_l)
):
+ # print(f"x-label indexes empty edge case: {indexes}")
return []
- if viz.index_field == 'index':
- arr_len = times.shape[0]
+ if ifield == 'index':
+ arr_len = index.shape[0]
first = shm._first.value
+ times = array['time']
epochs = times[
list(
map(
diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py
index 78a20d9dd..7811278ba 100644
--- a/piker/ui/_chart.py
+++ b/piker/ui/_chart.py
@@ -19,9 +19,12 @@
'''
from __future__ import annotations
+from contextlib import (
+ contextmanager as cm,
+ ExitStack,
+)
from typing import (
Iterator,
- Optional,
TYPE_CHECKING,
)
@@ -102,7 +105,7 @@ def __init__(
super().__init__(parent)
- self.search: Optional[SearchWidget] = None
+ self.search: SearchWidget | None = None
self.hbox = QHBoxLayout(self)
self.hbox.setContentsMargins(0, 0, 0, 0)
@@ -116,22 +119,14 @@ def __init__(
self.hbox.addLayout(self.vbox)
- # self.toolbar_layout = QHBoxLayout()
- # self.toolbar_layout.setContentsMargins(0, 0, 0, 0)
- # self.vbox.addLayout(self.toolbar_layout)
-
- # self.init_timeframes_ui()
- # self.init_strategy_ui()
- # self.vbox.addLayout(self.hbox)
-
self._chart_cache: dict[
str,
tuple[LinkedSplits, LinkedSplits],
] = {}
- self.hist_linked: Optional[LinkedSplits] = None
- self.rt_linked: Optional[LinkedSplits] = None
- self._active_cursor: Optional[Cursor] = None
+ self.hist_linked: LinkedSplits | None = None
+ self.rt_linked: LinkedSplits | None = None
+ self._active_cursor: Cursor | None = None
# assigned in the startup func `_async_main()`
self._root_n: trio.Nursery = None
@@ -143,15 +138,18 @@ def __init__(
# and the window does not? Never right?!
# self.reg_for_resize(self)
- @property
- def linkedsplits(self) -> LinkedSplits:
- return self.rt_linked
-
- # XXX: strat loader/saver that we don't need yet.
+ # TODO: strat loader/saver that we don't need yet.
# def init_strategy_ui(self):
+ # self.toolbar_layout = QHBoxLayout()
+ # self.toolbar_layout.setContentsMargins(0, 0, 0, 0)
+ # self.vbox.addLayout(self.toolbar_layout)
# self.strategy_box = StrategyBoxWidget(self)
# self.toolbar_layout.addWidget(self.strategy_box)
+ @property
+ def linkedsplits(self) -> LinkedSplits:
+ return self.rt_linked
+
def set_chart_symbols(
self,
group_key: tuple[str], # of form .
@@ -263,7 +261,9 @@ async def load_symbols(
# last had the xlast in view, if so then shift so it's
# still in view, if the user was viewing history then
# do nothing yah?
- self.rt_linked.chart.default_view()
+ self.rt_linked.chart.main_viz.default_view(
+ do_min_bars=True,
+ )
# if a history chart instance is already up then
# set the search widget as its sidepane.
@@ -372,7 +372,7 @@ class ChartnPane(QFrame):
'''
sidepane: FieldsForm | SearchWidget
hbox: QHBoxLayout
- chart: Optional[ChartPlotWidget] = None
+ chart: ChartPlotWidget | None = None
def __init__(
self,
@@ -432,7 +432,7 @@ def __init__(
self.godwidget = godwidget
self.chart: ChartPlotWidget = None # main (ohlc) chart
- self.subplots: dict[tuple[str, ...], ChartPlotWidget] = {}
+ self.subplots: dict[str, ChartPlotWidget] = {}
self.godwidget = godwidget
# placeholder for last appended ``PlotItem``'s bottom axis.
@@ -450,7 +450,7 @@ def __init__(
# chart-local graphics state that can be passed to
# a ``graphic_update_cycle()`` call by any task wishing to
# update the UI for a given "chart instance".
- self.display_state: Optional[DisplayState] = None
+ self.display_state: DisplayState | None = None
self._symbol: Symbol = None
@@ -480,7 +480,7 @@ def symbol(self) -> Symbol:
def set_split_sizes(
self,
- prop: Optional[float] = None,
+ prop: float | None = None,
) -> None:
'''
@@ -494,7 +494,7 @@ def set_split_sizes(
prop = 3/8
h = self.height()
- histview_h = h * (6/16)
+ histview_h = h * (4/11)
h = h - histview_h
major = 1 - prop
@@ -574,11 +574,11 @@ def add_plot(
shm: ShmArray,
flume: Flume,
- array_key: Optional[str] = None,
+ array_key: str | None = None,
style: str = 'line',
_is_main: bool = False,
- sidepane: Optional[QWidget] = None,
+ sidepane: QWidget | None = None,
draw_kwargs: dict = {},
**cpw_kwargs,
@@ -634,6 +634,7 @@ def add_plot(
axis.pi = cpw.plotItem
cpw.hideAxis('left')
+ # cpw.removeAxis('left')
cpw.hideAxis('bottom')
if (
@@ -750,12 +751,12 @@ def add_plot(
# NOTE: back-link the new sub-chart to trigger y-autoranging in
# the (ohlc parent) main chart for this linked set.
- if self.chart:
- main_viz = self.chart.get_viz(self.chart.name)
- self.chart.view.enable_auto_yrange(
- src_vb=cpw.view,
- viz=main_viz,
- )
+ # if self.chart:
+ # main_viz = self.chart.get_viz(self.chart.name)
+ # self.chart.view.enable_auto_yrange(
+ # src_vb=cpw.view,
+ # viz=main_viz,
+ # )
graphics = viz.graphics
data_key = viz.name
@@ -793,7 +794,7 @@ def add_plot(
def resize_sidepanes(
self,
- from_linked: Optional[LinkedSplits] = None,
+ from_linked: LinkedSplits | None = None,
) -> None:
'''
@@ -816,11 +817,17 @@ def resize_sidepanes(
self.chart.sidepane.setMinimumWidth(sp_w)
-# TODO: we should really drop using this type and instead just
-# write our own wrapper around `PlotItem`..
+# TODO: a general rework of this widget-interface:
+# - we should really drop using this type and instead just lever our
+# own override of `PlotItem`..
+# - possibly rename to class -> MultiChart(pg.PlotWidget):
+# where the widget is responsible for containing management
+# harness for multi-Viz "view lists" and their associated mode-panes
+# (fsp chain, order ctl, feed queue-ing params, actor ctl, etc).
+
class ChartPlotWidget(pg.PlotWidget):
'''
- ``GraphicsView`` subtype containing a ``.plotItem: PlotItem`` as well
+ ``PlotWidget`` subtype containing a ``.plotItem: PlotItem`` as well
as a ``.pi_overlay: PlotItemOverlay`` which helps manage and overlay
flow graphics views across multiple composed view boxes.
@@ -861,7 +868,7 @@ def __init__(
# TODO: load from config
use_open_gl: bool = False,
- static_yrange: Optional[tuple[float, float]] = None,
+ static_yrange: tuple[float, float] | None = None,
parent=None,
**kwargs,
@@ -876,7 +883,7 @@ def __init__(
# NOTE: must be set bfore calling ``.mk_vb()``
self.linked = linkedsplits
- self.sidepane: Optional[FieldsForm] = None
+ self.sidepane: FieldsForm | None = None
# source of our custom interactions
self.cv = self.mk_vb(name)
@@ -1010,36 +1017,10 @@ def marker_right_points(
# )
return line_end, marker_right, r_axis_x
- def default_view(
- self,
- bars_from_y: int = int(616 * 3/8),
- y_offset: int = 0,
- do_ds: bool = True,
-
- ) -> None:
- '''
- Set the view box to the "default" startup view of the scene.
-
- '''
- viz = self.get_viz(self.name)
-
- if not viz:
- log.warning(f'`Viz` for {self.name} not loaded yet?')
- return
-
- viz.default_view(
- bars_from_y,
- y_offset,
- do_ds,
- )
-
- if do_ds:
- self.linked.graphics_cycle()
-
def increment_view(
self,
datums: int = 1,
- vb: Optional[ChartView] = None,
+ vb: ChartView | None = None,
) -> None:
'''
@@ -1057,6 +1038,7 @@ def increment_view(
# breakpoint()
return
+ # should trigger broadcast on all overlays right?
view.setXRange(
min=l + x_shift,
max=r + x_shift,
@@ -1069,8 +1051,8 @@ def increment_view(
def overlay_plotitem(
self,
name: str,
- index: Optional[int] = None,
- axis_title: Optional[str] = None,
+ index: int | None = None,
+ axis_title: str | None = None,
axis_side: str = 'right',
axis_kwargs: dict = {},
@@ -1119,6 +1101,15 @@ def overlay_plotitem(
link_axes=(0,),
)
+ # hide all axes not named by ``axis_side``
+ for axname in (
+ ({'bottom'} | allowed_sides) - {axis_side}
+ ):
+ try:
+ pi.hideAxis(axname)
+ except Exception:
+ pass
+
# add axis title
# TODO: do we want this API to still work?
# raxis = pi.getAxis('right')
@@ -1134,11 +1125,11 @@ def draw_curve(
shm: ShmArray,
flume: Flume,
- array_key: Optional[str] = None,
+ array_key: str | None = None,
overlay: bool = False,
- color: Optional[str] = None,
+ color: str | None = None,
add_label: bool = True,
- pi: Optional[pg.PlotItem] = None,
+ pi: pg.PlotItem | None = None,
step_mode: bool = False,
is_ohlc: bool = False,
add_sticky: None | str = 'right',
@@ -1197,6 +1188,10 @@ def draw_curve(
)
pi.viz = viz
+ # so that viewboxes are associated 1-to-1 with
+ # their parent plotitem
+ pi.vb._viz = viz
+
assert isinstance(viz.shm, ShmArray)
# TODO: this probably needs its own method?
@@ -1209,17 +1204,21 @@ def draw_curve(
pi = overlay
if add_sticky:
- axis = pi.getAxis(add_sticky)
- if pi.name not in axis._stickies:
- if pi is not self.plotItem:
- overlay = self.pi_overlay
- assert pi in overlay.overlays
- overlay_axis = overlay.get_axis(
- pi,
- add_sticky,
- )
- assert overlay_axis is axis
+ if pi is not self.plotItem:
+ # overlay = self.pi_overlay
+ # assert pi in overlay.overlays
+ overlay = self.pi_overlay
+ assert pi in overlay.overlays
+ axis = overlay.get_axis(
+ pi,
+ add_sticky,
+ )
+
+ else:
+ axis = pi.getAxis(add_sticky)
+
+ if pi.name not in axis._stickies:
# TODO: UGH! just make this not here! we should
# be making the sticky from code which has access
@@ -1263,7 +1262,7 @@ def draw_ohlc(
shm: ShmArray,
flume: Flume,
- array_key: Optional[str] = None,
+ array_key: str | None = None,
**draw_curve_kwargs,
) -> Viz:
@@ -1280,24 +1279,6 @@ def draw_ohlc(
**draw_curve_kwargs,
)
- def update_graphics_from_flow(
- self,
- graphics_name: str,
- array_key: Optional[str] = None,
-
- **kwargs,
-
- ) -> pg.GraphicsObject:
- '''
- Update the named internal graphics from ``array``.
-
- '''
- viz = self._vizs[array_key or graphics_name]
- return viz.update_graphics(
- array_key=array_key,
- **kwargs,
- )
-
# TODO: pretty sure we can just call the cursor
# directly not? i don't wee why we need special "signal proxies"
# for this lul..
@@ -1310,43 +1291,6 @@ def leaveEvent(self, ev): # noqa
self.sig_mouse_leave.emit(self)
self.scene().leaveEvent(ev)
- def maxmin(
- self,
- name: Optional[str] = None,
- bars_range: Optional[tuple[
- int, int, int, int, int, int
- ]] = None,
-
- ) -> tuple[float, float]:
- '''
- Return the max and min y-data values "in view".
-
- If ``bars_range`` is provided use that range.
-
- '''
- # TODO: here we should instead look up the ``Viz.shm.array``
- # and read directly from shm to avoid copying to memory first
- # and then reading it again here.
- viz_key = name or self.name
- viz = self._vizs.get(viz_key)
- if viz is None:
- log.error(f"viz {viz_key} doesn't exist in chart {self.name} !?")
- return 0, 0
-
- res = viz.maxmin()
-
- if (
- res is None
- ):
- mxmn = 0, 0
- if not self._on_screen:
- self.default_view(do_ds=False)
- self._on_screen = True
- else:
- x_range, read_slc, mxmn = res
-
- return mxmn
-
def get_viz(
self,
key: str,
@@ -1360,3 +1304,32 @@ def get_viz(
@property
def main_viz(self) -> Viz:
return self.get_viz(self.name)
+
+ def iter_vizs(self) -> Iterator[Viz]:
+ return iter(self._vizs.values())
+
+ @cm
+ def reset_graphics_caches(self) -> None:
+ '''
+ Reset all managed ``Viz`` (flow) graphics objects'
+ Qt cache modes (to ``NoCache`` mode) on enter and
+ restore on exit.
+
+ '''
+ with ExitStack() as stack:
+ for viz in self.iter_vizs():
+ stack.enter_context(
+ viz.graphics.reset_cache(),
+ )
+
+ # also reset any downsampled alt-graphics objects which
+ # might be active.
+ dsg = viz.ds_graphics
+ if dsg:
+ stack.enter_context(
+ dsg.reset_cache(),
+ )
+ try:
+ yield
+ finally:
+ stack.close()
diff --git a/piker/ui/_cursor.py b/piker/ui/_cursor.py
index 8c358c3f2..79df305b7 100644
--- a/piker/ui/_cursor.py
+++ b/piker/ui/_cursor.py
@@ -1,5 +1,5 @@
# piker: trading gear for hackers
-# Copyright (C) Tyler Goodlet (in stewardship for piker0)
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -21,7 +21,6 @@
from __future__ import annotations
from functools import partial
from typing import (
- Optional,
Callable,
TYPE_CHECKING,
)
@@ -38,7 +37,10 @@
_font_small,
_font,
)
-from ._axes import YAxisLabel, XAxisLabel
+from ._axes import (
+ YAxisLabel,
+ XAxisLabel,
+)
from ..log import get_logger
if TYPE_CHECKING:
@@ -167,7 +169,7 @@ def __init__(
anchor_at: str = ('top', 'right'),
justify_text: str = 'left',
- font_size: Optional[int] = None,
+ font_size: int | None = None,
) -> None:
@@ -338,7 +340,7 @@ def __init__(
self.linked = linkedsplits
self.graphics: dict[str, pg.GraphicsObject] = {}
- self.xaxis_label: Optional[XAxisLabel] = None
+ self.xaxis_label: XAxisLabel | None = None
self.always_show_xlabel: bool = True
self.plots: list['PlotChartWidget'] = [] # type: ignore # noqa
self.active_plot = None
diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py
index c9ebebcd3..5442d3471 100644
--- a/piker/ui/_curve.py
+++ b/piker/ui/_curve.py
@@ -19,7 +19,7 @@
"""
from contextlib import contextmanager as cm
-from typing import Optional, Callable
+from typing import Callable
import numpy as np
import pyqtgraph as pg
@@ -86,7 +86,7 @@ def __init__(
# line styling
color: str = 'bracket',
last_step_color: str | None = None,
- fill_color: Optional[str] = None,
+ fill_color: str | None = None,
style: str = 'solid',
**kwargs
@@ -158,14 +158,37 @@ def x_last(self) -> float | None:
drawn yet, ``None``.
'''
- return self._last_line.x1() if self._last_line else None
+ if self._last_line:
+ return self._last_line.x1()
+
+ return None
+
+ # XXX: due to a variety of weird jitter bugs and "smearing"
+ # artifacts when click-drag panning and viewing history time series,
+ # we offer this ctx-mngr interface to allow temporarily disabling
+ # Qt's graphics caching mode; this is now currently used from
+ # ``ChartView.start/signal_ic()`` methods which also disable the
+ # rt-display loop when the user is moving around a view.
+ @cm
+ def reset_cache(self) -> None:
+ try:
+ none = QGraphicsItem.NoCache
+ log.debug(
+ f'{self._name} -> CACHE DISABLE: {none}'
+ )
+ self.setCacheMode(none)
+ yield
+ finally:
+ mode = self.cache_mode
+ log.debug(f'{self._name} -> CACHE ENABLE {mode}')
+ self.setCacheMode(mode)
class Curve(FlowGraphic):
'''
A faster, simpler, append friendly version of
``pyqtgraph.PlotCurveItem`` built for highly customizable real-time
- updates.
+ updates; a graphics object to render a simple "line" plot.
This type is a much stripped down version of a ``pyqtgraph`` style
"graphics object" in the sense that the internal lower level
@@ -191,14 +214,14 @@ class Curve(FlowGraphic):
'''
# TODO: can we remove this?
- # sub_br: Optional[Callable] = None
+ # sub_br: Callable | None = None
def __init__(
self,
*args,
# color: str = 'default_lightest',
- # fill_color: Optional[str] = None,
+ # fill_color: str | None = None,
# style: str = 'solid',
**kwargs
@@ -248,12 +271,6 @@ def clear(self):
self.fast_path.clear()
# self.fast_path = None
- @cm
- def reset_cache(self) -> None:
- self.setCacheMode(QtWidgets.QGraphicsItem.NoCache)
- yield
- self.setCacheMode(QGraphicsItem.DeviceCoordinateCache)
-
def boundingRect(self):
'''
Compute and then cache our rect.
@@ -378,7 +395,6 @@ def draw_last_datum(
) -> None:
# default line draw last call
- # with self.reset_cache():
x = src_data[index_field]
y = src_data[array_key]
@@ -406,10 +422,20 @@ def draw_last_datum(
# element such that the current datum in view can be shown
# (via it's max / min) even when highly zoomed out.
class FlattenedOHLC(Curve):
+ '''
+ More or less the exact same as a standard line ``Curve`` above
+ but meant to handle a traced-and-downsampled OHLC time series.
+ _
+ _| | _
+ |_ | |_ | |
+ _| => |_| |
+ | |
+ |_ |_
+
+ The main implementation difference is that ``.draw_last_datum()``
+ expects an underlying OHLC array for the ``src_data`` input.
- # avoids strange dragging/smearing artifacts when panning..
- cache_mode: int = QGraphicsItem.NoCache
-
+ '''
def draw_last_datum(
self,
path: QPainterPath,
@@ -434,7 +460,19 @@ def draw_last_datum(
class StepCurve(Curve):
+ '''
+ A familiar rectangle-with-y-height-per-datum type curve:
+ ||
+ || ||
+ || || ||||
+ _||_||_||_||||_ where each datum's y-value is drawn as
+ a nearly full rectangle, each "level" spans some x-step size.
+
+ This is most often used for vlm and option OI style curves and/or
+ the very popular "bar chart".
+
+ '''
def declare_paintables(
self,
) -> None:
diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py
index 1e798f68f..3c686619b 100644
--- a/piker/ui/_dataviz.py
+++ b/piker/ui/_dataviz.py
@@ -19,17 +19,20 @@
'''
from __future__ import annotations
+from functools import lru_cache
from math import (
ceil,
floor,
)
from typing import (
- Optional,
Literal,
TYPE_CHECKING,
)
-import msgspec
+from msgspec import (
+ Struct,
+ field,
+)
import numpy as np
import pyqtgraph as pg
from PyQt5.QtCore import QLineF
@@ -225,15 +228,51 @@ def render_baritems(
_sample_rates: set[float] = {1, 60}
-class Viz(msgspec.Struct): # , frozen=True):
+class ViewState(Struct):
+ '''
+ Indexing objects representing the current view x-range -> y-range.
+
+ '''
+ # (xl, xr) "input" view range in x-domain
+ xrange: tuple[
+ float | int,
+ float | int
+ ] | None = None
+
+ # TODO: cache the (ixl, ixr) read_slc-into-.array style slice index?
+
+ # (ymn, ymx) "output" min and max in viewed y-codomain
+ yrange: tuple[
+ float | int,
+ float | int
+ ] | None = None
+
+ # last in view ``ShmArray.array[read_slc]`` data
+ in_view: np.ndarray | None = None
+
+
+class Viz(Struct):
'''
(Data) "Visualization" compound type which wraps a real-time
shm array stream with displayed graphics (curves, charts)
for high level access and control as well as efficient incremental
- update.
+ update, oriented around the idea of a "view state".
+
+ The (backend) intention for this interface and type is to
+ eventually be capable of shm-passing of incrementally updated
+ graphics stream data, thus providing a cross-actor solution to
+ sharing UI-related update state, potentially in a (compressed)
+ binary-interchange format.
- The intention is for this type to eventually be capable of shm-passing
- of incrementally updated graphics stream data between actors.
+ Further, from an interaction-triggers-view-in-UI perspective, this type
+ operates as a transform:
+ (x_left, x_right) -> output metrics {ymn, ymx, uppx, ...}
+
+ wherein each x-domain range maps to some output set of (graphics
+ related) visualization metrics. In further documentation we often
+ refer to this abstraction as a visualization curve: Ci. Each Ci is
+ considered a function which maps an x-range (input view range) to
+ a multi-variate (metrics) output.
'''
name: str
@@ -242,13 +281,17 @@ class Viz(msgspec.Struct): # , frozen=True):
flume: Flume
graphics: Curve | BarItems
- # for tracking y-mn/mx for y-axis auto-ranging
- yrange: tuple[float, float] = None
+ vs: ViewState = field(default_factory=ViewState)
+
+ # last calculated y-mn/mx from m4 downsample code, this
+ # is updated in the body of `Renderer.render()`.
+ ds_yrange: tuple[float, float] | None = None
+ yrange: tuple[float, float] | None = None
# in some cases a viz may want to change its
# graphical "type" or, "form" when downsampling, to
# start this is only ever an interpolation line.
- ds_graphics: Optional[Curve] = None
+ ds_graphics: Curve | None = None
is_ohlc: bool = False
render: bool = True # toggle for display loop
@@ -264,7 +307,7 @@ class Viz(msgspec.Struct): # , frozen=True):
] = 'time'
- # downsampling state
+ # TODO: maybe compound this into a downsampling state type?
_last_uppx: float = 0
_in_ds: bool = False
_index_step: float | None = None
@@ -282,20 +325,44 @@ class Viz(msgspec.Struct): # , frozen=True):
tuple[float, float],
] = {}
+ # cache of median calcs from input read slice hashes
+ # see `.median()`
+ _meds: dict[
+ int,
+ float,
+ ] = {}
+
+ # to make lru_cache-ing work, see
+ # https://docs.python.org/3/faq/programming.html#how-do-i-cache-method-calls
+ def __eq__(self, other):
+ return self._shm._token == other._shm._token
+
+ def __hash__(self):
+ return hash(self._shm._token)
+
@property
def shm(self) -> ShmArray:
return self._shm
@property
def index_field(self) -> str:
+ '''
+ The column name as ``str`` in the underlying ``._shm: ShmArray``
+ which will deliver the "index" array.
+
+ '''
return self._index_field
def index_step(
self,
reset: bool = False,
-
) -> float:
+ '''
+ Return the size between sample steps in the units of the
+ x-domain, normally either an ``int`` array index size or an
+ epoch time in seconds.
+ '''
# attempt to detect the best step size by scanning a sample of
# the source data.
if self._index_step is None:
@@ -378,7 +445,7 @@ def maxmin(
# TODO: hash the slice instead maybe?
# https://stackoverflow.com/a/29980872
- lbar, rbar = ixrng = round(x_range[0]), round(x_range[1])
+ ixrng = lbar, rbar = round(x_range[0]), round(x_range[1])
if use_caching:
cached_result = self._mxmns.get(ixrng)
@@ -389,6 +456,7 @@ def maxmin(
f'{ixrng} -> {cached_result}'
)
read_slc, mxmn = cached_result
+ self.vs.yrange = mxmn
return (
ixrng,
read_slc,
@@ -421,8 +489,8 @@ def maxmin(
)
return None
- elif self.yrange:
- mxmn = self.yrange
+ elif self.ds_yrange:
+ mxmn = self.ds_yrange
if do_print:
print(
f'{self.name} M4 maxmin:\n'
@@ -455,6 +523,7 @@ def maxmin(
# cache result for input range
assert mxmn
self._mxmns[ixrng] = (read_slc, mxmn)
+ self.vs.yrange = mxmn
profiler(f'yrange mxmn cacheing: {x_range} -> {mxmn}')
return (
ixrng,
@@ -473,20 +542,11 @@ def view_range(self) -> tuple[int, int]:
vr.right(),
)
- def bars_range(self) -> tuple[int, int, int, int]:
- '''
- Return a range tuple for the left-view, left-datum, right-datum
- and right-view x-indices.
-
- '''
- l, start, datum_start, datum_stop, stop, r = self.datums_range()
- return l, datum_start, datum_stop, r
-
def datums_range(
self,
view_range: None | tuple[float, float] = None,
index_field: str | None = None,
- array: None | np.ndarray = None,
+ array: np.ndarray | None = None,
) -> tuple[
int, int, int, int, int, int
@@ -499,42 +559,47 @@ def datums_range(
index_field: str = index_field or self.index_field
if index_field == 'index':
- l, r = round(l), round(r)
+ l: int = round(l)
+ r: int = round(r)
if array is None:
array = self.shm.array
index = array[index_field]
- first = floor(index[0])
- last = ceil(index[-1])
-
- # first and last datums in view determined by
- # l / r view range.
- leftmost = floor(l)
- rightmost = ceil(r)
+ first: int = floor(index[0])
+ last: int = ceil(index[-1])
# invalid view state
if (
r < l
or l < 0
or r < 0
- or (l > last and r > last)
+ or (
+ l > last
+ and r > last
+ )
):
- leftmost = first
- rightmost = last
+ leftmost: int = first
+ rightmost: int = last
+
else:
+ # determine first and last datums in view determined by
+ # l -> r view range.
rightmost = max(
- min(last, rightmost),
+ min(last, ceil(r)),
first,
)
leftmost = min(
- max(first, leftmost),
+ max(first, floor(l)),
last,
rightmost - 1,
)
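+ # e.g. (sketch) with a data index span of [0, 100] and
+ # a view range (l, r) = (50.2, 120.0):
+ # rightmost = max(min(100, ceil(120.0)), 0) -> 100
+ # leftmost = min(max(0, floor(50.2)), 100, 99) -> 50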
- assert leftmost < rightmost
+ # sanity
+ # assert leftmost < rightmost
+
+ self.vs.xrange = leftmost, rightmost
return (
l, # left x-in-view
@@ -547,7 +612,7 @@ def datums_range(
def read(
self,
- array_field: Optional[str] = None,
+ array_field: str | None = None,
index_field: str | None = None,
profiler: None | Profiler = None,
@@ -563,11 +628,9 @@ def read(
'''
index_field: str = index_field or self.index_field
- vr = l, r = self.view_range()
# readable data
array = self.shm.array
-
if profiler:
profiler('self.shm.array READ')
@@ -579,7 +642,6 @@ def read(
ilast,
r,
) = self.datums_range(
- view_range=vr,
index_field=index_field,
array=array,
)
@@ -595,17 +657,21 @@ def read(
array,
start_t=lbar,
stop_t=rbar,
+ step=self.index_step(),
)
# TODO: maybe we should return this from the slicer call
# above?
in_view = array[read_slc]
if in_view.size:
+ self.vs.in_view = in_view
abs_indx = in_view['index']
abs_slc = slice(
int(abs_indx[0]),
int(abs_indx[-1]),
)
+ else:
+ self.vs.in_view = None
if profiler:
profiler(
@@ -626,10 +692,11 @@ def read(
# BUT the ``in_view`` slice DOES..
read_slc = slice(lbar_i, rbar_i)
in_view = array[lbar_i: rbar_i + 1]
+ self.vs.in_view = in_view
# in_view = array[lbar_i-1: rbar_i+1]
-
# XXX: same as ^
# to_draw = array[lbar - ifirst:(rbar - ifirst) + 1]
+
if profiler:
profiler('index arithmetic for slicing')
@@ -664,8 +731,8 @@ def update_graphics(
pg.GraphicsObject,
]:
'''
- Read latest datums from shm and render to (incrementally)
- render to graphics.
+ Read latest datums from shm and (incrementally) render to
+ graphics.
'''
profiler = Profiler(
@@ -955,9 +1022,11 @@ def draw_last(
def default_view(
self,
- bars_from_y: int = int(616 * 3/8),
+ min_bars_from_y: int = int(616 * 4/11),
y_offset: int = 0, # in datums
+
do_ds: bool = True,
+ do_min_bars: bool = False,
) -> None:
'''
@@ -1013,12 +1082,10 @@ def default_view(
data_diff = last_datum - first_datum
rl_diff = vr - vl
rescale_to_data: bool = False
- # new_uppx: float = 1
if rl_diff > data_diff:
rescale_to_data = True
rl_diff = data_diff
- new_uppx: float = data_diff / self.px_width()
# orient by offset from the y-axis including
# space to compensate for the L1 labels.
@@ -1027,17 +1094,29 @@ def default_view(
offset = l1_offset
- if (
- rescale_to_data
- ):
+ if rescale_to_data:
+ new_uppx: float = data_diff / self.px_width()
offset = (offset / uppx) * new_uppx
else:
offset = (y_offset * step) + uppx*step
+ # NOTE: if we are in the midst of start-up and a bunch of
+ # widgets are spawning/rendering concurrently, it's likely the
+ # label size above `l1_offset` won't have yet fully rendered.
+ # Here we try to compensate for that to ensure at least a static
+ # bar gap between the last datum and the y-axis.
+ if (
+ do_min_bars
+ and offset <= (6 * step)
+ ):
+ offset = 6 * step
+
# align right side of view to the rightmost datum + the selected
# offset from above.
- r_reset = (self.graphics.x_last() or last_datum) + offset
+ r_reset = (
+ self.graphics.x_last() or last_datum
+ ) + offset
# no data is in view so check for the only 2 sane cases:
# - entire view is LEFT of data
@@ -1062,12 +1141,20 @@ def default_view(
else:
log.warning(f'Unknown view state {vl} -> {vr}')
return
- # raise RuntimeError(f'Unknown view state {vl} -> {vr}')
-
else:
# maintain the l->r view distance
l_reset = r_reset - rl_diff
+ if (
+ do_min_bars
+ and (r_reset - l_reset) < min_bars_from_y
+ ):
+ l_reset = (
+ (r_reset + offset)
+ -
+ min_bars_from_y * step
+ )
+
# remove any custom user yrange settings
if chartw._static_yrange == 'axis':
chartw._static_yrange = None
@@ -1079,9 +1166,7 @@ def default_view(
)
if do_ds:
- # view.interaction_graphics_cycle()
- view.maybe_downsample_graphics()
- view._set_yrange(viz=self)
+ view.interact_graphics_cycle()
def incr_info(
self,
@@ -1236,3 +1321,152 @@ def px_width(self) -> float:
vr, 0,
)
).length()
+
+ @lru_cache(maxsize=6116)
+ def median_from_range(
+ self,
+ start: int,
+ stop: int,
+
+ ) -> float:
+ in_view = self.shm.array[start:stop]
+ if self.is_ohlc:
+ return np.median(in_view['close'])
+ else:
+ return np.median(in_view[self.name])
+
+ @lru_cache(maxsize=6116)
+ def _dispersion(
+ self,
+ # xrange: tuple[float, float],
+ ymn: float,
+ ymx: float,
+ yref: float,
+
+ ) -> tuple[float, float]:
+ return (
+ (ymx - yref) / yref,
+ (ymn - yref) / yref,
+ )
+
+ def disp_from_range(
+ self,
+ xrange: tuple[float, float] | None = None,
+ yref: float | None = None,
+ method: Literal[
+ 'up',
+ 'down',
+ 'full', # both sides
+ 'both', # both up and down as separate scalars
+
+ ] = 'full',
+
+ ) -> float | tuple[float, float] | None:
+ '''
+ Return a dispersion metric referenced from an optionally
+ provided ``yref`` or the left-most datum level by default.
+
+ '''
+ vs = self.vs
+ yrange = vs.yrange
+ if yrange is None:
+ return None
+
+ ymn, ymx = yrange
+ key = 'open' if self.is_ohlc else self.name
+ yref = yref or vs.in_view[0][key]
+ # xrange = xrange or vs.xrange
+
+ # call into the lru_cache-d sigma calculator method
+ r_up, r_down = self._dispersion(ymn, ymx, yref)
+ match method:
+ case 'full':
+ return r_up - r_down
+ case 'up':
+ return r_up
+ case 'down':
+ return r_down
+ case 'both':
+ return r_up, r_down
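+ # e.g. (sketch) with yref=100, (ymn, ymx)=(95, 110):
+ # r_up = (110 - 100) / 100 -> 0.10
+ # r_down = (95 - 100) / 100 -> -0.05
+ # 'full' -> r_up - r_down = 0.15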
+
+ # @lru_cache(maxsize=6116)
+ def i_from_t(
+ self,
+ t: float,
+ return_y: bool = False,
+
+ ) -> int | tuple[int, float]:
+
+ istart = slice_from_time(
+ self.vs.in_view,
+ start_t=t,
+ stop_t=t,
+ step=self.index_step(),
+ ).start
+
+ if not return_y:
+ return istart
+
+ vs = self.vs
+ arr = vs.in_view
+ key = 'open' if self.is_ohlc else self.name
+ yref = arr[istart][key]
+ return istart, yref
+
+ def scalars_from_index(
+ self,
+ xref: float | None = None,
+
+ ) -> tuple[
+ int,
+ float,
+ float,
+ float,
+ ] | None:
+ '''
+ Calculate and deliver the log-returns scalars specifically
+ according to y-data supported on this ``Viz``'s underlying
+ x-domain data range from ``xref`` -> ``.vs.xrange[1]``.
+
+ The main use case for this method (currently) is to generate
+ scalars which will allow calculating the required y-range for
+ some "pinned" curve to be aligned *from* the ``xref`` time
+ stamped datum *to* the curve rendered by THIS viz.
+
+ '''
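+ # e.g. (sketch) with yref (the datum at ``xref``) = 100 and
+ # ``.vs.yrange = (95, 110)`` this returns:
+ # (read_slc.start, 100, 0.10, -0.05)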
+ vs = self.vs
+ arr = vs.in_view
+
+ # TODO: make this work by parametrizing over input
+ # .vs.xrange input for caching?
+ # read_slc_start = self.i_from_t(xref)
+
+ read_slc = slice_from_time(
+ arr=self.vs.in_view,
+ start_t=xref,
+ stop_t=vs.xrange[1],
+ step=self.index_step(),
+ )
+ key = 'open' if self.is_ohlc else self.name
+
+ # NOTE: old code, it's no faster right?
+ # read_slc_start = read_slc.start
+ # yref = arr[read_slc_start][key]
+
+ read = arr[read_slc][key]
+ if not read.size:
+ return None
+
+ yref = read[0]
+ ymn, ymx = self.vs.yrange
+ # print(
+ # f'Viz[{self.name}].scalars_from_index(xref={xref})\n'
+ # f'read_slc: {read_slc}\n'
+ # f'ymnmx: {(ymn, ymx)}\n'
+ # )
+ return (
+ read_slc.start,
+ yref,
+ (ymx - yref) / yref,
+ (ymn - yref) / yref,
+ )
diff --git a/piker/ui/_display.py b/piker/ui/_display.py
index c934f089d..3da338094 100644
--- a/piker/ui/_display.py
+++ b/piker/ui/_display.py
@@ -21,18 +21,18 @@
graphics update methods via our custom ``pyqtgraph`` charting api.
'''
-from functools import partial
import itertools
from math import floor
import time
from typing import (
- Optional,
Any,
+ TYPE_CHECKING,
)
import tractor
import trio
import pyqtgraph as pg
+# import pendulum
from msgspec import field
@@ -82,6 +82,9 @@
from ..log import get_logger
from .._profile import Profiler
+if TYPE_CHECKING:
+ from ._interaction import ChartView
+
log = get_logger(__name__)
@@ -146,12 +149,11 @@ def multi_maxmin(
profiler(f'vlm_viz.maxmin({read_slc})')
return (
- mx,
-
# enforcing price can't be negative?
# TODO: do we even need this?
max(mn, 0),
+ mx,
mx_vlm_in_view, # vlm max
)
@@ -183,29 +185,23 @@ class DisplayState(Struct):
# misc state tracking
vars: dict[str, Any] = field(
default_factory=lambda: {
- 'tick_margin': 0,
'i_last': 0,
'i_last_append': 0,
'last_mx_vlm': 0,
- 'last_mx': 0,
- 'last_mn': 0,
}
)
hist_vars: dict[str, Any] = field(
default_factory=lambda: {
- 'tick_margin': 0,
'i_last': 0,
'i_last_append': 0,
'last_mx_vlm': 0,
- 'last_mx': 0,
- 'last_mn': 0,
}
)
globalz: None | dict[str, Any] = None
- vlm_chart: Optional[ChartPlotWidget] = None
- vlm_sticky: Optional[YAxisLabel] = None
+ vlm_chart: ChartPlotWidget | None = None
+ vlm_sticky: YAxisLabel | None = None
wap_in_history: bool = False
@@ -261,7 +257,10 @@ async def increment_history_view(
profiler('`hist Viz.update_graphics()` call')
if liv:
- hist_viz.plot.vb._set_yrange(viz=hist_viz)
+ hist_viz.plot.vb.interact_graphics_cycle(
+ do_linked_charts=False,
+ do_overlay_scaling=True, # always overlayT slow chart
+ )
profiler('hist chart yrange view')
# check if tread-in-place view x-shift is needed
@@ -351,8 +350,8 @@ async def graphics_update_loop(
vlm_viz = vlm_chart._vizs.get('volume') if vlm_chart else None
(
- last_mx,
last_mn,
+ last_mx,
last_mx_vlm,
) = multi_maxmin(
None,
@@ -379,9 +378,6 @@ async def graphics_update_loop(
# levels this might be dark volume we need to
# present differently -> likely dark vlm
- tick_size = symbol.tick_size
- tick_margin = 3 * tick_size
-
fast_chart.show()
last_quote_s = time.time()
@@ -389,7 +385,6 @@ async def graphics_update_loop(
'fqsn': fqsn,
'godwidget': godwidget,
'quotes': {},
- # 'maxmin': maxmin,
'flume': flume,
@@ -406,12 +401,11 @@ async def graphics_update_loop(
'l1': l1,
'vars': {
- 'tick_margin': tick_margin,
'i_last': 0,
'i_last_append': 0,
'last_mx_vlm': last_mx_vlm,
- 'last_mx': last_mx,
- 'last_mn': last_mn,
+ # 'last_mx': last_mx,
+ # 'last_mn': last_mn,
},
'globalz': globalz,
})
@@ -422,7 +416,9 @@ async def graphics_update_loop(
ds.vlm_chart = vlm_chart
ds.vlm_sticky = vlm_sticky
- fast_chart.default_view()
+ fast_chart.main_viz.default_view(
+ do_min_bars=True,
+ )
# ds.hist_vars.update({
# 'i_last_append': 0,
@@ -474,7 +470,7 @@ async def graphics_update_loop(
fast_chart.pause_all_feeds()
continue
- ic = fast_chart.view._ic
+ ic = fast_chart.view._in_interact
if ic:
fast_chart.pause_all_feeds()
print(f'{fqsn} PAUSING DURING INTERACTION')
@@ -494,7 +490,7 @@ def graphics_update_cycle(
wap_in_history: bool = False,
trigger_all: bool = False, # flag used by prepend history updates
- prepend_update_index: Optional[int] = None,
+ prepend_update_index: int | None = None,
) -> None:
@@ -517,7 +513,7 @@ def graphics_update_cycle(
chart = ds.chart
vlm_chart = ds.vlm_chart
- varz = ds.vars
+ # varz = ds.vars
l1 = ds.l1
flume = ds.flume
ohlcv = flume.rt_shm
@@ -527,8 +523,6 @@ def graphics_update_cycle(
main_viz = ds.viz
index_field = main_viz.index_field
- tick_margin = varz['tick_margin']
-
(
uppx,
liv,
@@ -547,35 +541,37 @@ def graphics_update_cycle(
# them as an additional graphic.
clear_types = _tick_groups['clears']
- mx = varz['last_mx']
- mn = varz['last_mn']
- mx_vlm_in_view = varz['last_mx_vlm']
+ # TODO: fancier y-range sorting..
+ # https://github.com/pikers/piker/issues/325
+ # - a proper streaming mxmn algo as per above issue.
+ # - we should probably scale the view margin based on the size of
+ # the true range? This way you can slap in orders outside the
+ # current L1 (only) book range.
+ main_vb: ChartView = main_viz.plot.vb
+ this_viz: Viz = chart._vizs[fqsn]
+ this_vb: ChartView = this_viz.plot.vb
+ this_yr = this_vb._yrange
+ if this_yr:
+ lmn, lmx = this_yr
+ else:
+ lmn = lmx = 0
+
+ mn: float = lmn
+ mx: float = lmx
+ mx_vlm_in_view: float | None = None
+ yrange_margin = 0.09
# update ohlc sampled price bars
if (
- # do_rt_update
- # or do_px_step
(liv and do_px_step)
or trigger_all
):
+ # TODO: i think we're double calling this right now
+ # since .interact_graphics_cycle() also calls it?
+ # I guess we can add a guard in there?
_, i_read_range, _ = main_viz.update_graphics()
profiler('`Viz.update_graphics()` call')
- (
- mx_in_view,
- mn_in_view,
- mx_vlm_in_view,
- ) = multi_maxmin(
- i_read_range,
- main_viz,
- ds.vlm_viz,
- profiler,
- )
-
- mx = mx_in_view + tick_margin
- mn = mn_in_view - tick_margin
- profiler('{fqsdn} `multi_maxmin()` call')
-
# don't real-time "shift" the curve to the
# left unless we get one of the following:
if (
@@ -583,7 +579,6 @@ def graphics_update_cycle(
or trigger_all
):
chart.increment_view(datums=append_diff)
- # main_viz.plot.vb._set_yrange(viz=main_viz)
# NOTE: since vlm and ohlc charts are axis linked now we don't
# need the double increment request?
@@ -592,6 +587,21 @@ def graphics_update_cycle(
profiler('view incremented')
+ # NOTE: do this **after** the tread to ensure we take the yrange
+ # from the most current view x-domain.
+ (
+ mn,
+ mx,
+ mx_vlm_in_view,
+ ) = multi_maxmin(
+ i_read_range,
+ main_viz,
+ ds.vlm_viz,
+ profiler,
+ )
+
+ profiler(f'{fqsn} `multi_maxmin()` call')
+
# iterate frames of ticks-by-type such that we only update graphics
# using the last update per type where possible.
ticks_by_type = quote.get('tbt', {})
@@ -613,8 +623,22 @@ def graphics_update_cycle(
# TODO: make sure IB doesn't send ``-1``!
and price > 0
):
- mx = max(price + tick_margin, mx)
- mn = min(price - tick_margin, mn)
+ if (
+ price < mn
+ ):
+ mn = price
+ yrange_margin = 0.16
+ # # print(f'{this_viz.name} new MN from TICK {mn}')
+
+ if (
+ price > mx
+ ):
+ mx = price
+ yrange_margin = 0.16
+ # # print(f'{this_viz.name} new MX from TICK {mx}')
+
+ # mx = max(price, mx)
+ # mn = min(price, mn)
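+ # e.g. (sketch): with a prior view range of
+ # (mn, mx) = (98.0, 105.0) a clearing tick at 97.5
+ # expands `mn` to 97.5 and widens `yrange_margin`
+ # from the default 0.09 to 0.16 so the fresh extreme
+ # stays comfortably inside the view.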
# clearing price update:
# generally, we only want to update graphics from the *last*
@@ -677,14 +701,16 @@ def graphics_update_cycle(
# Y-autoranging: adjust y-axis limits based on state tracking
# of previous "last" L1 values which are in view.
- lmx = varz['last_mx']
- lmn = varz['last_mn']
- mx_diff = mx - lmx
mn_diff = mn - lmn
+ mx_diff = mx - lmx
if (
- mx_diff
- or mn_diff
+ mn_diff or mx_diff # covers all cases below?
+ # (mx - lmx) > 0 # upward expansion
+ # or (mn - lmn) < 0 # downward expansion
+
+ # or (lmx - mx) > 0 # upward contraction
+ # or (lmn - mn) < 0 # downward contraction
):
# complain about out-of-range outliers which can show up
# in certain annoying feeds (like ib)..
@@ -703,53 +729,77 @@ def graphics_update_cycle(
f'mn_diff: {mn_diff}\n'
)
- # FAST CHART resize case
+ # TODO: track local liv maxmin without doing a recompute all the
+ # time..plus, just generally the user is more likely to be
+ # zoomed out enough on the slow chart that this is never an
+ # issue (the last datum going out of y-range).
+
+ # FAST CHART y-auto-range resize case
elif (
liv
and not chart._static_yrange == 'axis'
):
- main_vb = main_viz.plot.vb
+ # NOTE: this auto-yranging approach is a sort of hybrid
+ # between always aligning overlays to their common ref
+ # sample and not updating at all:
+ # - whenever an interaction happens the overlays are scaled
+ # to one another and thus are ref-point aligned and
+ # scaled.
+ # - on treads and range updates due to new mn/mx from last
+ # datum, we don't scale to the overlayT; instead we only
+ # adjust when the latest datum is outside the previous
+ # dispersion range.
+ mn = min(mn, lmn)
+ mx = max(mx, lmx)
if (
- main_vb._ic is None
- or not main_vb._ic.is_set()
+ main_vb._in_interact is None
+ or not main_vb._in_interact.is_set()
):
- yr = (mn, mx)
- # print(
- # f'MAIN VIZ yrange update\n'
- # f'{fqsn}: {yr}'
- # )
-
- main_vb._set_yrange(
- # TODO: we should probably scale
- # the view margin based on the size
- # of the true range? This way you can
- # slap in orders outside the current
- # L1 (only) book range.
- # range_margin=0.1,
- yrange=yr
+ # print(f'SETTING Y-mnmx -> {main_viz.name}: {(mn, mx)}')
+ this_vb.interact_graphics_cycle(
+ do_linked_charts=False,
+ # TODO: we could optionally offer always doing this
+ # on treads thus always keeping fast-chart overlays
+ # aligned by their LHS datum?
+ do_overlay_scaling=False,
+ yrange_kwargs={
+ this_viz: {
+ 'yrange': (mn, mx),
+ 'range_margin': yrange_margin,
+ },
+ }
)
profiler('main vb y-autorange')
- # SLOW CHART resize case
- (
- _,
- hist_liv,
- _,
- _,
- _,
- _,
- _,
- ) = hist_viz.incr_info(
- ds=ds,
- is_1m=True,
- )
- profiler('hist `Viz.incr_info()`')
+ # SLOW CHART y-auto-range resize case
+ # (NOTE: this is still inside the y-range
+ # guard block above!)
+ # (
+ # _,
+ # hist_liv,
+ # _,
+ # _,
+ # _,
+ # _,
+ # _,
+ # ) = hist_viz.incr_info(
+ # ds=ds,
+ # is_1m=True,
+ # )
+
+ # if hist_liv:
+ # times = hist_viz.shm.array['time']
+ # last_t = times[-1]
+ # dt = pendulum.from_timestamp(last_t)
+ # log.info(
+ # f'{hist_viz.name} TIMESTEP:'
+ # f'epoch: {last_t}\n'
+ # f'datetime: {dt}\n'
+ # )
+
+ # profiler('hist `Viz.incr_info()`')
- # TODO: track local liv maxmin without doing a recompute all the
- # time..plut, just generally the user is more likely to be
- # zoomed out enough on the slow chart that this is never an
- # issue (the last datum going out of y-range).
# hist_chart = ds.hist_chart
# if (
# hist_liv
@@ -764,7 +814,8 @@ def graphics_update_cycle(
# XXX: update this every draw cycle to ensure y-axis auto-ranging
# only adjusts when the in-view data co-domain actually expands or
# contracts.
- varz['last_mx'], varz['last_mn'] = mx, mn
+ # varz['last_mn'] = mn
+ # varz['last_mx'] = mx
# TODO: a similar, only-update-full-path-on-px-step approach for all
# fsp overlays and vlm stuff..
@@ -772,10 +823,12 @@ def graphics_update_cycle(
# run synchronous update on all `Viz` overlays
for curve_name, viz in chart._vizs.items():
+ if viz.is_ohlc:
+ continue
+
# update any overlayed fsp flows
if (
curve_name != fqsn
- and not viz.is_ohlc
):
update_fsp_chart(
viz,
@@ -788,8 +841,7 @@ def graphics_update_cycle(
# px column to give the user the mx/mn
# range of that set.
if (
- curve_name != fqsn
- and liv
+ liv
# and not do_px_step
# and not do_rt_update
):
@@ -809,8 +861,14 @@ def graphics_update_cycle(
# TODO: can we unify this with the above loop?
if vlm_chart:
vlm_vizs = vlm_chart._vizs
-
main_vlm_viz = vlm_vizs['volume']
+ main_vlm_vb = main_vlm_viz.plot.vb
+
+ # TODO: we should probably read this
+ # from the `Viz.vs: ViewState`!
+ vlm_yr = main_vlm_vb._yrange
+ if vlm_yr:
+ (_, vlm_ymx) = vlm_yrange = vlm_yr
# always update y-label
ds.vlm_sticky.update_from_data(
@@ -848,16 +906,30 @@ def graphics_update_cycle(
profiler('`main_vlm_viz.update_graphics()`')
if (
- mx_vlm_in_view != varz['last_mx_vlm']
+ mx_vlm_in_view
+ and vlm_yr
+ and mx_vlm_in_view != vlm_ymx
):
- varz['last_mx_vlm'] = mx_vlm_in_view
- # vlm_yr = (0, mx_vlm_in_view * 1.375)
- # vlm_chart.view._set_yrange(yrange=vlm_yr)
- # profiler('`vlm_chart.view._set_yrange()`')
+ # in this case we want to scale all overlays in the
+ # sub-chart but only incrementally update the vlm since
+ # we already calculated the new range above.
+ # TODO: in theory we can incrementally update all
+ # overlays as well though it will require iteration of
+ # them here in the display loop right?
+ main_vlm_viz.plot.vb.interact_graphics_cycle(
+ do_overlay_scaling=True,
+ do_linked_charts=False,
+ yrange_kwargs={
+ main_vlm_viz: {
+ 'yrange': vlm_yrange,
+ # 'range_margin': yrange_margin,
+ },
+ },
+ )
+ profiler('`vlm_chart.view.interact_graphics_cycle()`')
# update all downstream FSPs
for curve_name, viz in vlm_vizs.items():
-
if curve_name == 'volume':
continue
@@ -882,10 +954,13 @@ def graphics_update_cycle(
# XXX: without this we get completely
# mangled/empty vlm display subchart..
# fvb = viz.plot.vb
- # fvb._set_yrange(
- # viz=viz,
+ # fvb.interact_graphics_cycle(
+ # do_linked_charts=False,
+ # do_overlay_scaling=False,
# )
- profiler(f'vlm `Viz[{viz.name}].plot.vb._set_yrange()`')
+ profiler(
+ f'`Viz[{viz.name}].plot.vb.interact_graphics_cycle()`'
+ )
# even if we're downsampled bigly
# draw the last datum in the final
@@ -1224,6 +1299,9 @@ async def display_symbol_data(
# to avoid internal pane creation.
# sidepane=False,
sidepane=godwidget.search,
+ draw_kwargs={
+ 'last_step_color': 'original',
+ },
)
# ensure the last datum graphic is generated
@@ -1242,6 +1320,9 @@ async def display_symbol_data(
# in the case of history chart we explicitly set `False`
# to avoid internal pane creation.
sidepane=pp_pane,
+ draw_kwargs={
+ 'last_step_color': 'original',
+ },
)
rt_viz = rt_chart.get_viz(fqsn)
pis.setdefault(fqsn, [None, None])[0] = rt_chart.plotItem
@@ -1308,13 +1389,6 @@ async def display_symbol_data(
name=fqsn,
axis_title=fqsn,
)
- # only show a singleton bottom-bottom axis by default.
- hist_pi.hideAxis('bottom')
-
- # XXX: TODO: THIS WILL CAUSE A GAP ON OVERLAYS,
- # i think it needs to be "removed" instead when there
- # are none?
- hist_pi.hideAxis('left')
hist_viz = hist_chart.draw_curve(
fqsn,
@@ -1333,10 +1407,6 @@ async def display_symbol_data(
# for zoom-interaction purposes.
hist_viz.draw_last(array_key=fqsn)
- hist_pi.vb.maxmin = partial(
- hist_chart.maxmin,
- name=fqsn,
- )
# TODO: we need a better API to do this..
# specially store ref to shm for lookup in display loop
# since only a placeholder of `None` is entered in
@@ -1350,9 +1420,6 @@ async def display_symbol_data(
axis_title=fqsn,
)
- rt_pi.hideAxis('left')
- rt_pi.hideAxis('bottom')
-
rt_viz = rt_chart.draw_curve(
fqsn,
ohlcv,
@@ -1365,10 +1432,6 @@ async def display_symbol_data(
color=bg_chart_color,
last_step_color=bg_last_bar_color,
)
- rt_pi.vb.maxmin = partial(
- rt_chart.maxmin,
- name=fqsn,
- )
# TODO: we need a better API to do this..
# specially store ref to shm for lookup in display loop
@@ -1395,7 +1458,9 @@ async def display_symbol_data(
for fqsn, flume in feed.flumes.items():
# size view to data prior to order mode init
- rt_chart.default_view()
+ rt_chart.main_viz.default_view(
+ do_min_bars=True,
+ )
rt_linked.graphics_cycle()
# TODO: look into this because not sure why it was
@@ -1406,7 +1471,9 @@ async def display_symbol_data(
# determine if auto-range adjustements should be made.
# rt_linked.subplots.pop('volume', None)
- hist_chart.default_view()
+ hist_chart.main_viz.default_view(
+ do_min_bars=True,
+ )
hist_linked.graphics_cycle()
godwidget.resize_all()
@@ -1449,10 +1516,14 @@ async def display_symbol_data(
# default view adjuments and sidepane alignment
# as final default UX touch.
- rt_chart.default_view()
+ rt_chart.main_viz.default_view(
+ do_min_bars=True,
+ )
await trio.sleep(0)
- hist_chart.default_view()
+ hist_chart.main_viz.default_view(
+ do_min_bars=True,
+ )
hist_viz = hist_chart.get_viz(fqsn)
await trio.sleep(0)
diff --git a/piker/ui/_editors.py b/piker/ui/_editors.py
index 08f198529..df8813147 100644
--- a/piker/ui/_editors.py
+++ b/piker/ui/_editors.py
@@ -21,7 +21,6 @@
from __future__ import annotations
from collections import defaultdict
from typing import (
- Optional,
TYPE_CHECKING
)
@@ -67,7 +66,7 @@ def add(
x: float,
y: float,
color='default',
- pointing: Optional[str] = None,
+ pointing: str | None = None,
) -> pg.ArrowItem:
'''
@@ -221,7 +220,7 @@ def remove_line(
line: LevelLine = None,
uuid: str = None,
- ) -> Optional[LevelLine]:
+ ) -> LevelLine | None:
'''Remove a line by reference or uuid.
If no lines or ids are provided remove all lines under the
diff --git a/piker/ui/_exec.py b/piker/ui/_exec.py
index d8eabb706..19663cacd 100644
--- a/piker/ui/_exec.py
+++ b/piker/ui/_exec.py
@@ -49,7 +49,7 @@
import trio
from outcome import Error
-from .._daemon import (
+from ..service import (
maybe_open_pikerd,
get_tractor_runtime_kwargs,
)
diff --git a/piker/ui/_forms.py b/piker/ui/_forms.py
index a6cddae98..a86cf9030 100644
--- a/piker/ui/_forms.py
+++ b/piker/ui/_forms.py
@@ -23,7 +23,9 @@
from functools import partial
from math import floor
from typing import (
- Optional, Any, Callable, Awaitable
+ Any,
+ Callable,
+ Awaitable,
)
import trio
@@ -263,7 +265,7 @@ def set_items(
def set_icon(
self,
key: str,
- icon_name: Optional[str],
+ icon_name: str | None,
) -> None:
self.setItemIcon(
@@ -344,7 +346,7 @@ def add_field_label(
name: str,
- font_size: Optional[int] = None,
+ font_size: int | None = None,
font_color: str = 'default_lightest',
) -> QtGui.QLabel:
@@ -469,7 +471,7 @@ def mk_form(
parent: QWidget,
fields_schema: dict,
- font_size: Optional[int] = None,
+ font_size: int | None = None,
) -> FieldsForm:
@@ -628,7 +630,7 @@ def mk_fill_status_bar(
parent_pane: QWidget,
form: FieldsForm,
pane_vbox: QVBoxLayout,
- label_font_size: Optional[int] = None,
+ label_font_size: int | None = None,
) -> (
# TODO: turn this into a composite?
@@ -738,7 +740,7 @@ def mk_fill_status_bar(
def mk_order_pane_layout(
parent: QWidget,
- # accounts: dict[str, Optional[str]],
+ # accounts: dict[str, str | None],
) -> FieldsForm:
diff --git a/piker/ui/_fsp.py b/piker/ui/_fsp.py
index 2e2e76c1c..6e600743c 100644
--- a/piker/ui/_fsp.py
+++ b/piker/ui/_fsp.py
@@ -24,7 +24,10 @@
from functools import partial
import inspect
from itertools import cycle
-from typing import Optional, AsyncGenerator, Any
+from typing import (
+ AsyncGenerator,
+ Any,
+)
import numpy as np
import msgspec
@@ -80,7 +83,7 @@ def has_vlm(ohlcv: ShmArray) -> bool:
def update_fsp_chart(
viz,
graphics_name: str,
- array_key: Optional[str],
+ array_key: str | None,
**kwargs,
) -> None:
@@ -476,7 +479,7 @@ async def start_engine_task(
target: Fsp,
conf: dict[str, dict[str, Any]],
- worker_name: Optional[str] = None,
+ worker_name: str | None = None,
loglevel: str = 'info',
) -> (Flume, trio.Event):
@@ -608,10 +611,11 @@ async def open_vlm_displays(
linked: LinkedSplits,
flume: Flume,
dvlm: bool = True,
+ loglevel: str = 'info',
task_status: TaskStatus[ChartPlotWidget] = trio.TASK_STATUS_IGNORED,
-) -> ChartPlotWidget:
+) -> None:
'''
Volume subchart displays.
@@ -666,7 +670,6 @@ async def open_vlm_displays(
# built-in vlm which we plot ASAP since it's
# usually data provided directly with OHLC history.
shm = ohlcv
- # ohlc_chart = linked.chart
vlm_chart = linked.add_plot(
name='volume',
@@ -690,7 +693,14 @@ async def open_vlm_displays(
# the axis on the left it's totally not lined up...
# show volume units value on LHS (for dinkus)
# vlm_chart.hideAxis('right')
- # vlm_chart.showAxis('left')
+ vlm_chart.hideAxis('left')
+
+ # TODO: is it worth being able to remove axes (from, i guess,
+ # a perf perspective) such that we can actually do this and
+ # other axis related calls (eg. label updates in the
+ # display loop) don't raise when the axis can't be loaded and
+ # thus would normally cause many label related calls to crash?
+ # axis = vlm_chart.removeAxis('left')
# send back new chart to caller
task_status.started(vlm_chart)
@@ -704,17 +714,9 @@ async def open_vlm_displays(
# read from last calculated value
value = shm.array['volume'][-1]
-
last_val_sticky.update_from_data(-1, value)
- _, _, vlm_curve = vlm_chart.update_graphics_from_flow(
- 'volume',
- )
-
- # size view to data once at outset
- vlm_chart.view._set_yrange(
- viz=vlm_viz
- )
+ _, _, vlm_curve = vlm_viz.update_graphics()
# add axis title
axis = vlm_chart.getAxis('right')
@@ -722,7 +724,6 @@ async def open_vlm_displays(
if dvlm:
- tasks_ready = []
# spawn and overlay $ vlm on the same subchart
dvlm_flume, started = await admin.start_engine_task(
dolla_vlm,
@@ -736,22 +737,8 @@ async def open_vlm_displays(
},
},
},
- # loglevel,
+ loglevel,
)
- tasks_ready.append(started)
-
- # FIXME: we should error on starting the same fsp right
- # since it might collide with existing shm.. or wait we
- # had this before??
- # dolla_vlm
-
- tasks_ready.append(started)
- # profiler(f'created shm for fsp actor: {display_name}')
-
- # wait for all engine tasks to startup
- async with trio.open_nursery() as n:
- for event in tasks_ready:
- n.start_soon(event.wait)
# dolla vlm overlay
# XXX: the main chart already contains a vlm "units" axis
@@ -774,10 +761,6 @@ async def open_vlm_displays(
},
)
- # TODO: should this maybe be implicit based on input args to
- # `.overlay_plotitem()` above?
- dvlm_pi.hideAxis('bottom')
-
# all to be overlayed curve names
dvlm_fields = [
'dolla_vlm',
@@ -827,6 +810,7 @@ def chart_curves(
)
assert viz.plot is pi
+ await started.wait()
chart_curves(
dvlm_fields,
dvlm_pi,
@@ -835,19 +819,17 @@ def chart_curves(
step_mode=True,
)
- # spawn flow rates fsp **ONLY AFTER** the 'dolla_vlm' fsp is
- # up since this one depends on it.
-
+ # NOTE: spawn flow rates fsp **ONLY AFTER** the 'dolla_vlm' fsp is
+ # up since calculating vlm "rates" obvs first requires the
+ # underlying vlm event feed ;)
fr_flume, started = await admin.start_engine_task(
flow_rates,
{ # fsp engine conf
'func_name': 'flow_rates',
'zero_on_step': True,
},
- # loglevel,
+ loglevel,
)
- await started.wait()
-
# chart_curves(
# dvlm_rate_fields,
# dvlm_pi,
@@ -859,13 +841,15 @@ def chart_curves(
# hide the original vlm curve since the $vlm one is now
# displayed and the curves are effectively the same minus
# liquidity events (well at least on low OHLC periods - 1s).
- vlm_curve.hide()
+ # vlm_curve.hide()
vlm_chart.removeItem(vlm_curve)
vlm_viz = vlm_chart._vizs['volume']
- vlm_viz.render = False
-
- # avoid range sorting on volume once disabled
vlm_chart.view.disable_auto_yrange()
+ # NOTE: DON'T DO THIS.
+ # WHY: we want range sorting on volume for the RHS label!
+ # -> if you don't want that then use this but likely you
+ # only will if we decide to drop unit vlm..
+ # vlm_viz.render = False
# Trade rate overlay
# XXX: requires an additional overlay for
@@ -888,8 +872,8 @@ def chart_curves(
},
)
- tr_pi.hideAxis('bottom')
+ await started.wait()
chart_curves(
trade_rate_fields,
tr_pi,
diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py
index c0e22d500..b4a78931b 100644
--- a/piker/ui/_interaction.py
+++ b/piker/ui/_interaction.py
@@ -1,5 +1,5 @@
# piker: trading gear for hackers
-# Copyright (C) Tyler Goodlet (in stewardship for piker0)
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -14,16 +14,17 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
-"""
+'''
Chart view box primitives
-"""
+'''
from __future__ import annotations
-from contextlib import asynccontextmanager
-from functools import partial
+from contextlib import (
+ asynccontextmanager,
+ ExitStack,
+)
import time
from typing import (
- Optional,
Callable,
TYPE_CHECKING,
)
@@ -40,6 +41,7 @@
from ..log import get_logger
from .._profile import Profiler
from .._profile import pg_profile_enabled, ms_slower_then
+from .view_mode import overlay_viewlists
# from ._style import _min_points_to_show
from ._editors import SelectRect
from . import _event
@@ -73,7 +75,7 @@
async def handle_viewmode_kb_inputs(
- view: 'ChartView',
+ view: ChartView,
recv_chan: trio.abc.ReceiveChannel,
) -> None:
@@ -87,7 +89,7 @@ async def handle_viewmode_kb_inputs(
last = time.time()
action: str
- on_next_release: Optional[Callable] = None
+ on_next_release: Callable | None = None
# for quick key sequence-combo pattern matching
# we have a min_tap period and these should not
@@ -142,6 +144,23 @@ async def handle_viewmode_kb_inputs(
if mods == Qt.ControlModifier:
ctrl = True
+ # UI REPL-shell
+ if (
+ ctrl and key in {
+ Qt.Key_U,
+ }
+ ):
+ import tractor
+ god = order_mode.godw # noqa
+ feed = order_mode.feed # noqa
+ chart = order_mode.chart # noqa
+ viz = chart.main_viz # noqa
+ vlm_chart = chart.linked.subplots['volume'] # noqa
+ vlm_viz = vlm_chart.main_viz # noqa
+ dvlm_pi = vlm_chart._vizs['dolla_vlm'].plot # noqa
+ await tractor.breakpoint()
+ view.interact_graphics_cycle()
+
# SEARCH MODE #
# ctlr-/ for "lookup", "search" -> open search tree
if (
@@ -169,9 +188,13 @@ async def handle_viewmode_kb_inputs(
# View modes
if key == Qt.Key_R:
- # TODO: set this for all subplots
- # edge triggered default view activation
- view.chart.default_view()
+ # NOTE: seems that if we don't yield a Qt render
+ # cycle then the m4 downsampled curves will show here
+ # without another reset..
+ view._viz.default_view()
+ view.interact_graphics_cycle()
+ await trio.sleep(0)
+ view.interact_graphics_cycle()
if len(fast_key_seq) > 1:
# begin matches against sequences
@@ -313,7 +336,7 @@ async def handle_viewmode_kb_inputs(
async def handle_viewmode_mouse(
- view: 'ChartView',
+ view: ChartView,
recv_chan: trio.abc.ReceiveChannel,
) -> None:
@@ -359,7 +382,7 @@ def __init__(
name: str,
parent: pg.PlotItem = None,
- static_yrange: Optional[tuple[float, float]] = None,
+ static_yrange: tuple[float, float] | None = None,
**kwargs,
):
@@ -392,8 +415,13 @@ def __init__(
self.order_mode: bool = False
self.setFocusPolicy(QtCore.Qt.StrongFocus)
- self._ic = None
- self._yranger: Callable | None = None
+ self._in_interact: trio.Event | None = None
+ self._interact_stack: ExitStack = ExitStack()
+
+ # TODO: probably just assign this whenever a new `PlotItem` is
+ # allocated since they're 1to1 with views..
+ self._viz: Viz | None = None
+ self._yrange: tuple[float, float] | None = None
def start_ic(
self,
@@ -403,10 +431,15 @@ def start_ic(
to any interested task waiters.
'''
- if self._ic is None:
+ if self._in_interact is None:
+ chart = self.chart
try:
- self.chart.pause_all_feeds()
- self._ic = trio.Event()
+ self._in_interact = trio.Event()
+
+ chart.pause_all_feeds()
+ self._interact_stack.enter_context(
+ chart.reset_graphics_caches()
+ )
except RuntimeError:
pass
@@ -420,11 +453,13 @@ def signal_ic(
to any waiters.
'''
- if self._ic:
+ if self._in_interact:
try:
- self._ic.set()
- self._ic = None
+ self._interact_stack.close()
self.chart.resume_all_feeds()
+
+ self._in_interact.set()
+ self._in_interact = None
except RuntimeError:
pass
@@ -432,7 +467,7 @@ def signal_ic(
async def open_async_input_handler(
self,
- ) -> 'ChartView':
+ ) -> ChartView:
async with (
_event.open_handlers(
@@ -492,7 +527,7 @@ def wheelEvent(
# don't zoom more then the min points setting
viz = chart.get_viz(chart.name)
- vl, lbar, rbar, vr = viz.bars_range()
+ _, vl, lbar, rbar, vr, r = viz.datums_range()
# TODO: max/min zoom limits incorporating time step size.
# rl = vr - vl
@@ -507,7 +542,7 @@ def wheelEvent(
# return
# actual scaling factor
- s = 1.015 ** (ev.delta() * -1 / 20) # self.state['wheelScaleFactor'])
+ s = 1.016 ** (ev.delta() * -1 / 20) # self.state['wheelScaleFactor'])
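+ # NOTE (sketch): a single wheel "notch" is ev.delta() == 120
+ # which maps to s = 1.016 ** -6 ~= 0.91 (or its inverse,
+ # ~1.10, for the opposite scroll direction) ie. roughly
+ # a 9-10% re-scale per notch.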
s = [(None if m is False else s) for m in mask]
if (
@@ -533,12 +568,13 @@ def wheelEvent(
# scale_y = 1.3 ** (center.y() * -1 / 20)
self.scaleBy(s, center)
+ # zoom in view-box area
else:
# use right-most point of current curve graphic
xl = viz.graphics.x_last()
focal = min(
xl,
- vr,
+ r,
)
self._resetTarget()
@@ -552,7 +588,7 @@ def wheelEvent(
# update, but i gotta feelin that because this one is signal
# based (and thus not necessarily sync invoked right away)
# that calling the resize method manually might work better.
- self.sigRangeChangedManually.emit(mask)
+ # self.sigRangeChangedManually.emit(mask)
# XXX: without this it seems as though sometimes
# when zooming in from far out (and maybe vice versa?)
@@ -562,14 +598,15 @@ def wheelEvent(
# that never seems to happen? Only question is how much this
# "double work" is causing latency when these missing event
# fires don't happen?
- self.maybe_downsample_graphics()
+ self.interact_graphics_cycle()
+ self.interact_graphics_cycle()
ev.accept()
def mouseDragEvent(
self,
ev,
- axis: Optional[int] = None,
+ axis: int | None = None,
) -> None:
pos = ev.pos()
@@ -581,7 +618,10 @@ def mouseDragEvent(
button = ev.button()
# Ignore axes if mouse is disabled
- mouseEnabled = np.array(self.state['mouseEnabled'], dtype=np.float)
+ mouseEnabled = np.array(
+ self.state['mouseEnabled'],
+ dtype=np.float,
+ )
mask = mouseEnabled.copy()
if axis is not None:
mask[1-axis] = 0.0
@@ -645,9 +685,6 @@ def mouseDragEvent(
self.start_ic()
except RuntimeError:
pass
- # if self._ic is None:
- # self.chart.pause_all_feeds()
- # self._ic = trio.Event()
if axis == 1:
self.chart._static_yrange = 'axis'
@@ -664,16 +701,19 @@ def mouseDragEvent(
if x is not None or y is not None:
self.translateBy(x=x, y=y)
- self.sigRangeChangedManually.emit(self.state['mouseEnabled'])
+ # self.sigRangeChangedManually.emit(mask)
+ # self.state['mouseEnabled']
+ # )
+ self.interact_graphics_cycle()
if ev.isFinish():
self.signal_ic()
- # self._ic.set()
- # self._ic = None
+ # self._in_interact.set()
+ # self._in_interact = None
# self.chart.resume_all_feeds()
- # XXX: WHY
- ev.accept()
+ # # XXX: WHY
+ # ev.accept()
# WEIRD "RIGHT-CLICK CENTER ZOOM" MODE
elif button & QtCore.Qt.RightButton:
@@ -695,10 +735,12 @@ def mouseDragEvent(
center = Point(tr.map(ev.buttonDownPos(QtCore.Qt.RightButton)))
self._resetTarget()
self.scaleBy(x=x, y=y, center=center)
- self.sigRangeChangedManually.emit(self.state['mouseEnabled'])
- # XXX: WHY
- ev.accept()
+ # self.sigRangeChangedManually.emit(self.state['mouseEnabled'])
+ self.interact_graphics_cycle()
+
+ # XXX: WHY
+ ev.accept()
# def mouseClickEvent(self, event: QtCore.QEvent) -> None:
# '''This routine is rerouted to an async handler.
@@ -719,19 +761,19 @@ def _set_yrange(
self,
*,
- yrange: Optional[tuple[float, float]] = None,
+ yrange: tuple[float, float] | None = None,
viz: Viz | None = None,
# NOTE: this value pairs (more or less) with L1 label text
# height offset from from the bid/ask lines.
- range_margin: float = 0.09,
+ range_margin: float | None = 0.06,
- bars_range: Optional[tuple[int, int, int, int]] = None,
+ bars_range: tuple[int, int, int, int] | None = None,
# flag to prevent triggering sibling charts from the same linked
# set from recursion errors.
autoscale_linked_plots: bool = False,
- name: Optional[str] = None,
+ name: str | None = None,
) -> None:
'''
@@ -743,14 +785,13 @@ def _set_yrange(
'''
name = self.name
- # print(f'YRANGE ON {name}')
+ # print(f'YRANGE ON {name} -> yrange{yrange}')
profiler = Profiler(
msg=f'`ChartView._set_yrange()`: `{name}`',
disabled=not pg_profile_enabled(),
ms_threshold=ms_slower_then,
delayed=True,
)
- set_range = True
chart = self._chart
# view has been set in 'axis' mode
@@ -759,8 +800,8 @@ def _set_yrange(
# - disable autoranging
# - remove any y range limits
if chart._static_yrange == 'axis':
- set_range = False
self.setLimits(yMin=None, yMax=None)
+ return
# static y-range has been set likely by
# a specialized FSP configuration.
@@ -773,54 +814,72 @@ def _set_yrange(
elif yrange is not None:
ylow, yhigh = yrange
- if set_range:
-
- # XXX: only compute the mxmn range
- # if none is provided as input!
- if not yrange:
+ # XXX: only compute the mxmn range
+ # if none is provided as input!
+ if not yrange:
- if not viz:
- breakpoint()
+ if not viz:
+ breakpoint()
- out = viz.maxmin()
- if out is None:
- log.warning(f'No yrange provided for {name}!?')
- return
- (
- ixrng,
- _,
- yrange
- ) = out
+ out = viz.maxmin()
+ if out is None:
+ log.warning(f'No yrange provided for {name}!?')
+ return
+ (
+ ixrng,
+ _,
+ yrange
+ ) = out
- profiler(f'`{self.name}:Viz.maxmin()` -> {ixrng}=>{yrange}')
+ profiler(f'`{self.name}:Viz.maxmin()` -> {ixrng}=>{yrange}')
- if yrange is None:
- log.warning(f'No yrange provided for {name}!?')
- return
+ if yrange is None:
+ log.warning(f'No yrange provided for {name}!?')
+ return
ylow, yhigh = yrange
- # view margins: stay within a % of the "true range"
+ # always stash last range for diffing by
+ # incremental update calculations BEFORE adding
+ # margin.
+ self._yrange = ylow, yhigh
+
+ # view margins: stay within a % of the "true range"
+ if range_margin is not None:
diff = yhigh - ylow
- ylow = ylow - (diff * range_margin)
- yhigh = yhigh + (diff * range_margin)
-
- # XXX: this often needs to be unset
- # to get different view modes to operate
- # correctly!
- self.setLimits(
- yMin=ylow,
- yMax=yhigh,
+ ylow = max(
+ ylow - (diff * range_margin),
+ 0,
+ )
+ yhigh = min(
+ yhigh + (diff * range_margin),
+ yhigh * (1 + range_margin),
)
- self.setYRange(ylow, yhigh)
- profiler(f'set limits: {(ylow, yhigh)}')
+ # print(
+ # f'set limits {self.name}:\n'
+ # f'ylow: {ylow}\n'
+ # f'yhigh: {yhigh}\n'
+ # )
+ self.setYRange(
+ ylow,
+ yhigh,
+ padding=0,
+ )
+ self.setLimits(
+ yMin=ylow,
+ yMax=yhigh,
+ )
+ self.update()
+
+ # LOL: yet another pg bug..
+ # can't use `msg=f'setYRange({ylow}, {yhigh}')`
profiler.finish()
def enable_auto_yrange(
self,
viz: Viz,
- src_vb: Optional[ChartView] = None,
+ src_vb: ChartView | None = None,
) -> None:
'''
@@ -831,18 +890,6 @@ def enable_auto_yrange(
if src_vb is None:
src_vb = self
- if self._yranger is None:
- self._yranger = partial(
- self._set_yrange,
- viz=viz,
- )
-
- # widget-UIs/splitter(s) resizing
- src_vb.sigResized.connect(self._yranger)
-
- # mouse wheel doesn't emit XRangeChanged
- src_vb.sigRangeChangedManually.connect(self._yranger)
-
# re-sampling trigger:
# TODO: a smarter way to avoid calling this needlessly?
# 2 things i can think of:
@@ -850,23 +897,20 @@ def enable_auto_yrange(
# iterate those.
# - only register this when certain downsample-able graphics are
# "added to scene".
- src_vb.sigRangeChangedManually.connect(
- self.maybe_downsample_graphics
+ # src_vb.sigRangeChangedManually.connect(
+ # self.interact_graphics_cycle
+ # )
+
+ # widget-UIs/splitter(s) resizing
+ src_vb.sigResized.connect(
+ self.interact_graphics_cycle
)
def disable_auto_yrange(self) -> None:
# XXX: not entirely sure why we can't de-reg this..
self.sigResized.disconnect(
- self._yranger,
- )
-
- self.sigRangeChangedManually.disconnect(
- self._yranger,
- )
-
- self.sigRangeChangedManually.disconnect(
- self.maybe_downsample_graphics
+ self.interact_graphics_cycle
)
def x_uppx(self) -> float:
@@ -887,57 +931,54 @@ def x_uppx(self) -> float:
else:
return 0
- def maybe_downsample_graphics(
+ def interact_graphics_cycle(
self,
- autoscale_overlays: bool = False,
+ *args, # capture Qt signal (slot) inputs
+
+ # debug_print: bool = False,
+ do_linked_charts: bool = True,
+ do_overlay_scaling: bool = True,
+
+ yrange_kwargs: dict[
+ str,
+ tuple[float, float],
+ ] | None = None,
+
):
profiler = Profiler(
- msg=f'ChartView.maybe_downsample_graphics() for {self.name}',
+ msg=f'ChartView.interact_graphics_cycle() for {self.name}',
disabled=not pg_profile_enabled(),
+ ms_threshold=ms_slower_then,
# XXX: important to avoid not seeing underlying
- # ``.update_graphics_from_flow()`` nested profiling likely
+ # ``Viz.update_graphics()`` nested profiling likely
# due to the way delaying works and garbage collection of
# the profiler in the delegated method calls.
- ms_threshold=6,
- # ms_threshold=ms_slower_then,
- )
+ delayed=True,
- # TODO: a faster single-loop-iterator way of doing this XD
- chart = self._chart
- plots = {chart.name: chart}
+ # for hardcore latency checking, comment these flags above.
+ # disabled=False,
+ # ms_threshold=4,
+ )
linked = self.linked
- if linked:
+ if (
+ do_linked_charts
+ and linked
+ ):
+ plots = {linked.chart.name: linked.chart}
plots |= linked.subplots
- for chart_name, chart in plots.items():
- for name, flow in chart._vizs.items():
-
- if (
- not flow.render
-
- # XXX: super important to be aware of this.
- # or not flow.graphics.isVisible()
- ):
- # print(f'skipping {flow.name}')
- continue
-
- # pass in no array which will read and render from the last
- # passed array (normally provided by the display loop.)
- chart.update_graphics_from_flow(name)
-
- # for each overlay on this chart auto-scale the
- # y-range to max-min values.
- # if autoscale_overlays:
- # overlay = chart.pi_overlay
- # if overlay:
- # for pi in overlay.overlays:
- # pi.vb._set_yrange(
- # # TODO: get the range once up front...
- # # bars_range=br,
- # viz=pi.viz,
- # )
- # profiler('autoscaled linked plots')
-
- profiler(f'<{chart_name}>.update_graphics_from_flow({name})')
+ else:
+ chart = self._chart
+ plots = {chart.name: chart}
+
+ # TODO: a faster single-loop-iterator way of doing this?
+ return overlay_viewlists(
+ self._viz,
+ plots,
+ profiler,
+ do_overlay_scaling=do_overlay_scaling,
+ do_linked_charts=do_linked_charts,
+ yrange_kwargs=yrange_kwargs,
+ )
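+
+ # NOTE (sketch): display-loop call sites generally pass the
+ # config flags explicitly, eg. to re-scale only this chart's
+ # overlays without touching axis-linked siblings:
+ #
+ # view.interact_graphics_cycle(
+ # do_linked_charts=False,
+ # do_overlay_scaling=True,
+ # )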
diff --git a/piker/ui/_label.py b/piker/ui/_label.py
index 247b4cc09..85fbbb8a7 100644
--- a/piker/ui/_label.py
+++ b/piker/ui/_label.py
@@ -19,7 +19,10 @@
"""
from inspect import isfunction
-from typing import Callable, Optional, Any
+from typing import (
+ Callable,
+ Any,
+)
import pyqtgraph as pg
from PyQt5 import QtGui, QtWidgets
@@ -70,9 +73,7 @@ def __init__(
self._fmt_str = fmt_str
self._view_xy = QPointF(0, 0)
- self.scene_anchor: Optional[
- Callable[..., QPointF]
- ] = None
+ self.scene_anchor: Callable[..., QPointF] | None = None
self._x_offset = x_offset
@@ -164,7 +165,7 @@ def set_view_pos(
self,
y: float,
- x: Optional[float] = None,
+ x: float | None = None,
) -> None:
diff --git a/piker/ui/_lines.py b/piker/ui/_lines.py
index 461544e73..4469a6735 100644
--- a/piker/ui/_lines.py
+++ b/piker/ui/_lines.py
@@ -22,7 +22,6 @@
from functools import partial
from math import floor
from typing import (
- Optional,
Callable,
TYPE_CHECKING,
)
@@ -32,7 +31,7 @@
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QPointF
-from ._annotate import qgo_draw_markers, LevelMarker
+from ._annotate import LevelMarker
from ._anchors import (
vbr_left,
right_axis,
@@ -295,7 +294,7 @@ def mouseDragEvent(self, ev):
# show y-crosshair again
cursor.show_xhair()
- def get_cursor(self) -> Optional[Cursor]:
+ def get_cursor(self) -> Cursor | None:
chart = self._chart
cur = chart.linked.cursor
@@ -610,11 +609,11 @@ def order_line(
chart,
level: float,
- action: Optional[str] = 'buy', # buy or sell
+ action: str | None = 'buy', # buy or sell
- marker_style: Optional[str] = None,
- level_digits: Optional[float] = 3,
- size: Optional[int] = 1,
+ marker_style: str | None = None,
+ level_digits: float | None = 3,
+ size: int | None = 1,
size_digits: int = 1,
show_markers: bool = False,
submit_price: float = None,
diff --git a/piker/ui/_notify.py b/piker/ui/_notify.py
index c14b3cbbe..4a33dabb1 100644
--- a/piker/ui/_notify.py
+++ b/piker/ui/_notify.py
@@ -21,7 +21,6 @@
import os
import platform
import subprocess
-from typing import Optional
import trio
@@ -33,7 +32,7 @@
log = get_logger(__name__)
-_dbus_uid: Optional[str] = ''
+_dbus_uid: str | None = ''
async def notify_from_ems_status_msg(
diff --git a/piker/ui/_ohlc.py b/piker/ui/_ohlc.py
index 104b860cf..33d7bbdaa 100644
--- a/piker/ui/_ohlc.py
+++ b/piker/ui/_ohlc.py
@@ -28,7 +28,6 @@
QLineF,
QRectF,
)
-from PyQt5.QtWidgets import QGraphicsItem
from PyQt5.QtGui import QPainterPath
from ._curve import FlowGraphic
@@ -91,10 +90,6 @@ class BarItems(FlowGraphic):
"Price range" bars graphics rendered from a OHLC sampled sequence.
'''
- # XXX: causes this weird jitter bug when click-drag panning
- # where the path curve will awkwardly flicker back and forth?
- cache_mode: int = QGraphicsItem.NoCache
-
def __init__(
self,
*args,
@@ -113,9 +108,10 @@ def x_last(self) -> None | float:
'''
if self._last_bar_lines:
close_arm_line = self._last_bar_lines[-1]
- return close_arm_line.x2() if close_arm_line else None
- else:
- return None
+ if close_arm_line:
+ return close_arm_line.x2()
+
+ return None
# Qt docs: https://doc.qt.io/qt-5/qgraphicsitem.html#boundingRect
def boundingRect(self):
diff --git a/piker/ui/_orm.py b/piker/ui/_orm.py
index 8dea0b6d5..eaca69e2f 100644
--- a/piker/ui/_orm.py
+++ b/piker/ui/_orm.py
@@ -20,8 +20,9 @@
"""
from __future__ import annotations
from typing import (
- Optional, Generic,
- TypeVar, Callable,
+ Generic,
+ TypeVar,
+ Callable,
)
# from pydantic import BaseModel, validator
@@ -42,13 +43,11 @@
class Field(GenericModel, Generic[DataType]):
- widget_factory: Optional[
- Callable[
- [QWidget, 'Field'],
- QWidget
- ]
- ]
- value: Optional[DataType] = None
+ widget_factory: Callable[
+ [QWidget, 'Field'],
+ QWidget
+ ] | None = None
+ value: DataType | None = None
class Selection(Field[DataType], Generic[DataType]):
diff --git a/piker/ui/_overlay.py b/piker/ui/_overlay.py
index 7a5f047d1..6b2d1bd50 100644
--- a/piker/ui/_overlay.py
+++ b/piker/ui/_overlay.py
@@ -22,7 +22,6 @@
from functools import partial
from typing import (
Callable,
- Optional,
)
from pyqtgraph.graphicsItems.AxisItem import AxisItem
@@ -116,6 +115,7 @@ def __init__(
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
+ layout.setMinimumWidth(0)
if name in ('top', 'bottom'):
orient = Qt.Vertical
@@ -125,7 +125,11 @@ def __init__(
layout.setOrientation(orient)
- self.insert_plotitem(0, pi)
+ self.insert_plotitem(
+ 0,
+ pi,
+ remove_axes=False,
+ )
# insert surrounding linear layouts into the parent pi's layout
# such that additional axes can be appended arbitrarily without
@@ -140,7 +144,9 @@ def __init__(
assert linlayout.itemAt(0) is axis
# XXX: see comment in ``.insert_plotitem()``...
+ # our `PlotItem.removeAxis()` does this internally.
# pi.layout.removeItem(axis)
+
pi.layout.addItem(linlayout, *index)
layout = pi.layout.itemAt(*index)
assert layout is linlayout
@@ -165,6 +171,8 @@ def insert_plotitem(
index: int,
plotitem: PlotItem,
+ remove_axes: bool = False,
+
) -> tuple[int, list[AxisItem]]:
'''
Place item at index by inserting all axes into the grid
@@ -193,25 +201,19 @@ def insert_plotitem(
axis_view = axis.linkedView()
assert axis_view is plotitem.vb
- if (
- not axis.isVisible()
-
- # XXX: we never skip moving the axes for the *root*
- # plotitem inserted (even if not shown) since we need to
- # move all the hidden axes into linear sub-layouts for
- # that "central" plot in the overlay. Also if we don't
- # do it there's weird geomoetry calc offsets that make
- # view coords slightly off somehow .. smh
- and not len(self.pitems) == 0
- ):
- continue
-
- # XXX: Remove old axis?
- # No, turns out we don't need this?
- # DON'T UNLINK IT since we need the original ``ViewBox`` to
- # still drive it with events/handlers B)
- # popped = plotitem.removeAxis(name, unlink=False)
- # assert axis is popped
+ # if (
+ # not axis.isVisible()
+
+ # # XXX: we never skip moving the axes for the *root*
+ # # plotitem inserted (even if not shown) since we need to
+ # # move all the hidden axes into linear sub-layouts for
+ # # that "central" plot in the overlay. Also if we don't
+ # # do it there's weird geomoetry calc offsets that make
+ # # view coords slightly off somehow .. smh
+ # and not len(self.pitems) == 0
+ # ):
+ # print(f'SKIPPING MOVE: {plotitem.name}:{name} -> {axis}')
+ # continue
# invert insert index for layouts which are
# not-left-to-right, top-to-bottom insert oriented
@@ -225,6 +227,16 @@ def insert_plotitem(
self._register_item(index, plotitem)
+ if remove_axes:
+ for name, axis_info in plotitem.axes.copy().items():
+ axis = axis_info['item']
+ # XXX: Remove old axis?
+ # No, turns out we don't need this?
+ # DON'T UNLINK IT since we need the original ``ViewBox`` to
+ # still drive it with events/handlers B)
+ popped = plotitem.removeAxis(name, unlink=False)
+ assert axis is popped
+
return (index, inserted_axes)
def append_plotitem(
@@ -246,7 +258,7 @@ def get_axis(
plot: PlotItem,
name: str,
- ) -> Optional[AxisItem]:
+ ) -> AxisItem | None:
'''
Retrieve the named axis for overlayed ``plot`` or ``None``
if axis for that name is not shown.
@@ -321,7 +333,7 @@ def overlays(self) -> list[PlotItem]:
def add_plotitem(
self,
plotitem: PlotItem,
- index: Optional[int] = None,
+ index: int | None = None,
# event/signal names which will be broadcasted to all added
# (relayee) ``PlotItem``s (eg. ``ViewBox.mouseDragEvent``).
@@ -376,7 +388,7 @@ def broadcast(
# TODO: drop this viewbox specific input and
# allow a predicate to be passed in by user.
- axis: 'Optional[int]' = None,
+ axis: int | None = None,
*,
@@ -487,10 +499,10 @@ def broadcast(
else:
insert_index, axes = self.layout.insert_plotitem(index, plotitem)
- plotitem.setGeometry(root.vb.sceneBoundingRect())
+ plotitem.vb.setGeometry(root.vb.sceneBoundingRect())
def size_to_viewbox(vb: 'ViewBox'):
- plotitem.setGeometry(vb.sceneBoundingRect())
+ plotitem.vb.setGeometry(root.vb.sceneBoundingRect())
root.vb.sigResized.connect(size_to_viewbox)
diff --git a/piker/ui/_pg_overrides.py b/piker/ui/_pg_overrides.py
index b7c0b9aa9..bd35064be 100644
--- a/piker/ui/_pg_overrides.py
+++ b/piker/ui/_pg_overrides.py
@@ -22,8 +22,6 @@
view transforms.
"""
-from typing import Optional
-
import pyqtgraph as pg
from ._axes import Axis
@@ -47,9 +45,10 @@ def invertQTransform(tr):
def _do_overrides() -> None:
- """Dooo eeet.
+ '''
+ Dooo eeet.
- """
+ '''
# we don't care about potential fp issues inside Qt
pg.functions.invertQTransform = invertQTransform
pg.PlotItem = PlotItem
@@ -91,7 +90,7 @@ def __init__(
title=None,
viewBox=None,
axisItems=None,
- default_axes=['left', 'bottom'],
+ default_axes=['right', 'bottom'],
enableMenu=True,
**kargs
):
@@ -119,7 +118,7 @@ def removeAxis(
name: str,
unlink: bool = True,
- ) -> Optional[pg.AxisItem]:
+ ) -> pg.AxisItem | None:
"""
Remove an axis from the contained axis items
by ```name: str```.
@@ -130,7 +129,7 @@ def removeAxis(
If the ``unlink: bool`` is set to ``False`` then the axis will
stay linked to its view and will only be removed from the
- layoutonly be removed from the layout.
+ layout.
If no axis with ``name: str`` is found then this is a noop.
@@ -144,7 +143,10 @@ def removeAxis(
axis = entry['item']
self.layout.removeItem(axis)
- axis.scene().removeItem(axis)
+ scn = axis.scene()
+ if scn:
+ scn.removeItem(axis)
+
if unlink:
axis.unlinkFromView()
@@ -166,14 +168,14 @@ def removeAxis(
def setAxisItems(
self,
# XXX: yeah yeah, i know we can't use type annots like this yet.
- axisItems: Optional[dict[str, pg.AxisItem]] = None,
+ axisItems: dict[str, pg.AxisItem] | None = None,
add_to_layout: bool = True,
default_axes: list[str] = ['left', 'bottom'],
):
- """
- Override axis item setting to only
+ '''
+ Override axis item setting to only add what is passed in.
- """
+ '''
axisItems = axisItems or {}
# XXX: wth is this even saying?!?
diff --git a/piker/ui/_position.py b/piker/ui/_position.py
index 9baca8ee6..41421fb67 100644
--- a/piker/ui/_position.py
+++ b/piker/ui/_position.py
@@ -25,7 +25,6 @@
from math import floor, copysign
from typing import (
Callable,
- Optional,
TYPE_CHECKING,
)
@@ -170,12 +169,12 @@ class SettingsPane:
limit_label: QLabel
# encompasing high level namespace
- order_mode: Optional['OrderMode'] = None # typing: ignore # noqa
+ order_mode: OrderMode | None = None # typing: ignore # noqa
def set_accounts(
self,
names: list[str],
- sizes: Optional[list[float]] = None,
+ sizes: list[float] | None = None,
) -> None:
combo = self.form.fields['account']
@@ -540,8 +539,8 @@ class Nav(Struct):
charts: dict[int, ChartPlotWidget]
pp_labels: dict[str, Label] = {}
size_labels: dict[str, Label] = {}
- lines: dict[str, Optional[LevelLine]] = {}
- level_markers: dict[str, Optional[LevelMarker]] = {}
+ lines: dict[str, LevelLine | None] = {}
+ level_markers: dict[str, LevelMarker | None] = {}
color: str = 'default_lightest'
def update_ui(
@@ -550,7 +549,7 @@ def update_ui(
price: float,
size: float,
slots_used: float,
- size_digits: Optional[int] = None,
+ size_digits: int | None = None,
) -> None:
'''
@@ -847,7 +846,7 @@ def pane(self) -> FieldsForm:
def update_from_pp(
self,
- position: Optional[Position] = None,
+ position: Position | None = None,
set_as_startup: bool = False,
) -> None:
diff --git a/piker/ui/_render.py b/piker/ui/_render.py
index dc162834c..fb41b696b 100644
--- a/piker/ui/_render.py
+++ b/piker/ui/_render.py
@@ -51,7 +51,20 @@
class Renderer(msgspec.Struct):
-
+ '''
+ Low(er) level interface for converting a source, real-time updated,
+ data buffer (usually held in a ``ShmArray``) to a graphics data
+ format usable by `Qt`.
+
+ A renderer reads in context-specific source data using a ``Viz``,
+ formats that data to a 2D-xy pre-graphics format using
+ a ``IncrementalFormatter``, then renders that data to a set of
+ output graphics objects normally a ``.ui._curve.FlowGraphics``
+ sub-type to which the ``Renderer.path`` is applied and further "last
+ datum" graphics are updated from the source buffer's latest
+ sample(s).
+
+ '''
viz: Viz
fmtr: IncrementalFormatter
@@ -179,6 +192,10 @@ def render(
) = fmt_out
+ if not x_1d.size:
+ log.warning(f'{array_key} has no `.size`?')
+ return
+
# redraw conditions
if (
prepend_length > 0
@@ -195,7 +212,7 @@ def render(
fast_path: QPainterPath = self.fast_path
reset: bool = False
- self.viz.yrange = None
+ self.viz.ds_yrange = None
# redraw the entire source data if we have either of:
# - no prior path graphic rendered or,
@@ -218,7 +235,7 @@ def render(
)
if ds_out is not None:
x_1d, y_1d, ymn, ymx = ds_out
- self.viz.yrange = ymn, ymx
+ self.viz.ds_yrange = ymn, ymx
# print(f'{self.viz.name} post ds: ymn, ymx: {ymn},{ymx}')
reset = True
diff --git a/piker/ui/_search.py b/piker/ui/_search.py
index ef0cca80d..9627e83d1 100644
--- a/piker/ui/_search.py
+++ b/piker/ui/_search.py
@@ -35,7 +35,6 @@
from contextlib import asynccontextmanager
from functools import partial
from typing import (
- Optional,
Callable,
Awaitable,
Sequence,
@@ -178,8 +177,8 @@ def set_font_size(self, size: int = 18):
def resize_to_results(
self,
- w: Optional[float] = 0,
- h: Optional[float] = None,
+ w: float | None = 0,
+ h: float | None = None,
) -> None:
model = self.model()
@@ -380,7 +379,7 @@ def find_section(
self,
section: str,
- ) -> Optional[QModelIndex]:
+ ) -> QModelIndex | None:
'''
Find the *first* depth = 1 section matching ``section`` in
the tree and return its index.
@@ -504,7 +503,7 @@ def set_section_entries(
def show_matches(
self,
- wh: Optional[tuple[float, float]] = None,
+ wh: tuple[float, float] | None = None,
) -> None:
@@ -529,7 +528,7 @@ def __init__(
self,
parent: QWidget,
godwidget: QWidget,
- view: Optional[CompleterView] = None,
+ view: CompleterView | None = None,
**kwargs,
) -> None:
@@ -708,7 +707,7 @@ async def chart_current_item(
self,
clear_to_cache: bool = True,
- ) -> Optional[str]:
+ ) -> str | None:
'''
Attempt to load and switch the current selected
completion result to the affiliated chart app.
@@ -1167,7 +1166,7 @@ async def register_symbol_search(
provider_name: str,
search_routine: Callable,
- pause_period: Optional[float] = None,
+ pause_period: float | None = None,
) -> AsyncIterator[dict]:
diff --git a/piker/ui/_style.py b/piker/ui/_style.py
index 52ac753a6..67f14a93d 100644
--- a/piker/ui/_style.py
+++ b/piker/ui/_style.py
@@ -18,7 +18,7 @@
Qt UI styling.
'''
-from typing import Optional, Dict
+from typing import Dict
import math
import pyqtgraph as pg
@@ -52,7 +52,7 @@ def __init__(
# TODO: move to config
name: str = 'Hack',
font_size: str = 'default',
- # size_in_inches: Optional[float] = None,
+
) -> None:
self.name = name
self._qfont = QtGui.QFont(name)
@@ -91,13 +91,14 @@ def scale(self) -> float:
def px_size(self) -> int:
return self._qfont.pixelSize()
- def configure_to_dpi(self, screen: Optional[QtGui.QScreen] = None):
- """Set an appropriately sized font size depending on the screen DPI.
+ def configure_to_dpi(self, screen: QtGui.QScreen | None = None):
+ '''
+ Set an appropriately sized font size depending on the screen DPI.
If we end up needing to generalize this more here there are resources
listed in the script in ``snippets/qt_screen_info.py``.
- """
+ '''
if screen is None:
screen = self.screen
diff --git a/piker/ui/_window.py b/piker/ui/_window.py
index a2c432616..0fc87c24e 100644
--- a/piker/ui/_window.py
+++ b/piker/ui/_window.py
@@ -23,7 +23,6 @@
import time
from typing import (
Callable,
- Optional,
Union,
)
import uuid
@@ -64,9 +63,9 @@ def open_status(
self,
msg: str,
- final_msg: Optional[str] = None,
+ final_msg: str | None = None,
clear_on_next: bool = False,
- group_key: Optional[Union[bool, str]] = False,
+ group_key: Union[bool, str] | None = False,
) -> Union[Callable[..., None], str]:
'''
@@ -178,11 +177,11 @@ def __init__(self, parent=None):
self.setWindowTitle(self.title)
# set by runtime after `trio` is engaged.
- self.godwidget: Optional[GodWidget] = None
+ self.godwidget: GodWidget | None = None
self._status_bar: QStatusBar = None
self._status_label: QLabel = None
- self._size: Optional[tuple[int, int]] = None
+ self._size: tuple[int, int] | None = None
@property
def mode_label(self) -> QLabel:
@@ -289,7 +288,7 @@ def current_screen(self) -> QScreen:
def configure_to_desktop(
self,
- size: Optional[tuple[int, int]] = None,
+ size: tuple[int, int] | None = None,
) -> None:
'''
diff --git a/piker/ui/cli.py b/piker/ui/cli.py
index a72c2f5c9..15b3e9f61 100644
--- a/piker/ui/cli.py
+++ b/piker/ui/cli.py
@@ -24,7 +24,7 @@
from ..cli import cli
from .. import watchlists as wl
-from .._daemon import maybe_spawn_brokerd
+from ..service import maybe_spawn_brokerd
_config_dir = click.get_app_dir('piker')
@@ -181,9 +181,6 @@ def chart(
'debug_mode': pdb,
'loglevel': tractorloglevel,
'name': 'chart',
- 'enable_modules': [
- 'piker.clearing._client'
- ],
'registry_addr': config.get('registry_addr'),
},
)
diff --git a/piker/ui/order_mode.py b/piker/ui/order_mode.py
index 4a194a79f..cf5f53b1b 100644
--- a/piker/ui/order_mode.py
+++ b/piker/ui/order_mode.py
@@ -25,7 +25,6 @@
from pprint import pformat
import time
from typing import (
- Optional,
Callable,
Any,
TYPE_CHECKING,
@@ -129,7 +128,7 @@ class OrderMode:
trackers: dict[str, PositionTracker]
# switched state, the current position
- current_pp: Optional[PositionTracker] = None
+ current_pp: PositionTracker | None = None
active: bool = False
name: str = 'order'
dialogs: dict[str, Dialog] = field(default_factory=dict)
@@ -139,7 +138,7 @@ class OrderMode:
'buy': 'buy_green',
'sell': 'sell_red',
}
- _staged_order: Optional[Order] = None
+ _staged_order: Order | None = None
def on_level_change_update_next_order_info(
self,
@@ -180,7 +179,7 @@ def on_level_change_update_next_order_info(
def new_line_from_order(
self,
order: Order,
- chart: Optional[ChartPlotWidget] = None,
+ chart: ChartPlotWidget | None = None,
**line_kwargs,
) -> LevelLine:
@@ -340,7 +339,7 @@ def stage_order(
def submit_order(
self,
send_msg: bool = True,
- order: Optional[Order] = None,
+ order: Order | None = None,
) -> Dialog:
'''
@@ -452,7 +451,7 @@ def order_line_modify_complete(
def on_submit(
self,
uuid: str,
- order: Optional[Order] = None,
+ order: Order | None = None,
) -> Dialog:
'''
@@ -496,7 +495,7 @@ def on_fill(
price: float,
time_s: float,
- pointing: Optional[str] = None,
+ pointing: str | None = None,
) -> None:
'''
diff --git a/piker/ui/view_mode.py b/piker/ui/view_mode.py
new file mode 100644
index 000000000..ecb62557a
--- /dev/null
+++ b/piker/ui/view_mode.py
@@ -0,0 +1,899 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+'''
+Overlay (aka multi-chart) UX machinery.
+
+'''
+from __future__ import annotations
+from typing import (
+ Any,
+ Literal,
+ TYPE_CHECKING,
+)
+
+import numpy as np
+import pendulum
+import pyqtgraph as pg
+
+from ..data.types import Struct
+from ..data._pathops import slice_from_time
+from ..log import get_logger
+from .._profile import Profiler
+
+if TYPE_CHECKING:
+ from ._chart import ChartPlotWidget
+ from ._dataviz import Viz
+ from ._interaction import ChartView
+
+
+log = get_logger(__name__)
+
+
+class OverlayT(Struct):
+ '''
+ An overlay co-domain range transformer.
+
+ Used to translate and apply a range from one y-range
+ to another based on a returns ratio:
+
+ R(ymn, ymx, yref) = (ymx - yref)/yref
+
+ which gives the percent-return multiplier, and
+
+ ymx_t = yref * (1 + R)
+
+ which gives the inverse to translate to the same value
+ in the target co-domain.
+
+ '''
+ viz: Viz | None = None
+ start_t: float | None = None
+
+ # % "range" computed from some ref value to the mn/mx
+ rng: float | None = None
+ in_view: np.ndarray | None = None
+
+ # the pinned-minor-curve adjusted mn/mx value for the major
+ # dispersion curve; needed since one series being shorter and
+ # the pin + scaling from that pin point can force the original
+ # range to increase.
+ y_val: float | None = None
+
+ def apply_r(
+ self,
+ y_ref: float, # reference value for dispersion metric
+
+ ) -> float:
+ return y_ref * (1 + self.rng)
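+
+ # NOTE (sketch): a quick worked example of the transform
+ # (not part of the runtime API): with yref=100 and ymx=110,
+ # R = (110 - 100)/100 = 0.1, so applying that range to
+ # another curve's reference, say y_ref=50, gives the
+ # equivalent max in its co-domain:
+ #
+ # >>> t = OverlayT(rng=0.1)
+ # >>> t.apply_r(50.0)
+ # 55.0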
+
+
+def intersect_from_longer(
+ start_t_first: float,
+ in_view_first: np.ndarray,
+
+ start_t_second: float,
+ in_view_second: np.ndarray,
+ step: float,
+
+) -> tuple | bool:
+
+ tdiff = start_t_first - start_t_second
+
+ if tdiff == 0:
+ return False
+
+ i: int = 0
+
+ # first time series has a "later" first time stamp than the 2nd.
+ # aka the 1st is "shorter" than the 2nd.
+ if tdiff > 0:
+ longer = in_view_second
+ find_t = start_t_first
+ i = 1
+
+ # second time series has a "later" first time stamp than the 1st.
+ # aka the 2nd is "shorter" than the 1st.
+ elif tdiff < 0:
+ longer = in_view_first
+ find_t = start_t_second
+ i = 0
+
+ slc = slice_from_time(
+ arr=longer,
+ start_t=find_t,
+ stop_t=find_t,
+ step=step,
+ )
+ return (
+ longer[slc.start],
+ find_t,
+ i,
+ )
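+
+ # NOTE (sketch): the `in_view_*` inputs are structured arrays
+ # with a 'time' field (as held by `Viz.vs.in_view`); a caller
+ # might do (hypothetical `iv_a`/`iv_b` arrays):
+ #
+ # row, t, i = intersect_from_longer(
+ # t0_a, iv_a,
+ # t0_b, iv_b,
+ # step=1,
+ # )
+ #
+ # where `row` is the longer series' datum at the shorter
+ # series' start time `t` and `i` indexes the longer input.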
+
+
+def _maybe_calc_yrange(
+ viz: Viz,
+ yrange_kwargs: dict[Viz, dict[str, Any]],
+ profiler: Profiler,
+ chart_name: str,
+
+) -> tuple[
+ slice,
+ dict,
+] | None:
+
+ if not viz.render:
+ return
+
+ # pass in no array which will read and render from the last
+ # passed array (normally provided by the display loop.)
+ in_view, i_read_range, _ = viz.update_graphics()
+
+ if not in_view:
+ return
+
+ profiler(f'{viz.name}@{chart_name} `Viz.update_graphics()`')
+
+ # check if explicit yrange (kwargs) was passed in by the caller
+ yrange_kwargs = yrange_kwargs.get(viz) if yrange_kwargs else None
+ if yrange_kwargs is not None:
+ read_slc = slice(*i_read_range)
+
+ else:
+ out = viz.maxmin(i_read_range=i_read_range)
+ if out is None:
+ log.warning(f'No yrange provided for {viz.name}!?')
+ return
+ (
+ _, # ixrng,
+ read_slc,
+ yrange
+ ) = out
+ profiler(f'{viz.name}@{chart_name} `Viz.maxmin()`')
+ yrange_kwargs = {'yrange': yrange}
+
+ return (
+ read_slc,
+ yrange_kwargs,
+ )
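+
+ # NOTE (sketch): callers unpack the result and forward the
+ # computed (or explicitly passed) yrange kwargs on to the
+ # view box, eg:
+ #
+ # out = _maybe_calc_yrange(viz, yrange_kwargs, profiler, name)
+ # if out is not None:
+ # read_slc, yr_kwargs = out
+ # viz.plot.vb._set_yrange(**yr_kwargs)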
+
+
+def overlay_viewlists(
+ active_viz: Viz,
+ plots: dict[str, ChartPlotWidget],
+ profiler: Profiler,
+
+ # public config ctls
+ do_linked_charts: bool = True,
+ do_overlay_scaling: bool = True,
+ yrange_kwargs: dict[
+ str,
+ tuple[float, float],
+ ] | None = None,
+
+ method: Literal[
+ 'loglin_ref_to_curve',
+ 'loglin_ref_to_first',
+ 'mxmn',
+ 'solo',
+
+ ] = 'loglin_ref_to_curve',
+
+ # internal debug
+ debug_print: bool = False,
+
+) -> None:
+ '''
+ Calculate and apply y-domain (axis y-range) multi-curve overlay
+ adjustments to a set of ``plots`` based on the requested ``method``.
+
+ '''
+ chart_name: str
+ chart: ChartPlotWidget
+ for chart_name, chart in plots.items():
+
+ overlay_viz_items = chart._vizs.items()
+
+ # Common `PlotItem` maxmin table; presumes that some path
+ # graphics (and thus their backing data sets) are in the
+ # same co-domain and view box (since they were added as
+ # separate graphics objects to a common plot) and thus can
+ # be sorted as one set per plot.
+ mxmns_by_common_pi: dict[
+ pg.PlotItem,
+ tuple[float, float],
+ ] = {}
+
+ # proportional group auto-scaling per overlay set.
+ # -> loop through overlays on each multi-chart widget
+ # and scale all y-ranges based on autoscale config.
+ # -> for any "group" overlay we want to dispersion normalize
+ # and scale minor charts onto the major chart: the chart
+ # with the most dispersion in the set.
+
+ # ONLY auto-yrange the viz mapped to THIS view box
+ if (
+ not do_overlay_scaling
+ or len(overlay_viz_items) < 2
+ ):
+ viz = active_viz
+ out = _maybe_calc_yrange(
+ viz,
+ yrange_kwargs,
+ profiler,
+ chart_name,
+ )
+
+ if out is None:
+ continue
+
+ read_slc, yrange_kwargs = out
+ viz.plot.vb._set_yrange(**yrange_kwargs)
+ profiler(f'{viz.name}@{chart_name} single curve yrange')
+
+ if debug_print:
+ print(f'ONLY ranging THIS viz: {viz.name}')
+
+ # don't iterate overlays, just move to next chart
+ continue
+
+ if debug_print:
+ divstr = '#'*46
+ print(
+ f'BEGIN UX GRAPHICS CYCLE: @{chart_name}\n'
+ +
+ divstr
+ +
+ '\n'
+ )
+
+ # create a group overlay log-linearized y-range transform to
+ # track and eventually inverse transform all overlay curves
+ # to a common target max dispersion range.
+ dnt = OverlayT()
+ upt = OverlayT()
+
+ # collect certain flows which have graphics objects **in
+ # separate plots/viewboxes** into groups and do a common calc
+ # to determine auto-ranging input for `._set_yrange()`.
+ # this is primarily used for our so called "log-linearized
+ # multi-plot" overlay technique.
+        overlay_table: dict[
+            float,
+            tuple[
+                ChartView,
+                Viz,
+                float,  # y start (reference datum)
+                float,  # y min
+                float,  # y max
+                slice,  # in-view array slice
+                np.ndarray,  # in-view array
+                float,  # return-up scalar
+                float,  # return-down scalar
+            ],
+        ] = {}
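+        # entries are written in the scan loop below and consumed in
+        # the transform phase: the max-dispersion ("major") entry via
+        # ``max(overlay_table)``, the rest in ``reversed()`` order.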
+
+ # multi-curve overlay processing stage
+ for name, viz in overlay_viz_items:
+
+ out = _maybe_calc_yrange(
+ viz,
+ yrange_kwargs,
+ profiler,
+ chart_name,
+ )
+ if out is None:
+ continue
+
+ read_slc, yrange_kwargs = out
+ yrange = yrange_kwargs['yrange']
+ pi = viz.plot
+
+ # handle multiple graphics-objs per viewbox cases
+ mxmn = mxmns_by_common_pi.get(pi)
+ if mxmn:
+ yrange = mxmns_by_common_pi[pi] = (
+ min(yrange[0], mxmn[0]),
+ max(yrange[1], mxmn[1]),
+ )
+
+ else:
+ mxmns_by_common_pi[pi] = yrange
+
+ profiler(f'{viz.name}@{chart_name} common pi sort')
+
+ # non-overlay group case
+ if (
+ not viz.is_ohlc
+ or method == 'solo'
+ ):
+ pi.vb._set_yrange(yrange=yrange)
+ profiler(
+ f'{viz.name}@{chart_name} simple std `._set_yrange()`'
+ )
+ continue
+
+ # handle overlay log-linearized group scaling cases
+ # TODO: a better predicate here, likely something
+ # to do with overlays and their settings..
+ # TODO: we probably eventually might want some other
+ # charts besides OHLC?
+ else:
+ ymn, ymx = yrange
+
+ # determine start datum in view
+ in_view = viz.vs.in_view
+ if in_view.size < 2:
+ if debug_print:
+ print(f'{viz.name} not in view?')
+ continue
+
+ row_start = in_view[0]
+ if viz.is_ohlc:
+ y_ref = row_start['open']
+ else:
+ y_ref = row_start[viz.name]
+
+ profiler(f'{viz.name}@{chart_name} MINOR curve median')
+
+ key = 'open' if viz.is_ohlc else viz.name
+ start_t = row_start['time']
+
+ # returns scalars
+ r_up = (ymx - y_ref) / y_ref
+ r_down = (ymn - y_ref) / y_ref
+ disp = r_up - r_down
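+                # e.g. with y_ref=100, ymn=95, ymx=110:
+                #   r_up = (110 - 100) / 100 = 0.10,
+                #   r_down = (95 - 100) / 100 = -0.05,
+                #   disp = 0.10 - (-0.05) = 0.15 (a 15% full swing)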
+
+ msg = (
+ f'Viz[{viz.name}][{key}]: @{chart_name}\n'
+ f' .yrange = {viz.vs.yrange}\n'
+ f' .xrange = {viz.vs.xrange}\n\n'
+ f'start_t: {start_t}\n'
+ f'y_ref: {y_ref}\n'
+ f'ymn: {ymn}\n'
+ f'ymx: {ymx}\n'
+ f'r_up: {r_up}\n'
+ f'r_down: {r_down}\n'
+ f'(full) disp: {disp}\n'
+ )
+ profiler(msg)
+ if debug_print:
+ print(msg)
+
+ # track the "major" curve as the curve with most
+ # dispersion.
+ if (
+ dnt.rng is None
+ or (
+ r_down < dnt.rng
+ and r_down < 0
+ )
+ ):
+ dnt.viz = viz
+ dnt.rng = r_down
+ dnt.in_view = in_view
+ dnt.start_t = in_view[0]['time']
+ dnt.y_val = ymn
+
+ profiler(f'NEW DOWN: {viz.name}@{chart_name} r: {r_down}')
+ else:
+                    # the minor is in the down-swing range so check
+                    # that if we apply the current rng to the minor it
+                    # doesn't go outside the current range for the
+                    # major; otherwise we recompute the minor's range
+                    # (when adjusted for its intersect point) to be the
+                    # new major's range.
+ intersect = intersect_from_longer(
+ dnt.start_t,
+ dnt.in_view,
+ start_t,
+ in_view,
+ viz.index_step(),
+ )
+ profiler(f'{viz.name}@{chart_name} intersect by t')
+
+ if intersect:
+ longer_in_view, _t, i = intersect
+
+ scaled_mn = dnt.apply_r(y_ref)
+ if scaled_mn > ymn:
+                            # after major curve scaling we detected
+                            # that the minor curve is still out of
+                            # range so we need to adjust the major's
+                            # range to include the new composed range.
+ y_maj_ref = longer_in_view[key]
+ new_major_ymn = y_maj_ref * (1 + r_down)
+
+ # rewrite the major range to the new
+ # minor-pinned-to-major range and mark
+ # the transform as "virtual".
+ msg = (
+ f'EXPAND DOWN bc {viz.name}@{chart_name}\n'
+ f'y_start epoch time @ {_t}:\n'
+ f'y_maj_ref @ {_t}: {y_maj_ref}\n'
+ f'R: {dnt.rng} -> {r_down}\n'
+ f'MN: {dnt.y_val} -> {new_major_ymn}\n'
+ )
+ dnt.rng = r_down
+ dnt.y_val = new_major_ymn
+ profiler(msg)
+ if debug_print:
+ print(msg)
+
+                # is the current up `OverlayT` not yet defined or is
+                # the current `r_up` greater than the previous max?
+ if (
+ upt.rng is None
+ or (
+ r_up > upt.rng
+ and r_up > 0
+ )
+ ):
+ upt.rng = r_up
+ upt.viz = viz
+ upt.in_view = in_view
+ upt.start_t = in_view[0]['time']
+ upt.y_val = ymx
+ profiler(f'NEW UP: {viz.name}@{chart_name} r: {r_up}')
+
+ else:
+ intersect = intersect_from_longer(
+ upt.start_t,
+ upt.in_view,
+ start_t,
+ in_view,
+ viz.index_step(),
+ )
+ profiler(f'{viz.name}@{chart_name} intersect by t')
+
+ if intersect:
+ longer_in_view, _t, i = intersect
+
+                        # after major curve scaling, check whether the
+                        # minor curve is still out of range; if so we
+                        # need to adjust the major's range to include
+                        # the new composed range.
+ scaled_mx = upt.apply_r(y_ref)
+ if scaled_mx < ymx:
+ y_maj_ref = longer_in_view[key]
+ new_major_ymx = y_maj_ref * (1 + r_up)
+
+ # rewrite the major range to the new
+ # minor-pinned-to-major range and mark
+ # the transform as "virtual".
+ msg = (
+ f'EXPAND UP bc {viz.name}@{chart_name}:\n'
+ f'y_maj_ref @ {_t}: {y_maj_ref}\n'
+ f'R: {upt.rng} -> {r_up}\n'
+ f'MX: {upt.y_val} -> {new_major_ymx}\n'
+ )
+ upt.rng = r_up
+ upt.y_val = new_major_ymx
+ profiler(msg)
+                            if debug_print:
+                                print(msg)
+
+            # register curves by a "full" dispersion metric for
+            # later sort order in the overlay (technique)
+            # application loop below.
+ overlay_table[disp] = (
+ viz.plot.vb,
+ viz,
+ y_ref,
+ ymn,
+ ymx,
+ read_slc,
+ in_view,
+ r_up,
+ r_down,
+ )
+ profiler(f'{viz.name}@{chart_name} yrange scan complete')
+
+ # __ END OF scan phase (loop) __
+
+        # NOTE: if there were no overlay charts
+        # detected/collected (could be either no group detected or
+        # a chart with a single symbol, thus a single viz/overlay)
+        # then we ONLY set the lone chart's (viz) yrange and short
+        # circuit to the next chart in the linked charts loop. IOW
+        # there's no reason to go through the overlay dispersion
+        # scaling in the next loop below when only one curve is
+        # detected.
+ if (
+ not mxmns_by_common_pi
+ and len(overlay_table) < 2
+ ):
+ if debug_print:
+ print(f'ONLY ranging major: {viz.name}')
+
+ out = _maybe_calc_yrange(
+ viz,
+ yrange_kwargs,
+ profiler,
+ chart_name,
+ )
+ if out is None:
+ continue
+
+ read_slc, yrange_kwargs = out
+ viz.plot.vb._set_yrange(**yrange_kwargs)
+ profiler(f'{viz.name}@{chart_name} single curve yrange')
+
+ # move to next chart in linked set since
+ # no overlay transforming is needed.
+ continue
+
+ elif (
+ mxmns_by_common_pi
+ and not overlay_table
+ ):
+ # move to next chart in linked set since
+ # no overlay transforming is needed.
+ continue
+
+ profiler('`Viz` curve (first) scan phase complete\n')
+
+ r_up_mx: float
+ r_dn_mn: float
+ mx_disp = max(overlay_table)
+
+ if debug_print:
+ # print overlay table in descending dispersion order
+ msg = 'overlays in dispersion order:\n'
+ for i, disp in enumerate(reversed(overlay_table)):
+ entry = overlay_table[disp]
+ msg += f' [{i}] {disp}: {entry[1].name}\n'
+
+ print(
+ 'TRANSFORM PHASE' + '-'*100 + '\n\n'
+ +
+ msg
+ )
+
+ if method == 'loglin_ref_to_curve':
+ mx_entry = overlay_table.pop(mx_disp)
+ else:
+        # TODO: for pin-to-first-in-view we need to *not* pop this
+        # from the table, but can we simplify the code below even more?
+ mx_entry = overlay_table[mx_disp]
+
+ (
+ mx_view, # viewbox
+ mx_viz, # viz
+ _, # y_ref
+ mx_ymn,
+ mx_ymx,
+ _, # read_slc
+ mx_in_view, # in_view array
+ r_up_mx,
+ r_dn_mn,
+ ) = mx_entry
+ mx_time = mx_in_view['time']
+ mx_xref = mx_time[0]
+
+ # conduct "log-linearized multi-plot" range transform
+ # calculations for curves detected as overlays in the previous
+ # loop:
+ # -> iterate all curves Ci in dispersion-measure sorted order
+ # going from smallest swing to largest via the
+ # ``overlay_table: dict``,
+ # -> match on overlay ``method: str`` provided by caller,
+ # -> calc y-ranges from each curve's time series and store in
+ # a final table ``scaled: dict`` for final application in the
+ # scaling loop; the final phase.
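+        # e.g. (sketch of the 'loglin_ref_to_curve' case below) a
+        # minor curve with ref datum ``yref`` at the intersect x-ref
+        # is pinned to the major's returns at that same x-ref:
+        #
+        #   ymn = yref * (1 + r_down_from_major_at_xref)
+        #   ymx = yref * (1 + r_up_from_major_at_xref)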
+ scaled: dict[
+ float,
+ tuple[Viz, float, float, float, float]
+ ] = {}
+
+ for full_disp in reversed(overlay_table):
+ (
+ view,
+ viz,
+ y_start,
+ y_min,
+ y_max,
+ read_slc,
+ minor_in_view,
+ r_up,
+ r_dn,
+ ) = overlay_table[full_disp]
+
+ key = 'open' if viz.is_ohlc else viz.name
+ xref = minor_in_view[0]['time']
+ match method:
+ # Pin this curve to the "major dispersion" (or other
+ # target) curve:
+ #
+            # - find the intersect datum and then scale according
+            #   to the returns log-lin transform 'at that intersect
+            #   reference datum'.
+ # - if the pinning/log-returns-based transform scaling
+ # results in this minor/pinned curve being out of
+ # view, adjust the scalars to match **this** curve's
+ # y-range to stay in view and then backpropagate that
+ # scaling to all curves, including the major-target,
+ # which were previously scaled before.
+ case 'loglin_ref_to_curve':
+
+ # calculate y-range scalars from the earliest
+ # "intersect" datum with the target-major
+ # (dispersion) curve so as to "pin" the curves
+ # in the y-domain at that spot.
+                    # NOTE: there are 2 cases for un-matched support
+                    # in the x-domain (where one series is shorter
+                    # than the other):
+                    # => major is longer than minor:
+                    #    - need to scale the minor *from* the first
+                    #      supported datum in both series.
+                    #
+                    # => major is shorter than minor:
+                    #    - need to scale the minor *from* the first
+                    #      supported datum in both series (the
+                    #      intersect x-value) but using the
+                    #      intersecting point from the minor **not**
+                    #      its first value in view!
+ yref = y_start
+
+ if mx_xref > xref:
+ (
+ xref_pin,
+ yref,
+ ) = viz.i_from_t(
+ mx_xref,
+ return_y=True,
+ )
+ xref_pin_dt = pendulum.from_timestamp(xref_pin)
+ xref = mx_xref
+
+ if debug_print:
+ print(
+ 'MAJOR SHORTER!!!\n'
+ f'xref: {xref}\n'
+ f'xref_pin: {xref_pin}\n'
+ f'xref_pin-dt: {xref_pin_dt}\n'
+ f'yref@xref_pin: {yref}\n'
+ )
+
+ # XXX: we need to handle not-in-view cases?
+ # still not sure why or when tf this happens..
+ mx_scalars = mx_viz.scalars_from_index(xref)
+ if mx_scalars is None:
+ continue
+ (
+ i_start,
+ y_ref_major,
+ r_up_from_major_at_xref,
+ r_down_from_major_at_xref,
+ ) = mx_scalars
+
+ if debug_print:
+ print(
+ 'MAJOR PIN SCALING\n'
+ f'mx_xref: {mx_xref}\n'
+ f'major i_start: {i_start}\n'
+ f'y_ref_major: {y_ref_major}\n'
+ f'r_up_from_major_at_xref '
+ f'{r_up_from_major_at_xref}\n'
+ f'r_down_from_major_at_xref: '
+ f'{r_down_from_major_at_xref}\n'
+ f'-----to minor-----\n'
+ f'xref: {xref}\n'
+ f'y_start: {y_start}\n'
+ f'yref: {yref}\n'
+ )
+ ymn = yref * (1 + r_down_from_major_at_xref)
+ ymx = yref * (1 + r_up_from_major_at_xref)
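+                    # e.g. yref=50 with the major's returns at the
+                    # x-ref being r_down=-0.05 and r_up=0.10 pins
+                    # this curve to ymn = 50 * 0.95 = 47.5 and
+                    # ymx = 50 * 1.10 = 55.0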
+
+ # if this curve's y-range is detected as **not
+ # being in view** after applying the
+ # target-major's transform, adjust the
+ # target-major curve's range to (log-linearly)
+ # include it (the extra missing range) by
+ # adjusting the y-mxmn to this new y-range and
+ # applying the inverse transform of the minor
+ # back on the target-major (and possibly any
+ # other previously-scaled-to-target/major, minor
+ # curves).
+ if ymn >= y_min:
+ ymn = y_min
+ r_dn_minor = (ymn - yref) / yref
+
+ # rescale major curve's y-max to include new
+ # range increase required by **this minor**.
+ mx_ymn = y_ref_major * (1 + r_dn_minor)
+ mx_viz.vs.yrange = mx_ymn, mx_viz.vs.yrange[1]
+
+ if debug_print:
+ print(
+ f'RESCALE {mx_viz.name} DUE TO {viz.name} '
+ f'ymn -> {y_min}\n'
+ f'-> MAJ ymn (w r_down: {r_dn_minor}) '
+ f'-> {mx_ymn}\n\n'
+ )
+ # rescale all already scaled curves to new
+ # increased range for this side as
+ # determined by ``y_min`` staying in view;
+ # re-set the `scaled: dict` entry to
+ # ensure that this minor curve will be
+ # entirely in view.
+ # TODO: re updating already-scaled minor curves
+ # - is there a faster way to do this by
+ # mutating state on some object instead?
+ for _view in scaled:
+ _viz, _yref, _ymn, _ymx, _xref = scaled[_view]
+ (
+ _,
+ _,
+ _,
+ r_down_from_out_of_range,
+ ) = mx_viz.scalars_from_index(_xref)
+
+ new_ymn = _yref * (1 + r_down_from_out_of_range)
+
+ scaled[_view] = (
+ _viz, _yref, new_ymn, _ymx, _xref)
+
+ if debug_print:
+ print(
+                                    f'RESCALE {_viz.name} ymn -> {new_ymn}\n'
+                                    f'RESCALE MAJ ymn -> {mx_ymn}\n'
+ )
+
+ # same as above but for minor being out-of-range
+ # on the upside.
+ if ymx <= y_max:
+ ymx = y_max
+ r_up_minor = (ymx - yref) / yref
+ mx_ymx = y_ref_major * (1 + r_up_minor)
+ mx_viz.vs.yrange = mx_viz.vs.yrange[0], mx_ymx
+
+ if debug_print:
+ print(
+ f'RESCALE {mx_viz.name} DUE TO {viz.name} '
+ f'ymx -> {y_max}\n'
+                            f'-> MAJ ymx (r_up: {r_up_minor}) '
+ f'-> {mx_ymx}\n\n'
+ )
+
+ for _view in scaled:
+ _viz, _yref, _ymn, _ymx, _xref = scaled[_view]
+ (
+ _,
+ _,
+ r_up_from_out_of_range,
+ _,
+ ) = mx_viz.scalars_from_index(_xref)
+
+ new_ymx = _yref * (1 + r_up_from_out_of_range)
+ scaled[_view] = (
+ _viz, _yref, _ymn, new_ymx, _xref)
+
+ if debug_print:
+ print(
+                                    f'RESCALE {_viz.name} ymx -> {new_ymx}'
+ )
+
+ # register all overlays for a final pass where we
+ # apply all pinned-curve y-range transform scalings.
+ scaled[view] = (viz, yref, ymn, ymx, xref)
+
+ if debug_print:
+ print(
+ f'Viz[{viz.name}]: @ {chart_name}\n'
+ f' .yrange = {viz.vs.yrange}\n'
+ f' .xrange = {viz.vs.xrange}\n\n'
+ f'xref: {xref}\n'
+ f'xref-dt: {pendulum.from_timestamp(xref)}\n'
+ f'y_min: {y_min}\n'
+ f'y_max: {y_max}\n'
+ f'RESCALING\n'
+ f'r dn: {r_down_from_major_at_xref}\n'
+ f'r up: {r_up_from_major_at_xref}\n'
+ f'ymn: {ymn}\n'
+ f'ymx: {ymx}\n'
+ )
+
+ # Pin all curves by their first datum in view to all
+ # others such that each curve's earliest datum provides the
+ # reference point for returns vs. every other curve in
+ # view.
+ case 'loglin_ref_to_first':
+ ymn = dnt.apply_r(y_start)
+ ymx = upt.apply_r(y_start)
+ view._set_yrange(yrange=(ymn, ymx))
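+                    # i.e. every curve's first-in-view datum y_start
+                    # is projected through the group-wide up/down
+                    # return ranges; e.g. y_start=200 with
+                    # dnt.rng=-0.08 and upt.rng=0.12 (assuming
+                    # ``.apply_r(y) == y * (1 + .rng)``) yields
+                    # a (184.0, 224.0) y-range.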
+
+ # Do not pin curves by log-linearizing their y-ranges,
+ # instead allow each curve to fully scale to the
+ # time-series in view's min and max y-values.
+ case 'mxmn':
+ view._set_yrange(yrange=(y_min, y_max))
+
+ case _:
+ raise RuntimeError(
+                        f'overlay ``method`` is invalid `{method}`'
+ )
+
+ # __ END OF transform calc phase (loop) __
+
+        # finally, scale the major target/dispersion curve to the
+        # (possibly re-scaled/modified) values which were set in the
+        # transform phase loop.
+ mx_view._set_yrange(yrange=(mx_ymn, mx_ymx))
+
+ if scaled:
+ if debug_print:
+ print(
+ 'SCALING PHASE' + '-'*100 + '\n\n'
+ '_________MAJOR INFO___________\n'
+ f'SIGMA MAJOR C: {mx_viz.name} -> {mx_disp}\n'
+ f'UP MAJOR C: {upt.viz.name} with disp: {upt.rng}\n'
+ f'DOWN MAJOR C: {dnt.viz.name} with disp: {dnt.rng}\n'
+ f'xref: {mx_xref}\n'
+ f'xref-dt: {pendulum.from_timestamp(mx_xref)}\n'
+ f'dn: {r_dn_mn}\n'
+ f'up: {r_up_mx}\n'
+ f'mx_ymn: {mx_ymn}\n'
+ f'mx_ymx: {mx_ymx}\n'
+ '------------------------------'
+ )
+
+ for (
+ view,
+ (viz, yref, ymn, ymx, xref)
+ ) in scaled.items():
+
+            # NOTE XXX: we have to set each curve's range once (and
+            # ONLY ONCE) here since we're doing this entire routine
+            # inside of a single render cycle (and apparently calling
+            # `ViewBox.setYRange()` multiple times within one cycle
+            # only takes the first call seriously...) XD
+ view._set_yrange(yrange=(ymn, ymx))
+ profiler(f'{viz.name}@{chart_name} log-SCALE minor')
+
+ if debug_print:
+ print(
+ '_________MINOR INFO___________\n'
+ f'Viz[{viz.name}]: @ {chart_name}\n'
+ f' .yrange = {viz.vs.yrange}\n'
+ f' .xrange = {viz.vs.xrange}\n\n'
+ f'xref: {xref}\n'
+ f'xref-dt: {pendulum.from_timestamp(xref)}\n'
+ f'y_start: {y_start}\n'
+ f'y min: {y_min}\n'
+ f'y max: {y_max}\n'
+ f'T scaled ymn: {ymn}\n'
+ f'T scaled ymx: {ymx}\n\n'
+ '--------------------------------\n'
+ )
+
+ # __ END OF overlay scale phase (loop) __
+
+ if debug_print:
+ print(
+ f'END UX GRAPHICS CYCLE: @{chart_name}\n'
+ +
+ divstr
+ +
+ '\n'
+ )
+
+ profiler(f'<{chart_name}>.interact_graphics_cycle()')
+
+ if not do_linked_charts:
+ break
+
+ profiler.finish()
diff --git a/tests/conftest.py b/tests/conftest.py
index 8218ec164..3a0afba24 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,19 +1,18 @@
from contextlib import asynccontextmanager as acm
from functools import partial
+import logging
import os
-from typing import AsyncContextManager
from pathlib import Path
-from shutil import rmtree
import pytest
import tractor
from piker import (
- # log,
config,
)
-from piker._daemon import (
+from piker.service import (
Services,
)
+from piker.log import get_console_log
from piker.clearing._client import open_ems
@@ -70,8 +69,24 @@ def ci_env() -> bool:
return _ci_env
+@pytest.fixture()
+def log(
+ request: pytest.FixtureRequest,
+ loglevel: str,
+) -> logging.Logger:
+ '''
+ Deliver a per-test-named ``piker.log`` instance.
+
+ '''
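+    # usage sketch: request the fixture by name in any test, e.g.
+    #
+    #   def test_foo(log):
+    #       log.info('per-test-named logger in action')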
+ return get_console_log(
+ level=loglevel,
+ name=request.node.name,
+ )
+
+
@acm
async def _open_test_pikerd(
+ tmpconfdir: str,
reg_addr: tuple[str, int] | None = None,
loglevel: str = 'warning',
**kwargs,
@@ -88,7 +103,7 @@ async def _open_test_pikerd(
'''
import random
- from piker._daemon import maybe_open_pikerd
+ from piker.service import maybe_open_pikerd
if reg_addr is None:
port = random.randint(6e3, 7e3)
@@ -98,7 +113,17 @@ async def _open_test_pikerd(
maybe_open_pikerd(
registry_addr=reg_addr,
loglevel=loglevel,
+
+ tractor_runtime_overrides={
+ 'piker_test_dir': tmpconfdir,
+ },
+
+ # tests may need to spawn containers dynamically
+ # or just in sequence per test, so we keep root.
+ drop_root_perms_for_ahab=False,
+
**kwargs,
+
) as service_manager,
):
# this proc/actor is the pikerd
@@ -120,18 +145,40 @@ async def _open_test_pikerd(
@pytest.fixture
def open_test_pikerd(
- request,
+ request: pytest.FixtureRequest,
+ tmp_path: Path,
loglevel: str,
):
+ tmpconfdir: Path = tmp_path / '_testing'
+ tmpconfdir.mkdir()
+ tmpconfdir_str: str = str(tmpconfdir)
+
+ # NOTE: on linux the tmp config dir is generally located at:
+ # /tmp/pytest-of-/pytest-/test_/
+ # the default `pytest` config ensures that only the last 4 test
+    # suite runs' dirs will be persisted, otherwise they are removed:
+ # https://docs.pytest.org/en/6.2.x/tmpdir.html#the-default-base-temporary-directory
+ print(f'CURRENT TEST CONF DIR: {tmpconfdir}')
yield partial(
_open_test_pikerd,
+        # pass in a unique temp dir for this test request
+        # so that we can have multiple tests running (maybe in parallel)
+        # without clobbering each other's config state.
+ tmpconfdir=tmpconfdir_str,
+
# bind in level from fixture, which is itself set by
# `--ll ` cli flag.
loglevel=loglevel,
)
+    # NOTE: the `tmp_path` fixture will wipe any files older than 3 test
+ # sessions by default:
+ # https://docs.pytest.org/en/6.2.x/tmpdir.html#the-default-base-temporary-directory
+ # BUT, if we wanted to always wipe conf dir and all contained files,
+ # rmtree(str(tmp_path))
+
# TODO: teardown checks such as,
# - no leaked subprocs or shm buffers
# - all requested container service are torn down
@@ -151,8 +198,9 @@ async def _open_test_pikerd_and_ems(
fqsn,
mode=mode,
loglevel=loglevel,
- ) as ems_services):
- yield (services, ems_services)
+ ) as ems_services,
+ ):
+ yield (services, ems_services)
@pytest.fixture
@@ -168,20 +216,4 @@ def open_test_pikerd_and_ems(
mode,
loglevel,
open_test_pikerd
- )
-
-
-@pytest.fixture(scope='module')
-def delete_testing_dir():
- '''
- This fixture removes the temp directory
- used for storing all config/ledger/pp data
- created during testing sessions. During test runs
- this file can be found in .config/piker/_testing
-
- '''
- yield
- app_dir = Path(config.get_app_dir('piker')).resolve()
- if app_dir.is_dir():
- rmtree(str(app_dir))
- assert not app_dir.is_dir()
+ )
diff --git a/tests/test_databases.py b/tests/test_databases.py
index 4eb444f33..554b09902 100644
--- a/tests/test_databases.py
+++ b/tests/test_databases.py
@@ -1,66 +1,124 @@
-import pytest
-import trio
-
from typing import AsyncContextManager
+import logging
-from piker._daemon import Services
-from piker.log import get_logger
+import trio
+from elasticsearch import (
+ Elasticsearch,
+ ConnectionError,
+)
+
+from piker.service import marketstore
+from piker.service import elastic
-from elasticsearch import Elasticsearch
-from piker.data import marketstore
def test_marketstore_startup_and_version(
open_test_pikerd: AsyncContextManager,
- loglevel,
+ loglevel: str,
):
-
'''
- Verify marketstore starts correctly
+ Verify marketstore tsdb starts up and we can
+ connect with a client to do basic API reqs.
'''
- log = get_logger(__name__)
-
async def main():
- # port = 5995
async with (
open_test_pikerd(
loglevel=loglevel,
tsdb=True
- ) as (s, i, pikerd_portal, services),
- marketstore.get_client() as client
+ ) as (
+ _, # host
+ _, # port
+ pikerd_portal,
+ services,
+ ),
):
+ # TODO: we should probably make this connection poll
+ # loop part of the `get_client()` implementation no?
+
+ # XXX NOTE: we use a retry-connect loop because it seems
+ # that if we connect *too fast* to a booting container
+ # instance (i.e. if mkts's IPC machinery isn't up early
+ # enough) the client will hang on req-resp submissions. So,
+ # instead we actually reconnect the client entirely in
+ # a loop until we get a response.
+ for _ in range(3):
+
+ # NOTE: default sockaddr is embedded within
+ async with marketstore.get_client() as client:
+
+ with trio.move_on_after(1) as cs:
+ syms = await client.list_symbols()
+
+ if cs.cancelled_caught:
+ continue
- assert (
- len(await client.server_version()) ==
- len('3862e9973da36cfc6004b88172c08f09269aaf01')
- )
+ # should be an empty db (for now) since we spawn
+                # marketstore in an ephemeral test-harness dir.
+ assert not syms
+ print(f'RX syms resp: {syms}')
+ assert (
+ len(await client.server_version()) ==
+ len('3862e9973da36cfc6004b88172c08f09269aaf01')
+ )
+ print('VERSION CHECKED')
+
+ break # get out of retry-connect loop
trio.run(main)
def test_elasticsearch_startup_and_version(
open_test_pikerd: AsyncContextManager,
- loglevel,
+ loglevel: str,
+ log: logging.Logger,
):
'''
- Verify elasticsearch starts correctly
+ Verify elasticsearch starts correctly (like at some point before
+ infinity time)..
'''
-
- log = get_logger(__name__)
-
async def main():
port = 19200
- async with open_test_pikerd(
- loglevel=loglevel,
- es=True
- ) as (s, i, pikerd_portal, services):
-
- es = Elasticsearch(hosts=[f'http://localhost:{port}'])
- assert es.info()['version']['number'] == '7.17.4'
-
+ async with (
+ open_test_pikerd(
+ loglevel=loglevel,
+ es=True
+ ) as (
+ _, # host
+ _, # port
+ pikerd_portal,
+ services,
+ ),
+ ):
+ # TODO: much like the above connect loop for mkts, we should
+ # probably make this sync start part of the
+ # ``open_client()`` implementation?
+ for i in range(240):
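+                # i.e. retry for up to ~4 mins (240 iterations with a
+                # 1s sleep between failed attempts) which should cover
+                # even a slow container cold-start.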
+ with Elasticsearch(
+ hosts=[f'http://localhost:{port}']
+ ) as es:
+ try:
+
+ resp = es.info()
+ assert (
+ resp['version']['number']
+ ==
+ elastic._config['version']
+ )
+ print(
+                        "elasticsearch finally connected!\n"
+ f'resp: {resp}'
+ )
+ break
+
+ except ConnectionError:
+ log.exception(
+                        f'RETRYING client connection; attempt {i}!'
+ )
+ await trio.sleep(1)
+ continue
trio.run(main)
diff --git a/tests/test_paper.py b/tests/test_paper.py
index 8da1cf122..53e03f472 100644
--- a/tests/test_paper.py
+++ b/tests/test_paper.py
@@ -17,7 +17,6 @@
from piker.log import get_logger
from piker.clearing._messages import Order
from piker.pp import (
- open_trade_ledger,
open_pps,
)
@@ -42,18 +41,19 @@ async def _async_main(
price: int = 30000,
executions: int = 1,
size: float = 0.01,
+
# Assert options
assert_entries: bool = False,
assert_pps: bool = False,
assert_zeroed_pps: bool = False,
assert_msg: bool = False,
+
) -> None:
'''
Start piker, place a trade and assert data in
pps stream, ledger and position table.
'''
-
oid: str = ''
last_msg = {}
@@ -136,7 +136,7 @@ def _assert(
def _run_test_and_check(fn):
- '''
+ '''
Close position and assert empty position in pps
'''
@@ -150,8 +150,7 @@ def _run_test_and_check(fn):
def test_buy(
- open_test_pikerd_and_ems: AsyncContextManager,
- delete_testing_dir
+ open_test_pikerd_and_ems: AsyncContextManager,
):
'''
Enter a trade and assert entries are made in pps and ledger files.
@@ -177,8 +176,7 @@ def test_buy(
def test_sell(
- open_test_pikerd_and_ems: AsyncContextManager,
- delete_testing_dir
+ open_test_pikerd_and_ems: AsyncContextManager,
):
'''
Sell position and ensure pps are zeroed.
@@ -201,13 +199,13 @@ def test_sell(
),
)
+
def test_multi_sell(
- open_test_pikerd_and_ems: AsyncContextManager,
- delete_testing_dir
+ open_test_pikerd_and_ems: AsyncContextManager,
):
'''
- Make 5 market limit buy orders and
- then sell 5 slots at the same price.
+ Make 5 market limit buy orders and
+ then sell 5 slots at the same price.
Finally, assert cleared positions.
'''
diff --git a/tests/test_services.py b/tests/test_services.py
index 763b438e5..29e613e34 100644
--- a/tests/test_services.py
+++ b/tests/test_services.py
@@ -9,8 +9,7 @@
import trio
import tractor
-from piker.log import get_logger
-from piker._daemon import (
+from piker.service import (
find_service,
Services,
)