diff --git a/piker/_daemon.py b/piker/_daemon.py
index b9c74853f..f4acf9f3b 100644
--- a/piker/_daemon.py
+++ b/piker/_daemon.py
@@ -257,7 +257,7 @@ async def open_piker_runtime(
# and spawn the service tree distributed per that.
start_method: str = 'trio',
- tractor_kwargs: dict = {},
+ **tractor_kwargs,
) -> tuple[
tractor.Actor,
diff --git a/piker/_profile.py b/piker/_profile.py
index 5262ffc63..07a20c411 100644
--- a/piker/_profile.py
+++ b/piker/_profile.py
@@ -152,9 +152,14 @@ def __new__(
# don't do anything
return cls._disabledProfiler
- # create an actual profiling object
cls._depth += 1
obj = super(Profiler, cls).__new__(cls)
+ obj._msgs = []
+
+ # create an actual profiling object
+ if cls._depth < 1:
+ cls._msgs = []
+
obj._name = msg or func_qualname
obj._delayed = delayed
obj._markCount = 0
@@ -174,8 +179,12 @@ def __call__(self, msg=None):
self._markCount += 1
newTime = perf_counter()
+ tot_ms = (newTime - self._firstTime) * 1000
ms = (newTime - self._lastTime) * 1000
- self._newMsg(" %s: %0.4f ms", msg, ms)
+ self._newMsg(
+ f" {msg}: {ms:0.4f}, tot:{tot_ms:0.4f}"
+ )
+
self._lastTime = newTime
def mark(self, msg=None):
diff --git a/piker/brokers/ib/api.py b/piker/brokers/ib/api.py
index dda79d4e1..bfa66a9db 100644
--- a/piker/brokers/ib/api.py
+++ b/piker/brokers/ib/api.py
@@ -161,10 +161,17 @@ def __init__(self):
'CME',
'CMECRYPTO',
'COMEX',
- 'CMDTY', # special name case..
+ # 'CMDTY', # special name case..
'CBOT', # (treasury) yield futures
)
+_adhoc_cmdty_set = {
+ # metals
+ # https://misc.interactivebrokers.com/cstools/contract_info/v3.10/index.php?action=Conid%20Info&wlId=IB&conid=69067924
+ 'xauusd.cmdty', # london gold spot ^
+ 'xagusd.cmdty', # silver spot
+}
+
_adhoc_futes_set = {
# equities
@@ -186,16 +193,12 @@ def __init__(self):
# raw
'lb.comex', # random len lumber
- # metals
- # https://misc.interactivebrokers.com/cstools/contract_info/v3.10/index.php?action=Conid%20Info&wlId=IB&conid=69067924
- 'xauusd.cmdty', # london gold spot ^
'gc.comex',
'mgc.comex', # micro
# oil & gas
'cl.comex',
- 'xagusd.cmdty', # silver spot
'ni.comex', # silver futes
'qi.comex', # mini-silver futes
@@ -259,6 +262,7 @@ def __init__(self):
'FUNDSERV',
'SWB2',
'PSE',
+ 'PHLX',
}
_enters = 0
@@ -514,15 +518,18 @@ async def search_symbols(
except ConnectionError:
return {}
+ dict_results: dict[str, dict] = {}
for key, deats in results.copy().items():
tract = deats.contract
sym = tract.symbol
sectype = tract.secType
+ deats_dict = asdict(deats)
if sectype == 'IND':
- results[f'{sym}.IND'] = tract
results.pop(key)
+ key = f'{sym}.IND'
+ results[key] = tract
# exch = tract.exchange
# XXX: add back one of these to get the weird deadlock
@@ -559,20 +566,25 @@ async def search_symbols(
# if cons:
all_deats = await self.con_deats([con])
results |= all_deats
+ for key in all_deats:
+ dict_results[key] = asdict(all_deats[key])
# forex pairs
elif sectype == 'CASH':
+ results.pop(key)
dst, src = tract.localSymbol.split('.')
pair_key = "/".join([dst, src])
exch = tract.exchange.lower()
- results[f'{pair_key}.{exch}'] = tract
- results.pop(key)
+ key = f'{pair_key}.{exch}'
+ results[key] = tract
# XXX: again seems to trigger the weird tractor
# bug with the debugger..
# assert 0
- return results
+ dict_results[key] = deats_dict
+
+ return dict_results
async def get_fute(
self,
@@ -1036,7 +1048,11 @@ def con2fqsn(
# TODO: option symbol parsing and sane display:
symbol = con.localSymbol.replace(' ', '')
- case ibis.Commodity():
+ case (
+ ibis.Commodity()
+ # search API endpoint returns std con box..
+ | ibis.Contract(secType='CMDTY')
+ ):
# commodities and forex don't have an exchange name and
# no real volume so we have to calculate the price
suffix = con.secType
diff --git a/piker/ui/_pathops.py b/piker/data/_formatters.py
similarity index 52%
rename from piker/ui/_pathops.py
rename to piker/data/_formatters.py
index 807cde65f..f71e3e4a9 100644
--- a/piker/ui/_pathops.py
+++ b/piker/data/_formatters.py
@@ -14,7 +14,7 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
"""
-Super fast ``QPainterPath`` generation related operator routines.
+Pre-(path)-graphics formatted x/y nd/1d rendering subsystem.
"""
from __future__ import annotations
@@ -26,41 +26,21 @@
import msgspec
import numpy as np
from numpy.lib import recfunctions as rfn
-from numba import njit, float64, int64 # , optional
-# import pyqtgraph as pg
-# from PyQt5 import QtGui
-# from PyQt5.QtCore import QLineF, QPointF
-from ..data._sharedmem import (
+from ._sharedmem import (
ShmArray,
)
-# from .._profile import pg_profile_enabled, ms_slower_then
-from ._compression import (
- ds_m4,
+from ._pathops import (
+ path_arrays_from_ohlc,
)
if TYPE_CHECKING:
- from ._flows import (
- Renderer,
- Flow,
+ from ._dataviz import (
+ Viz,
)
from .._profile import Profiler
-def by_index_and_key(
- renderer: Renderer,
- array: np.ndarray,
- array_key: str,
- vr: tuple[int, int],
-
-) -> tuple[
- np.ndarray,
- np.ndarray,
- np.ndarray,
-]:
- return array['index'], array[array_key], 'all'
-
-
class IncrementalFormatter(msgspec.Struct):
'''
Incrementally updating, pre-path-graphics tracking, formatter.
@@ -72,45 +52,63 @@ class IncrementalFormatter(msgspec.Struct):
'''
shm: ShmArray
- flow: Flow
-
- # last read from shm (usually due to an update call)
- _last_read: tuple[
- int,
- int,
- np.ndarray
+ viz: Viz
- ]
+    # the value to be multiplied by any index into the x/y_1d arrays
+ # given the input index is based on the original source data array.
+ flat_index_ratio: float = 1
@property
- def last_read(self) -> tuple | None:
- return self._last_read
+ def index_field(self) -> 'str':
+ '''
+ Value (``str``) used to look up the "index series" from the
+ underlying source ``numpy`` struct-array; delegate directly to
+ the managing ``Viz``.
- def __repr__(self) -> str:
- msg = (
- f'{type(self)}: ->\n\n'
- f'fqsn={self.flow.name}\n'
- f'shm_name={self.shm.token["shm_name"]}\n\n'
+ '''
+ return self.viz.index_field
- f'last_vr={self._last_vr}\n'
- f'last_ivdr={self._last_ivdr}\n\n'
+ # Incrementally updated xy ndarray formatted data, a pre-1d
+ # format which is updated and cached independently of the final
+ # pre-graphics-path 1d format.
+ x_nd: Optional[np.ndarray] = None
+ y_nd: Optional[np.ndarray] = None
- f'xy_nd_start={self.xy_nd_start}\n'
- f'xy_nd_stop={self.xy_nd_stop}\n\n'
+ @property
+ def xy_nd(self) -> tuple[np.ndarray, np.ndarray]:
+ return (
+ self.x_nd[self.xy_slice],
+ self.y_nd[self.xy_slice],
)
- x_nd_len = 0
- y_nd_len = 0
- if self.x_nd is not None:
- x_nd_len = len(self.x_nd)
- y_nd_len = len(self.y_nd)
-
- msg += (
- f'x_nd_len={x_nd_len}\n'
- f'y_nd_len={y_nd_len}\n'
+ @property
+ def xy_slice(self) -> slice:
+ return slice(
+ self.xy_nd_start,
+ self.xy_nd_stop,
)
- return msg
+ # indexes which slice into the above arrays (which are allocated
+ # based on source data shm input size) and allow retrieving
+ # incrementally updated data.
+ xy_nd_start: int | None = None
+ xy_nd_stop: int | None = None
+
+ # TODO: eventually incrementally update 1d-pre-graphics path data?
+ x_1d: np.ndarray | None = None
+ y_1d: np.ndarray | None = None
+
+ # incremental view-change state(s) tracking
+ _last_vr: tuple[float, float] | None = None
+ _last_ivdr: tuple[float, float] | None = None
+
+ @property
+ def index_step_size(self) -> float:
+ '''
+ Readonly value computed on first ``.diff()`` call.
+
+ '''
+ return self.viz.index_step()
def diff(
self,
@@ -120,17 +118,13 @@ def diff(
np.ndarray,
np.ndarray,
]:
- (
- last_xfirst,
- last_xlast,
- last_array,
- last_ivl,
- last_ivr,
- last_in_view,
- ) = self.last_read
-
- # TODO: can the renderer just call ``Flow.read()`` directly?
- # unpack latest source data read
+ # TODO:
+ # - can the renderer just call ``Viz.read()`` directly? unpack
+ # latest source data read
+ # - eventually maybe we can implement some kind of
+ # transform on the ``QPainterPath`` that will more or less
+ # detect the diff in "elements" terms? update diff state since
+ # we've now rendered paths.
(
xfirst,
xlast,
@@ -140,41 +134,44 @@ def diff(
in_view,
) = new_read
+ index = array['index']
+
+ # if the first index in the read array is 0 then
+ # it means the source buffer has bee completely backfilled to
+ # available space.
+ src_start = index[0]
+ src_stop = index[-1] + 1
+
+ # these are the "formatted output data" indices
+ # for the pre-graphics arrays.
+ nd_start = self.xy_nd_start
+ nd_stop = self.xy_nd_stop
+
+ if (
+ nd_start is None
+ ):
+ assert nd_stop is None
+
+ # setup to do a prepend of all existing src history
+ nd_start = self.xy_nd_start = src_stop
+ # set us in a zero-to-append state
+ nd_stop = self.xy_nd_stop = src_stop
+
# compute the length diffs between the first/last index entry in
# the input data and the last indexes we have on record from the
# last time we updated the curve index.
- prepend_length = int(last_xfirst - xfirst)
- append_length = int(xlast - last_xlast)
+ prepend_length = int(nd_start - src_start)
+ append_length = int(src_stop - nd_stop)
# blah blah blah
# do diffing for prepend, append and last entry
return (
- slice(xfirst, last_xfirst),
+ slice(src_start, nd_start),
prepend_length,
append_length,
- slice(last_xlast, xlast),
+ slice(nd_stop, src_stop),
)
- # Incrementally updated xy ndarray formatted data, a pre-1d
- # format which is updated and cached independently of the final
- # pre-graphics-path 1d format.
- x_nd: Optional[np.ndarray] = None
- y_nd: Optional[np.ndarray] = None
-
- # indexes which slice into the above arrays (which are allocated
- # based on source data shm input size) and allow retrieving
- # incrementally updated data.
- xy_nd_start: int = 0
- xy_nd_stop: int = 0
-
- # TODO: eventually incrementally update 1d-pre-graphics path data?
- # x_1d: Optional[np.ndarray] = None
- # y_1d: Optional[np.ndarray] = None
-
- # incremental view-change state(s) tracking
- _last_vr: tuple[float, float] | None = None
- _last_ivdr: tuple[float, float] | None = None
-
def _track_inview_range(
self,
view_range: tuple[int, int],
@@ -223,8 +220,6 @@ def format_to_1d(
array_key: str,
profiler: Profiler,
- slice_to_head: int = -1,
- read_src_from_key: bool = True,
slice_to_inview: bool = True,
) -> tuple[
@@ -250,99 +245,81 @@ def format_to_1d(
post_slice,
) = self.diff(new_read)
+ # we first need to allocate xy data arrays
+ # from the source data.
if self.y_nd is None:
- # we first need to allocate xy data arrays
- # from the source data.
- self.x_nd, self.y_nd = self.allocate_xy_nd(
- shm,
- array_key,
- )
self.xy_nd_start = shm._first.value
self.xy_nd_stop = shm._last.value
- profiler('allocated xy history')
-
- if prepend_len:
- y_prepend = shm._array[pre_slice]
- if read_src_from_key:
- y_prepend = y_prepend[array_key]
-
- (
- new_y_nd,
- y_nd_slc,
-
- ) = self.incr_update_xy_nd(
- shm,
- array_key,
-
- # this is the pre-sliced, "normally expected"
- # new data that an updater would normally be
- # expected to process, however in some cases (like
- # step curves) the updater routine may want to do
- # the source history-data reading itself, so we pass
- # both here.
- y_prepend,
- pre_slice,
- prepend_len,
-
- self.xy_nd_start,
- self.xy_nd_stop,
- is_append=False,
- )
-
- # y_nd_view = self.y_nd[y_nd_slc]
- self.y_nd[y_nd_slc] = new_y_nd
- # if read_src_from_key:
- # y_nd_view[:][array_key] = new_y_nd
- # else:
- # y_nd_view[:] = new_y_nd
-
- self.xy_nd_start = shm._first.value
- profiler('prepended xy history: {prepend_length}')
-
- if append_len:
- y_append = shm._array[post_slice]
- if read_src_from_key:
- y_append = y_append[array_key]
-
- (
- new_y_nd,
- y_nd_slc,
-
- ) = self.incr_update_xy_nd(
+ self.x_nd, self.y_nd = self.allocate_xy_nd(
shm,
array_key,
-
- y_append,
- post_slice,
- append_len,
-
- self.xy_nd_start,
- self.xy_nd_stop,
- is_append=True,
)
- # self.y_nd[post_slice] = new_y_nd
- # self.y_nd[xy_slice or post_slice] = xy_data
- self.y_nd[y_nd_slc] = new_y_nd
- # if read_src_from_key:
- # y_nd_view[:][array_key] = new_y_nd
- # else:
- # y_nd_view[:] = new_y_nd
+ profiler('allocated xy history')
- self.xy_nd_stop = shm._last.value
- profiler('appened xy history: {append_length}')
+ # once allocated we do incremental pre/append
+ # updates from the diff with the source buffer.
+ else:
+ if prepend_len:
+
+ self.incr_update_xy_nd(
+ shm,
+ array_key,
+
+ # this is the pre-sliced, "normally expected"
+ # new data that an updater would normally be
+ # expected to process, however in some cases (like
+ # step curves) the updater routine may want to do
+ # the source history-data reading itself, so we pass
+ # both here.
+ shm._array[pre_slice],
+ pre_slice,
+ prepend_len,
+
+ self.xy_nd_start,
+ self.xy_nd_stop,
+ is_append=False,
+ )
+
+ self.xy_nd_start -= prepend_len
+                profiler(f'prepended xy history: {prepend_len}')
+
+ if append_len:
+ self.incr_update_xy_nd(
+ shm,
+ array_key,
+
+ shm._array[post_slice],
+ post_slice,
+ append_len,
+
+ self.xy_nd_start,
+ self.xy_nd_stop,
+ is_append=True,
+ )
+ self.xy_nd_stop += append_len
+                profiler(f'appended xy history: {append_len}')
+ # sanity
+ # slice_ln = post_slice.stop - post_slice.start
+ # assert append_len == slice_ln
view_changed: bool = False
view_range: tuple[int, int] = (ivl, ivr)
if slice_to_inview:
view_changed = self._track_inview_range(view_range)
array = in_view
- profiler(f'{self.flow.name} view range slice {view_range}')
+ profiler(f'{self.viz.name} view range slice {view_range}')
+
+        # TODO: we need to check if the last-datum-in-view is true
+        # and if so only slice the formatted output up to the 2nd
+        # last datum.
+ # hist = array[:slice_to_head]
- hist = array[:slice_to_head]
+ # XXX: WOA WTF TRACTOR DEBUGGING BUGGG
+ # assert 0
# xy-path data transform: convert source data to a format
# able to be passed to a `QPainterPath` rendering routine.
- if not len(hist):
+ if not len(array):
# XXX: this might be why the profiler only has exits?
return
@@ -350,10 +327,15 @@ def format_to_1d(
# x/y_data in the case where allocate_xy is
# defined?
x_1d, y_1d, connect = self.format_xy_nd_to_1d(
- hist,
+ array,
array_key,
view_range,
)
+ # cache/save last 1d outputs for use by other
+ # readers (eg. `Viz.draw_last_datum()` in the
+ # only-draw-last-uppx case).
+ self.x_1d = x_1d
+ self.y_1d = y_1d
# app_tres = None
# if append_len:
@@ -369,22 +351,17 @@ def format_to_1d(
# # assert (len(appended) - 1) == append_len
# # assert len(appended) == append_len
# print(
- # f'{self.flow.name} APPEND LEN: {append_len}\n'
- # f'{self.flow.name} APPENDED: {appended}\n'
- # f'{self.flow.name} app_tres: {app_tres}\n'
+ # f'{self.viz.name} APPEND LEN: {append_len}\n'
+ # f'{self.viz.name} APPENDED: {appended}\n'
+ # f'{self.viz.name} app_tres: {app_tres}\n'
# )
# update the last "in view data range"
if len(x_1d):
- self._last_ivdr = x_1d[0], x_1d[slice_to_head]
-
- # TODO: eventually maybe we can implement some kind of
- # transform on the ``QPainterPath`` that will more or less
- # detect the diff in "elements" terms?
- # update diff state since we've now rendered paths.
- self._last_read = new_read
+ self._last_ivdr = x_1d[0], x_1d[-1]
profiler('.format_to_1d()')
+
return (
x_1d,
y_1d,
@@ -399,6 +376,8 @@ def format_to_1d(
# Sub-type override interface #
###############################
+ x_offset: np.ndarray = np.array([0])
+
# optional pre-graphics xy formatted data which
# is incrementally updated in sync with the source data.
# XXX: was ``.allocate_xy()``
@@ -406,7 +385,6 @@ def allocate_xy_nd(
self,
src_shm: ShmArray,
data_field: str,
- index_field: str = 'index',
) -> tuple[
np.ndarray, # x
@@ -420,7 +398,11 @@ def allocate_xy_nd(
'''
y_nd = src_shm._array[data_field].copy()
- x_nd = src_shm._array[index_field].copy()
+ x_nd = (
+ src_shm._array[self.index_field].copy()
+ +
+ self.x_offset
+ )
return x_nd, y_nd
# XXX: was ``.update_xy()``
@@ -439,23 +421,43 @@ def incr_update_xy_nd(
nd_stop: int,
is_append: bool,
- index_field: str = 'index',
- ) -> tuple[
- np.ndarray,
- slice,
- ]:
+ ) -> None:
# write pushed data to flattened copy
- new_y_nd = new_from_src
+ y_nd_new = new_from_src[data_field]
+ self.y_nd[read_slc] = y_nd_new
+
+ x_nd_new = self.x_nd[read_slc]
+ x_nd_new[:] = (
+ new_from_src[self.index_field]
+ +
+ self.x_offset
+ )
- # XXX
- # TODO: this should be returned and written by caller!
- # XXX
- # generate same-valued-per-row x support based on y shape
- if index_field != 'index':
- self.x_nd[read_slc, :] = new_from_src[index_field]
+ # x_nd = self.x_nd[self.xy_slice]
+ # y_nd = self.y_nd[self.xy_slice]
+ # name = self.viz.name
+ # if 'trade_rate' == name:
+ # s = 4
+ # print(
+ # f'{name.upper()}:\n'
+ # 'NEW_FROM_SRC:\n'
+ # f'new_from_src: {new_from_src}\n\n'
+
+ # f'PRE self.x_nd:'
+ # f'\n{list(x_nd[-s:])}\n'
+
+ # f'PRE self.y_nd:\n'
+ # f'{list(y_nd[-s:])}\n\n'
- return new_y_nd, read_slc
+ # f'TO WRITE:\n'
+
+ # f'x_nd_new:\n'
+ # f'{x_nd_new[0]}\n'
+
+ # f'y_nd_new:\n'
+ # f'{y_nd_new}\n'
+ # )
# XXX: was ``.format_xy()``
def format_xy_nd_to_1d(
@@ -476,9 +478,28 @@ def format_xy_nd_to_1d(
Return single field column data verbatim
'''
+ # NOTE: we don't include the very last datum which is filled in
+ # normally by another graphics object.
+ x_1d = array[self.index_field][:-1]
+ y_1d = array[array_key][:-1]
+
+ # name = self.viz.name
+ # if 'trade_rate' == name:
+ # s = 4
+ # x_nd = list(self.x_nd[self.xy_slice][-s:-1])
+ # y_nd = list(self.y_nd[self.xy_slice][-s:-1])
+ # print(
+ # f'{name}:\n'
+ # f'XY data:\n'
+ # f'x: {x_nd}\n'
+ # f'y: {y_nd}\n\n'
+ # f'x_1d: {list(x_1d[-s:])}\n'
+ # f'y_1d: {list(y_1d[-s:])}\n\n'
+
+ # )
return (
- array['index'],
- array[array_key],
+ x_1d,
+ y_1d,
# 1d connection array or style-key to
# ``pg.functions.arrayToQPath()``
@@ -487,8 +508,15 @@ def format_xy_nd_to_1d(
class OHLCBarsFmtr(IncrementalFormatter):
+ x_offset: np.ndarray = np.array([
+ -0.5,
+ 0,
+ 0,
+ 0.5,
+ ])
fields: list[str] = ['open', 'high', 'low', 'close']
+ flat_index_ratio: float = 4
def allocate_xy_nd(
self,
@@ -511,13 +539,15 @@ def allocate_xy_nd(
# generate an flat-interpolated x-domain
x_nd = (
np.broadcast_to(
- ohlc_shm._array['index'][:, None],
+ ohlc_shm._array[self.index_field][:, None],
(
ohlc_shm._array.size,
# 4, # only ohlc
y_nd.shape[1],
),
- ) + np.array([-0.5, 0, 0, 0.5])
+ )
+ +
+ self.x_offset
)
assert y_nd.any()
@@ -527,81 +557,36 @@ def allocate_xy_nd(
y_nd,
)
- @staticmethod
- @njit(
- # TODO: for now need to construct this manually for readonly
- # arrays, see https://github.com/numba/numba/issues/4511
- # ntypes.tuple((float64[:], float64[:], float64[:]))(
- # numba_ohlc_dtype[::1], # contiguous
- # int64,
- # optional(float64),
- # ),
- nogil=True
- )
- def path_arrays_from_ohlc(
- data: np.ndarray,
- start: int64,
- bar_gap: float64 = 0.43,
+ def incr_update_xy_nd(
+ self,
- ) -> tuple[
- np.ndarray,
- np.ndarray,
- np.ndarray,
- ]:
- '''
- Generate an array of lines objects from input ohlc data.
+ src_shm: ShmArray,
+ data_field: str,
- '''
- size = int(data.shape[0] * 6)
+ new_from_src: np.ndarray, # portion of source that was updated
- x = np.zeros(
- # data,
- shape=size,
- dtype=float64,
- )
- y, c = x.copy(), x.copy()
-
- # TODO: report bug for assert @
- # /home/goodboy/repos/piker/env/lib/python3.8/site-packages/numba/core/typing/builtins.py:991
- for i, q in enumerate(data[start:], start):
-
- # TODO: ask numba why this doesn't work..
- # open, high, low, close, index = q[
- # ['open', 'high', 'low', 'close', 'index']]
-
- open = q['open']
- high = q['high']
- low = q['low']
- close = q['close']
- index = float64(q['index'])
-
- istart = i * 6
- istop = istart + 6
-
- # x,y detail the 6 points which connect all vertexes of a ohlc bar
- x[istart:istop] = (
- index - bar_gap,
- index,
- index,
- index,
- index,
- index + bar_gap,
- )
- y[istart:istop] = (
- open,
- open,
- low,
- high,
- close,
- close,
- )
+ read_slc: slice,
+ ln: int, # len of updated
- # specifies that the first edge is never connected to the
- # prior bars last edge thus providing a small "gap"/"space"
- # between bars determined by ``bar_gap``.
- c[istart:istop] = (1, 1, 1, 1, 1, 0)
+ nd_start: int,
+ nd_stop: int,
- return x, y, c
+ is_append: bool,
+
+ ) -> None:
+ # write newly pushed data to flattened copy
+ # a struct-arr is always passed in.
+ new_y_nd = rfn.structured_to_unstructured(
+ new_from_src[self.fields]
+ )
+ self.y_nd[read_slc] = new_y_nd
+
+ # generate same-valued-per-row x support based on y shape
+ x_nd_new = self.x_nd[read_slc]
+ x_nd_new[:] = np.broadcast_to(
+ new_from_src[self.index_field][:, None],
+ new_y_nd.shape,
+ ) + self.x_offset
# TODO: can we drop this frame and just use the above?
def format_xy_nd_to_1d(
@@ -613,7 +598,7 @@ def format_xy_nd_to_1d(
start: int = 0, # XXX: do we need this?
# 0.5 is no overlap between arms, 1.0 is full overlap
- w: float = 0.43,
+ w: float = 0.16,
) -> tuple[
np.ndarray,
@@ -626,48 +611,16 @@ def format_xy_nd_to_1d(
for line spacing.
'''
- x, y, c = self.path_arrays_from_ohlc(
- array,
+ x, y, c = path_arrays_from_ohlc(
+ array[:-1],
start,
- bar_gap=w,
- )
- return x, y, c
-
- def incr_update_xy_nd(
- self,
-
- src_shm: ShmArray,
- data_field: str,
-
- new_from_src: np.ndarray, # portion of source that was updated
-
- read_slc: slice,
- ln: int, # len of updated
+ bar_w=self.index_step_size,
+ bar_gap=w * self.index_step_size,
- nd_start: int,
- nd_stop: int,
-
- is_append: bool,
- index_field: str = 'index',
-
- ) -> tuple[
- np.ndarray,
- slice,
- ]:
- # write newly pushed data to flattened copy
- # a struct-arr is always passed in.
- new_y_nd = rfn.structured_to_unstructured(
- new_from_src[self.fields]
+ # XXX: don't ask, due to a ``numba`` bug..
+ use_time_index=(self.index_field == 'time'),
)
-
- # XXX
- # TODO: this should be returned and written by caller!
- # XXX
- # generate same-valued-per-row x support based on y shape
- if index_field != 'index':
- self.x_nd[read_slc, :] = new_from_src[index_field]
-
- return new_y_nd, read_slc
+ return x, y, c
class OHLCBarsAsCurveFmtr(OHLCBarsFmtr):
@@ -688,8 +641,8 @@ def format_xy_nd_to_1d(
# should we be passing in array as an xy arrays tuple?
# 2 more datum-indexes to capture zero at end
- x_flat = self.x_nd[self.xy_nd_start:self.xy_nd_stop]
- y_flat = self.y_nd[self.xy_nd_start:self.xy_nd_stop]
+ x_flat = self.x_nd[self.xy_nd_start:self.xy_nd_stop-1]
+ y_flat = self.y_nd[self.xy_nd_start:self.xy_nd_stop-1]
# slice to view
ivl, ivr = vr
@@ -705,14 +658,17 @@ def format_xy_nd_to_1d(
class StepCurveFmtr(IncrementalFormatter):
+ x_offset: np.ndarray = np.array([
+ 0,
+ 1,
+ ])
+
def allocate_xy_nd(
self,
shm: ShmArray,
data_field: str,
- index_field: str = 'index',
-
) -> tuple[
np.ndarray, # x
np.nd.array # y
@@ -722,19 +678,30 @@ def allocate_xy_nd(
for use by path graphics generation.
'''
- i = shm._array['index'].copy()
+ i = shm._array[self.index_field].copy()
out = shm._array[data_field].copy()
- x_out = np.broadcast_to(
- i[:, None],
- (i.size, 2),
- ) + np.array([-0.5, 0.5])
+ x_out = (
+ np.broadcast_to(
+ i[:, None],
+ (i.size, 2),
+ )
+ +
+ self.x_offset
+ )
- y_out = np.empty((len(out), 2), dtype=out.dtype)
+ # fill out Nx2 array to hold each step's left + right vertices.
+ y_out = np.empty(
+ x_out.shape,
+ dtype=out.dtype,
+ )
+ # fill in (current) values from source shm buffer
y_out[:] = out[:, np.newaxis]
+ # TODO: pretty sure we can drop this?
# start y at origin level
- y_out[0, 0] = 0
+ # y_out[0, 0] = 0
+ # y_out[self.xy_nd_start] = 0
return x_out, y_out
def incr_update_xy_nd(
@@ -743,12 +710,12 @@ def incr_update_xy_nd(
src_shm: ShmArray,
array_key: str,
- src_update: np.ndarray, # portion of source that was updated
- slc: slice,
+ new_from_src: np.ndarray, # portion of source that was updated
+ read_slc: slice,
ln: int, # len of updated
- first: int,
- last: int,
+ nd_start: int,
+ nd_stop: int,
is_append: bool,
@@ -756,25 +723,62 @@ def incr_update_xy_nd(
np.ndarray,
slice,
]:
- # for a step curve we slice from one datum prior
+ # NOTE: for a step curve we slice from one datum prior
# to the current "update slice" to get the previous
# "level".
- if is_append:
- start = max(last - 1, 0)
- end = src_shm._last.value
- new_y = src_shm._array[start:end][array_key]
- slc = slice(start, end)
+ #
+ # why this is needed,
+ # - the current new append slice will often have a zero
+ # value in the latest datum-step (at least for zero-on-new
+        #   cases like vlm) as per configuration of the FSP
+ # engine.
+ # - we need to look back a datum to get the last level which
+ # will be used to terminate/complete the last step x-width
+        #   which will be set to pair with the last x-index.
+ #
+ # XXX: this means WE CAN'T USE the append slice since we need to
+ # "look backward" one step to get the needed back-to-zero level
+ # and the update data in ``new_from_src`` will only contain the
+ # latest new data.
+ back_1 = slice(
+ read_slc.start - 1,
+ read_slc.stop,
+ )
- else:
- new_y = src_update
+ to_write = src_shm._array[back_1]
+ y_nd_new = self.y_nd[back_1]
+ y_nd_new[:] = to_write[array_key][:, None]
- return (
- np.broadcast_to(
- new_y[:, None], (new_y.size, 2),
- ),
- slc,
+ x_nd_new = self.x_nd[read_slc]
+ x_nd_new[:] = (
+ new_from_src[self.index_field][:, None]
+ +
+ self.x_offset
)
+ # XXX: uncomment for debugging
+ # x_nd = self.x_nd[self.xy_slice]
+ # y_nd = self.y_nd[self.xy_slice]
+ # name = self.viz.name
+ # if 'dolla_vlm' in name:
+ # s = 4
+ # print(
+ # f'{name}:\n'
+ # 'NEW_FROM_SRC:\n'
+ # f'new_from_src: {new_from_src}\n\n'
+
+ # f'PRE self.x_nd:'
+ # f'\n{x_nd[-s:]}\n'
+ # f'PRE self.y_nd:\n'
+ # f'{y_nd[-s:]}\n\n'
+
+ # f'TO WRITE:\n'
+ # f'x_nd_new:\n'
+ # f'{x_nd_new}\n'
+ # f'y_nd_new:\n'
+ # f'{y_nd_new}\n'
+ # )
+
def format_xy_nd_to_1d(
self,
@@ -787,65 +791,34 @@ def format_xy_nd_to_1d(
np.ndarray,
str,
]:
- lasts = array[['index', array_key]]
- last = lasts[array_key][-1]
+ last_t, last = array[-1][[self.index_field, array_key]]
- # 2 more datum-indexes to capture zero at end
- x_step = self.x_nd[self.xy_nd_start:self.xy_nd_stop+2]
- y_step = self.y_nd[self.xy_nd_start:self.xy_nd_stop+2]
- y_step[-1] = last
+ start = self.xy_nd_start
+ stop = self.xy_nd_stop
+
+ x_step = self.x_nd[start:stop]
+ y_step = self.y_nd[start:stop]
# slice out in-view data
ivl, ivr = vr
- ys_iv = y_step[ivl:ivr+1]
- xs_iv = x_step[ivl:ivr+1]
-
- # flatten to 1d
- y_iv = ys_iv.reshape(ys_iv.size)
- x_iv = xs_iv.reshape(xs_iv.size)
- # print(
- # f'ys_iv : {ys_iv[-s:]}\n'
- # f'y_iv: {y_iv[-s:]}\n'
- # f'xs_iv: {xs_iv[-s:]}\n'
- # f'x_iv: {x_iv[-s:]}\n'
- # )
+ # NOTE: add an extra step to get the vertical-line-down-to-zero
+ # adjacent to the last-datum graphic (filled rect).
+ x_step_iv = x_step[ivl:ivr+1]
+ y_step_iv = y_step[ivl:ivr+1]
- return x_iv, y_iv, 'all'
-
-
-def xy_downsample(
- x,
- y,
- uppx,
-
- x_spacer: float = 0.5,
-
-) -> tuple[
- np.ndarray,
- np.ndarray,
- float,
- float,
-]:
- '''
- Downsample 1D (flat ``numpy.ndarray``) arrays using M4 given an input
- ``uppx`` (units-per-pixel) and add space between discreet datums.
-
- '''
- # downsample whenever more then 1 pixels per datum can be shown.
- # always refresh data bounds until we get diffing
- # working properly, see above..
- bins, x, y, ymn, ymx = ds_m4(
- x,
- y,
- uppx,
- )
+ # flatten to 1d
+ x_1d = x_step_iv.reshape(x_step_iv.size)
+ y_1d = y_step_iv.reshape(y_step_iv.size)
- # flatten output to 1d arrays suitable for path-graphics generation.
- x = np.broadcast_to(x[:, None], y.shape)
- x = (x + np.array(
- [-x_spacer, 0, 0, x_spacer]
- )).flatten()
- y = y.flatten()
+ # debugging
+ # if y_1d.any():
+ # s = 6
+ # print(
+ # f'x_step_iv:\n{x_step_iv[-s:]}\n'
+ # f'y_step_iv:\n{y_step_iv[-s:]}\n\n'
+ # f'x_1d:\n{x_1d[-s:]}\n'
+ # f'y_1d:\n{y_1d[-s:]}\n'
+ # )
- return x, y, ymn, ymx
+ return x_1d, y_1d, 'all'
diff --git a/piker/ui/_compression.py b/piker/data/_m4.py
similarity index 68%
rename from piker/ui/_compression.py
rename to piker/data/_m4.py
index c66b3e583..8452e0228 100644
--- a/piker/ui/_compression.py
+++ b/piker/data/_m4.py
@@ -15,17 +15,30 @@
# along with this program. If not, see .
'''
-Graphics related downsampling routines for compressing to pixel
-limits on the display device.
+Graphics downsampling using the infamous M4 algorithm.
+
+This is one of ``piker``'s secret weapons allowing us to boss all other
+charting platforms B)
+
+(AND DON'T YOU DARE TAKE THIS CODE WITHOUT CREDIT OR WE'LL SUE UR F#&@* ASS).
+
+NOTES: this method is a so called "visualization driven data
+aggregation" approach. It gives error-free line chart
+downsampling, see
+further scientific paper resources:
+- http://www.vldb.org/pvldb/vol7/p797-jugel.pdf
+- http://www.vldb.org/2014/program/papers/demo/p997-jugel.pdf
+
+Details on implementation of this algo are based in,
+https://github.com/pikers/piker/issues/109
'''
import math
from typing import Optional
import numpy as np
-from numpy.lib import recfunctions as rfn
from numba import (
- jit,
+ njit,
# float64, optional, int64,
)
@@ -35,109 +48,6 @@
log = get_logger(__name__)
-def hl2mxmn(ohlc: np.ndarray) -> np.ndarray:
- '''
- Convert a OHLC struct-array containing 'high'/'low' columns
- to a "joined" max/min 1-d array.
-
- '''
- index = ohlc['index']
- hls = ohlc[[
- 'low',
- 'high',
- ]]
-
- mxmn = np.empty(2*hls.size, dtype=np.float64)
- x = np.empty(2*hls.size, dtype=np.float64)
- trace_hl(hls, mxmn, x, index[0])
- x = x + index[0]
-
- return mxmn, x
-
-
-@jit(
- # TODO: the type annots..
- # float64[:](float64[:],),
- nopython=True,
-)
-def trace_hl(
- hl: 'np.ndarray',
- out: np.ndarray,
- x: np.ndarray,
- start: int,
-
- # the "offset" values in the x-domain which
- # place the 2 output points around each ``int``
- # master index.
- margin: float = 0.43,
-
-) -> None:
- '''
- "Trace" the outline of the high-low values of an ohlc sequence
- as a line such that the maximum deviation (aka disperaion) between
- bars if preserved.
-
- This routine is expected to modify input arrays in-place.
-
- '''
- last_l = hl['low'][0]
- last_h = hl['high'][0]
-
- for i in range(hl.size):
- row = hl[i]
- l, h = row['low'], row['high']
-
- up_diff = h - last_l
- down_diff = last_h - l
-
- if up_diff > down_diff:
- out[2*i + 1] = h
- out[2*i] = last_l
- else:
- out[2*i + 1] = l
- out[2*i] = last_h
-
- last_l = l
- last_h = h
-
- x[2*i] = int(i) - margin
- x[2*i + 1] = int(i) + margin
-
- return out
-
-
-def ohlc_flatten(
- ohlc: np.ndarray,
- use_mxmn: bool = True,
-
-) -> tuple[np.ndarray, np.ndarray]:
- '''
- Convert an OHLCV struct-array into a flat ready-for-line-plotting
- 1-d array that is 4 times the size with x-domain values distributed
- evenly (by 0.5 steps) over each index.
-
- '''
- index = ohlc['index']
-
- if use_mxmn:
- # traces a line optimally over highs to lows
- # using numba. NOTE: pretty sure this is faster
- # and looks about the same as the below output.
- flat, x = hl2mxmn(ohlc)
-
- else:
- flat = rfn.structured_to_unstructured(
- ohlc[['open', 'high', 'low', 'close']]
- ).flatten()
-
- x = np.linspace(
- start=index[0] - 0.5,
- stop=index[-1] + 0.5,
- num=len(flat),
- )
- return x, flat
-
-
def ds_m4(
x: np.ndarray,
y: np.ndarray,
@@ -160,16 +70,6 @@ def ds_m4(
This is more or less an OHLC style sampling of a line-style series.
'''
- # NOTE: this method is a so called "visualization driven data
- # aggregation" approach. It gives error-free line chart
- # downsampling, see
- # further scientific paper resources:
- # - http://www.vldb.org/pvldb/vol7/p797-jugel.pdf
- # - http://www.vldb.org/2014/program/papers/demo/p997-jugel.pdf
-
- # Details on implementation of this algo are based in,
- # https://github.com/pikers/piker/issues/109
-
# XXX: from infinite on downsampling viewable graphics:
# "one thing i remembered about the binning - if you are
# picking a range within your timeseries the start and end bin
@@ -191,6 +91,14 @@ def ds_m4(
x_end = x[-1] # x end value/highest in domain
xrange = (x_end - x_start)
+ if xrange < 0:
+ log.error(f'-VE M4 X-RANGE: {x_start} -> {x_end}')
+ # XXX: broken x-range calc-case, likely the x-end points
+ # are wrong and have some default value set (such as
+ # x_end -> while x_start -> 0.5).
+ # breakpoint()
+ return None
+
# XXX: always round up on the input pixels
# lnx = len(x)
# uppx *= max(4 / (1 + math.log(uppx, 2)), 1)
@@ -256,8 +164,7 @@ def ds_m4(
return nb, x_out, y_out, ymn, ymx
-@jit(
- nopython=True,
+@njit(
nogil=True,
)
def _m4(
diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py
new file mode 100644
index 000000000..adaed041b
--- /dev/null
+++ b/piker/data/_pathops.py
@@ -0,0 +1,452 @@
+# piker: trading gear for hackers
+# Copyright (C) 2018-present Tyler Goodlet (in stewardship of piker0)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+Super fast ``QPainterPath`` generation related operator routines.
+
+"""
+from math import (
+ ceil,
+ floor,
+)
+
+import numpy as np
+from numpy.lib import recfunctions as rfn
+from numba import (
+ # types,
+ njit,
+ float64,
+ int64,
+ # optional,
+)
+
+# TODO: for ``numba`` typing..
+# from ._source import numba_ohlc_dtype
+from ._m4 import ds_m4
+from .._profile import (
+ Profiler,
+ pg_profile_enabled,
+ ms_slower_then,
+)
+
+
+def xy_downsample(
+ x,
+ y,
+ uppx,
+
+ x_spacer: float = 0.5,
+
+) -> tuple[
+ np.ndarray,
+ np.ndarray,
+ float,
+ float,
+]:
+ '''
+ Downsample 1D (flat ``numpy.ndarray``) arrays using M4 given an input
+    ``uppx`` (units-per-pixel) and add space between discrete datums.
+
+ '''
+    # downsample whenever more than 1 pixel per datum can be shown.
+ # always refresh data bounds until we get diffing
+ # working properly, see above..
+ m4_out = ds_m4(
+ x,
+ y,
+ uppx,
+ )
+
+ if m4_out is not None:
+ bins, x, y, ymn, ymx = m4_out
+ # flatten output to 1d arrays suitable for path-graphics generation.
+ x = np.broadcast_to(x[:, None], y.shape)
+ x = (x + np.array(
+ [-x_spacer, 0, 0, x_spacer]
+ )).flatten()
+ y = y.flatten()
+
+ return x, y, ymn, ymx
+
+ # XXX: we accept a None output for the case where the input range
+ # to ``ds_m4()`` is bad (-ve) and we want to catch and debug
+ # that (seemingly super rare) circumstance..
+ return None
+
+
+@njit(
+ # NOTE: need to construct this manually for readonly
+ # arrays, see https://github.com/numba/numba/issues/4511
+ # (
+ # types.Array(
+ # numba_ohlc_dtype,
+ # 1,
+ # 'C',
+ # readonly=True,
+ # ),
+ # int64,
+ # types.unicode_type,
+ # optional(float64),
+ # ),
+ nogil=True
+)
+def path_arrays_from_ohlc(
+ data: np.ndarray,
+ start: int64,
+ bar_w: float64,
+ bar_gap: float64 = 0.16,
+ use_time_index: bool = True,
+
+ # XXX: ``numba`` issue: https://github.com/numba/numba/issues/8622
+ # index_field: str,
+
+) -> tuple[
+ np.ndarray,
+ np.ndarray,
+ np.ndarray,
+]:
+ '''
+ Generate an array of lines objects from input ohlc data.
+
+ '''
+ size = int(data.shape[0] * 6)
+
+ # XXX: see this for why the dtype might have to be defined outside
+ # the routine.
+ # https://github.com/numba/numba/issues/4098#issuecomment-493914533
+ x = np.zeros(
+ shape=size,
+ dtype=float64,
+ )
+ y, c = x.copy(), x.copy()
+
+ half_w: float = bar_w/2
+
+ # TODO: report bug for assert @
+ # /home/goodboy/repos/piker/env/lib/python3.8/site-packages/numba/core/typing/builtins.py:991
+ for i, q in enumerate(data[start:], start):
+
+ open = q['open']
+ high = q['high']
+ low = q['low']
+ close = q['close']
+
+ if use_time_index:
+ index = float64(q['time'])
+ else:
+ index = float64(q['index'])
+
+ # XXX: ``numba`` issue: https://github.com/numba/numba/issues/8622
+ # index = float64(q[index_field])
+ # AND this (probably)
+ # open, high, low, close, index = q[
+ # ['open', 'high', 'low', 'close', 'index']]
+
+ istart = i * 6
+ istop = istart + 6
+
+ # x,y detail the 6 points which connect all vertexes of a ohlc bar
+ mid: float = index + half_w
+ x[istart:istop] = (
+ index + bar_gap,
+ mid,
+ mid,
+ mid,
+ mid,
+ index + bar_w - bar_gap,
+ )
+ y[istart:istop] = (
+ open,
+ open,
+ low,
+ high,
+ close,
+ close,
+ )
+
+ # specifies that the first edge is never connected to the
+        # prior bar's last edge thus providing a small "gap"/"space"
+ # between bars determined by ``bar_gap``.
+ c[istart:istop] = (1, 1, 1, 1, 1, 0)
+
+ return x, y, c
+
+
+def hl2mxmn(
+ ohlc: np.ndarray,
+ index_field: str = 'index',
+
+) -> np.ndarray:
+ '''
+    Convert an OHLC struct-array containing 'high'/'low' columns
+ to a "joined" max/min 1-d array.
+
+ '''
+ index = ohlc[index_field]
+ hls = ohlc[[
+ 'low',
+ 'high',
+ ]]
+
+ mxmn = np.empty(2*hls.size, dtype=np.float64)
+ x = np.empty(2*hls.size, dtype=np.float64)
+ trace_hl(hls, mxmn, x, index[0])
+ x = x + index[0]
+
+ return mxmn, x
+
+
+@njit(
+ # TODO: the type annots..
+ # float64[:](float64[:],),
+)
+def trace_hl(
+ hl: 'np.ndarray',
+ out: np.ndarray,
+ x: np.ndarray,
+ start: int,
+
+ # the "offset" values in the x-domain which
+ # place the 2 output points around each ``int``
+ # master index.
+ margin: float = 0.43,
+
+) -> None:
+ '''
+ "Trace" the outline of the high-low values of an ohlc sequence
+    as a line such that the maximum deviation (aka dispersion) between
+    bars is preserved.
+
+ This routine is expected to modify input arrays in-place.
+
+ '''
+ last_l = hl['low'][0]
+ last_h = hl['high'][0]
+
+ for i in range(hl.size):
+ row = hl[i]
+ l, h = row['low'], row['high']
+
+ up_diff = h - last_l
+ down_diff = last_h - l
+
+ if up_diff > down_diff:
+ out[2*i + 1] = h
+ out[2*i] = last_l
+ else:
+ out[2*i + 1] = l
+ out[2*i] = last_h
+
+ last_l = l
+ last_h = h
+
+ x[2*i] = int(i) - margin
+ x[2*i + 1] = int(i) + margin
+
+ return out
+
+
+def ohlc_flatten(
+ ohlc: np.ndarray,
+ use_mxmn: bool = True,
+ index_field: str = 'index',
+
+) -> tuple[np.ndarray, np.ndarray]:
+ '''
+ Convert an OHLCV struct-array into a flat ready-for-line-plotting
+ 1-d array that is 4 times the size with x-domain values distributed
+ evenly (by 0.5 steps) over each index.
+
+ '''
+ index = ohlc[index_field]
+
+ if use_mxmn:
+ # traces a line optimally over highs to lows
+ # using numba. NOTE: pretty sure this is faster
+ # and looks about the same as the below output.
+ flat, x = hl2mxmn(ohlc)
+
+ else:
+ flat = rfn.structured_to_unstructured(
+ ohlc[['open', 'high', 'low', 'close']]
+ ).flatten()
+
+ x = np.linspace(
+ start=index[0] - 0.5,
+ stop=index[-1] + 0.5,
+ num=len(flat),
+ )
+ return x, flat
+
+
+def slice_from_time(
+ arr: np.ndarray,
+ start_t: float,
+ stop_t: float,
+ step: int | None = None,
+
+) -> slice:
+ '''
+ Calculate array indices mapped from a time range and return them in
+ a slice.
+
+ Given an input array with an epoch `'time'` series entry, calculate
+ the indices which span the time range and return in a slice. Presume
+ each `'time'` step increment is uniform and when the time stamp
+ series contains gaps (the uniform presumption is untrue) use
+ ``np.searchsorted()`` binary search to look up the appropriate
+ index.
+
+ '''
+ profiler = Profiler(
+ msg='slice_from_time()',
+ disabled=not pg_profile_enabled(),
+ ms_threshold=ms_slower_then,
+ )
+
+ times = arr['time']
+ t_first = floor(times[0])
+ t_last = ceil(times[-1])
+
+ # the greatest index we can return which slices to the
+ # end of the input array.
+ read_i_max = arr.shape[0]
+
+ # TODO: require this is always passed in?
+ if step is None:
+ step = round(t_last - times[-2])
+ if step == 0:
+ step = 1
+
+ # compute (presumed) uniform-time-step index offsets
+ i_start_t = floor(start_t)
+ read_i_start = floor(((i_start_t - t_first) // step)) - 1
+
+ i_stop_t = ceil(stop_t)
+
+ # XXX: edge case -> always set stop index to last in array whenever
+    # the input stop time is detected to be greater than the equiv time
+ # stamp at that last entry.
+ if i_stop_t >= t_last:
+ read_i_stop = read_i_max
+ else:
+ read_i_stop = ceil((i_stop_t - t_first) // step) + 1
+
+ # always clip outputs to array support
+ # for read start:
+ # - never allow a start < the 0 index
+ # - never allow an end index > the read array len
+ read_i_start = min(
+ max(0, read_i_start),
+ read_i_max - 1,
+ )
+ read_i_stop = max(
+ 0,
+ min(read_i_stop, read_i_max),
+ )
+
+ # check for larger-then-latest calculated index for given start
+ # time, in which case we do a binary search for the correct index.
+ # NOTE: this is usually the result of a time series with time gaps
+ # where it is expected that each index step maps to a uniform step
+ # in the time stamp series.
+ t_iv_start = times[read_i_start]
+ if (
+ t_iv_start > i_start_t
+ ):
+ # do a binary search for the best index mapping to ``start_t``
+ # given we measured an overshoot using the uniform-time-step
+ # calculation from above.
+
+ # TODO: once we start caching these per source-array,
+ # we can just overwrite ``read_i_start`` directly.
+ new_read_i_start = np.searchsorted(
+ times,
+ i_start_t,
+ side='left',
+ )
+
+ # TODO: minimize binary search work as much as possible:
+ # - cache these remap values which compensate for gaps in the
+ # uniform time step basis where we calc a later start
+ # index for the given input ``start_t``.
+ # - can we shorten the input search sequence by heuristic?
+ # up_to_arith_start = index[:read_i_start]
+
+ if (
+ new_read_i_start <= read_i_start
+ ):
+ # t_diff = t_iv_start - start_t
+ # print(
+ # f"WE'RE CUTTING OUT TIME - STEP:{step}\n"
+ # f'start_t:{start_t} -> 0index start_t:{t_iv_start}\n'
+ # f'diff: {t_diff}\n'
+ # f'REMAPPED START i: {read_i_start} -> {new_read_i_start}\n'
+ # )
+ read_i_start = new_read_i_start - 1
+
+ t_iv_stop = times[read_i_stop - 1]
+ if (
+ t_iv_stop > i_stop_t
+ ):
+ # t_diff = stop_t - t_iv_stop
+ # print(
+ # f"WE'RE CUTTING OUT TIME - STEP:{step}\n"
+ # f'calced iv stop:{t_iv_stop} -> stop_t:{stop_t}\n'
+ # f'diff: {t_diff}\n'
+ # # f'SHOULD REMAP STOP: {read_i_start} -> {new_read_i_start}\n'
+ # )
+ new_read_i_stop = np.searchsorted(
+ times[read_i_start:],
+ # times,
+ i_stop_t,
+ side='left',
+ )
+
+ if (
+ new_read_i_stop <= read_i_stop
+ ):
+ read_i_stop = read_i_start + new_read_i_stop + 1
+
+ # sanity checks for range size
+ # samples = (i_stop_t - i_start_t) // step
+ # index_diff = read_i_stop - read_i_start + 1
+ # if index_diff > (samples + 3):
+ # breakpoint()
+
+ # read-relative indexes: gives a slice where `shm.array[read_slc]`
+ # will be the data spanning the input time range `start_t` ->
+ # `stop_t`
+ read_slc = slice(
+ int(read_i_start),
+ int(read_i_stop),
+ )
+
+ profiler(
+ 'slicing complete'
+ # f'{start_t} -> {abs_slc.start} | {read_slc.start}\n'
+ # f'{stop_t} -> {abs_slc.stop} | {read_slc.stop}\n'
+ )
+
+ # NOTE: if caller needs absolute buffer indices they can
+ # slice the buffer abs index like so:
+ # index = arr['index']
+ # abs_indx = index[read_slc]
+ # abs_slc = slice(
+ # int(abs_indx[0]),
+ # int(abs_indx[-1]),
+ # )
+
+ return read_slc
diff --git a/piker/data/_sampling.py b/piker/data/_sampling.py
index a5df96cca..20067f826 100644
--- a/piker/data/_sampling.py
+++ b/piker/data/_sampling.py
@@ -253,20 +253,30 @@ async def broadcast(
# f'consumers: {subs}'
)
borked: set[tractor.MsgStream] = set()
- for stream in subs:
+ sent: set[tractor.MsgStream] = set()
+ while True:
try:
- await stream.send({
- 'index': time_stamp or last_ts,
- 'period': period_s,
- })
- except (
- trio.BrokenResourceError,
- trio.ClosedResourceError
- ):
- log.error(
- f'{stream._ctx.chan.uid} dropped connection'
- )
- borked.add(stream)
+ for stream in (subs - sent):
+ try:
+ await stream.send({
+ 'index': time_stamp or last_ts,
+ 'period': period_s,
+ })
+ sent.add(stream)
+
+ except (
+ trio.BrokenResourceError,
+ trio.ClosedResourceError
+ ):
+ log.error(
+ f'{stream._ctx.chan.uid} dropped connection'
+ )
+ borked.add(stream)
+ else:
+ break
+ except RuntimeError:
+ log.warning(f'Client subs {subs} changed while broadcasting')
+ continue
for stream in borked:
try:
@@ -848,6 +858,16 @@ async def uniform_rate_send(
# rate timing exactly lul
try:
await stream.send({sym: first_quote})
+ except tractor.RemoteActorError as rme:
+ if rme.type is not tractor._exceptions.StreamOverrun:
+ raise
+ ctx = stream._ctx
+ chan = ctx.chan
+ log.warning(
+ 'Throttled quote-stream overrun!\n'
+ f'{sym}:{ctx.cid}@{chan.uid}'
+ )
+
except (
# NOTE: any of these can be raised by ``tractor``'s IPC
# transport-layer and we want to be highly resilient
diff --git a/piker/data/feed.py b/piker/data/feed.py
index 534aebc9c..156913aa6 100644
--- a/piker/data/feed.py
+++ b/piker/data/feed.py
@@ -207,7 +207,7 @@ def get_feed_bus(
) -> _FeedsBus:
'''
- Retreive broker-daemon-local data feeds bus from process global
+ Retrieve broker-daemon-local data feeds bus from process global
scope. Serialize task access to lock.
'''
@@ -250,6 +250,7 @@ async def start_backfill(
shm: ShmArray,
timeframe: float,
sampler_stream: tractor.MsgStream,
+ feed_is_live: trio.Event,
last_tsdb_dt: Optional[datetime] = None,
storage: Optional[Storage] = None,
@@ -281,7 +282,14 @@ async def start_backfill(
- pendulum.from_timestamp(times[-2])
).seconds
- if step_size_s == 60:
+ # if the market is open (aka we have a live feed) but the
+ # history sample step index seems off we report the surrounding
+ # data and drop into a bp. this case shouldn't really ever
+ # happen if we're doing history retrieval correctly.
+ if (
+ step_size_s == 60
+ and feed_is_live.is_set()
+ ):
inow = round(time.time())
diff = inow - times[-1]
if abs(diff) > 60:
@@ -499,6 +507,7 @@ async def basic_backfill(
bfqsn: str,
shms: dict[int, ShmArray],
sampler_stream: tractor.MsgStream,
+ feed_is_live: trio.Event,
) -> None:
@@ -518,6 +527,7 @@ async def basic_backfill(
shm,
timeframe,
sampler_stream,
+ feed_is_live,
)
)
except DataUnavailable:
@@ -534,6 +544,7 @@ async def tsdb_backfill(
bfqsn: str,
shms: dict[int, ShmArray],
sampler_stream: tractor.MsgStream,
+ feed_is_live: trio.Event,
task_status: TaskStatus[
tuple[ShmArray, ShmArray]
@@ -568,6 +579,8 @@ async def tsdb_backfill(
shm,
timeframe,
sampler_stream,
+ feed_is_live,
+
last_tsdb_dt=last_tsdb_dt,
tsdb_is_up=True,
storage=storage,
@@ -870,6 +883,7 @@ async def manage_history(
60: hist_shm,
},
sample_stream,
+ feed_is_live,
)
# yield back after client connect with filled shm
@@ -904,6 +918,7 @@ async def manage_history(
60: hist_shm,
},
sample_stream,
+ feed_is_live,
)
task_status.started((
hist_zero_index,
@@ -1037,12 +1052,11 @@ async def allocate_persistent_feed(
flume = Flume(
symbol=symbol,
- _hist_shm_token=hist_shm.token,
- _rt_shm_token=rt_shm.token,
first_quote=first_quote,
+ _rt_shm_token=rt_shm.token,
+ _hist_shm_token=hist_shm.token,
izero_hist=izero_hist,
izero_rt=izero_rt,
- # throttle_rate=tick_throttle,
)
# for ambiguous names we simply apply the retreived
@@ -1066,7 +1080,10 @@ async def allocate_persistent_feed(
# seed the buffer with a history datum - this is most handy
# for many backends which don't sample @ 1s OHLC but do have
# slower data such as 1m OHLC.
- if not len(rt_shm.array):
+ if (
+ not len(rt_shm.array)
+ and hist_shm.array.size
+ ):
rt_shm.push(hist_shm.array[-3:-1])
ohlckeys = ['open', 'high', 'low', 'close']
rt_shm.array[ohlckeys][-2:] = hist_shm.array['close'][-1]
@@ -1077,6 +1094,9 @@ async def allocate_persistent_feed(
rt_shm.array['time'][0] = ts
rt_shm.array['time'][1] = ts + 1
+ elif hist_shm.array.size == 0:
+ await tractor.breakpoint()
+
# wait the spawning parent task to register its subscriber
# send-stream entry before we start the sample loop.
await sub_registered.wait()
@@ -1569,6 +1589,9 @@ async def open_feed(
(brokermod, bfqsns),
) in zip(ctxs, providers.items()):
+ # NOTE: do it asap to avoid overruns during multi-feed setup?
+ ctx._backpressure = backpressure
+
for fqsn, flume_msg in flumes_msg_dict.items():
flume = Flume.from_msg(flume_msg)
assert flume.symbol.fqsn == fqsn
diff --git a/piker/data/flows.py b/piker/data/flows.py
index 9bb272308..9d8b3103c 100644
--- a/piker/data/flows.py
+++ b/piker/data/flows.py
@@ -22,17 +22,11 @@
"""
from __future__ import annotations
-from contextlib import asynccontextmanager as acm
-from functools import partial
from typing import (
- AsyncIterator,
TYPE_CHECKING,
)
import tractor
-from tractor.trionics import (
- maybe_open_context,
-)
import pendulum
import numpy as np
@@ -45,12 +39,13 @@
ShmArray,
_Token,
)
-from ._sampling import (
- open_sample_stream,
-)
+# from .._profile import (
+# Profiler,
+# pg_profile_enabled,
+# )
if TYPE_CHECKING:
- from pyqtgraph import PlotItem
+ # from pyqtgraph import PlotItem
from .feed import Feed
@@ -147,26 +142,6 @@ def hist_shm(self) -> ShmArray:
async def receive(self) -> dict:
return await self.stream.receive()
- @acm
- async def index_stream(
- self,
- delay_s: float = 1,
-
- ) -> AsyncIterator[int]:
-
- if not self.feed:
- raise RuntimeError('This flume is not part of any ``Feed``?')
-
- # TODO: maybe a public (property) API for this in ``tractor``?
- portal = self.stream._ctx._portal
- assert portal
-
- # XXX: this should be singleton on a host,
- # a lone broker-daemon per provider should be
- # created for all practical purposes
- async with open_sample_stream(float(delay_s)) as stream:
- yield stream
-
def get_ds_info(
self,
) -> tuple[float, float, float]:
@@ -218,104 +193,18 @@ def from_msg(cls, msg: dict) -> dict:
def get_index(
self,
time_s: float,
+ array: np.ndarray,
- ) -> int:
+ ) -> int | float:
'''
Return array shm-buffer index for for epoch time.
'''
- array = self.rt_shm.array
times = array['time']
- mask = (times >= time_s)
-
- if any(mask):
- return array['index'][mask][0]
-
- # just the latest index
- array['index'][-1]
-
- def slice_from_time(
- self,
- array: np.ndarray,
- start_t: float,
- stop_t: float,
- timeframe_s: int = 1,
- return_data: bool = False,
-
- ) -> np.ndarray:
- '''
- Slice an input struct array providing only datums
- "in view" of this chart.
-
- '''
- arr = {
- 1: self.rt_shm.array,
- 60: self.hist_shm.arry,
- }[timeframe_s]
-
- times = arr['time']
- index = array['index']
-
- # use advanced indexing to map the
- # time range to the index range.
- mask = (
- (times >= start_t)
- &
- (times < stop_t)
- )
-
- # TODO: if we can ensure each time field has a uniform
- # step we can instead do some arithmetic to determine
- # the equivalent index like we used to?
- # return array[
- # lbar - ifirst:
- # (rbar - ifirst) + 1
- # ]
-
- i_by_t = index[mask]
- i_0 = i_by_t[0]
-
- abs_slc = slice(
- i_0,
- i_by_t[-1],
- )
- # slice data by offset from the first index
- # available in the passed datum set.
- read_slc = slice(
- 0,
- i_by_t[-1] - i_0,
- )
- if not return_data:
- return (
- abs_slc,
- read_slc,
- )
-
- # also return the readable data from the timerange
- return (
- abs_slc,
- read_slc,
- arr[mask],
- )
-
- def view_data(
- self,
- plot: PlotItem,
- timeframe_s: int = 1,
-
- ) -> np.ndarray:
-
- # get far-side x-indices plot view
- vr = plot.viewRect()
-
- (
- abs_slc,
- buf_slc,
- iv_arr,
- ) = self.slice_from_time(
- start_t=vr.left(),
- stop_t=vr.right(),
- timeframe_s=timeframe_s,
- return_data=True,
+ first = np.searchsorted(
+ times,
+ time_s,
+ side='left',
)
- return iv_arr
+ imx = times.shape[0] - 1
+ return min(first, imx)
diff --git a/piker/fsp/_engine.py b/piker/fsp/_engine.py
index a78308a40..37852cfc9 100644
--- a/piker/fsp/_engine.py
+++ b/piker/fsp/_engine.py
@@ -188,6 +188,8 @@ async def fsp_compute(
history_by_field['time'] = src_time[-len(history_by_field):]
+ history_output['time'] = src.array['time']
+
# TODO: XXX:
# THERE'S A BIG BUG HERE WITH THE `index` field since we're
# prepending a copy of the first value a few times to make
diff --git a/piker/pp.py b/piker/pp.py
index 48cf0daa1..6c5a60d8b 100644
--- a/piker/pp.py
+++ b/piker/pp.py
@@ -54,7 +54,7 @@ def open_trade_ledger(
broker: str,
account: str,
-) -> str:
+) -> dict:
'''
Indempotently create and read in a trade log file from the
``/ledgers/`` directory.
diff --git a/piker/ui/_app.py b/piker/ui/_app.py
index 23a9d2ede..3be073e79 100644
--- a/piker/ui/_app.py
+++ b/piker/ui/_app.py
@@ -118,17 +118,10 @@ async def _async_main(
# godwidget.hbox.addWidget(search)
godwidget.search = search
- symbols: list[str] = []
-
- for sym in syms:
- symbol, _, provider = sym.rpartition('.')
- symbols.append(symbol)
-
# this internally starts a ``display_symbol_data()`` task above
order_mode_ready = await godwidget.load_symbols(
- provider,
- symbols,
- loglevel
+ fqsns=syms,
+ loglevel=loglevel,
)
# spin up a search engine for the local cached symbol set
@@ -185,8 +178,7 @@ def _main(
tractor_kwargs,
) -> None:
'''
- Sync entry point to start a chart: a ``tractor`` + Qt runtime
- entry point
+ Sync entry point to start a chart: a ``tractor`` + Qt runtime.
'''
run_qtractor(
diff --git a/piker/ui/_axes.py b/piker/ui/_axes.py
index 3ed5b4205..b6fb92819 100644
--- a/piker/ui/_axes.py
+++ b/piker/ui/_axes.py
@@ -18,6 +18,7 @@
Chart axes graphics and behavior.
"""
+from __future__ import annotations
from functools import lru_cache
from typing import Optional, Callable
from math import floor
@@ -27,6 +28,7 @@
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QPointF
+from . import _pg_overrides as pgo
from ..data._source import float_digits
from ._label import Label
from ._style import DpiAwareFont, hcolor, _font
@@ -46,8 +48,8 @@ class Axis(pg.AxisItem):
'''
def __init__(
self,
- linkedsplits,
- typical_max_str: str = '100 000.000',
+ plotitem: pgo.PlotItem,
+ typical_max_str: str = '100 000.000 ',
text_color: str = 'bracket',
lru_cache_tick_strings: bool = True,
**kwargs
@@ -61,36 +63,42 @@ def __init__(
# XXX: pretty sure this makes things slower
# self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
- self.linkedsplits = linkedsplits
+ self.pi = plotitem
self._dpi_font = _font
self.setTickFont(_font.font)
font_size = self._dpi_font.font.pixelSize()
+ style_conf = {
+ 'textFillLimits': [(0, 0.5)],
+ 'tickFont': self._dpi_font.font,
+
+ }
+ text_offset = None
if self.orientation in ('bottom',):
text_offset = floor(0.25 * font_size)
elif self.orientation in ('left', 'right'):
text_offset = floor(font_size / 2)
- self.setStyle(**{
- 'textFillLimits': [(0, 0.5)],
- 'tickFont': self._dpi_font.font,
-
- # offset of text *away from* axis line in px
- # use approx. half the font pixel size (height)
- 'tickTextOffset': text_offset,
- })
+ if text_offset:
+ style_conf.update({
+ # offset of text *away from* axis line in px
+ # use approx. half the font pixel size (height)
+ 'tickTextOffset': text_offset,
+ })
+ self.setStyle(**style_conf)
self.setTickFont(_font.font)
# NOTE: this is for surrounding "border"
self.setPen(_axis_pen)
# this is the text color
- # self.setTextPen(pg.mkPen(hcolor(text_color)))
self.text_color = text_color
+        # generate a bounding rect based on sizing to a "typical"
+        # maximum-length string defined as the init default.
self.typical_br = _font._qfm.boundingRect(typical_max_str)
# size the pertinent axis dimension to a "typical value"
@@ -102,6 +110,9 @@ def __init__(
maxsize=2**20
)(self.tickStrings)
+ # axis "sticky" labels
+ self._stickies: dict[str, YAxisLabel] = {}
+
# NOTE: only overriden to cast tick values entries into tuples
# for use with the lru caching.
def tickValues(
@@ -139,6 +150,38 @@ def size_to_values(self) -> None:
def txt_offsets(self) -> tuple[int, int]:
return tuple(self.style['tickTextOffset'])
+ def add_sticky(
+ self,
+ pi: pgo.PlotItem,
+ name: None | str = None,
+ digits: None | int = 2,
+ bg_color='default',
+ fg_color='black',
+
+ ) -> YAxisLabel:
+
+ # if the sticky is for our symbol
+ # use the tick size precision for display
+ name = name or pi.name
+ digits = digits or 2
+
+ # TODO: ``._ysticks`` should really be an attr on each
+ # ``PlotItem`` now instead of the containing widget (because of
+ # overlays) ?
+
+ # add y-axis "last" value label
+ sticky = self._stickies[name] = YAxisLabel(
+ pi=pi,
+ parent=self,
+ digits=digits, # TODO: pass this from symbol data
+ opacity=0.9, # slight see-through
+ bg_color=bg_color,
+ fg_color=fg_color,
+ )
+
+ pi.sigRangeChanged.connect(sticky.update_on_resize)
+ return sticky
+
class PriceAxis(Axis):
@@ -200,7 +243,6 @@ def set_min_tick(
self._min_tick = size
def size_to_values(self) -> None:
- # self.typical_br = _font._qfm.boundingRect(typical_max_str)
self.setWidth(self.typical_br.width())
# XXX: drop for now since it just eats up h space
@@ -255,28 +297,50 @@ def _indexes_to_timestrs(
) -> list[str]:
- chart = self.linkedsplits.chart
- flow = chart._flows[chart.name]
- shm = flow.shm
- bars = shm.array
- first = shm._first.value
-
- bars_len = len(bars)
- times = bars['time']
-
- epochs = times[list(
- map(
- int,
- filter(
- lambda i: i > 0 and i < bars_len,
- (i-first for i in indexes)
+ # XX: ARGGGGG AG:LKSKDJF:LKJSDFD
+ chart = self.pi.chart_widget
+
+ viz = chart._vizs[chart.name]
+ shm = viz.shm
+ array = shm.array
+ times = array['time']
+ i_0, i_l = times[0], times[-1]
+
+ # edge cases
+ if (
+ not indexes
+ or
+ (indexes[0] < i_0
+ and indexes[-1] < i_l)
+ or
+ (indexes[0] > i_0
+ and indexes[-1] > i_l)
+ ):
+ return []
+
+ if viz.index_field == 'index':
+ arr_len = times.shape[0]
+ first = shm._first.value
+ epochs = times[
+ list(
+ map(
+ int,
+ filter(
+ lambda i: i > 0 and i < arr_len,
+ (i - first for i in indexes)
+ )
+ )
)
- )
- )]
+ ]
+ else:
+ epochs = list(map(int, indexes))
# TODO: **don't** have this hard coded shift to EST
# delay = times[-1] - times[-2]
- dts = np.array(epochs, dtype='datetime64[s]')
+ dts = np.array(
+ epochs,
+ dtype='datetime64[s]',
+ )
# see units listing:
# https://numpy.org/devdocs/reference/arrays.datetime.html#datetime-units
@@ -294,24 +358,39 @@ def tickStrings(
spacing: float,
) -> list[str]:
+
+ return self._indexes_to_timestrs(values)
+
+ # NOTE: handy for debugging the lru cache
# info = self.tickStrings.cache_info()
# print(info)
- return self._indexes_to_timestrs(values)
class AxisLabel(pg.GraphicsObject):
- _x_margin = 0
- _y_margin = 0
+ # relative offsets *OF* the bounding rect relative
+ # to parent graphics object.
+ # eg. | => <_x_br_offset> => | |
+ _x_br_offset: float = 0
+ _y_br_offset: float = 0
+
+ # relative offsets of text *within* bounding rect
+ # eg. | <_x_margin> => |
+ _x_margin: float = 0
+ _y_margin: float = 0
+
+ # multiplier of the text content's height in order
+ # to force a larger (y-dimension) bounding rect.
+ _y_txt_h_scaling: float = 1
def __init__(
self,
parent: pg.GraphicsItem,
digits: int = 2,
- bg_color: str = 'bracket',
+ bg_color: str = 'default',
fg_color: str = 'black',
- opacity: int = 1, # XXX: seriously don't set this to 0
+ opacity: int = .8, # XXX: seriously don't set this to 0
font_size: str = 'default',
use_arrow: bool = True,
@@ -322,6 +401,7 @@ def __init__(
self.setParentItem(parent)
self.setFlag(self.ItemIgnoresTransformations)
+ self.setZValue(100)
# XXX: pretty sure this is faster
self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
@@ -353,14 +433,14 @@ def paint(
p: QtGui.QPainter,
opt: QtWidgets.QStyleOptionGraphicsItem,
w: QtWidgets.QWidget
+
) -> None:
- """Draw a filled rectangle based on the size of ``.label_str`` text.
+ '''
+ Draw a filled rectangle based on the size of ``.label_str`` text.
Subtypes can customize further by overloading ``.draw()``.
- """
- # p.setCompositionMode(QtWidgets.QPainter.CompositionMode_SourceOver)
-
+ '''
if self.label_str:
# if not self.rect:
@@ -371,7 +451,11 @@ def paint(
p.setFont(self._dpifont.font)
p.setPen(self.fg_color)
- p.drawText(self.rect, self.text_flags, self.label_str)
+ p.drawText(
+ self.rect,
+ self.text_flags,
+ self.label_str,
+ )
def draw(
self,
@@ -379,6 +463,8 @@ def draw(
rect: QtCore.QRectF
) -> None:
+ p.setOpacity(self.opacity)
+
if self._use_arrow:
if not self.path:
self._draw_arrow_path()
@@ -386,15 +472,13 @@ def draw(
p.drawPath(self.path)
p.fillPath(self.path, pg.mkBrush(self.bg_color))
- # this adds a nice black outline around the label for some odd
- # reason; ok by us
- p.setOpacity(self.opacity)
-
# this cause the L1 labels to glitch out if used in the subtype
# and it will leave a small black strip with the arrow path if
# done before the above
- p.fillRect(self.rect, self.bg_color)
-
+ p.fillRect(
+ self.rect,
+ self.bg_color,
+ )
def boundingRect(self): # noqa
'''
@@ -438,15 +522,18 @@ def _size_br_from_str(
txt_h, txt_w = txt_br.height(), txt_br.width()
# print(f'wsw: {self._dpifont.boundingRect(" ")}')
- # allow subtypes to specify a static width and height
+ # allow subtypes to override width and height
h, w = self.size_hint()
- # print(f'axis size: {self._parent.size()}')
- # print(f'axis geo: {self._parent.geometry()}')
self.rect = QtCore.QRectF(
- 0, 0,
+
+ # relative bounds offsets
+ self._x_br_offset,
+ self._y_br_offset,
+
(w or txt_w) + self._x_margin / 2,
- (h or txt_h) + self._y_margin / 2,
+
+ (h or txt_h) * self._y_txt_h_scaling + (self._y_margin / 2),
)
# print(self.rect)
# hb = self.path.controlPointRect()
@@ -522,7 +609,7 @@ def _draw_arrow_path(self):
class YAxisLabel(AxisLabel):
- _y_margin = 4
+ _y_margin: int = 4
text_flags = (
QtCore.Qt.AlignLeft
@@ -533,19 +620,19 @@ class YAxisLabel(AxisLabel):
def __init__(
self,
- chart,
+ pi: pgo.PlotItem,
*args,
**kwargs
) -> None:
super().__init__(*args, **kwargs)
- self._chart = chart
-
- chart.sigRangeChanged.connect(self.update_on_resize)
+ self._pi = pi
+ pi.sigRangeChanged.connect(self.update_on_resize)
self._last_datum = (None, None)
+ self.x_offset = 0
# pull text offset from axis from parent axis
if getattr(self._parent, 'txt_offsets', False):
self.x_offset, y_offset = self._parent.txt_offsets()
@@ -564,7 +651,8 @@ def update_label(
value: float, # data for text
# on odd dimension and/or adds nice black line
- x_offset: Optional[int] = None
+ x_offset: int = 0,
+
) -> None:
# this is read inside ``.paint()``
@@ -610,7 +698,7 @@ def update_from_data(
self._last_datum = (index, value)
self.update_label(
- self._chart.mapFromView(QPointF(index, value)),
+ self._pi.mapFromView(QPointF(index, value)),
value
)
diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py
index bad82544a..96187bf2b 100644
--- a/piker/ui/_chart.py
+++ b/piker/ui/_chart.py
@@ -38,21 +38,18 @@
QVBoxLayout,
QSplitter,
)
-import numpy as np
import pyqtgraph as pg
import trio
from ._axes import (
DynamicDateAxis,
PriceAxis,
- YAxisLabel,
)
from ._cursor import (
Cursor,
ContentsLabel,
)
from ..data._sharedmem import ShmArray
-from ._l1 import L1Labels
from ._ohlc import BarItems
from ._curve import (
Curve,
@@ -62,19 +59,20 @@
hcolor,
CHART_MARGINS,
_xaxis_at,
- _min_points_to_show,
+ # _min_points_to_show,
+)
+from ..data.feed import (
+ Feed,
+ Flume,
)
-from ..data.feed import Feed
from ..data._source import Symbol
from ..log import get_logger
from ._interaction import ChartView
from ._forms import FieldsForm
-from .._profile import pg_profile_enabled, ms_slower_then
from ._overlay import PlotItemOverlay
-from ._flows import Flow
+from ._dataviz import Viz
from ._search import SearchWidget
from . import _pg_overrides as pgo
-from .._profile import Profiler
if TYPE_CHECKING:
from ._display import DisplayState
@@ -126,7 +124,10 @@ def __init__(
# self.init_strategy_ui()
# self.vbox.addLayout(self.hbox)
- self._chart_cache: dict[str, LinkedSplits] = {}
+ self._chart_cache: dict[
+ str,
+ tuple[LinkedSplits, LinkedSplits],
+ ] = {}
self.hist_linked: Optional[LinkedSplits] = None
self.rt_linked: Optional[LinkedSplits] = None
@@ -146,40 +147,23 @@ def __init__(
def linkedsplits(self) -> LinkedSplits:
return self.rt_linked
- # def init_timeframes_ui(self):
- # self.tf_layout = QHBoxLayout()
- # self.tf_layout.setSpacing(0)
- # self.tf_layout.setContentsMargins(0, 12, 0, 0)
- # time_frames = ('1M', '5M', '15M', '30M', '1H', '1D', '1W', 'MN')
- # btn_prefix = 'TF'
-
- # for tf in time_frames:
- # btn_name = ''.join([btn_prefix, tf])
- # btn = QtWidgets.QPushButton(tf)
- # # TODO:
- # btn.setEnabled(False)
- # setattr(self, btn_name, btn)
- # self.tf_layout.addWidget(btn)
-
- # self.toolbar_layout.addLayout(self.tf_layout)
-
# XXX: strat loader/saver that we don't need yet.
# def init_strategy_ui(self):
# self.strategy_box = StrategyBoxWidget(self)
# self.toolbar_layout.addWidget(self.strategy_box)
- def set_chart_symbol(
+ def set_chart_symbols(
self,
- symbol_key: str, # of form .
+ group_key: tuple[str], # of form (<fqsn>, ...)
all_linked: tuple[LinkedSplits, LinkedSplits], # type: ignore
) -> None:
# re-sort org cache symbol list in LIFO order
cache = self._chart_cache
- cache.pop(symbol_key, None)
- cache[symbol_key] = all_linked
+ cache.pop(group_key, None)
+ cache[group_key] = all_linked
- def get_chart_symbol(
+ def get_chart_symbols(
self,
symbol_key: str,
@@ -188,8 +172,7 @@ def get_chart_symbol(
async def load_symbols(
self,
- providername: str,
- symbol_keys: list[str],
+ fqsns: list[str],
loglevel: str,
reset: bool = False,
@@ -200,20 +183,11 @@ async def load_symbols(
Expects a ``numpy`` structured array containing all the ohlcv fields.
'''
- fqsns: list[str] = []
-
- # our symbol key style is always lower case
- for key in list(map(str.lower, symbol_keys)):
-
- # fully qualified symbol name (SNS i guess is what we're making?)
- fqsn = '.'.join([key, providername])
- fqsns.append(fqsn)
-
# NOTE: for now we use the first symbol in the set as the "key"
# for the overlay of feeds on the chart.
- group_key = fqsns[0]
+ group_key: tuple[str] = tuple(fqsns)
- all_linked = self.get_chart_symbol(group_key)
+ all_linked = self.get_chart_symbols(group_key)
order_mode_started = trio.Event()
if not self.vbox.isEmpty():
@@ -245,7 +219,6 @@ async def load_symbols(
self._root_n.start_soon(
display_symbol_data,
self,
- providername,
fqsns,
loglevel,
order_mode_started,
@@ -253,8 +226,8 @@ async def load_symbols(
# self.vbox.addWidget(hist_charts)
self.vbox.addWidget(rt_charts)
- self.set_chart_symbol(
- fqsn,
+ self.set_chart_symbols(
+ group_key,
(hist_charts, rt_charts),
)
@@ -495,7 +468,11 @@ def graphics_cycle(self, **kwargs) -> None:
from . import _display
ds = self.display_state
if ds:
- return _display.graphics_update_cycle(ds, **kwargs)
+ return _display.graphics_update_cycle(
+ ds,
+ ds.quotes,
+ **kwargs,
+ )
@property
def symbol(self) -> Symbol:
@@ -546,9 +523,12 @@ def plot_ohlc_main(
symbol: Symbol,
shm: ShmArray,
+ flume: Flume,
sidepane: FieldsForm,
- style: str = 'bar',
+ style: str = 'ohlc_bar',
+
+ **add_plot_kwargs,
) -> ChartPlotWidget:
'''
@@ -568,13 +548,13 @@ def plot_ohlc_main(
# be no distinction since we will have multiple symbols per
# view as part of "aggregate feeds".
self.chart = self.add_plot(
-
- name=symbol.key,
+ name=symbol.fqsn,
shm=shm,
+ flume=flume,
style=style,
_is_main=True,
-
sidepane=sidepane,
+ **add_plot_kwargs,
)
# add crosshair graphic
self.chart.addItem(self.cursor)
@@ -592,12 +572,14 @@ def add_plot(
name: str,
shm: ShmArray,
+ flume: Flume,
array_key: Optional[str] = None,
style: str = 'line',
_is_main: bool = False,
sidepane: Optional[QWidget] = None,
+ draw_kwargs: dict = {},
**cpw_kwargs,
@@ -615,12 +597,13 @@ def add_plot(
# TODO: we gotta possibly assign this back
# to the last subplot on removal of some last subplot
xaxis = DynamicDateAxis(
+ None,
orientation='bottom',
linkedsplits=self
)
axes = {
- 'right': PriceAxis(linkedsplits=self, orientation='right'),
- 'left': PriceAxis(linkedsplits=self, orientation='left'),
+ 'right': PriceAxis(None, orientation='right'),
+ 'left': PriceAxis(None, orientation='left'),
'bottom': xaxis,
}
@@ -645,11 +628,18 @@ def add_plot(
axisItems=axes,
**cpw_kwargs,
)
+ # TODO: wow i can't believe how confusing garbage all this axes
+ # stuff is..
+ for axis in axes.values():
+ axis.pi = cpw.plotItem
+
cpw.hideAxis('left')
+ # cpw.removeAxis('left')
cpw.hideAxis('bottom')
if (
- _xaxis_at == 'bottom' and (
+ _xaxis_at == 'bottom'
+ and (
self.xaxis_chart
or (
not self.subplots
@@ -657,6 +647,8 @@ def add_plot(
)
)
):
+ # hide the previous x-axis chart's bottom axis since we're
+ # presumably being appended to the bottom subplot.
if self.xaxis_chart:
self.xaxis_chart.hideAxis('bottom')
@@ -701,18 +693,25 @@ def add_plot(
# link chart x-axis to main chart
# this is 1/2 of where the `Link` in ``LinkedSplit``
# comes from ;)
- cpw.setXLink(self.chart)
+ cpw.cv.setXLink(self.chart)
+
+ # NOTE: above is the same as the following,
+ # link this subchart's axes to the main top level chart.
+ # if self.chart:
+ # cpw.cv.linkView(0, self.chart.cv)
add_label = False
anchor_at = ('top', 'left')
# draw curve graphics
- if style == 'bar':
+ if style == 'ohlc_bar':
- graphics, data_key = cpw.draw_ohlc(
+ viz = cpw.draw_ohlc(
name,
shm,
- array_key=array_key
+ flume=flume,
+ array_key=array_key,
+ **draw_kwargs,
)
self.cursor.contents_labels.add_label(
cpw,
@@ -723,51 +722,72 @@ def add_plot(
elif style == 'line':
add_label = True
- graphics, data_key = cpw.draw_curve(
+ # graphics, data_key = cpw.draw_curve(
+ viz = cpw.draw_curve(
name,
shm,
+ flume,
array_key=array_key,
color='default_light',
+ **draw_kwargs,
)
elif style == 'step':
add_label = True
- graphics, data_key = cpw.draw_curve(
+ # graphics, data_key = cpw.draw_curve(
+ viz = cpw.draw_curve(
name,
shm,
+ flume,
array_key=array_key,
step_mode=True,
color='davies',
fill_color='davies',
+ **draw_kwargs,
)
else:
raise ValueError(f"Chart style {style} is currently unsupported")
- if not _is_main:
+ # NOTE: back-link the new sub-chart to trigger y-autoranging in
+ # the (ohlc parent) main chart for this linked set.
+ # if self.chart:
+ # main_viz = self.chart.get_viz(self.chart.name)
+ # self.chart.view.enable_auto_yrange(
+ # src_vb=cpw.view,
+ # viz=main_viz,
+ # )
+
+ graphics = viz.graphics
+ data_key = viz.name
+
+ if _is_main:
+ assert style == 'ohlc_bar', 'main chart must be OHLC'
+ else:
# track by name
self.subplots[name] = cpw
if qframe is not None:
self.splitter.addWidget(qframe)
- else:
- assert style == 'bar', 'main chart must be OHLC'
-
# add to cross-hair's known plots
# NOTE: add **AFTER** creating the underlying ``PlotItem``s
# since we require that global (linked charts wide) axes have
# been created!
- self.cursor.add_plot(cpw)
-
- if self.cursor and style != 'bar':
- self.cursor.add_curve_cursor(cpw, graphics)
+ if self.cursor:
+ if (
+ _is_main
+ or style != 'ohlc_bar'
+ ):
+ self.cursor.add_plot(cpw)
+ if style != 'ohlc_bar':
+ self.cursor.add_curve_cursor(cpw, graphics)
- if add_label:
- self.cursor.contents_labels.add_label(
- cpw,
- data_key,
- anchor_at=anchor_at,
- )
+ if add_label:
+ self.cursor.contents_labels.add_label(
+ cpw,
+ data_key,
+ anchor_at=anchor_at,
+ )
self.resize_sidepanes()
return cpw
@@ -797,9 +817,13 @@ def resize_sidepanes(
self.chart.sidepane.setMinimumWidth(sp_w)
+# TODO: we should really drop using this type and instead just
+# write our own wrapper around `PlotItem`..
class ChartPlotWidget(pg.PlotWidget):
'''
- ``GraphicsView`` subtype containing a single ``PlotItem``.
+ ``GraphicsView`` subtype containing a ``.plotItem: PlotItem`` as well
+ as a ``.pi_overlay: PlotItemOverlay`` which helps manage and overlay
+ flow graphics across multiple composed view boxes.
- The added methods allow for plotting OHLC sequences from
``np.ndarray``s with appropriate field names.
@@ -814,8 +838,6 @@ class ChartPlotWidget(pg.PlotWidget):
sig_mouse_leave = QtCore.pyqtSignal(object)
sig_mouse_enter = QtCore.pyqtSignal(object)
- _l1_labels: L1Labels = None
-
mode_name: str = 'view'
# TODO: can take a ``background`` color setting - maybe there's
@@ -858,12 +880,17 @@ def __init__(
self.sidepane: Optional[FieldsForm] = None
# source of our custom interactions
- self.cv = cv = self.mk_vb(name)
+ self.cv = self.mk_vb(name)
- pi = pgo.PlotItem(viewBox=cv, **kwargs)
+ pi = pgo.PlotItem(
+ viewBox=self.cv,
+ name=name,
+ **kwargs,
+ )
+ pi.chart_widget = self
super().__init__(
background=hcolor(view_color),
- viewBox=cv,
+ viewBox=self.cv,
# parent=None,
# plotItem=None,
# antialias=True,
@@ -874,7 +901,9 @@ def __init__(
# give viewbox as reference to chart
# allowing for kb controls and interactions on **this** widget
# (see our custom view mode in `._interactions.py`)
- cv.chart = self
+ self.cv.chart = self
+
+ self.pi_overlay: PlotItemOverlay = PlotItemOverlay(self.plotItem)
# ensure internal pi matches
assert self.cv is self.plotItem.vb
@@ -890,9 +919,9 @@ def __init__(
# self.setViewportMargins(0, 0, 0, 0)
# registry of overlay curve names
- self._flows: dict[str, Flow] = {}
+ self._vizs: dict[str, Viz] = {}
- self._feeds: dict[Symbol, Feed] = {}
+ self.feed: Feed | None = None
self._labels = {} # registry of underlying graphics
self._ysticks = {} # registry of underlying graphics
@@ -903,28 +932,24 @@ def __init__(
# show background grid
self.showGrid(x=False, y=True, alpha=0.3)
- self.cv.enable_auto_yrange()
-
- self.pi_overlay: PlotItemOverlay = PlotItemOverlay(self.plotItem)
-
# indempotent startup flag for auto-yrange subsys
# to detect the "first time" y-domain graphics begin
# to be shown in the (main) graphics view.
self._on_screen: bool = False
def resume_all_feeds(self):
- try:
- for feed in self._feeds.values():
- for flume in feed.flumes.values():
- self.linked.godwidget._root_n.start_soon(feed.resume)
- except RuntimeError:
- # TODO: cancel the qtractor runtime here?
- raise
+ feed = self.feed
+ if feed:
+ try:
+ self.linked.godwidget._root_n.start_soon(feed.resume)
+ except RuntimeError:
+ # TODO: cancel the qtractor runtime here?
+ raise
def pause_all_feeds(self):
- for feed in self._feeds.values():
- for flume in feed.flumes.values():
- self.linked.godwidget._root_n.start_soon(feed.pause)
+ feed = self.feed
+ if feed:
+ self.linked.godwidget._root_n.start_soon(feed.pause)
@property
def view(self) -> ChartView:
@@ -933,47 +958,6 @@ def view(self) -> ChartView:
def focus(self) -> None:
self.view.setFocus()
- def last_bar_in_view(self) -> int:
- self._arrays[self.name][-1]['index']
-
- def is_valid_index(self, index: int) -> bool:
- return index >= 0 and index < self._arrays[self.name][-1]['index']
-
- def _set_xlimits(
- self,
- xfirst: int,
- xlast: int
- ) -> None:
- """Set view limits (what's shown in the main chart "pane")
- based on max/min x/y coords.
- """
- self.setLimits(
- xMin=xfirst,
- xMax=xlast,
- minXRange=_min_points_to_show,
- )
-
- def view_range(self) -> tuple[int, int]:
- vr = self.viewRect()
- return int(vr.left()), int(vr.right())
-
- def bars_range(self) -> tuple[int, int, int, int]:
- '''
- Return a range tuple for the bars present in view.
-
- '''
- main_flow = self._flows[self.name]
- ifirst, l, lbar, rbar, r, ilast = main_flow.datums_range()
- return l, lbar, rbar, r
-
- def curve_width_pxs(
- self,
- ) -> float:
- _, lbar, rbar, _ = self.bars_range()
- return self.view.mapViewToDevice(
- QLineF(lbar, 0, rbar, 0)
- ).length()
-
def pre_l1_xs(self) -> tuple[float, float]:
'''
Return the view x-coord for the value just before
@@ -982,11 +966,16 @@ def pre_l1_xs(self) -> tuple[float, float]:
'''
line_end, marker_right, yaxis_x = self.marker_right_points()
- view = self.view
- line = view.mapToView(
+ line = self.view.mapToView(
QLineF(line_end, 0, yaxis_x, 0)
)
- return line.x1(), line.length()
+ linex, linelen = line.x1(), line.length()
+ # print(
+ # f'line: {line}\n'
+ # f'linex: {linex}\n'
+ # f'linelen: {linelen}\n'
+ # )
+ return linex, linelen
def marker_right_points(
self,
@@ -1004,15 +993,22 @@ def marker_right_points(
'''
# TODO: compute some sensible maximum value here
# and use a humanized scheme to limit to that length.
- l1_len = self._max_l1_line_len
+ from ._l1 import L1Label
+ l1_len = abs(L1Label._x_br_offset)
ryaxis = self.getAxis('right')
r_axis_x = ryaxis.pos().x()
- up_to_l1_sc = r_axis_x - l1_len - 10
-
+ up_to_l1_sc = r_axis_x - l1_len
marker_right = up_to_l1_sc - (1.375 * 2 * marker_size)
- line_end = marker_right - (6/16 * marker_size)
+ # line_end = marker_right - (6/16 * marker_size)
+ line_end = marker_right - marker_size
+ # print(
+ # f'r_axis_x: {r_axis_x}\n'
+ # f'up_to_l1_sc: {up_to_l1_sc}\n'
+ # f'marker_right: {marker_right}\n'
+ # f'line_end: {line_end}\n'
+ # )
return line_end, marker_right, r_axis_x
def default_view(
@@ -1026,133 +1022,51 @@ def default_view(
Set the view box to the "default" startup view of the scene.
'''
- flow = self._flows.get(self.name)
- if not flow:
- log.warning(f'`Flow` for {self.name} not loaded yet?')
- return
+ viz = self.get_viz(self.name)
- index = flow.shm.array['index']
- xfirst, xlast = index[0], index[-1]
- l, lbar, rbar, r = self.bars_range()
- view = self.view
-
- if (
- rbar < 0
- or l < xfirst
- or l < 0
- or (rbar - lbar) < 6
- ):
- # TODO: set fixed bars count on screen that approx includes as
- # many bars as possible before a downsample line is shown.
- begin = xlast - bars_from_y
- view.setXRange(
- min=begin,
- max=xlast,
- padding=0,
- )
- # re-get range
- l, lbar, rbar, r = self.bars_range()
-
- # we get the L1 spread label "length" in view coords
- # terms now that we've scaled either by user control
- # or to the default set of bars as per the immediate block
- # above.
- if not y_offset:
- marker_pos, l1_len = self.pre_l1_xs()
- end = xlast + l1_len + 1
- else:
- end = xlast + y_offset + 1
-
- begin = end - (r - l)
-
- # for debugging
- # print(
- # # f'bars range: {brange}\n'
- # f'xlast: {xlast}\n'
- # f'marker pos: {marker_pos}\n'
- # f'l1 len: {l1_len}\n'
- # f'begin: {begin}\n'
- # f'end: {end}\n'
- # )
-
- # remove any custom user yrange setttings
- if self._static_yrange == 'axis':
- self._static_yrange = None
+ if not viz:
+ log.warning(f'`Viz` for {self.name} not loaded yet?')
+ return
- view.setXRange(
- min=begin,
- max=end,
- padding=0,
+ viz.default_view(
+ bars_from_y,
+ y_offset,
+ do_ds,
)
if do_ds:
- self.view.maybe_downsample_graphics()
- view._set_yrange()
-
- try:
self.linked.graphics_cycle()
- except IndexError:
- pass
def increment_view(
self,
- steps: int = 1,
+ datums: int = 1,
vb: Optional[ChartView] = None,
) -> None:
- """
- Increment the data view one step to the right thus "following"
- the current time slot/step/bar.
+ '''
+ Increment the data view ``datums`` steps toward y-axis thus
+ "following" the current time slot/step/bar.
- """
- l, r = self.view_range()
+ '''
view = vb or self.view
+ viz = self.main_viz
+ l, r = viz.view_range()
+ x_shift = viz.index_step() * datums
+
+ if datums >= 300:
+ print("FUCKING FIX THE GLOBAL STEP BULLSHIT")
+ # breakpoint()
+ return
+
view.setXRange(
- min=l + steps,
- max=r + steps,
+ min=l + x_shift,
+ max=r + x_shift,
# TODO: holy shit, wtf dude... why tf would this not be 0 by
# default... speechless.
padding=0,
)
- def draw_ohlc(
- self,
- name: str,
- shm: ShmArray,
-
- array_key: Optional[str] = None,
-
- ) -> (pg.GraphicsObject, str):
- '''
- Draw OHLC datums to chart.
-
- '''
- graphics = BarItems(
- self.linked,
- self.plotItem,
- pen_color=self.pen_color,
- name=name,
- )
-
- # adds all bar/candle graphics objects for each data point in
- # the np array buffer to be drawn on next render cycle
- self.plotItem.addItem(graphics)
-
- data_key = array_key or name
-
- self._flows[data_key] = Flow(
- name=name,
- plot=self.plotItem,
- _shm=shm,
- is_ohlc=True,
- graphics=graphics,
- )
-
- self._add_sticky(name, bg_color='davies')
-
- return graphics, data_key
-
def overlay_plotitem(
self,
name: str,
@@ -1172,8 +1086,8 @@ def overlay_plotitem(
raise ValueError(f'``axis_side``` must be in {allowed_sides}')
yaxis = PriceAxis(
+ plotitem=None,
orientation=axis_side,
- linkedsplits=self.linked,
**axis_kwargs,
)
@@ -1188,8 +1102,17 @@ def overlay_plotitem(
},
default_axes=[],
)
+ # pi.vb.background.setOpacity(0)
+ yaxis.pi = pi
+ pi.chart_widget = self
pi.hideButtons()
+ # hide all axes not named by ``axis_side``
+ for axname in (
+ ({'bottom'} | allowed_sides) - {axis_side}
+ ):
+ pi.hideAxis(axname)
+
# compose this new plot's graphics with the current chart's
# existing one but with separate axes as neede and specified.
self.pi_overlay.add_plotitem(
@@ -1203,14 +1126,6 @@ def overlay_plotitem(
link_axes=(0,),
)
- # connect auto-yrange callbacks *from* this new
- # view **to** this parent and likewise *from* the
- # main/parent chart back *to* the created overlay.
- cv.enable_auto_yrange(src_vb=self.view)
- # makes it so that interaction on the new overlay will reflect
- # back on the main chart (which overlay was added to).
- self.view.enable_auto_yrange(src_vb=cv)
-
# add axis title
# TODO: do we want this API to still work?
# raxis = pi.getAxis('right')
@@ -1224,6 +1139,7 @@ def draw_curve(
name: str,
shm: ShmArray,
+ flume: Flume,
array_key: Optional[str] = None,
overlay: bool = False,
@@ -1231,45 +1147,65 @@ def draw_curve(
add_label: bool = True,
pi: Optional[pg.PlotItem] = None,
step_mode: bool = False,
+ is_ohlc: bool = False,
+ add_sticky: None | str = 'right',
- **pdi_kwargs,
+ **graphics_kwargs,
- ) -> (pg.PlotDataItem, str):
+ ) -> Viz:
'''
Draw a "curve" (line plot graphics) for the provided data in
the input shm array ``shm``.
'''
color = color or self.pen_color or 'default_light'
- pdi_kwargs.update({
- 'color': color
- })
-
data_key = array_key or name
- curve_type = {
- None: Curve,
- 'step': StepCurve,
- # TODO:
- # 'bars': BarsItems
- }['step' if step_mode else None]
+ pi = pi or self.plotItem
- curve = curve_type(
- name=name,
- **pdi_kwargs,
- )
+ if is_ohlc:
+ graphics = BarItems(
+ color=color,
+ name=name,
+ **graphics_kwargs,
+ )
- pi = pi or self.plotItem
+ else:
+ curve_type = {
+ None: Curve,
+ 'step': StepCurve,
+ # TODO:
+ # 'bars': BarsItems
+ }['step' if step_mode else None]
- self._flows[data_key] = Flow(
- name=name,
- plot=pi,
- _shm=shm,
- is_ohlc=False,
- # register curve graphics with this flow
- graphics=curve,
+ graphics = curve_type(
+ name=name,
+ color=color,
+ **graphics_kwargs,
+ )
+
+ viz = self._vizs[data_key] = Viz(
+ data_key,
+ pi,
+ shm,
+ flume,
+
+ is_ohlc=is_ohlc,
+ # register curve graphics with this viz
+ graphics=graphics,
)
+ # connect auto-yrange callbacks *from* this new
+ # view **to** this parent and likewise *from* the
+ # main/parent chart back *to* the created overlay.
+ pi.vb.enable_auto_yrange(
+ src_vb=self.view,
+ viz=viz,
+ )
+
+ pi.viz = viz
+ assert isinstance(viz.shm, ShmArray)
+
# TODO: this probably needs its own method?
if overlay:
if isinstance(overlay, pgo.PlotItem):
@@ -1278,12 +1214,46 @@ def draw_curve(
f'{overlay} must be from `.plotitem_overlay()`'
)
pi = overlay
- else:
- # anchor_at = ('top', 'left')
- # TODO: something instead of stickies for overlays
- # (we need something that avoids clutter on x-axis).
- self._add_sticky(name, bg_color=color)
+ if add_sticky:
+
+ if pi is not self.plotItem:
+ # overlay = self.pi_overlay
+ # assert pi in overlay.overlays
+ overlay = self.pi_overlay
+ assert pi in overlay.overlays
+ axis = overlay.get_axis(
+ pi,
+ add_sticky,
+ )
+
+ else:
+ axis = pi.getAxis(add_sticky)
+
+ if pi.name not in axis._stickies:
+
+ # TODO: UGH! just make this not here! we should
+ # be making the sticky from code which has access
+ # to the ``Symbol`` instance..
+
+ # if the sticky is for our symbol
+ # use the tick size precision for display
+ name = name or pi.name
+ sym = self.linked.symbol
+ digits = None
+ if name == sym.key:
+ digits = sym.tick_size_digits
+
+ # anchor_at = ('top', 'left')
+
+ # TODO: something instead of stickies for overlays
+ # (we need something that avoids clutter on x-axis).
+ axis.add_sticky(
+ pi=pi,
+ fg_color='black',
+ # bg_color=color,
+ digits=digits,
+ )
# NOTE: this is more or less the RENDER call that tells Qt to
# start showing the generated graphics-curves. This is kind of
@@ -1294,86 +1264,33 @@ def draw_curve(
# the next render cycle; just note a lot of the real-time
# updates are implicit and require a bit of digging to
# understand.
- pi.addItem(curve)
+ pi.addItem(graphics)
- return curve, data_key
+ return viz
- # TODO: make this a ctx mngr
- def _add_sticky(
+ def draw_ohlc(
self,
-
name: str,
- bg_color='bracket',
-
- ) -> YAxisLabel:
-
- # if the sticky is for our symbol
- # use the tick size precision for display
- sym = self.linked.symbol
- if name == sym.key:
- digits = sym.tick_size_digits
- else:
- digits = 2
-
- # add y-axis "last" value label
- last = self._ysticks[name] = YAxisLabel(
- chart=self,
- # parent=self.getAxis('right'),
- parent=self.pi_overlay.get_axis(self.plotItem, 'right'),
- # TODO: pass this from symbol data
- digits=digits,
- opacity=1,
- bg_color=bg_color,
- )
- return last
+ shm: ShmArray,
+ flume: Flume,
- def update_graphics_from_flow(
- self,
- graphics_name: str,
array_key: Optional[str] = None,
+ **draw_curve_kwargs,
- **kwargs,
-
- ) -> pg.GraphicsObject:
+ ) -> Viz:
'''
- Update the named internal graphics from ``array``.
+ Draw OHLC datums to chart.
'''
- flow = self._flows[array_key or graphics_name]
- return flow.update_graphics(
+ return self.draw_curve(
+ name,
+ shm,
+ flume,
array_key=array_key,
- **kwargs,
+ is_ohlc=True,
+ **draw_curve_kwargs,
)
- # def _label_h(self, yhigh: float, ylow: float) -> float:
- # # compute contents label "height" in view terms
- # # to avoid having data "contents" overlap with them
- # if self._labels:
- # label = self._labels[self.name][0]
-
- # rect = label.itemRect()
- # tl, br = rect.topLeft(), rect.bottomRight()
- # vb = self.plotItem.vb
-
- # try:
- # # on startup labels might not yet be rendered
- # top, bottom = (vb.mapToView(tl).y(), vb.mapToView(br).y())
-
- # # XXX: magic hack, how do we compute exactly?
- # label_h = (top - bottom) * 0.42
-
- # except np.linalg.LinAlgError:
- # label_h = 0
- # else:
- # label_h = 0
-
- # # print(f'label height {self.name}: {label_h}')
-
- # if label_h > yhigh - ylow:
- # label_h = 0
-
- # print(f"bounds (ylow, yhigh): {(ylow, yhigh)}")
-
# TODO: pretty sure we can just call the cursor
# directly not? i don't wee why we need special "signal proxies"
# for this lul..
@@ -1386,37 +1303,6 @@ def leaveEvent(self, ev): # noqa
self.sig_mouse_leave.emit(self)
self.scene().leaveEvent(ev)
- def get_index(self, time: float) -> int:
-
- # TODO: this should go onto some sort of
- # data-view thinger..right?
- ohlc = self._flows[self.name].shm.array
-
- # XXX: not sure why the time is so off here
- # looks like we're gonna have to do some fixing..
- indexes = ohlc['time'] >= time
-
- if any(indexes):
- return ohlc['index'][indexes][-1]
- else:
- return ohlc['index'][-1]
-
- def in_view(
- self,
- array: np.ndarray,
-
- ) -> np.ndarray:
- '''
- Slice an input struct array providing only datums
- "in view" of this chart.
-
- '''
- l, lbar, rbar, r = self.bars_range()
- ifirst = array[0]['index']
- # slice data by offset from the first index
- # available in the passed datum set.
- return array[lbar - ifirst:(rbar - ifirst) + 1]
-
def maxmin(
self,
name: Optional[str] = None,
@@ -1431,49 +1317,39 @@ def maxmin(
If ``bars_range`` is provided use that range.
'''
- profiler = Profiler(
- msg=f'`{str(self)}.maxmin(name={name})`: `{self.name}`',
- disabled=not pg_profile_enabled(),
- ms_threshold=ms_slower_then,
- delayed=True,
- )
-
- # TODO: here we should instead look up the ``Flow.shm.array``
+ # TODO: here we should instead look up the ``Viz.shm.array``
# and read directly from shm to avoid copying to memory first
# and then reading it again here.
- flow_key = name or self.name
- flow = self._flows.get(flow_key)
+ viz_key = name or self.name
+ viz = self._vizs.get(viz_key)
+ if viz is None:
+ log.error(f"viz {viz_key} doesn't exist in chart {self.name} !?")
+ return 0, 0
+
+ res = viz.maxmin()
+
if (
- flow is None
+ res is None
):
- log.error(f"flow {flow_key} doesn't exist in chart {self.name} !?")
- key = res = 0, 0
-
+ mxmn = 0, 0
+ if not self._on_screen:
+ self.default_view(do_ds=False)
+ self._on_screen = True
else:
- (
- first,
- l,
- lbar,
- rbar,
- r,
- last,
- ) = bars_range or flow.datums_range()
- profiler(f'{self.name} got bars range')
-
- key = round(lbar), round(rbar)
- res = flow.maxmin(*key)
+ x_range, read_slc, mxmn = res
- if (
- res is None
- ):
- log.warning(
- f"{flow_key} no mxmn for bars_range => {key} !?"
- )
- res = 0, 0
- if not self._on_screen:
- self.default_view(do_ds=False)
- self._on_screen = True
-
- profiler(f'yrange mxmn: {key} -> {res}')
- # print(f'{flow_key} yrange mxmn: {key} -> {res}')
- return res
+ return mxmn
+
+ def get_viz(
+ self,
+ key: str,
+ ) -> Viz:
+ '''
+ Try to get an underlying ``Viz`` by key.
+
+ '''
+ return self._vizs.get(key)
+
+ @property
+ def main_viz(self) -> Viz:
+ return self.get_viz(self.name)
diff --git a/piker/ui/_cursor.py b/piker/ui/_cursor.py
index a27aca8ca..8c358c3f2 100644
--- a/piker/ui/_cursor.py
+++ b/piker/ui/_cursor.py
@@ -71,7 +71,7 @@ def __init__(
plot: ChartPlotWidget, # type: ingore # noqa
pos=None,
- color: str = 'default_light',
+ color: str = 'bracket',
) -> None:
# scale from dpi aware font size
@@ -198,12 +198,11 @@ def update_from_ohlc(
self,
name: str,
- index: int,
+ ix: int,
array: np.ndarray,
) -> None:
# this being "html" is the dumbest shit :eyeroll:
- first = array[0]['index']
self.setText(
"i:{index}
"
@@ -216,7 +215,7 @@ def update_from_ohlc(
"C:{}
"
"V:{}
"
"wap:{}".format(
- *array[index - first][
+ *array[ix][
[
'time',
'open',
@@ -228,7 +227,7 @@ def update_from_ohlc(
]
],
name=name,
- index=index,
+ index=ix,
)
)
@@ -236,15 +235,12 @@ def update_from_value(
self,
name: str,
- index: int,
+ ix: int,
array: np.ndarray,
) -> None:
-
- first = array[0]['index']
- if index < array[-1]['index'] and index > first:
- data = array[index - first][name]
- self.setText(f"{name}: {data:.2f}")
+ data = array[ix][name]
+ self.setText(f"{name}: {data:.2f}")
class ContentsLabels:
@@ -269,17 +265,20 @@ def __init__(
def update_labels(
self,
- index: int,
+ x_in: int,
) -> None:
for chart, name, label, update in self._labels:
- flow = chart._flows[name]
- array = flow.shm.array
+ viz = chart.get_viz(name)
+ array = viz.shm.array
+ index = array[viz.index_field]
+ start = index[0]
+ stop = index[-1]
if not (
- index >= 0
- and index < array[-1]['index']
+ x_in >= start
+ and x_in <= stop
):
# out of range
print('WTF out of range?')
@@ -288,7 +287,10 @@ def update_labels(
# call provided update func with data point
try:
label.show()
- update(index, array)
+ ix = np.searchsorted(index, x_in)
+ if ix > len(array):
+ breakpoint()
+ update(ix, array)
except IndexError:
log.exception(f"Failed to update label: {name}")
@@ -349,7 +351,7 @@ def __init__(
# XXX: not sure why these are instance variables?
# It's not like we can change them on the fly..?
self.pen = pg.mkPen(
- color=hcolor('default'),
+ color=hcolor('bracket'),
style=QtCore.Qt.DashLine,
)
self.lines_pen = pg.mkPen(
@@ -365,7 +367,7 @@ def __init__(
self._lw = self.pixelWidth() * self.lines_pen.width()
# xhair label's color name
- self.label_color: str = 'default'
+ self.label_color: str = 'bracket'
self._y_label_update: bool = True
@@ -418,7 +420,7 @@ def add_plot(
hl.hide()
yl = YAxisLabel(
- chart=plot,
+ pi=plot.plotItem,
# parent=plot.getAxis('right'),
parent=plot.pi_overlay.get_axis(plot.plotItem, 'right'),
digits=digits or self.digits,
@@ -482,25 +484,32 @@ def add_plot(
def add_curve_cursor(
self,
- plot: ChartPlotWidget, # noqa
+ chart: ChartPlotWidget, # noqa
curve: 'PlotCurveItem', # noqa
) -> LineDot:
- # if this plot contains curves add line dot "cursors" to denote
+ # if this chart contains curves add line dot "cursors" to denote
# the current sample under the mouse
- main_flow = plot._flows[plot.name]
+ main_viz = chart.get_viz(chart.name)
+
# read out last index
- i = main_flow.shm.array[-1]['index']
+ i = main_viz.shm.array[-1]['index']
cursor = LineDot(
curve,
index=i,
- plot=plot
+ plot=chart
)
- plot.addItem(cursor)
- self.graphics[plot].setdefault('cursors', []).append(cursor)
+ chart.addItem(cursor)
+ self.graphics[chart].setdefault('cursors', []).append(cursor)
return cursor
- def mouseAction(self, action, plot): # noqa
+ def mouseAction(
+ self,
+ action: str,
+ plot: ChartPlotWidget,
+
+ ) -> None: # noqa
+
log.debug(f"{(action, plot.name)}")
if action == 'Enter':
self.active_plot = plot
diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py
index b9a143a2c..c9ebebcd3 100644
--- a/piker/ui/_curve.py
+++ b/piker/ui/_curve.py
@@ -36,10 +36,6 @@
)
from .._profile import pg_profile_enabled, ms_slower_then
from ._style import hcolor
-# from ._compression import (
-# # ohlc_to_m4_line,
-# ds_m4,
-# )
from ..log import get_logger
from .._profile import Profiler
@@ -55,7 +51,117 @@
}
-class Curve(pg.GraphicsObject):
+class FlowGraphic(pg.GraphicsObject):
+ '''
+ Base class with minimal interface for `QPainterPath` implemented,
+ real-time updated "data flow" graphics.
+
+ See subtypes below.
+
+ '''
+ # sub-type customization methods
+ declare_paintables: Callable | None = None
+ sub_paint: Callable | None = None
+
+ # XXX-NOTE-XXX: graphics caching B)
+ # see explanation for different caching modes:
+ # https://stackoverflow.com/a/39410081
+ cache_mode: int = QGraphicsItem.DeviceCoordinateCache
+ # XXX: WARNING item caching seems to only be useful
+ # if we don't re-generate the entire QPainterPath every time
+ # don't ever use this - it's a colossal nightmare of artefacts
+ # and is disastrous for performance.
+ # QGraphicsItem.ItemCoordinateCache
+ # TODO: still open questions about coord-caching that we should
+ # probably talk to a core dev about:
+ # - if this makes transform interactions slower (such as zooming)
+ # and if so maybe if/when we implement a "history" mode for the
+ # view we disable this in that mode?
+
+ def __init__(
+ self,
+ *args,
+ name: str | None = None,
+
+ # line styling
+ color: str = 'bracket',
+ last_step_color: str | None = None,
+ fill_color: Optional[str] = None,
+ style: str = 'solid',
+
+ **kwargs
+
+ ) -> None:
+
+ self._name = name
+
+ # primary graphics item used for history
+ self.path: QPainterPath = QPainterPath()
+
+ # additional path that can be optionally used for appends which
+ # tries to avoid triggering an update/redraw of the presumably
+ # larger historical ``.path`` above. the flag to enable
+ # this behaviour is found in `Renderer.render()`.
+ self.fast_path: QPainterPath | None = None
+
+ # TODO: evaluating the path capacity stuff and see
+ # if it really makes much diff pre-allocating it.
+ # self._last_cap: int = 0
+ # cap = path.capacity()
+ # if cap != self._last_cap:
+ # print(f'NEW CAPACITY: {self._last_cap} -> {cap}')
+ # self._last_cap = cap
+
+ # all history of curve is drawn in single px thickness
+ self._color: str = color
+ pen = pg.mkPen(hcolor(color), width=1)
+ pen.setStyle(_line_styles[style])
+
+ if 'dash' in style:
+ pen.setDashPattern([8, 3])
+
+ self._pen = pen
+ self._brush = pg.functions.mkBrush(
+ hcolor(fill_color or color)
+ )
+
+ # last segment is drawn in 2px thickness for emphasis
+ if last_step_color:
+ self.last_step_pen = pg.mkPen(
+ hcolor(last_step_color),
+ width=2,
+ )
+ else:
+ self.last_step_pen = pg.mkPen(
+ self._pen,
+ width=2,
+ )
+
+ self._last_line: QLineF = QLineF()
+
+ super().__init__(*args, **kwargs)
+
+ # apply cache mode
+ self.setCacheMode(self.cache_mode)
+
+ def x_uppx(self) -> int:
+
+ px_vecs = self.pixelVectors()[0]
+ if px_vecs:
+ return px_vecs.x()
+ else:
+ return 0
+
+ def x_last(self) -> float | None:
+ '''
+ Return the last most x value of the last line segment or if not
+ drawn yet, ``None``.
+
+ '''
+ return self._last_line.x1() if self._last_line else None
+
+
+class Curve(FlowGraphic):
'''
A faster, simpler, append friendly version of
``pyqtgraph.PlotCurveItem`` built for highly customizable real-time
@@ -72,7 +178,7 @@ class Curve(pg.GraphicsObject):
lower level graphics data can be rendered in different threads and
then read and drawn in this main thread without having to worry
about dealing with Qt's concurrency primitives. See
- ``piker.ui._flows.Renderer`` for details and logic related to lower
+ ``piker.ui._render.Renderer`` for details and logic related to lower
level path generation and incremental update. The main differences in
the path generation code include:
@@ -84,125 +190,38 @@ class Curve(pg.GraphicsObject):
updates don't trigger a full path redraw.
'''
-
- # sub-type customization methods
- declare_paintables: Optional[Callable] = None
- sub_paint: Optional[Callable] = None
+ # TODO: can we remove this?
# sub_br: Optional[Callable] = None
def __init__(
self,
*args,
- step_mode: bool = False,
- color: str = 'default_lightest',
- fill_color: Optional[str] = None,
- style: str = 'solid',
- name: Optional[str] = None,
- use_fpath: bool = True,
+ # color: str = 'default_lightest',
+ # fill_color: Optional[str] = None,
+ # style: str = 'solid',
**kwargs
) -> None:
- self._name = name
-
# brutaaalll, see comments within..
self.yData = None
self.xData = None
- # self._last_cap: int = 0
- self.path: Optional[QPainterPath] = None
-
- # additional path used for appends which tries to avoid
- # triggering an update/redraw of the presumably larger
- # historical ``.path`` above.
- self.use_fpath = use_fpath
- self.fast_path: Optional[QPainterPath] = None
-
# TODO: we can probably just dispense with the parent since
# we're basically only using the pen setting now...
super().__init__(*args, **kwargs)
- # all history of curve is drawn in single px thickness
- pen = pg.mkPen(hcolor(color))
- pen.setStyle(_line_styles[style])
-
- if 'dash' in style:
- pen.setDashPattern([8, 3])
-
- self._pen = pen
-
- # last segment is drawn in 2px thickness for emphasis
- # self.last_step_pen = pg.mkPen(hcolor(color), width=2)
- self.last_step_pen = pg.mkPen(pen, width=2)
-
- self._last_line = QLineF()
-
- # flat-top style histogram-like discrete curve
- # self._step_mode: bool = step_mode
+ self._last_line: QLineF = QLineF()
# self._fill = True
- self._brush = pg.functions.mkBrush(hcolor(fill_color or color))
-
- # NOTE: this setting seems to mostly prevent redraws on mouse
- # interaction which is a huge boon for avg interaction latency.
-
- # TODO: one question still remaining is if this makes trasform
- # interactions slower (such as zooming) and if so maybe if/when
- # we implement a "history" mode for the view we disable this in
- # that mode?
- # don't enable caching by default for the case where the
- # only thing drawn is the "last" line segment which can
- # have a weird artifact where it won't be fully drawn to its
- # endpoint (something we saw on trade rate curves)
- self.setCacheMode(QGraphicsItem.DeviceCoordinateCache)
-
- # XXX: see explanation for different caching modes:
- # https://stackoverflow.com/a/39410081
- # seems to only be useful if we don't re-generate the entire
- # QPainterPath every time
- # curve.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
-
- # don't ever use this - it's a colossal nightmare of artefacts
- # and is disastrous for performance.
- # curve.setCacheMode(QtWidgets.QGraphicsItem.ItemCoordinateCache)
# allow sub-type customization
declare = self.declare_paintables
if declare:
declare()
- # TODO: probably stick this in a new parent
- # type which will contain our own version of
- # what ``PlotCurveItem`` had in terms of base
- # functionality? A `FlowGraphic` maybe?
- def x_uppx(self) -> int:
-
- px_vecs = self.pixelVectors()[0]
- if px_vecs:
- xs_in_px = px_vecs.x()
- return round(xs_in_px)
- else:
- return 0
-
- def px_width(self) -> float:
-
- vb = self.getViewBox()
- if not vb:
- return 0
-
- vr = self.viewRect()
- l, r = int(vr.left()), int(vr.right())
-
- start, stop = self._xrange
- lbar = max(l, start)
- rbar = min(r, stop)
-
- return vb.mapViewToDevice(
- QLineF(lbar, 0, rbar, 0)
- ).length()
-
# XXX: lol brutal, the internals of `CurvePoint` (inherited by
# our `LineDot`) required ``.getData()`` to work..
def getData(self):
@@ -327,14 +346,10 @@ def paint(
p.setPen(self.last_step_pen)
p.drawLine(self._last_line)
- profiler('.drawLine()')
- p.setPen(self._pen)
+ profiler('last datum `.drawLine()`')
+ p.setPen(self._pen)
path = self.path
- # cap = path.capacity()
- # if cap != self._last_cap:
- # print(f'NEW CAPACITY: {self._last_cap} -> {cap}')
- # self._last_cap = cap
if path:
p.drawPath(path)
@@ -357,22 +372,30 @@ def draw_last_datum(
self,
path: QPainterPath,
src_data: np.ndarray,
- render_data: np.ndarray,
reset: bool,
array_key: str,
+ index_field: str,
) -> None:
# default line draw last call
# with self.reset_cache():
- x = render_data['index']
- y = render_data[array_key]
+ x = src_data[index_field]
+ y = src_data[array_key]
+
+ x_last = x[-1]
+ x_2last = x[-2]
# draw the "current" step graphic segment so it
# lines up with the "middle" of the current
# (OHLC) sample.
self._last_line = QLineF(
- x[-2], y[-2],
- x[-1], y[-1],
+
+ # NOTE: currently we draw in x-domain
+ # from last datum to current such that
+ # the end of line touches the "beginning"
+ # of the current datum step span.
+ x_2last, y[-2],
+ x_last, y[-1],
)
return x, y
@@ -384,17 +407,20 @@ def draw_last_datum(
# (via it's max / min) even when highly zoomed out.
class FlattenedOHLC(Curve):
+ # avoids strange dragging/smearing artifacts when panning..
+ cache_mode: int = QGraphicsItem.NoCache
+
def draw_last_datum(
self,
path: QPainterPath,
src_data: np.ndarray,
- render_data: np.ndarray,
reset: bool,
array_key: str,
+ index_field: str,
) -> None:
lasts = src_data[-2:]
- x = lasts['index']
+ x = lasts[index_field]
y = lasts['close']
# draw the "current" step graphic segment so it
@@ -418,9 +444,9 @@ def draw_last_datum(
self,
path: QPainterPath,
src_data: np.ndarray,
- render_data: np.ndarray,
reset: bool,
array_key: str,
+ index_field: str,
w: float = 0.5,
@@ -429,14 +455,13 @@ def draw_last_datum(
# TODO: remove this and instead place all step curve
# updating into pre-path data render callbacks.
# full input data
- x = src_data['index']
+ x = src_data[index_field]
y = src_data[array_key]
x_last = x[-1]
x_2last = x[-2]
y_last = y[-1]
step_size = x_last - x_2last
- half_step = step_size / 2
# lol, commenting this makes step curves
# all "black" for me :eyeroll:..
@@ -445,7 +470,7 @@ def draw_last_datum(
x_last, 0,
)
self._last_step_rect = QRectF(
- x_last - half_step, 0,
+ x_last, 0,
step_size, y_last,
)
return x, y
@@ -458,9 +483,3 @@ def sub_paint(
# p.drawLines(*tuple(filter(bool, self._last_step_lines)))
# p.drawRect(self._last_step_rect)
p.fillRect(self._last_step_rect, self._brush)
-
- # def sub_br(
- # self,
- # parent_br: QRectF | None = None,
- # ) -> QRectF:
- # return self._last_step_rect
diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py
new file mode 100644
index 000000000..9f49691ac
--- /dev/null
+++ b/piker/ui/_dataviz.py
@@ -0,0 +1,1266 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+'''
+Data visualization APIs
+
+'''
+from __future__ import annotations
+from functools import lru_cache
+from math import (
+ ceil,
+ floor,
+)
+from typing import (
+ Optional,
+ Literal,
+ TYPE_CHECKING,
+)
+
+import msgspec
+import numpy as np
+import pyqtgraph as pg
+from PyQt5.QtCore import QLineF
+
+from ..data._sharedmem import (
+ ShmArray,
+)
+from ..data.feed import Flume
+from ..data._formatters import (
+ IncrementalFormatter,
+ OHLCBarsFmtr, # Plain OHLC renderer
+ OHLCBarsAsCurveFmtr, # OHLC converted to line
+ StepCurveFmtr, # "step" curve (like for vlm)
+)
+from ..data._pathops import (
+ slice_from_time,
+)
+from ._ohlc import (
+ BarItems,
+)
+from ._curve import (
+ Curve,
+ StepCurve,
+ FlattenedOHLC,
+)
+from ._render import Renderer
+from ..log import get_logger
+from .._profile import (
+ Profiler,
+ pg_profile_enabled,
+ ms_slower_then,
+)
+
+
+if TYPE_CHECKING:
+ from ._interaction import ChartView
+ from ._chart import ChartPlotWidget
+ from ._display import DisplayState
+
+
+log = get_logger(__name__)
+
+
+def render_baritems(
+ viz: Viz,
+ graphics: BarItems,
+ read: tuple[
+ int, int, np.ndarray,
+ int, int, np.ndarray,
+ ],
+ profiler: Profiler,
+ **kwargs,
+
+) -> None:
+ '''
+ Graphics management logic for a ``BarItems`` object.
+
+ Mostly just logic to determine when and how to downsample an OHLC
+ lines curve into a flattened line graphic and when to display one
+ graphic or the other.
+
+ TODO: this should likely be moved into some kind of better abstraction
+ layer, if not a `Renderer` then something just above it?
+
+ '''
+ bars = graphics
+
+ self = viz # TODO: make this a ``Viz`` method?
+ r = self._src_r
+ first_render: bool = False
+
+ # if no source data renderer exists create one.
+ if not r:
+ first_render = True
+
+ # OHLC bars path renderer
+ r = self._src_r = Renderer(
+ viz=self,
+ fmtr=OHLCBarsFmtr(
+ shm=viz.shm,
+ viz=viz,
+ ),
+ )
+
+ ds_curve_r = Renderer(
+ viz=self,
+ fmtr=OHLCBarsAsCurveFmtr(
+ shm=viz.shm,
+ viz=viz,
+ ),
+ )
+
+ curve = FlattenedOHLC(
+ name=f'{viz.name}_ds_ohlc',
+ color=bars._color,
+ )
+ viz.ds_graphics = curve
+ curve.hide()
+ self.plot.addItem(curve)
+
+ # baseline "line" downsampled OHLC curve that should
+ # kick on only when we reach a certain uppx threshold.
+ self._alt_r = (ds_curve_r, curve)
+
+ ds_r, curve = self._alt_r
+
+ # print(
+ # f'r: {r.fmtr.xy_slice}\n'
+ # f'ds_r: {ds_r.fmtr.xy_slice}\n'
+ # )
+
+ # do checks for whether or not we require downsampling:
+ # - if we're **not** downsampling then we simply want to
+ # render the bars graphics curve and update..
+    # - if instead we are in a downsampling state then we need to
+ x_gt = 6 * (self.index_step() or 1)
+ uppx = curve.x_uppx()
+ # print(f'BARS UPPX: {uppx}')
+ in_line = should_line = curve.isVisible()
+
+ if (
+ in_line
+ and uppx < x_gt
+ ):
+ # print('FLIPPING TO BARS')
+ should_line = False
+ viz._in_ds = False
+
+ elif (
+ not in_line
+ and uppx >= x_gt
+ ):
+ # print('FLIPPING TO LINE')
+ should_line = True
+ viz._in_ds = True
+
+ profiler(f'ds logic complete line={should_line}')
+
+ # do graphics updates
+ if should_line:
+ r = ds_r
+ graphics = curve
+ profiler('updated ds curve')
+
+ else:
+ graphics = bars
+
+ if first_render:
+ bars.show()
+
+ changed_to_line = False
+ if (
+ not in_line
+ and should_line
+ ):
+ # change to line graphic
+ log.info(
+ f'downsampling to line graphic {self.name}'
+ )
+ bars.hide()
+ curve.show()
+ curve.update()
+ changed_to_line = True
+
+ elif (
+ in_line
+ and not should_line
+ ):
+ # change to bars graphic
+ log.info(
+ f'showing bars graphic {self.name}\n'
+ f'first bars render?: {first_render}'
+ )
+ curve.hide()
+ bars.show()
+ bars.update()
+
+ # XXX: is this required?
+ viz._in_ds = should_line
+
+ should_redraw = (
+ changed_to_line
+ or not should_line
+ )
+ return (
+ graphics,
+ r,
+ should_redraw,
+ should_line,
+ )
+
+
+_sample_rates: set[float] = {1, 60}
+
+
+class Viz(msgspec.Struct): # , frozen=True):
+ '''
+ (Data) "Visualization" compound type which wraps a real-time
+ shm array stream with displayed graphics (curves, charts)
+ for high level access and control as well as efficient incremental
+ update.
+
+ The intention is for this type to eventually be capable of shm-passing
+ of incrementally updated graphics stream data between actors.
+
+ '''
+ name: str
+ plot: pg.PlotItem
+ _shm: ShmArray
+ flume: Flume
+ graphics: Curve | BarItems
+
+ # for tracking y-mn/mx for y-axis auto-ranging
+ yrange: tuple[float, float] = None
+
+ # in some cases a viz may want to change its
+ # graphical "type" or, "form" when downsampling, to
+ # start this is only ever an interpolation line.
+ ds_graphics: Optional[Curve] = None
+
+ is_ohlc: bool = False
+ render: bool = True # toggle for display loop
+
+ _index_field: Literal[
+ 'index',
+ 'time',
+
+ # TODO: idea is to re-index all time series to a common
+ # longest-len-int-index where we avoid gaps and instead
+ # graph on the 0 -> N domain of the array index super set.
+ # 'gapless',
+
+ ] = 'time'
+
+ # downsampling state
+ _last_uppx: float = 0
+ _in_ds: bool = False
+ _index_step: float | None = None
+
+ # map from uppx -> (downsampled data, incremental graphics)
+ _src_r: Renderer | None = None
+ _alt_r: tuple[
+ Renderer,
+ pg.GraphicsItem
+ ] | None = None
+
+ # cache of y-range values per x-range input.
+ _mxmns: dict[
+ tuple[int, int],
+ tuple[float, float],
+ ] = {}
+
+ # cache of median calcs from input read slice hashes
+ # see `.median()`
+ _meds: dict[
+ int,
+ float,
+ ] = {}
+
+ # to make lru_cache-ing work, see
+ # https://docs.python.org/3/faq/programming.html#how-do-i-cache-method-calls
+ def __eq__(self, other):
+ return self._shm._token == other._shm._token
+
+ def __hash__(self):
+ return hash(self._shm._token)
+
+ @property
+ def shm(self) -> ShmArray:
+ return self._shm
+
+ @property
+ def index_field(self) -> str:
+ return self._index_field
+
+ def index_step(
+ self,
+ reset: bool = False,
+
+ ) -> float:
+
+        # attempt to detect the best step size by scanning a sample of
+ # the source data.
+ if self._index_step is None:
+
+ index = self.shm.array[self.index_field]
+ isample = index[:16]
+
+ mxdiff: None | float = None
+ for step in np.diff(isample):
+ if step in _sample_rates:
+ if (
+ mxdiff is not None
+ and step != mxdiff
+ ):
+ raise ValueError(
+ f'Multiple step sizes detected? {mxdiff}, {step}'
+ )
+ mxdiff = step
+
+ self._index_step = max(mxdiff, 1)
+ if (
+ mxdiff < 1
+ or 1 < mxdiff < 60
+ ):
+ # TODO: remove this once we're sure the above scan loop
+ # is rock solid.
+ breakpoint()
+
+ return self._index_step
+
+ def maxmin(
+ self,
+
+ x_range: slice | tuple[int, int] | None = None,
+ i_read_range: tuple[int, int] | None = None,
+ use_caching: bool = True,
+
+ ) -> tuple[float, float] | None:
+ '''
+ Compute the cached max and min y-range values for a given
+ x-range determined by ``lbar`` and ``rbar`` or ``None``
+ if no range can be determined (yet).
+
+ '''
+ name = self.name
+ profiler = Profiler(
+ msg=f'`Viz[{name}].maxmin()`',
+ disabled=not pg_profile_enabled(),
+ ms_threshold=4,
+ delayed=True,
+ )
+
+ shm = self.shm
+ if shm is None:
+ return None
+
+ do_print: bool = False
+ arr = shm.array
+
+ if i_read_range is not None:
+ read_slc = slice(*i_read_range)
+ index = arr[read_slc][self.index_field]
+ if not index.size:
+ return None
+ ixrng = (index[0], index[-1])
+
+ else:
+ if x_range is None:
+ (
+ l,
+ _,
+ lbar,
+ rbar,
+ _,
+ r,
+ ) = self.datums_range()
+
+ profiler(f'{self.name} got bars range')
+ x_range = lbar, rbar
+
+ # TODO: hash the slice instead maybe?
+ # https://stackoverflow.com/a/29980872
+ lbar, rbar = ixrng = round(x_range[0]), round(x_range[1])
+
+ if use_caching:
+ cached_result = self._mxmns.get(ixrng)
+ if cached_result:
+ if do_print:
+ print(
+ f'{self.name} CACHED maxmin\n'
+ f'{ixrng} -> {cached_result}'
+ )
+ read_slc, mxmn = cached_result
+ return (
+ ixrng,
+ read_slc,
+ mxmn,
+ )
+
+ if i_read_range is None:
+ # get relative slice indexes into array
+ if self.index_field == 'time':
+ read_slc = slice_from_time(
+ arr,
+ start_t=lbar,
+ stop_t=rbar,
+ step=self.index_step(),
+ )
+
+ else:
+ ifirst = arr[0]['index']
+ read_slc = slice(
+ lbar - ifirst,
+ (rbar - ifirst) + 1
+ )
+
+ slice_view = arr[read_slc]
+
+ if not slice_view.size:
+ log.warning(
+ f'{self.name} no maxmin in view?\n'
+ f"{name} no mxmn for bars_range => {ixrng} !?"
+ )
+ return None
+
+ elif self.yrange:
+ mxmn = self.yrange
+ if do_print:
+ print(
+ f'{self.name} M4 maxmin:\n'
+ f'{ixrng} -> {mxmn}'
+ )
+ else:
+ if self.is_ohlc:
+ ylow = np.min(slice_view['low'])
+ yhigh = np.max(slice_view['high'])
+
+ else:
+ view = slice_view[self.name]
+ ylow = np.min(view)
+ yhigh = np.max(view)
+
+ mxmn = ylow, yhigh
+ if (
+ do_print
+ ):
+ s = 3
+ print(
+ f'{self.name} MANUAL ohlc={self.is_ohlc} maxmin:\n'
+ f'{ixrng} -> {mxmn}\n'
+ f'read_slc: {read_slc}\n'
+ # f'abs_slc: {slice_view["index"]}\n'
+ f'first {s}:\n{slice_view[:s]}\n'
+ f'last {s}:\n{slice_view[-s:]}\n'
+ )
+
+ # cache result for input range
+ assert mxmn
+ self._mxmns[ixrng] = (read_slc, mxmn)
+ profiler(f'yrange mxmn cacheing: {x_range} -> {mxmn}')
+ return (
+ ixrng,
+ read_slc,
+ mxmn,
+ )
+
+ @lru_cache(maxsize=6116)
+ def median_from_range(
+ self,
+ start: int,
+ stop: int,
+
+ ) -> float:
+ in_view = self.shm.array[start:stop]
+ if self.is_ohlc:
+ return np.median(in_view['close'])
+ else:
+ return np.median(in_view[self.name])
+
+ def view_range(self) -> tuple[int, int]:
+ '''
+ Return the start and stop x-indexes for the managed ``ViewBox``.
+
+ '''
+ vr = self.plot.viewRect()
+ return (
+ vr.left(),
+ vr.right(),
+ )
+
+ def bars_range(self) -> tuple[int, int, int, int]:
+ '''
+ Return a range tuple for the left-view, left-datum, right-datum
+ and right-view x-indices.
+
+ '''
+ l, start, datum_start, datum_stop, stop, r = self.datums_range()
+ return l, datum_start, datum_stop, r
+
+ def datums_range(
+ self,
+ view_range: None | tuple[float, float] = None,
+ index_field: str | None = None,
+ array: None | np.ndarray = None,
+
+ ) -> tuple[
+ int, int, int, int, int, int
+ ]:
+ '''
+ Return a range tuple for the datums present in view.
+
+ '''
+ l, r = view_range or self.view_range()
+
+ index_field: str = index_field or self.index_field
+ if index_field == 'index':
+ l, r = round(l), round(r)
+
+ if array is None:
+ array = self.shm.array
+
+ index = array[index_field]
+ first = floor(index[0])
+ last = ceil(index[-1])
+
+ # first and last datums in view determined by
+ # l / r view range.
+ leftmost = floor(l)
+ rightmost = ceil(r)
+
+ # invalid view state
+ if (
+ r < l
+ or l < 0
+ or r < 0
+ or (l > last and r > last)
+ ):
+ leftmost = first
+ rightmost = last
+ else:
+ rightmost = max(
+ min(last, rightmost),
+ first,
+ )
+
+ leftmost = min(
+ max(first, leftmost),
+ last,
+ rightmost - 1,
+ )
+
+ assert leftmost < rightmost
+
+ return (
+ l, # left x-in-view
+ first, # first datum
+ leftmost,
+ rightmost,
+ last, # last_datum
+ r, # right-x-in-view
+ )
+
+ def read(
+ self,
+ array_field: Optional[str] = None,
+ index_field: str | None = None,
+ profiler: None | Profiler = None,
+
+ ) -> tuple[
+ int, int, np.ndarray,
+ int, int, np.ndarray,
+ ]:
+ '''
+ Read the underlying shm array buffer and
+ return the data plus indexes for the first
+        and last datums
+        which have been written to.
+
+ '''
+ index_field: str = index_field or self.index_field
+ vr = l, r = self.view_range()
+
+ # readable data
+ array = self.shm.array
+
+ if profiler:
+ profiler('self.shm.array READ')
+
+ (
+ l,
+ ifirst,
+ lbar,
+ rbar,
+ ilast,
+ r,
+ ) = self.datums_range(
+ view_range=vr,
+ index_field=index_field,
+ array=array,
+ )
+
+ if profiler:
+ profiler('self.datums_range()')
+
+ abs_slc = slice(ifirst, ilast)
+
+ # TODO: support time slicing
+ if index_field == 'time':
+ read_slc = slice_from_time(
+ array,
+ start_t=lbar,
+ stop_t=rbar,
+ )
+
+ # TODO: maybe we should return this from the slicer call
+ # above?
+ in_view = array[read_slc]
+ if in_view.size:
+ abs_indx = in_view['index']
+ abs_slc = slice(
+ int(abs_indx[0]),
+ int(abs_indx[-1]),
+ )
+
+ if profiler:
+ profiler(
+ '`slice_from_time('
+ f'start_t={lbar}'
+ f'stop_t={rbar})'
+ )
+
+ # array-index slicing
+ # TODO: can we do time based indexing using arithmetic presuming
+ # a uniform time stamp step size?
+ else:
+ # get read-relative indices adjusting for master shm index.
+ lbar_i = max(l, ifirst) - ifirst
+ rbar_i = min(r, ilast) - ifirst
+
+ # NOTE: the slice here does NOT include the extra ``+ 1``
+ # BUT the ``in_view`` slice DOES..
+ read_slc = slice(lbar_i, rbar_i)
+ in_view = array[lbar_i: rbar_i + 1]
+ # in_view = array[lbar_i-1: rbar_i+1]
+
+ # XXX: same as ^
+ # to_draw = array[lbar - ifirst:(rbar - ifirst) + 1]
+ if profiler:
+ profiler('index arithmetic for slicing')
+
+ if array_field:
+ array = array[array_field]
+
+ return (
+ # abs indices + full data set
+ abs_slc.start,
+ abs_slc.stop,
+ array,
+
+ # relative (read) indices + in view data
+ read_slc.start,
+ read_slc.stop,
+ in_view,
+ )
+
+ def update_graphics(
+ self,
+ render: bool = True,
+ array_key: str | None = None,
+
+ profiler: Profiler | None = None,
+ do_append: bool = True,
+
+ **kwargs,
+
+ ) -> tuple[
+ bool,
+ tuple[int, int],
+ pg.GraphicsObject,
+ ]:
+ '''
+ Read latest datums from shm and render to (incrementally)
+ render to graphics.
+
+ '''
+ profiler = Profiler(
+ msg=f'Viz.update_graphics() for {self.name}',
+ disabled=not pg_profile_enabled(),
+ ms_threshold=ms_slower_then,
+ # ms_threshold=4,
+ )
+ # shm read and slice to view
+ read = (
+ xfirst,
+ xlast,
+ src_array,
+ ivl,
+ ivr,
+ in_view,
+ ) = self.read(profiler=profiler)
+
+ profiler('read src shm data')
+
+ graphics = self.graphics
+
+ if (
+ not in_view.size
+ or not render
+ ):
+ # print(f'{self.name} not in view (exiting early)')
+ return (
+ False,
+ (ivl, ivr),
+ graphics,
+ )
+
+ should_redraw: bool = False
+ ds_allowed: bool = True # guard for m4 activation
+
+ # TODO: probably specialize ``Renderer`` types instead of
+ # these logic checks?
+ # - put these blocks into a `.load_renderer()` meth?
+ # - consider a OHLCRenderer, StepCurveRenderer, Renderer?
+ r = self._src_r
+ if isinstance(graphics, BarItems):
+ # XXX: special case where we change out graphics
+ # to a line after a certain uppx threshold.
+ (
+ graphics,
+ r,
+ should_redraw,
+ ds_allowed, # in line mode?
+ ) = render_baritems(
+ self,
+ graphics,
+ read,
+ profiler,
+ **kwargs,
+ )
+
+ elif not r:
+ if isinstance(graphics, StepCurve):
+
+ r = self._src_r = Renderer(
+ viz=self,
+ fmtr=StepCurveFmtr(
+ shm=self.shm,
+ viz=self,
+ ),
+ )
+
+ else:
+ r = self._src_r
+ if not r:
+ # just using for ``.diff()`` atm..
+ r = self._src_r = Renderer(
+ viz=self,
+ fmtr=IncrementalFormatter(
+ shm=self.shm,
+ viz=self,
+ ),
+ )
+
+ # ``Curve`` derivative case(s):
+ array_key = array_key or self.name
+
+ # ds update config
+ new_sample_rate: bool = False
+ should_ds: bool = r._in_ds
+ showing_src_data: bool = not r._in_ds
+
+ # downsampling incremental state checking
+ # check for and set std m4 downsample conditions
+ uppx = graphics.x_uppx()
+ uppx_diff = (uppx - self._last_uppx)
+ profiler(f'diffed uppx {uppx}')
+ if (
+ uppx > 1
+ and abs(uppx_diff) >= 1
+ and ds_allowed
+ ):
+ log.debug(
+ f'{array_key} sampler change: {self._last_uppx} -> {uppx}'
+ )
+ self._last_uppx = uppx
+
+ new_sample_rate = True
+ showing_src_data = False
+ should_ds = True
+ should_redraw = True
+
+ # "back to source" case:
+ # this more or less skips use of the m4 downsampler
+ # inside ``Renderer.render()`` which results in a path
+ # drawn verbatim to match the xy source data.
+ elif (
+ uppx <= 2
+ and self._in_ds
+ ):
+ # we should de-downsample back to our original
+ # source data so we clear our path data in prep
+ # to generate a new one from original source data.
+ new_sample_rate = True
+ should_ds = False
+ should_redraw = True
+ showing_src_data = True
+
+ # MAIN RENDER LOGIC:
+ # - determine in view data and redraw on range change
+ # - determine downsampling ops if needed
+ # - (incrementally) update ``QPainterPath``
+
+ out = r.render(
+ read,
+ array_key,
+ profiler,
+ uppx=uppx,
+
+ # TODO: better way to detect and pass this?
+ # if we want to eventually cache renderers for a given uppx
+ # we should probably use this as a key + state?
+ should_redraw=should_redraw,
+ new_sample_rate=new_sample_rate,
+ should_ds=should_ds,
+ showing_src_data=showing_src_data,
+
+ do_append=do_append,
+ )
+
+ if not out:
+ log.warning(f'{self.name} failed to render!?')
+ return (
+ False,
+ (ivl, ivr),
+ graphics,
+ )
+
+ path, reset_cache = out
+
+ # XXX: SUPER UGGGHHH... without this we get stale cache
+ # graphics that "smear" across the view horizontally
+ # when panning and the first datum is out of view..
+ reset_cache = False
+ if (
+ reset_cache
+ ):
+            # assign output paths to graphics obj but
+ # after a coords-cache reset.
+ with graphics.reset_cache():
+ graphics.path = r.path
+ graphics.fast_path = r.fast_path
+
+ self.draw_last(
+ array_key=array_key,
+ last_read=read,
+ reset_cache=reset_cache,
+ )
+ else:
+            # assign output paths to graphics obj
+ graphics.path = r.path
+ graphics.fast_path = r.fast_path
+
+ self.draw_last(
+ array_key=array_key,
+ last_read=read,
+ reset_cache=reset_cache,
+ )
+ # graphics.draw_last_datum(
+ # path,
+ # src_array,
+ # reset_cache,
+ # array_key,
+ # index_field=self.index_field,
+ # )
+        # TODO: does this actually help us in any way (prolly should
+        # look at the source / ask ogi). I think it avoids artifacts on
+ # wheel-scroll downsampling curve updates?
+ # TODO: is this ever better?
+ graphics.prepareGeometryChange()
+ profiler('.prepareGeometryChange()')
+
+ graphics.update()
+ profiler('.update()')
+
+ # track downsampled state
+ self._in_ds = r._in_ds
+
+ return (
+ True,
+ (ivl, ivr),
+ graphics,
+ )
+
+ def draw_last(
+ self,
+ array_key: str | None = None,
+ last_read: tuple | None = None,
+ reset_cache: bool = False,
+ only_last_uppx: bool = False,
+
+ ) -> None:
+
+ # shm read and slice to view
+ (
+ xfirst, xlast, src_array,
+ ivl, ivr, in_view,
+ ) = last_read or self.read()
+
+ array_key = array_key or self.name
+
+ gfx = self.graphics
+
+        # when the renderer is downsampling we choose
+ # to always try and update a single (interpolating)
+ # line segment that spans and tries to display
+ # the last uppx's worth of datums.
+ # we only care about the last pixel's
+ # worth of data since that's all the screen
+ # can represent on the last column where
+ # the most recent datum is being drawn.
+ uppx = ceil(gfx.x_uppx())
+
+ if (
+ (self._in_ds or only_last_uppx)
+ and uppx > 0
+ ):
+ alt_renderer = self._alt_r
+ if alt_renderer:
+ renderer, gfx = alt_renderer
+ else:
+ renderer = self._src_r
+
+ fmtr = renderer.fmtr
+ x = fmtr.x_1d
+ y = fmtr.y_1d
+
+ iuppx = ceil(uppx)
+ if alt_renderer:
+ iuppx = ceil(uppx / fmtr.flat_index_ratio)
+
+ y = y[-iuppx:]
+ ymn, ymx = y.min(), y.max()
+ try:
+ x_start = x[-iuppx]
+ except IndexError:
+                # we're less than an x-px wide so just grab the start
+ # datum index.
+ x_start = x[0]
+
+ gfx._last_line = QLineF(
+ x_start, ymn,
+ x[-1], ymx,
+ )
+ # print(
+ # f'updating DS curve {self.name}@{time_step}s\n'
+ # f'drawing uppx={uppx} mxmn line: {ymn}, {ymx}'
+ # )
+
+ else:
+ x, y = gfx.draw_last_datum(
+ gfx.path,
+ src_array,
+ reset_cache, # never reset path
+ array_key,
+ self.index_field,
+ )
+ # print(f'updating NOT DS curve {self.name}')
+
+ gfx.update()
+
+ def default_view(
+ self,
+ bars_from_y: int = int(616 * 3/8),
+ y_offset: int = 0, # in datums
+ do_ds: bool = True,
+
+ ) -> None:
+ '''
+ Set the plot's viewbox to a "default" startup setting where
+ we try to show the underlying data range sanely.
+
+ '''
+ shm: ShmArray = self.shm
+ array: np.ndarray = shm.array
+ view: ChartView = self.plot.vb
+ (
+ vl,
+ first_datum,
+ datum_start,
+ datum_stop,
+ last_datum,
+ vr,
+ ) = self.datums_range(array=array)
+
+ # invalid case: view is not ordered correctly
+ # return and expect caller to sort it out.
+ if (
+ vl > vr
+ ):
+ log.warning(
+ 'Skipping `.default_view()` viewbox not initialized..\n'
+ f'l -> r: {vl} -> {vr}\n'
+ f'datum_start -> datum_stop: {datum_start} -> {datum_stop}\n'
+ )
+ return
+
+ chartw: ChartPlotWidget = self.plot.getViewWidget()
+ index_field = self.index_field
+ step = self.index_step()
+
+ if index_field == 'time':
+ # transform l -> r view range values into
+ # data index domain to determine how view
+ # should be reset to better match data.
+ read_slc = slice_from_time(
+ array,
+ start_t=vl,
+ stop_t=vr,
+ step=step,
+ )
+ else:
+ read_slc = slice(0, datum_stop - datum_start + 1)
+
+ index_iv = array[index_field][read_slc]
+ uppx: float = self.graphics.x_uppx() or 1
+
+ # l->r distance in scene units, no larger then data span
+ data_diff = last_datum - first_datum
+ rl_diff = vr - vl
+ rescale_to_data: bool = False
+ # new_uppx: float = 1
+
+ if rl_diff > data_diff:
+ rescale_to_data = True
+ rl_diff = data_diff
+ new_uppx: float = data_diff / self.px_width()
+
+ # orient by offset from the y-axis including
+ # space to compensate for the L1 labels.
+ if not y_offset:
+ _, l1_offset = chartw.pre_l1_xs()
+
+ offset = l1_offset
+
+ if (
+ rescale_to_data
+ ):
+ offset = (offset / uppx) * new_uppx
+
+ else:
+ offset = (y_offset * step) + uppx*step
+
+ # align right side of view to the rightmost datum + the selected
+ # offset from above.
+ r_reset = (self.graphics.x_last() or last_datum) + offset
+
+ # no data is in view so check for the only 2 sane cases:
+ # - entire view is LEFT of data
+ # - entire view is RIGHT of data
+ if index_iv.size == 0:
+ log.warning(f'No data in view for {vl} -> {vr}')
+
+ # 2 cases either the view is to the left or right of the
+ # data set.
+ if (
+ vl <= first_datum
+ and vr <= first_datum
+ ):
+ l_reset = first_datum
+
+ elif (
+ vl >= last_datum
+ and vr >= last_datum
+ ):
+ l_reset = r_reset - rl_diff
+
+ else:
+ log.warning(f'Unknown view state {vl} -> {vr}')
+ return
+ # raise RuntimeError(f'Unknown view state {vl} -> {vr}')
+
+ else:
+ # maintain the l->r view distance
+ l_reset = r_reset - rl_diff
+
+        # remove any custom user yrange settings
+ if chartw._static_yrange == 'axis':
+ chartw._static_yrange = None
+
+ view.setXRange(
+ min=l_reset,
+ max=r_reset,
+ padding=0,
+ )
+
+ if do_ds:
+ view.interact_graphics_cycle()
+ # view._set_yrange(viz=self)
+
+ def incr_info(
+ self,
+ ds: DisplayState,
+ update_uppx: float = 16,
+ is_1m: bool = False,
+
+ ) -> tuple:
+ '''
+ Return a slew of graphics related data-flow metrics to do with
+ incrementally updating a data view.
+
+ Output info includes,
+ ----------------------
+
+ uppx: float
+ x-domain units-per-pixel.
+
+        liv: bool
+            telling if the "last datum" is currently in
+            view.
+
+ do_px_step: bool
+ recent data append(s) are enough that the next physical
+ pixel-column should be used for drawing.
+
+ i_diff_t: float
+ the difference between the last globally recorded time stamp
+            and the current one.
+
+ append_diff: int
+            diff between last recorded "append index" (the index at which
+ `do_px_step` was last returned `True`) and the current index.
+
+ do_rt_update: bool
+ `True` only when the uppx is less then some threshold
+ defined by `update_uppx`.
+
+ should_tread: bool
+ determines the first step, globally across all callers, that
+            a set of data views should be "treaded", shifted in the
+ x-domain such that the last datum in view is always in the
+ same spot in non-view/scene (aka GUI coord) terms.
+
+
+ '''
+ # get most recent right datum index in-view
+ l, start, datum_start, datum_stop, stop, r = self.datums_range()
+ lasts = self.shm.array[-1]
+ i_step = lasts['index'] # last index-specific step.
+ i_step_t = lasts['time'] # last time step.
+
+ # fqsn = self.flume.symbol.fqsn
+
+ # check if "last (is) in view" -> is a real-time update necessary?
+ if self.index_field == 'index':
+ liv = (r >= i_step)
+ else:
+ liv = (r >= i_step_t)
+
+ # compute the first available graphic obj's x-units-per-pixel
+ # TODO: make this not loop through all vizs each time!
+ uppx = self.plot.vb.x_uppx()
+
+ # NOTE: this used to be implemented in a dedicated
+ # "increment task": ``check_for_new_bars()`` but it doesn't
+ # make sense to do a whole task switch when we can just do
+ # this simple index-diff and all the fsp sub-curve graphics
+ # are diffed on each draw cycle anyway; so updates to the
+ # "curve" length is already automatic.
+ globalz = ds.globalz
+ varz = ds.hist_vars if is_1m else ds.vars
+
+ last_key = 'i_last_slow_t' if is_1m else 'i_last_t'
+ glast = globalz[last_key]
+
+ # calc datums diff since last global increment
+ i_diff_t: float = i_step_t - glast
+
+ # when the current step is now greater then the last we have
+ # read from the display state globals, we presume that the
+ # underlying source shm buffer has added a new sample and thus
+ # we should increment the global view a step (i.e. tread the
+ # view in place to keep the current datum at the same spot on
+ # screen).
+ should_tread: bool = False
+ if i_diff_t > 0:
+ globalz[last_key] = i_step_t
+ should_tread = True
+
+ # update the "last datum" (aka extending the vizs graphic with
+ # new data) only if the number of unit steps is >= the number of
+ # such unit steps per pixel (aka uppx). Iow, if the zoom level
+ # is such that a datum(s) update to graphics wouldn't span
+ # to a new pixel, we don't update yet.
+ i_last_append = varz['i_last_append']
+ append_diff: int = i_step - i_last_append
+
+ do_px_step = (append_diff * self.index_step()) >= uppx
+ do_rt_update = (uppx < update_uppx)
+
+ if (
+ do_px_step
+ ):
+ varz['i_last_append'] = i_step
+
+ # print(
+ # f'DOING APPEND => {fqsn}\n'
+ # f'i_step: {i_step}\n'
+ # f'i_step_t: {i_step_t}\n'
+ # f'glast: {glast}\n'
+ # f'last_append: {i_last_append}\n'
+ # f'r: {r}\n'
+ # '-----------------------------\n'
+ # f'uppx: {uppx}\n'
+ # f'liv: {liv}\n'
+ # f'do_px_step: {do_px_step}\n'
+ # f'i_diff_t: {i_diff_t}\n'
+ # f'do_rt_update: {do_rt_update}\n'
+ # f'append_diff: {append_diff}\n'
+ # f'should_tread: {should_tread}\n'
+ # )
+
+ varz['i_last'] = i_step
+
+ # TODO: pack this into a struct?
+ return (
+ uppx,
+ liv,
+ do_px_step,
+ i_diff_t,
+ append_diff,
+ do_rt_update,
+ should_tread,
+ )
+
+ def px_width(self) -> float:
+ '''
+ Return the width of the view box containing
+ this graphic in pixel units.
+
+ '''
+ vb = self.plot.vb
+ if not vb:
+ return 0
+
+ vl, vr = self.view_range()
+
+ return vb.mapViewToDevice(
+ QLineF(
+ vl, 0,
+ vr, 0,
+ )
+ ).length()
diff --git a/piker/ui/_display.py b/piker/ui/_display.py
index c7ed9299d..1123d5164 100644
--- a/piker/ui/_display.py
+++ b/piker/ui/_display.py
@@ -22,8 +22,13 @@
'''
from functools import partial
+import itertools
+from math import floor
import time
-from typing import Optional, Any, Callable
+from typing import (
+ Optional,
+ Any,
+)
import tractor
import trio
@@ -36,12 +41,20 @@
Flume,
)
from ..data.types import Struct
+from ..data._sharedmem import (
+ ShmArray,
+)
+from ..data._sampling import (
+ _tick_groups,
+ open_sample_stream,
+)
from ._axes import YAxisLabel
from ._chart import (
ChartPlotWidget,
LinkedSplits,
GodWidget,
)
+from ._dataviz import Viz
from ._l1 import L1Labels
from ._style import hcolor
from ._fsp import (
@@ -50,14 +63,12 @@
has_vlm,
open_vlm_displays,
)
-from ..data._sharedmem import (
- ShmArray,
-)
-from ..data._source import tf_in_1s
from ._forms import (
FieldsForm,
mk_order_pane_layout,
)
+from . import _pg_overrides as pgo
+# from ..data._source import tf_in_1s
from .order_mode import (
open_order_mode,
OrderMode,
@@ -71,27 +82,17 @@
log = get_logger(__name__)
-# TODO: load this from a config.toml!
-_quote_throttle_rate: int = 16 # Hz
-
-
-# a working tick-type-classes template
-_tick_groups = {
- 'clears': {'trade', 'utrade', 'last'},
- 'bids': {'bid', 'bsize'},
- 'asks': {'ask', 'asize'},
-}
-
-# TODO: delegate this to each `Flow.maxmin()` which includes
+# TODO: delegate this to each `Viz.maxmin()` which includes
# caching and further we should implement the following stream based
# approach, likely with ``numba``:
# https://arxiv.org/abs/cs/0610046
# https://github.com/lemire/pythonmaxmin
-def chart_maxmin(
- chart: ChartPlotWidget,
- ohlcv_shm: ShmArray,
- vlm_chart: Optional[ChartPlotWidget] = None,
+def multi_maxmin(
+ i_read_range: tuple[int, int] | None,
+ fast_viz: Viz,
+ vlm_viz: Viz | None = None,
+ profiler: Profiler = None,
) -> tuple[
@@ -105,29 +106,51 @@ def chart_maxmin(
Compute max and min datums "in view" for range limits.
'''
- last_bars_range = chart.bars_range()
- out = chart.maxmin()
-
+ out = fast_viz.maxmin(
+ i_read_range=i_read_range,
+ )
if out is None:
- return (last_bars_range, 0, 0, 0)
+ # log.warning(f'No yrange provided for {name}!?')
+ return (0, 0, 0)
- mn, mx = out
+ (
+ ixrng,
+ read_slc,
+ yrange,
+ ) = out
- mx_vlm_in_view = 0
+ if profiler:
+ profiler(f'fast_viz.maxmin({read_slc})')
+
+ mn, mx = yrange
# TODO: we need to NOT call this to avoid a manual
# np.max/min trigger and especially on the vlm_chart
- # flows which aren't shown.. like vlm?
- if vlm_chart:
- out = vlm_chart.maxmin()
+ # vizs which aren't shown.. like vlm?
+ mx_vlm_in_view = 0
+ if vlm_viz:
+ out = vlm_viz.maxmin(
+ i_read_range=i_read_range,
+ )
if out:
- _, mx_vlm_in_view = out
+ (
+ ixrng,
+ read_slc,
+ mxmn,
+ ) = out
+ mx_vlm_in_view = mxmn[1]
+
+ if profiler:
+ profiler(f'vlm_viz.maxmin({read_slc})')
return (
- last_bars_range,
mx,
- max(mn, 0), # presuming price can't be negative?
- mx_vlm_in_view,
+
+ # enforcing price can't be negative?
+ # TODO: do we even need this?
+ max(mn, 0),
+
+ mx_vlm_in_view, # vlm max
)
@@ -136,21 +159,25 @@ class DisplayState(Struct):
Chart-local real-time graphics state container.
'''
+ fqsn: str
godwidget: GodWidget
quotes: dict[str, Any]
- maxmin: Callable
- ohlcv: ShmArray
- hist_ohlcv: ShmArray
+ flume: Flume
- # high level chart handles
+ # high level chart handles and underlying ``Viz``
chart: ChartPlotWidget
+ viz: Viz
+ hist_chart: ChartPlotWidget
+ hist_viz: Viz
# axis labels
l1: L1Labels
last_price_sticky: YAxisLabel
hist_last_price_sticky: YAxisLabel
+ vlm_viz: Viz
+
# misc state tracking
vars: dict[str, Any] = {
'tick_margin': 0,
@@ -160,78 +187,98 @@ class DisplayState(Struct):
'last_mx': 0,
'last_mn': 0,
}
+ hist_vars: dict[str, Any] = {
+ 'tick_margin': 0,
+ 'i_last': 0,
+ 'i_last_append': 0,
+ 'last_mx_vlm': 0,
+ 'last_mx': 0,
+ 'last_mn': 0,
+ }
+
+ globalz: None | dict[str, Any] = None
vlm_chart: Optional[ChartPlotWidget] = None
vlm_sticky: Optional[YAxisLabel] = None
wap_in_history: bool = False
- def incr_info(
- self,
- chart: Optional[ChartPlotWidget] = None,
- shm: Optional[ShmArray] = None,
- state: Optional[dict] = None, # pass in a copy if you don't
-
- update_state: bool = True,
- update_uppx: float = 16,
-
- ) -> tuple:
-
- shm = shm or self.ohlcv
- chart = chart or self.chart
- state = state or self.vars
-
- if not update_state:
- state = state.copy()
-
- # compute the first available graphic's x-units-per-pixel
- uppx = chart.view.x_uppx()
-
- # NOTE: this used to be implemented in a dedicated
- # "increment task": ``check_for_new_bars()`` but it doesn't
- # make sense to do a whole task switch when we can just do
- # this simple index-diff and all the fsp sub-curve graphics
- # are diffed on each draw cycle anyway; so updates to the
- # "curve" length is already automatic.
-
- # increment the view position by the sample offset.
- i_step = shm.index
- i_diff = i_step - state['i_last']
- state['i_last'] = i_step
-
- append_diff = i_step - state['i_last_append']
-
- # update the "last datum" (aka extending the flow graphic with
- # new data) only if the number of unit steps is >= the number of
- # such unit steps per pixel (aka uppx). Iow, if the zoom level
- # is such that a datum(s) update to graphics wouldn't span
- # to a new pixel, we don't update yet.
- do_append = (append_diff >= uppx)
- if do_append:
- state['i_last_append'] = i_step
-
- do_rt_update = uppx < update_uppx
-
- _, _, _, r = chart.bars_range()
- liv = r >= i_step
-
- # TODO: pack this into a struct
- return (
- uppx,
- liv,
- do_append,
- i_diff,
- append_diff,
- do_rt_update,
- )
+
+async def increment_history_view(
+ ds: DisplayState,
+):
+ hist_chart = ds.hist_chart
+ hist_viz = ds.hist_viz
+ assert 'hist' in hist_viz.shm.token['shm_name']
+
+ # TODO: seems this is more reliable at keeping the slow
+ # chart incremented in view more correctly?
+ # - It might make sense to just inline this logic with the
+ # main display task? => it's a tradeoff of slower task
+    #   wakeups/ctx switches versus logic checks (as normal)
+ # - we need increment logic that only does the view shift
+ # call when the uppx permits/needs it
+ async with open_sample_stream(1.) as istream:
+ async for msg in istream:
+
+ profiler = Profiler(
+ msg=f'History chart cycle for: `{ds.fqsn}`',
+ delayed=True,
+ disabled=not pg_profile_enabled(),
+ ms_threshold=ms_slower_then,
+ # ms_threshold=4,
+ )
+
+ # l3 = ds.viz.shm.array[-3:]
+ # print(
+ # f'fast step for {ds.flume.symbol.fqsn}:\n'
+ # f'{list(l3["time"])}\n'
+ # f'{l3}\n'
+ # )
+ # check if slow chart needs an x-domain shift and/or
+ # y-range resize.
+ (
+ uppx,
+ liv,
+ do_px_step,
+ i_diff_t,
+ append_diff,
+ do_rt_update,
+ should_tread,
+
+ ) = hist_viz.incr_info(
+ ds=ds,
+ is_1m=True,
+ )
+
+ if do_px_step:
+ hist_viz.update_graphics()
+ profiler('`hist Viz.update_graphics()` call')
+
+ if liv:
+ hist_viz.plot.vb.interact_graphics_cycle(
+ do_linked_charts=False,
+ # do_overlay_scaling=False,
+ )
+ profiler('hist chart yrange view')
+
+ # check if tread-in-place view x-shift is needed
+ if should_tread:
+ # ensure path graphics append is shown on treads since
+ # the main rt loop does not call this.
+ hist_chart.increment_view(datums=append_diff)
+ profiler('hist tread view')
+
+ profiler.finish()
async def graphics_update_loop(
nurse: trio.Nursery,
godwidget: GodWidget,
- flume: Flume,
+ feed: Feed,
+ pis: dict[str, list[pgo.PlotItem, pgo.PlotItem]] = {},
wap_in_history: bool = False,
- vlm_chart: Optional[ChartPlotWidget] = None,
+ vlm_charts: dict[str, ChartPlotWidget] = {},
) -> None:
'''
@@ -255,560 +302,610 @@ async def graphics_update_loop(
fast_chart = linked.chart
hist_chart = godwidget.hist_linked.chart
+ assert hist_chart
+
+ # per-viz-set global last index tracking for global chart
+ # view UX incrementing; these values are singleton
+ # per-multichart-set such that automatic x-domain shifts are only
+ # done once per time step update.
+ globalz = {
+ 'i_last_t': 0, # multiview-global fast (1s) step index
+ 'i_last_slow_t': 0, # multiview-global slow (1m) step index
+ }
- ohlcv = flume.rt_shm
- hist_ohlcv = flume.hist_shm
+ dss: dict[str, DisplayState] = {}
- # update last price sticky
- last_price_sticky = fast_chart._ysticks[fast_chart.name]
- last_price_sticky.update_from_data(
- *ohlcv.array[-1][['index', 'close']]
- )
+ for fqsn, flume in feed.flumes.items():
+ ohlcv = flume.rt_shm
+ hist_ohlcv = flume.hist_shm
+ symbol = flume.symbol
+ fqsn = symbol.fqsn
- hist_last_price_sticky = hist_chart._ysticks[hist_chart.name]
- hist_last_price_sticky.update_from_data(
- *hist_ohlcv.array[-1][['index', 'close']]
- )
+ # update last price sticky
+ fast_viz = fast_chart._vizs[fqsn]
+ index_field = fast_viz.index_field
+ fast_pi = fast_viz.plot
+ last_price_sticky = fast_pi.getAxis('right')._stickies[fqsn]
+ last_price_sticky.update_from_data(
+ *ohlcv.array[-1][[
+ index_field,
+ 'close',
+ ]]
+ )
+ last_price_sticky.show()
+
+ hist_viz = hist_chart._vizs[fqsn]
+ slow_pi = hist_viz.plot
+ hist_last_price_sticky = slow_pi.getAxis('right')._stickies[fqsn]
+ hist_last_price_sticky.update_from_data(
+ *hist_ohlcv.array[-1][[
+ index_field,
+ 'close',
+ ]]
+ )
- maxmin = partial(
- chart_maxmin,
- fast_chart,
- ohlcv,
- vlm_chart,
- )
- last_bars_range: tuple[float, float]
- (
- last_bars_range,
- last_mx,
- last_mn,
- last_mx_vlm,
- ) = maxmin()
+ vlm_chart = vlm_charts[fqsn]
+ vlm_viz = vlm_chart._vizs.get('volume') if vlm_chart else None
- last, volume = ohlcv.array[-1][['close', 'volume']]
+ (
+ last_mx,
+ last_mn,
+ last_mx_vlm,
+ ) = multi_maxmin(
+ None,
+ fast_viz,
+ vlm_viz,
+ )
- symbol = fast_chart.linked.symbol
+ last, volume = ohlcv.array[-1][['close', 'volume']]
- l1 = L1Labels(
- fast_chart,
- # determine precision/decimal lengths
- digits=symbol.tick_size_digits,
- size_digits=symbol.lot_size_digits,
- )
- fast_chart._l1_labels = l1
-
- # TODO:
- # - in theory we should be able to read buffer data faster
- # then msgs arrive.. needs some tinkering and testing
-
- # - if trade volume jumps above / below prior L1 price
- # levels this might be dark volume we need to
- # present differently -> likely dark vlm
-
- tick_size = fast_chart.linked.symbol.tick_size
- tick_margin = 3 * tick_size
-
- fast_chart.show()
- last_quote = time.time()
- i_last = ohlcv.index
-
- ds = linked.display_state = DisplayState(**{
- 'godwidget': godwidget,
- 'quotes': {},
- 'maxmin': maxmin,
- 'ohlcv': ohlcv,
- 'hist_ohlcv': hist_ohlcv,
- 'chart': fast_chart,
- 'last_price_sticky': last_price_sticky,
- 'hist_last_price_sticky': hist_last_price_sticky,
- 'l1': l1,
-
- 'vars': {
- 'tick_margin': tick_margin,
- 'i_last': i_last,
- 'i_last_append': i_last,
- 'last_mx_vlm': last_mx_vlm,
- 'last_mx': last_mx,
- 'last_mn': last_mn,
- }
- })
+ symbol = flume.symbol
- if vlm_chart:
- vlm_sticky = vlm_chart._ysticks['volume']
- ds.vlm_chart = vlm_chart
- ds.vlm_sticky = vlm_sticky
-
- fast_chart.default_view()
-
- # TODO: probably factor this into some kinda `DisplayState`
- # API that can be reused at least in terms of pulling view
- # params (eg ``.bars_range()``).
- async def increment_history_view():
- i_last = hist_ohlcv.index
- state = ds.vars.copy() | {
- 'i_last_append': i_last,
- 'i_last': i_last,
- }
- _, hist_step_size_s, _ = flume.get_ds_info()
-
- async with flume.index_stream(
- # int(hist_step_size_s)
- # TODO: seems this is more reliable at keeping the slow
- # chart incremented in view more correctly?
- # - It might make sense to just inline this logic with the
- # main display task? => it's a tradeoff of slower task
- # wakeups/ctx switches verus logic checks (as normal)
- # - we need increment logic that only does the view shift
- # call when the uppx permits/needs it
- int(1),
- ) as istream:
- async for msg in istream:
-
- # check if slow chart needs an x-domain shift and/or
- # y-range resize.
- (
- uppx,
- liv,
- do_append,
- i_diff,
- append_diff,
- do_rt_update,
- ) = ds.incr_info(
- chart=hist_chart,
- shm=ds.hist_ohlcv,
- state=state,
- # update_state=False,
- )
- # print(
- # f'liv: {liv}\n'
- # f'do_append: {do_append}\n'
- # f'append_diff: {append_diff}\n'
- # )
+ l1 = L1Labels(
+ fast_pi,
+ # determine precision/decimal lengths
+ digits=symbol.tick_size_digits,
+ size_digits=symbol.lot_size_digits,
+ )
- if (
- do_append
- and liv
- ):
- hist_chart.increment_view(steps=i_diff)
- hist_chart.view._set_yrange(yrange=hist_chart.maxmin())
+ # TODO:
+ # - in theory we should be able to read buffer data faster
+ # then msgs arrive.. needs some tinkering and testing
- nurse.start_soon(increment_history_view)
+ # - if trade volume jumps above / below prior L1 price
+ # levels this might be dark volume we need to
+ # present differently -> likely dark vlm
- # main real-time quotes update loop
- stream: tractor.MsgStream = flume.stream
- async for quotes in stream:
+ tick_size = symbol.tick_size
+ tick_margin = 3 * tick_size
- ds.quotes = quotes
- quote_period = time.time() - last_quote
- quote_rate = round(
- 1/quote_period, 1) if quote_period > 0 else float('inf')
- if (
- quote_period <= 1/_quote_throttle_rate
+ fast_chart.show()
+ last_quote_s = time.time()
- # in the absolute worst case we shouldn't see more then
- # twice the expected throttle rate right!?
- # and quote_rate >= _quote_throttle_rate * 2
- and quote_rate >= display_rate
- ):
- log.warning(f'High quote rate {symbol.key}: {quote_rate}')
+ dss[fqsn] = ds = linked.display_state = DisplayState(**{
+ 'fqsn': fqsn,
+ 'godwidget': godwidget,
+ 'quotes': {},
+ # 'maxmin': maxmin,
+
+ 'flume': flume,
+
+ 'chart': fast_chart,
+ 'viz': fast_viz,
+ 'last_price_sticky': last_price_sticky,
+
+ 'hist_chart': hist_chart,
+ 'hist_viz': hist_viz,
+ 'hist_last_price_sticky': hist_last_price_sticky,
+
+ 'vlm_viz': vlm_viz,
+
+ 'l1': l1,
+
+ 'vars': {
+ 'tick_margin': tick_margin,
+ 'i_last': 0,
+ 'i_last_append': 0,
+ 'last_mx_vlm': last_mx_vlm,
+ 'last_mx': last_mx,
+ 'last_mn': last_mn,
+ },
+ 'globalz': globalz,
+ })
+
+ if vlm_chart:
+ vlm_pi = vlm_viz.plot
+ vlm_sticky = vlm_pi.getAxis('right')._stickies['volume']
+ ds.vlm_chart = vlm_chart
+ ds.vlm_sticky = vlm_sticky
+
+ fast_chart.default_view()
+
+ # ds.hist_vars.update({
+ # 'i_last_append': 0,
+ # 'i_last': 0,
+ # })
+
+ nurse.start_soon(
+ increment_history_view,
+ ds,
+ )
+
+ if ds.hist_vars['i_last'] < ds.hist_vars['i_last_append']:
+ breakpoint()
+
+ # main real-time quotes update loop
+ stream: tractor.MsgStream
+ async with feed.open_multi_stream() as stream:
+ assert stream
+ async for quotes in stream:
+ quote_period = time.time() - last_quote_s
+ quote_rate = round(
+ 1/quote_period, 1) if quote_period > 0 else float('inf')
+ if (
+ quote_period <= 1/_quote_throttle_rate
- last_quote = time.time()
+                # in the absolute worst case we shouldn't see more than
+ # twice the expected throttle rate right!?
+ # and quote_rate >= _quote_throttle_rate * 2
+ and quote_rate >= display_rate
+ ):
+ pass
+ # log.warning(f'High quote rate {symbol.key}: {quote_rate}')
- # chart isn't active/shown so skip render cycle and pause feed(s)
- if fast_chart.linked.isHidden():
- # print('skipping update')
- fast_chart.pause_all_feeds()
- continue
+ last_quote_s = time.time()
- # ic = fast_chart.view._ic
- # if ic:
- # fast_chart.pause_all_feeds()
- # await ic.wait()
- # fast_chart.resume_all_feeds()
+ for fqsn, quote in quotes.items():
+ ds = dss[fqsn]
+ ds.quotes = quote
- # sync call to update all graphics/UX components.
- graphics_update_cycle(ds)
+ rt_pi, hist_pi = pis[fqsn]
+
+ # chart isn't active/shown so skip render cycle and
+ # pause feed(s)
+ if (
+ fast_chart.linked.isHidden()
+ or not rt_pi.isVisible()
+ ):
+ print(f'{fqsn} skipping update for HIDDEN CHART')
+ fast_chart.pause_all_feeds()
+ continue
+
+ ic = fast_chart.view._ic
+ if ic:
+ fast_chart.pause_all_feeds()
+ print(f'{fqsn} PAUSING DURING INTERACTION')
+ await ic.wait()
+ fast_chart.resume_all_feeds()
+
+ # sync call to update all graphics/UX components.
+ graphics_update_cycle(
+ ds,
+ quote,
+ )
def graphics_update_cycle(
ds: DisplayState,
+ quote: dict,
+
wap_in_history: bool = False,
trigger_all: bool = False, # flag used by prepend history updates
prepend_update_index: Optional[int] = None,
) -> None:
- # TODO: eventually optimize this whole graphics stack with ``numba``
- # hopefully XD
-
- chart = ds.chart
- # TODO: just pass this as a direct ref to avoid so many attr accesses?
- hist_chart = ds.godwidget.hist_linked.chart
profiler = Profiler(
- msg=f'Graphics loop cycle for: `{chart.name}`',
- delayed=True,
+ msg=f'Graphics loop cycle for: `{ds.fqsn}`',
disabled=not pg_profile_enabled(),
- # disabled=True,
ms_threshold=ms_slower_then,
-
- # ms_threshold=1/12 * 1e3,
+ delayed=True,
+ # ms_threshold=4,
)
- # unpack multi-referenced components
+ # TODO: SPEEDing this all up..
+ # - optimize this whole graphics stack with ``numba`` hopefully
+ # or at least a little `mypyc` B)
+ # - pass more direct refs as input to avoid so many attr accesses?
+ # - use a streaming minmax algo and drop the use of the
+ # state-tracking ``multi_maxmin()`` routine from above?
+
+ fqsn = ds.fqsn
+ chart = ds.chart
vlm_chart = ds.vlm_chart
- # rt "HFT" chart
+ varz = ds.vars
l1 = ds.l1
- ohlcv = ds.ohlcv
+ flume = ds.flume
+ ohlcv = flume.rt_shm
array = ohlcv.array
- vars = ds.vars
- tick_margin = vars['tick_margin']
+ hist_viz = ds.hist_viz
+ main_viz = ds.viz
+ index_field = main_viz.index_field
+
+ tick_margin = varz['tick_margin']
+
+ (
+ uppx,
+ liv,
+ do_px_step,
+ i_diff_t,
+ append_diff,
+ do_rt_update,
+ should_tread,
+ ) = main_viz.incr_info(ds=ds)
+ profiler('`.incr_info()`')
+
+ # TODO: we should only run mxmn when we know
+ # an update is due via ``do_px_step`` above.
+
+ # TODO: eventually we want to separate out the dark vlm and show
+ # them as an additional graphic.
+ clear_types = _tick_groups['clears']
+
+ mx = varz['last_mx']
+ mn = varz['last_mn']
+ mx_vlm_in_view = varz['last_mx_vlm']
+
+ # update ohlc sampled price bars
+ if (
+ # do_rt_update
+ # or do_px_step
+ (liv and do_px_step)
+ or trigger_all
+ ):
+ _, i_read_range, _ = main_viz.update_graphics()
+ profiler('`Viz.update_graphics()` call')
- for sym, quote in ds.quotes.items():
- (
- uppx,
- liv,
- do_append,
- i_diff,
- append_diff,
- do_rt_update,
- ) = ds.incr_info()
-
- # TODO: we should only run mxmn when we know
- # an update is due via ``do_append`` above.
(
- brange,
mx_in_view,
mn_in_view,
mx_vlm_in_view,
- ) = ds.maxmin()
- l, lbar, rbar, r = brange
+ ) = multi_maxmin(
+ i_read_range,
+ main_viz,
+ ds.vlm_viz,
+ profiler,
+ )
+
mx = mx_in_view + tick_margin
mn = mn_in_view - tick_margin
-
- profiler('`ds.maxmin()` call')
-
- if (
- prepend_update_index is not None
- and lbar > prepend_update_index
- ):
- # on a history update (usually from the FSP subsys)
- # if the segment of history that is being prepended
- # isn't in view there is no reason to do a graphics
- # update.
- log.debug('Skipping prepend graphics cycle: frame not in view')
- return
+ profiler(f'{fqsn} `multi_maxmin()` call')
# don't real-time "shift" the curve to the
# left unless we get one of the following:
if (
- (do_append and liv)
+ should_tread
or trigger_all
):
- chart.increment_view(steps=i_diff)
- chart.view._set_yrange(yrange=(mn, mx))
+ chart.increment_view(datums=append_diff)
- if vlm_chart:
- vlm_chart.increment_view(steps=i_diff)
+ # NOTE: since vlm and ohlc charts are axis linked now we don't
+ # need the double increment request?
+ # if vlm_chart:
+ # vlm_chart.increment_view(datums=append_diff)
profiler('view incremented')
- ticks_frame = quote.get('ticks', ())
+ # iterate frames of ticks-by-type such that we only update graphics
+ # using the last update per type where possible.
+ ticks_by_type = quote.get('tbt', {})
+ for typ, ticks in ticks_by_type.items():
- frames_by_type: dict[str, dict] = {}
- lasts = {}
+ # NOTE: ticks are `.append()`-ed to the `ticks_by_type: dict` by the
+ # `._sampling.uniform_rate_send()` loop
+ tick = ticks[-1] # get most recent value
- # build tick-type "frames" of tick sequences since
- # likely the tick arrival rate is higher then our
- # (throttled) quote stream rate.
- for tick in ticks_frame:
- price = tick.get('price')
- ticktype = tick.get('type')
+ price = tick.get('price')
+ size = tick.get('size')
- if ticktype == 'n/a' or price == -1:
- # okkk..
- continue
+ # compute max and min prices (including bid/ask) from
+ # tick frames to determine the y-range for chart
+ # auto-scaling.
+ if (
+ liv
+
+ # TODO: make sure IB doesn't send ``-1``!
+ and price > 0
+ ):
+ mx = max(price + tick_margin, mx)
+ mn = min(price - tick_margin, mn)
+
+ # clearing price update:
+        # generally, we only want to update graphics from the *last*
+ # tick event once - thus showing the most recent state.
+ if typ in clear_types:
+
+ # update price sticky(s)
+ end_ic = array[-1][[
+ index_field,
+ 'close',
+ ]]
+ ds.last_price_sticky.update_from_data(*end_ic)
+ ds.hist_last_price_sticky.update_from_data(*end_ic)
+
+ # update vwap overlay line
+ # if wap_in_history:
+ # chart.get_viz('bar_wap').update_graphics()
+
+ # update OHLC chart last bars
+ # TODO: fix the only last uppx stuff....
+ main_viz.draw_last() # only_last_uppx=True)
+ hist_viz.draw_last() # only_last_uppx=True)
+
+ # L1 book label-line updates
+ if typ in ('last',):
+
+ label = {
+ l1.ask_label.fields['level']: l1.ask_label,
+ l1.bid_label.fields['level']: l1.bid_label,
+ }.get(price)
+
+ if (
+ label is not None
+ and liv
+ ):
+ label.update_fields(
+ {'level': price, 'size': size}
+ )
+
+ # TODO: on trades should we be knocking down
+ # the relevant L1 queue manually ourselves?
+ # label.size -= size
+
+ # NOTE: right now we always update the y-axis labels
+ # despite the last datum not being in view. Ideally
+ # we have a guard for this when we detect that the range
+ # of those values is not in view and then we disable these
+ # blocks.
+ elif (
+ typ in _tick_groups['asks']
+ ):
+ l1.ask_label.update_fields({'level': price, 'size': size})
- # keys are entered in olded-event-inserted-first order
- # since we iterate ``ticks_frame`` in standard order
- # above. in other words the order of the keys is the order
- # of tick events by type from the provider feed.
- frames_by_type.setdefault(ticktype, []).append(tick)
-
- # overwrites so the last tick per type is the entry
- lasts[ticktype] = tick
-
- # from pprint import pformat
- # frame_counts = {
- # typ: len(frame) for typ, frame in frames_by_type.items()
- # }
- # print(f'{pformat(frame_counts)}')
- # print(f'framed: {pformat(frames_by_type)}')
- # print(f'lasts: {pformat(lasts)}')
-
- # TODO: eventually we want to separate out the utrade (aka
- # dark vlm prices) here and show them as an additional
- # graphic.
- clear_types = _tick_groups['clears']
-
- # XXX: if we wanted to iterate in "latest" (i.e. most
- # current) tick first order as an optimization where we only
- # update from the last tick from each type class.
- # last_clear_updated: bool = False
-
- # update ohlc sampled price bars
+ elif (
+ typ in _tick_groups['bids']
+ ):
+ l1.bid_label.update_fields({'level': price, 'size': size})
+
+ profiler('L1 labels updates')
+
+ # Y-autoranging: adjust y-axis limits based on state tracking
+ # of previous "last" L1 values which are in view.
+ lmx = varz['last_mx']
+ lmn = varz['last_mn']
+ mx_diff = mx - lmx
+ mn_diff = mn - lmn
+
+ if (
+ mx_diff
+ or mn_diff
+ ):
+ # complain about out-of-range outliers which can show up
+ # in certain annoying feeds (like ib)..
if (
- do_rt_update
- or do_append
- or trigger_all
+ abs(mx_diff) > .25 * lmx
+ or
+ abs(mn_diff) > .25 * lmn
):
- chart.update_graphics_from_flow(
- chart.name,
- do_append=do_append,
- )
- hist_chart.update_graphics_from_flow(
- chart.name,
- do_append=do_append,
+ log.error(
+ f'WTF MN/MX IS WAY OFF:\n'
+ f'lmn: {lmn}\n'
+ f'mn: {mn}\n'
+ f'lmx: {lmx}\n'
+ f'mx: {mx}\n'
+ f'mx_diff: {mx_diff}\n'
+ f'mn_diff: {mn_diff}\n'
)
- # NOTE: we always update the "last" datum
- # since the current range should at least be updated
- # to it's max/min on the last pixel.
-
- # iterate in FIFO order per tick-frame
- for typ, tick in lasts.items():
-
- price = tick.get('price')
- size = tick.get('size')
-
- # compute max and min prices (including bid/ask) from
- # tick frames to determine the y-range for chart
- # auto-scaling.
- # TODO: we need a streaming minmax algo here, see def above.
- if liv:
- mx = max(price + tick_margin, mx)
- mn = min(price - tick_margin, mn)
-
- if typ in clear_types:
-
- # XXX: if we only wanted to update graphics from the
- # "current"/"latest received" clearing price tick
- # once (see alt iteration order above).
- # if last_clear_updated:
- # continue
-
- # last_clear_updated = True
- # we only want to update grahpics from the *last*
- # tick event that falls under the "clearing price"
- # set.
-
- # update price sticky(s)
- end = array[-1]
- ds.last_price_sticky.update_from_data(
- *end[['index', 'close']]
- )
- ds.hist_last_price_sticky.update_from_data(
- *end[['index', 'close']]
+ # FAST CHART resize case
+ elif (
+ liv
+ and not chart._static_yrange == 'axis'
+ ):
+ main_vb = main_viz.plot.vb
+
+ if (
+ main_vb._ic is None
+ or not main_vb._ic.is_set()
+ ):
+            # TODO: incremental update of the median
+ # and maxmin driving the y-autoranging.
+ # yr = (mn, mx)
+ main_vb.interact_graphics_cycle(
+ # do_overlay_scaling=False,
+ do_linked_charts=False,
)
+ # TODO: we should probably scale
+ # the view margin based on the size
+ # of the true range? This way you can
+ # slap in orders outside the current
+ # L1 (only) book range.
- if wap_in_history:
- # update vwap overlay line
- chart.update_graphics_from_flow(
- 'bar_wap',
- )
+ profiler('main vb y-autorange')
- # L1 book label-line updates
- # XXX: is this correct for ib?
- # if ticktype in ('trade', 'last'):
- # if ticktype in ('last',): # 'size'):
- if typ in ('last',): # 'size'):
+ # SLOW CHART resize case
+ (
+ _,
+ hist_liv,
+ _,
+ _,
+ _,
+ _,
+ _,
+ ) = hist_viz.incr_info(
+ ds=ds,
+ is_1m=True,
+ )
+ profiler('hist `Viz.incr_info()`')
- label = {
- l1.ask_label.fields['level']: l1.ask_label,
- l1.bid_label.fields['level']: l1.bid_label,
- }.get(price)
+ # TODO: track local liv maxmin without doing a recompute all the
+    # time.. plus, just generally the user is more likely to be
+ # zoomed out enough on the slow chart that this is never an
+ # issue (the last datum going out of y-range).
+ # hist_chart = ds.hist_chart
+ # if (
+ # hist_liv
+ # and not hist_chart._static_yrange == 'axis'
+ # ):
+ # hist_viz.plot.vb._set_yrange(
+ # viz=hist_viz,
+ # # yrange=yr, # this is the rt range, not hist.. XD
+ # )
+ # profiler('hist vb y-autorange')
- if (
- label is not None
- and liv
- ):
- label.update_fields(
- {'level': price, 'size': size}
- )
+ # XXX: update this every draw cycle to ensure y-axis auto-ranging
+ # only adjusts when the in-view data co-domain actually expands or
+ # contracts.
+ varz['last_mx'], varz['last_mn'] = mx, mn
- # TODO: on trades should we be knocking down
- # the relevant L1 queue?
- # label.size -= size
+ # TODO: a similar, only-update-full-path-on-px-step approach for all
+ # fsp overlays and vlm stuff..
- elif (
- typ in _tick_groups['asks']
- # TODO: instead we could check if the price is in the
- # y-view-range?
- and liv
- ):
- l1.ask_label.update_fields({'level': price, 'size': size})
+ # run synchronous update on all `Viz` overlays
+ for curve_name, viz in chart._vizs.items():
- elif (
- typ in _tick_groups['bids']
- # TODO: instead we could check if the price is in the
- # y-view-range?
- and liv
- ):
- l1.bid_label.update_fields({'level': price, 'size': size})
+ # update any overlayed fsp flows
+ if (
+ curve_name != fqsn
+ and not viz.is_ohlc
+ ):
+ update_fsp_chart(
+ viz,
+ curve_name,
+ array_key=curve_name,
+ )
- # check for y-range re-size
- if (mx > vars['last_mx']) or (mn < vars['last_mn']):
+ # even if we're downsampled bigly
+ # draw the last datum in the final
+ # px column to give the user the mx/mn
+ # range of that set.
+ if (
+ curve_name != fqsn
+ and liv
+ # and not do_px_step
+ # and not do_rt_update
+ ):
+ viz.draw_last(
+ array_key=curve_name,
+
+ # TODO: XXX this is currently broken for the
+ # `FlattenedOHLC` case since we aren't returning the
+ # full x/y uppx's worth of src-data from
+ # `draw_last_datum()` ..
+ only_last_uppx=True,
+ )
- # fast chart resize case
- if (
- liv
- and not chart._static_yrange == 'axis'
- ):
- main_vb = chart.view
- if (
- main_vb._ic is None
- or not main_vb._ic.is_set()
- ):
- # print(f'updating range due to mxmn')
- main_vb._set_yrange(
- # TODO: we should probably scale
- # the view margin based on the size
- # of the true range? This way you can
- # slap in orders outside the current
- # L1 (only) book range.
- # range_margin=0.1,
- yrange=(mn, mx),
- )
+ profiler('overlays updates')
- # check if slow chart needs a resize
+ # volume chart logic..
+ # TODO: can we unify this with the above loop?
+ if vlm_chart:
+ vlm_vizs = vlm_chart._vizs
+
+ main_vlm_viz = vlm_vizs['volume']
+
+ # always update y-label
+ ds.vlm_sticky.update_from_data(
+ *array[-1][[
+ index_field,
+ 'volume',
+ ]]
+ )
+
+ if (
(
- _,
- hist_liv,
- _,
- _,
- _,
- _,
- ) = ds.incr_info(
- chart=hist_chart,
- shm=ds.hist_ohlcv,
- update_state=False,
+ do_rt_update
+ or do_px_step
+ and liv
+ )
+ or trigger_all
+ ):
+ # TODO: make it so this doesn't have to be called
+ # once the $vlm is up?
+ main_vlm_viz.update_graphics(
+
+ # UGGGh, see ``maxmin()`` impl in `._fsp` for
+ # the overlayed plotitems... we need a better
+                # way to invoke a maxmin per overlay..
+ render=False,
+
+ # XXX: ^^^^ THIS IS SUPER IMPORTANT! ^^^^
+ # without this, since we disable the
+ # 'volume' (units) chart after the $vlm starts
+ # up we need to be sure to enable this
+ # auto-ranging otherwise there will be no handler
+ # connected to update accompanying overlay
+ # graphics..
)
- if hist_liv:
- hist_chart.view._set_yrange(yrange=hist_chart.maxmin())
+ profiler('`main_vlm_viz.update_graphics()`')
- # XXX: update this every draw cycle to make L1-always-in-view work.
- vars['last_mx'], vars['last_mn'] = mx, mn
+ if (
+ mx_vlm_in_view != varz['last_mx_vlm']
+ ):
+ varz['last_mx_vlm'] = mx_vlm_in_view
+
+ # TODO: incr maxmin update as pass into below..
+ # vlm_yr = (0, mx_vlm_in_view * 1.375)
+
+ main_vlm_viz.plot.vb.interact_graphics_cycle(
+ # do_overlay_scaling=False,
+ do_linked_charts=False,
+ )
+ profiler('`vlm_chart.view.interact_graphics_cycle()`')
+
+ # update all downstream FSPs
+ for curve_name, viz in vlm_vizs.items():
- # run synchronous update on all linked flows
- # TODO: should the "main" (aka source) flow be special?
- for curve_name, flow in chart._flows.items():
- # update any overlayed fsp flows
- if curve_name != chart.data_key:
+ if curve_name == 'volume':
+ continue
+
+ if (
+ viz.render
+ and (
+ liv and do_rt_update
+ or do_px_step
+ )
+ and curve_name not in {fqsn}
+ ):
update_fsp_chart(
- chart,
- flow,
+ viz,
curve_name,
array_key=curve_name,
)
+ profiler(f'vlm `Viz[{viz.name}].update_graphics()`')
+
+ # is this even doing anything?
+ # (pretty sure it's the real-time
+ # resizing from last quote?)
+ # XXX: without this we get completely
+ # mangled/empty vlm display subchart..
+ fvb = viz.plot.vb
+ fvb.interact_graphics_cycle(
+ do_linked_charts=False,
+ )
+ profiler(
+ f'Viz[{viz.name}].plot.vb.interact_graphics_cycle()`'
+ )
# even if we're downsampled bigly
# draw the last datum in the final
# px column to give the user the mx/mn
# range of that set.
- if (
- not do_append
- # and not do_rt_update
+ elif (
+ not do_px_step
and liv
+ and uppx >= 1
):
- flow.draw_last(
- array_key=curve_name,
- only_last_uppx=True,
- )
+ # always update the last datum-element
+ # graphic for all vizs
+ viz.draw_last(array_key=curve_name)
+ profiler(f'vlm `Viz[{viz.name}].draw_last()`')
- # volume chart logic..
- # TODO: can we unify this with the above loop?
- if vlm_chart:
- # always update y-label
- ds.vlm_sticky.update_from_data(
- *array[-1][['index', 'volume']]
- )
+ profiler('vlm Viz all updates complete')
- if (
- (
- do_rt_update
- or do_append
- and liv
- )
- or trigger_all
- ):
- # TODO: make it so this doesn't have to be called
- # once the $vlm is up?
- vlm_chart.update_graphics_from_flow(
- 'volume',
- # UGGGh, see ``maxmin()`` impl in `._fsp` for
- # the overlayed plotitems... we need a better
- # bay to invoke a maxmin per overlay..
- render=False,
- # XXX: ^^^^ THIS IS SUPER IMPORTANT! ^^^^
- # without this, since we disable the
- # 'volume' (units) chart after the $vlm starts
- # up we need to be sure to enable this
- # auto-ranging otherwise there will be no handler
- # connected to update accompanying overlay
- # graphics..
- )
- profiler('`vlm_chart.update_graphics_from_flow()`')
-
- if (
- mx_vlm_in_view != vars['last_mx_vlm']
- ):
- yrange = (0, mx_vlm_in_view * 1.375)
- vlm_chart.view._set_yrange(
- yrange=yrange,
- )
- profiler('`vlm_chart.view._set_yrange()`')
- # print(f'mx vlm: {last_mx_vlm} -> {mx_vlm_in_view}')
- vars['last_mx_vlm'] = mx_vlm_in_view
-
- for curve_name, flow in vlm_chart._flows.items():
-
- if (
- curve_name != 'volume' and
- flow.render and (
- liv and
- do_rt_update or do_append
- )
- ):
- update_fsp_chart(
- vlm_chart,
- flow,
- curve_name,
- array_key=curve_name,
- # do_append=uppx < update_uppx,
- do_append=do_append,
- )
- # is this even doing anything?
- # (pretty sure it's the real-time
- # resizing from last quote?)
- fvb = flow.plot.vb
- fvb._set_yrange(
- name=curve_name,
- )
-
- elif (
- curve_name != 'volume'
- and not do_append
- and liv
- and uppx >= 1
- # even if we're downsampled bigly
- # draw the last datum in the final
- # px column to give the user the mx/mn
- # range of that set.
- ):
- # always update the last datum-element
- # graphic for all flows
- # print(f'drawing last {flow.name}')
- flow.draw_last(array_key=curve_name)
+ profiler.finish()
async def link_views_with_region(
@@ -833,96 +930,129 @@ async def link_views_with_region(
pen=pg.mkPen(hcolor('gunmetal')),
brush=pg.mkBrush(hcolor('default_darkest')),
)
- region.setZValue(10) # put linear region "in front" in layer terms
+ region.setOpacity(0)
hist_pi.addItem(region, ignoreBounds=True)
+ region.setOpacity(6/16)
- flow = rt_chart._flows[hist_chart.name]
- assert flow
+ viz = rt_chart.get_viz(flume.symbol.fqsn)
+ assert viz
+ index_field = viz.index_field
# XXX: no idea why this doesn't work but it's causing
# a weird placement of the region on the way-far-left..
- # region.setClipItem(flow.graphics)
-
- # poll for datums load and timestep detection
- for _ in range(100):
- try:
- _, _, ratio = flume.get_ds_info()
- break
- except IndexError:
- await trio.sleep(0.01)
- continue
- else:
- raise RuntimeError(
- 'Failed to detect sampling periods from shm!?')
-
- # sampling rate transform math:
- # -----------------------------
- # define the fast chart to slow chart as a linear mapping
- # over the fast index domain `i` to the slow index domain
- # `j` as:
- #
- # j = i - i_offset
- # ------------ + j_offset
- # j/i
- #
- # conversely the inverse function is:
- #
- # i = j/i * (j - j_offset) + i_offset
- #
- # Where `j_offset` is our ``izero_hist`` and `i_offset` is our
- # `izero_rt`, the ``ShmArray`` offsets which correspond to the
- # indexes in each array where the "current" time is indexed at init.
- # AKA the index where new data is "appended to" and historical data
- # if "prepended from".
- #
- # more practically (and by default) `i` is normally an index
- # into 1s samples and `j` is an index into 60s samples (aka 1m).
- # in the below handlers ``ratio`` is the `j/i` and ``mn``/``mx``
- # are the low and high index input from the source index domain.
-
- def update_region_from_pi(
- window,
- viewRange: tuple[tuple, tuple],
- is_manual: bool = True,
-
- ) -> None:
- # put linear region "in front" in layer terms
- region.setZValue(10)
+ # region.setClipItem(viz.graphics)
+
+ if index_field == 'time':
+
+ # in the (epoch) index case we can map directly
+ # from the fast chart's x-domain values since they are
+ # on the same index as the slow chart.
+
+ def update_region_from_pi(
+ window,
+ viewRange: tuple[tuple, tuple],
+ is_manual: bool = True,
+ ) -> None:
+ # put linear region "in front" in layer terms
+ region.setZValue(10)
+
+ # set the region on the history chart
+ # to the range currently viewed in the
+ # HFT/real-time chart.
+ rng = mn, mx = viewRange[0]
+
+ # hist_viz = hist_chart.get_viz(flume.symbol.fqsn)
+ # hist = hist_viz.shm.array[-3:]
+ # print(
+ # f'mn: {mn}\n'
+ # f'mx: {mx}\n'
+ # f'slow last 3 epochs: {list(hist["time"])}\n'
+ # f'slow last 3: {hist}\n'
+ # )
+
+ region.setRegion(rng)
- # set the region on the history chart
- # to the range currently viewed in the
- # HFT/real-time chart.
- mn, mx = viewRange[0]
- ds_mn = (mn - izero_rt)/ratio
- ds_mx = (mx - izero_rt)/ratio
- lhmn = ds_mn + izero_hist
- lhmx = ds_mx + izero_hist
- # print(
- # f'rt_view_range: {(mn, mx)}\n'
- # f'ds_mn, ds_mx: {(ds_mn, ds_mx)}\n'
- # f'lhmn, lhmx: {(lhmn, lhmx)}\n'
- # )
- region.setRegion((
- lhmn,
- lhmx,
- ))
-
- # TODO: if we want to have the slow chart adjust range to
- # match the fast chart's selection -> results in the
- # linear region expansion never can go "outside of view".
- # hmn, hmx = hvr = hist_chart.view.state['viewRange'][0]
- # print((hmn, hmx))
- # if (
- # hvr
- # and (lhmn < hmn or lhmx > hmx)
- # ):
- # hist_pi.setXRange(
- # lhmn,
- # lhmx,
- # padding=0,
- # )
- # hist_linked.graphics_cycle()
+ else:
+ # poll for datums load and timestep detection
+ for _ in range(100):
+ try:
+ _, _, ratio = flume.get_ds_info()
+ break
+ except IndexError:
+ await trio.sleep(0.01)
+ continue
+ else:
+ raise RuntimeError(
+ 'Failed to detect sampling periods from shm!?')
+
+ # sampling rate transform math:
+ # -----------------------------
+ # define the fast chart to slow chart as a linear mapping
+ # over the fast index domain `i` to the slow index domain
+ # `j` as:
+ #
+ # j = i - i_offset
+ # ------------ + j_offset
+ # j/i
+ #
+ # conversely the inverse function is:
+ #
+ # i = j/i * (j - j_offset) + i_offset
+ #
+ # Where `j_offset` is our ``izero_hist`` and `i_offset` is our
+ # `izero_rt`, the ``ShmArray`` offsets which correspond to the
+ # indexes in each array where the "current" time is indexed at init.
+ # AKA the index where new data is "appended to" and historical data
+ # if "prepended from".
+ #
+ # more practically (and by default) `i` is normally an index
+ # into 1s samples and `j` is an index into 60s samples (aka 1m).
+ # in the below handlers ``ratio`` is the `j/i` and ``mn``/``mx``
+ # are the low and high index input from the source index domain.
+
+ def update_region_from_pi(
+ window,
+ viewRange: tuple[tuple, tuple],
+ is_manual: bool = True,
+
+ ) -> None:
+ # put linear region "in front" in layer terms
+ region.setZValue(10)
+
+ # set the region on the history chart
+ # to the range currently viewed in the
+ # HFT/real-time chart.
+ mn, mx = viewRange[0]
+ ds_mn = (mn - izero_rt)/ratio
+ ds_mx = (mx - izero_rt)/ratio
+ lhmn = ds_mn + izero_hist
+ lhmx = ds_mx + izero_hist
+ # print(
+ # f'rt_view_range: {(mn, mx)}\n'
+ # f'ds_mn, ds_mx: {(ds_mn, ds_mx)}\n'
+ # f'lhmn, lhmx: {(lhmn, lhmx)}\n'
+ # )
+ region.setRegion((
+ lhmn,
+ lhmx,
+ ))
+
+ # TODO: if we want to have the slow chart adjust range to
+ # match the fast chart's selection -> results in the
+ # linear region expansion never can go "outside of view".
+ # hmn, hmx = hvr = hist_chart.view.state['viewRange'][0]
+ # print((hmn, hmx))
+ # if (
+ # hvr
+ # and (lhmn < hmn or lhmx > hmx)
+ # ):
+ # hist_pi.setXRange(
+ # lhmn,
+ # lhmx,
+ # padding=0,
+ # )
+ # hist_linked.graphics_cycle()
# connect region to be updated on plotitem interaction.
rt_pi.sigRangeChanged.connect(update_region_from_pi)
@@ -945,9 +1075,11 @@ def update_pi_from_region():
# region.sigRegionChangeFinished.connect(update_pi_from_region)
+_quote_throttle_rate: int = 60 - 6
+
+
async def display_symbol_data(
godwidget: GodWidget,
- provider: str,
fqsns: list[str],
loglevel: str,
order_mode_started: trio.Event,
@@ -972,123 +1104,174 @@ async def display_symbol_data(
# )
for fqsn in fqsns:
-
loading_sym_key = sbar.open_status(
f'loading {fqsn} ->',
group_key=True
)
+ # (TODO: make this not so shit XD)
+ # close group status once a symbol feed fully loads to view.
+ # sbar._status_groups[loading_sym_key][1]()
+
+ # TODO: ctl over update loop's maximum frequency.
+ # - load this from a config.toml!
+ # - allow dyanmic configuration from chart UI?
+ global _quote_throttle_rate
+ from ._window import main_window
+ display_rate = main_window().current_screen().refreshRate()
+ _quote_throttle_rate = floor(display_rate) - 6
+
+ # TODO: we should be able to increase this if we use some
+ # `mypyc` speedups elsewhere? 22ish seems to be the sweet
+ # spot for single-feed chart.
+ num_of_feeds = len(fqsns)
+ mx: int = 22
+ if num_of_feeds > 1:
+ # there will be more ctx switches with more than 1 feed so we
+ # max throttle down a bit more.
+ mx = 16
+
+ # limit to at least display's FPS
+ # avoiding needless Qt-in-guest-mode context switches
+ cycles_per_feed = min(
+ round(_quote_throttle_rate/num_of_feeds),
+ mx,
+ )
+
feed: Feed
async with open_feed(
fqsns,
loglevel=loglevel,
-
- # limit to at least display's FPS
- # avoiding needless Qt-in-guest-mode context switches
- tick_throttle=_quote_throttle_rate,
+ tick_throttle=cycles_per_feed,
) as feed:
- # TODO: right now we only show one symbol on charts, but
- # overlays are coming muy pronto guey..
- assert len(feed.flumes) == 1
- flume = list(feed.flumes.values())[0]
+ # use expanded contract symbols passed back from feed layer.
+ fqsns = list(feed.flumes.keys())
+
+ # step_size_s = 1
+ # tf_key = tf_in_1s[step_size_s]
+ godwidget.window.setWindowTitle(
+ f'{fqsns} '
+ # f'tick:{symbol.tick_size} '
+ # f'step:{tf_key} '
+ )
+ # generate order mode side-pane UI
+ # A ``FieldsForm`` form to configure order entry
+ # and add as next-to-y-axis singleton pane
+ pp_pane: FieldsForm = mk_order_pane_layout(godwidget)
+ godwidget.pp_pane = pp_pane
+
+ # create top history view chart above the "main rt chart".
+ rt_linked = godwidget.rt_linked
+ hist_linked = godwidget.hist_linked
+
+ # NOTE: here we insert the slow-history chart set into
+ # the fast chart's splitter -> so it's a splitter of charts
+ # inside the first widget slot of a splitter of charts XD
+ rt_linked.splitter.insertWidget(0, hist_linked)
+
+ rt_chart: None | ChartPlotWidget = None
+ hist_chart: None | ChartPlotWidget = None
+ vlm_chart: None | ChartPlotWidget = None
+
+ # TODO: I think some palette's based on asset group types
+ # would be good, for eg:
+ # - underlying and opts contracts
+ # - index and underlyings + futures
+ # - gradient in "lightness" based on liquidity, or lifetime in derivs?
+ palette = itertools.cycle([
+ # curve color, last bar curve color
+ ['grayest', 'i3'],
+ ['default_dark', 'default'],
+
+ ['grayer', 'bracket'],
+ ['i3', 'gray'],
+ ])
+
+ pis: dict[str, list[pgo.PlotItem, pgo.PlotItem]] = {}
+
+ # load in ohlc data to a common linked but split chart set.
+ fitems: list[
+ tuple[str, Flume]
+ ] = list(feed.flumes.items())
+
+ # use array int-indexing when no aggregate feed overlays are
+ # loaded.
+ if len(fitems) == 1:
+ from ._dataviz import Viz
+ Viz._index_field = 'index'
+
+ # for the "first"/selected symbol we create new chart widgets
+ # and sub-charts for FSPs
+ fqsn, flume = fitems[0]
+
+ # TODO NOTE: THIS CONTROLS WHAT SYMBOL IS USED FOR ORDER MODE
+ # SUBMISSIONS, we need to make this switch based on selection.
+ rt_linked._symbol = flume.symbol
+ hist_linked._symbol = flume.symbol
ohlcv: ShmArray = flume.rt_shm
hist_ohlcv: ShmArray = flume.hist_shm
symbol = flume.symbol
+ brokername = symbol.brokers[0]
fqsn = symbol.fqsn
- step_size_s = 1
- tf_key = tf_in_1s[step_size_s]
-
- # load in symbol's ohlc data
- godwidget.window.setWindowTitle(
- f'{fqsn} '
- f'tick:{symbol.tick_size} '
- f'step:{tf_key} '
- )
-
- rt_linked = godwidget.rt_linked
- rt_linked._symbol = symbol
-
- # create top history view chart above the "main rt chart".
- hist_linked = godwidget.hist_linked
- hist_linked._symbol = symbol
hist_chart = hist_linked.plot_ohlc_main(
symbol,
hist_ohlcv,
+ flume,
# in the case of history chart we explicitly set `False`
# to avoid internal pane creation.
# sidepane=False,
sidepane=godwidget.search,
+ draw_kwargs={
+ 'last_step_color': 'original',
+ },
)
+
+ # ensure the last datum graphic is generated
+ # for zoom-interaction purposes.
+ hist_viz = hist_chart.get_viz(fqsn)
+ hist_viz.draw_last(array_key=fqsn)
+ pis.setdefault(fqsn, [None, None])[1] = hist_chart.plotItem
+
# don't show when not focussed
hist_linked.cursor.always_show_xlabel = False
- # generate order mode side-pane UI
- # A ``FieldsForm`` form to configure order entry
- # and add as next-to-y-axis singleton pane
- pp_pane: FieldsForm = mk_order_pane_layout(godwidget)
- godwidget.pp_pane = pp_pane
-
- # create main OHLC chart
- ohlc_chart = rt_linked.plot_ohlc_main(
+ rt_chart = rt_linked.plot_ohlc_main(
symbol,
ohlcv,
+ flume,
# in the case of history chart we explicitly set `False`
# to avoid internal pane creation.
sidepane=pp_pane,
+ draw_kwargs={
+ 'last_step_color': 'original',
+ },
)
+ rt_viz = rt_chart.get_viz(fqsn)
+ pis.setdefault(fqsn, [None, None])[0] = rt_chart.plotItem
- ohlc_chart._feeds[symbol.key] = feed
- ohlc_chart.setFocus()
+ # for pause/resume on mouse interaction
+ rt_chart.feed = feed
- # XXX: FOR SOME REASON THIS IS CAUSING HANGZ!?!
- # plot historical vwap if available
- wap_in_history = False
- # if (
- # brokermod._show_wap_in_history
- # and 'bar_wap' in bars.dtype.fields
- # ):
- # wap_in_history = True
- # ohlc_chart.draw_curve(
- # name='bar_wap',
- # shm=ohlcv,
- # color='default_light',
- # add_label=False,
- # )
-
- # NOTE: we must immediately tell Qt to show the OHLC chart
- # to avoid a race where the subplots get added/shown to
- # the linked set *before* the main price chart!
- rt_linked.show()
- rt_linked.focus()
- await trio.sleep(0)
-
- # NOTE: here we insert the slow-history chart set into
- # the fast chart's splitter -> so it's a splitter of charts
- # inside the first widget slot of a splitter of charts XD
- rt_linked.splitter.insertWidget(0, hist_linked)
- # XXX: if we wanted it at the bottom?
- # rt_linked.splitter.addWidget(hist_linked)
- rt_linked.focus()
-
- godwidget.resize_all()
-
- vlm_chart: Optional[ChartPlotWidget] = None
async with trio.open_nursery() as ln:
-
# if available load volume related built-in display(s)
+ vlm_charts: dict[
+ str,
+ None | ChartPlotWidget
+ ] = {}.fromkeys(feed.flumes)
if (
- not symbol.broker_info[provider].get('no_vlm', False)
+ not symbol.broker_info[brokername].get('no_vlm', False)
and has_vlm(ohlcv)
+ and vlm_chart is None
):
- vlm_chart = await ln.start(
+ vlm_chart = vlm_charts[fqsn] = await ln.start(
open_vlm_displays,
rt_linked,
- ohlcv,
+ flume,
)
# load (user's) FSP set (otherwise known as "indicators")
@@ -1096,87 +1279,180 @@ async def display_symbol_data(
ln.start_soon(
start_fsp_displays,
rt_linked,
- ohlcv,
+ flume,
loading_sym_key,
loglevel,
)
- # start graphics update loop after receiving first live quote
- ln.start_soon(
- graphics_update_loop,
- ln,
- godwidget,
- flume,
- wap_in_history,
- vlm_chart,
- )
+ # XXX: FOR SOME REASON THIS IS CAUSING HANGZ!?!
+ # plot historical vwap if available
+ wap_in_history = False
+ # if (
+ # brokermod._show_wap_in_history
+ # and 'bar_wap' in bars.dtype.fields
+ # ):
+ # wap_in_history = True
+ # rt_chart.draw_curve(
+ # name='bar_wap',
+ # shm=ohlcv,
+ # color='default_light',
+ # add_label=False,
+ # )
+ godwidget.resize_all()
await trio.sleep(0)
- # size view to data prior to order mode init
- ohlc_chart.default_view()
- rt_linked.graphics_cycle()
- await trio.sleep(0)
+ for fqsn, flume in fitems[1:]:
+ # get a new color from the palette
+ bg_chart_color, bg_last_bar_color = next(palette)
- hist_chart.default_view(
- bars_from_y=int(len(hist_ohlcv.array)), # size to data
- y_offset=6116*2, # push it a little away from the y-axis
- )
- hist_linked.graphics_cycle()
+ ohlcv: ShmArray = flume.rt_shm
+ hist_ohlcv: ShmArray = flume.hist_shm
+
+ symbol = flume.symbol
+ fqsn = symbol.fqsn
+
+ hist_pi = hist_chart.overlay_plotitem(
+ name=fqsn,
+ axis_title=fqsn,
+ )
+
+ hist_viz = hist_chart.draw_curve(
+ fqsn,
+ hist_ohlcv,
+ flume,
+ array_key=fqsn,
+ overlay=hist_pi,
+ pi=hist_pi,
+ is_ohlc=True,
+
+ color=bg_chart_color,
+ last_step_color=bg_last_bar_color,
+ )
+
+ # ensure the last datum graphic is generated
+ # for zoom-interaction purposes.
+ hist_viz.draw_last(array_key=fqsn)
+
+ hist_pi.vb.maxmin = partial(
+ hist_chart.maxmin,
+ name=fqsn,
+ )
+ # TODO: we need a better API to do this..
+ # specially store ref to shm for lookup in display loop
+ # since only a placeholder of `None` is entered in
+ # ``.draw_curve()``.
+ hist_viz = hist_chart._vizs[fqsn]
+ assert hist_viz.plot is hist_pi
+ pis.setdefault(fqsn, [None, None])[1] = hist_pi
+
+ rt_pi = rt_chart.overlay_plotitem(
+ name=fqsn,
+ axis_title=fqsn,
+ )
+
+ rt_viz = rt_chart.draw_curve(
+ fqsn,
+ ohlcv,
+ flume,
+ array_key=fqsn,
+ overlay=rt_pi,
+ pi=rt_pi,
+ is_ohlc=True,
+
+ color=bg_chart_color,
+ last_step_color=bg_last_bar_color,
+ )
+ rt_pi.vb.maxmin = partial(
+ rt_chart.maxmin,
+ name=fqsn,
+ )
+
+ # TODO: we need a better API to do this..
+ # specially store ref to shm for lookup in display loop
+ # since only a placeholder of `None` is entered in
+ # ``.draw_curve()``.
+ rt_viz = rt_chart._vizs[fqsn]
+ assert rt_viz.plot is rt_pi
+ pis.setdefault(fqsn, [None, None])[0] = rt_pi
+
+ rt_chart.setFocus()
+
+ # NOTE: we must immediately tell Qt to show the OHLC chart
+ # to avoid a race where the subplots get added/shown to
+ # the linked set *before* the main price chart!
+ rt_linked.show()
+ rt_linked.focus()
await trio.sleep(0)
- godwidget.resize_all()
+ # XXX: if we wanted it at the bottom?
+ # rt_linked.splitter.addWidget(hist_linked)
+
+ # greedily do a view range default and pane resizing
+ # on startup before loading the order-mode machinery.
+ for fqsn, flume in feed.flumes.items():
+
+ # size view to data prior to order mode init
+ rt_chart.default_view()
+ rt_linked.graphics_cycle()
+
+ # TODO: look into this because not sure why it was
+ # commented out / we ever needed it XD
+ # NOTE: we pop the volume chart from the subplots set so
+ # that it isn't double rendered in the display loop
+ # above since we do a maxmin calc on the volume data to
+ # determine if auto-range adjustements should be made.
+ # rt_linked.subplots.pop('volume', None)
+
+ hist_chart.default_view()
+ hist_linked.graphics_cycle()
+
+ godwidget.resize_all()
+ await trio.sleep(0)
await link_views_with_region(
- ohlc_chart,
+ rt_chart,
hist_chart,
flume,
)
+ # start update loop task
+ ln.start_soon(
+ graphics_update_loop,
+ ln,
+ godwidget,
+ feed,
+ pis,
+ wap_in_history,
+ vlm_charts,
+ )
+
+ # boot order-mode
+ order_ctl_symbol: str = fqsns[0]
mode: OrderMode
async with (
open_order_mode(
feed,
godwidget,
- fqsn,
+ fqsns[0],
order_mode_started
) as mode
):
- if not vlm_chart:
- # trigger another view reset if no sub-chart
- ohlc_chart.default_view()
rt_linked.mode = mode
- # let Qt run to render all widgets and make sure the
- # sidepanes line up vertically.
- await trio.sleep(0)
-
- # dynamic resize steps
- godwidget.resize_all()
-
- # TODO: look into this because not sure why it was
- # commented out / we ever needed it XD
- # NOTE: we pop the volume chart from the subplots set so
- # that it isn't double rendered in the display loop
- # above since we do a maxmin calc on the volume data to
- # determine if auto-range adjustements should be made.
- # rt_linked.subplots.pop('volume', None)
+ rt_viz = rt_chart.get_viz(order_ctl_symbol)
+ rt_viz.plot.setFocus()
- # TODO: make this not so shit XD
- # close group status
- sbar._status_groups[loading_sym_key][1]()
+ # default view adjuments and sidepane alignment
+ # as final default UX touch.
+ rt_chart.default_view()
+ await trio.sleep(0)
- hist_linked.graphics_cycle()
+ hist_chart.default_view()
+ hist_viz = hist_chart.get_viz(fqsn)
await trio.sleep(0)
- bars_in_mem = int(len(hist_ohlcv.array))
- hist_chart.default_view(
- bars_from_y=bars_in_mem, # size to data
- # push it 1/16th away from the y-axis
- y_offset=round(bars_in_mem / 16),
- )
godwidget.resize_all()
- # let the app run.. bby
- await trio.sleep_forever()
+ await trio.sleep_forever() # let the app run.. bby
diff --git a/piker/ui/_editors.py b/piker/ui/_editors.py
index 3703558a9..08f198529 100644
--- a/piker/ui/_editors.py
+++ b/piker/ui/_editors.py
@@ -377,7 +377,7 @@ def set_pos(
nbars = ixmx - ixmn + 1
chart = self._chart
- data = chart._flows[chart.name].shm.array[ixmn:ixmx]
+ data = chart.get_viz(chart.name).shm.array[ixmn:ixmx]
if len(data):
std = data['close'].std()
diff --git a/piker/ui/_flows.py b/piker/ui/_flows.py
deleted file mode 100644
index a29089050..000000000
--- a/piker/ui/_flows.py
+++ /dev/null
@@ -1,954 +0,0 @@
-# piker: trading gear for hackers
-# Copyright (C) Tyler Goodlet (in stewardship for pikers)
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see .
-
-'''
-High level streaming graphics primitives.
-
-This is an intermediate layer which associates real-time low latency
-graphics primitives with underlying FSP related data structures for fast
-incremental update.
-
-'''
-from __future__ import annotations
-from typing import (
- Optional,
-)
-
-import msgspec
-import numpy as np
-import pyqtgraph as pg
-from PyQt5.QtGui import QPainterPath
-from PyQt5.QtCore import QLineF
-
-from ..data._sharedmem import (
- ShmArray,
-)
-from .._profile import (
- pg_profile_enabled,
- # ms_slower_then,
-)
-from ._pathops import (
- IncrementalFormatter,
- OHLCBarsFmtr, # Plain OHLC renderer
- OHLCBarsAsCurveFmtr, # OHLC converted to line
- StepCurveFmtr, # "step" curve (like for vlm)
- xy_downsample,
-)
-from ._ohlc import (
- BarItems,
- # bar_from_ohlc_row,
-)
-from ._curve import (
- Curve,
- StepCurve,
- FlattenedOHLC,
-)
-from ..log import get_logger
-from .._profile import Profiler
-
-
-log = get_logger(__name__)
-
-
-def render_baritems(
- flow: Flow,
- graphics: BarItems,
- read: tuple[
- int, int, np.ndarray,
- int, int, np.ndarray,
- ],
- profiler: Profiler,
- **kwargs,
-
-) -> None:
- '''
- Graphics management logic for a ``BarItems`` object.
-
- Mostly just logic to determine when and how to downsample an OHLC
- lines curve into a flattened line graphic and when to display one
- graphic or the other.
-
- TODO: this should likely be moved into some kind of better abstraction
- layer, if not a `Renderer` then something just above it?
-
- '''
- bars = graphics
-
- # if no source data renderer exists create one.
- self = flow
- show_bars: bool = False
-
- r = self._src_r
- if not r:
- show_bars = True
-
- # OHLC bars path renderer
- r = self._src_r = Renderer(
- flow=self,
- fmtr=OHLCBarsFmtr(
- shm=flow.shm,
- flow=flow,
- _last_read=read,
- ),
- )
-
- ds_curve_r = Renderer(
- flow=self,
- fmtr=OHLCBarsAsCurveFmtr(
- shm=flow.shm,
- flow=flow,
- _last_read=read,
- ),
- )
-
- curve = FlattenedOHLC(
- name=f'{flow.name}_ds_ohlc',
- color=bars._color,
- )
- flow.ds_graphics = curve
- curve.hide()
- self.plot.addItem(curve)
-
- # baseline "line" downsampled OHLC curve that should
- # kick on only when we reach a certain uppx threshold.
- self._render_table = (ds_curve_r, curve)
-
- ds_r, curve = self._render_table
-
- # do checks for whether or not we require downsampling:
- # - if we're **not** downsampling then we simply want to
- # render the bars graphics curve and update..
- # - if instead we are in a downsamplig state then we to
- x_gt = 6
- uppx = curve.x_uppx()
- in_line = should_line = curve.isVisible()
- if (
- in_line
- and uppx < x_gt
- ):
- # print('FLIPPING TO BARS')
- should_line = False
- flow._in_ds = False
-
- elif (
- not in_line
- and uppx >= x_gt
- ):
- # print('FLIPPING TO LINE')
- should_line = True
- flow._in_ds = True
-
- profiler(f'ds logic complete line={should_line}')
-
- # do graphics updates
- if should_line:
- r = ds_r
- graphics = curve
- profiler('updated ds curve')
-
- else:
- graphics = bars
-
- if show_bars:
- bars.show()
-
- changed_to_line = False
- if (
- not in_line
- and should_line
- ):
- # change to line graphic
- log.info(
- f'downsampling to line graphic {self.name}'
- )
- bars.hide()
- curve.show()
- curve.update()
- changed_to_line = True
-
- elif in_line and not should_line:
- # change to bars graphic
- log.info(f'showing bars graphic {self.name}')
- curve.hide()
- bars.show()
- bars.update()
-
- return (
- graphics,
- r,
- {'read_from_key': False},
- should_line,
- changed_to_line,
- )
-
-
-class Flow(msgspec.Struct): # , frozen=True):
- '''
- (Financial Signal-)Flow compound type which wraps a real-time
- shm array stream with displayed graphics (curves, charts)
- for high level access and control as well as efficient incremental
- update.
-
- The intention is for this type to eventually be capable of shm-passing
- of incrementally updated graphics stream data between actors.
-
- '''
- name: str
- plot: pg.PlotItem
- graphics: Curve | BarItems
- _shm: ShmArray
- yrange: tuple[float, float] = None
-
- # in some cases a flow may want to change its
- # graphical "type" or, "form" when downsampling,
- # normally this is just a plain line.
- ds_graphics: Optional[Curve] = None
-
- is_ohlc: bool = False
- render: bool = True # toggle for display loop
-
- # downsampling state
- _last_uppx: float = 0
- _in_ds: bool = False
-
- # map from uppx -> (downsampled data, incremental graphics)
- _src_r: Optional[Renderer] = None
- _render_table: dict[
- Optional[int],
- tuple[Renderer, pg.GraphicsItem],
- ] = (None, None)
-
- # TODO: hackery to be able to set a shm later
- # but whilst also allowing this type to hashable,
- # likely will require serializable token that is used to attach
- # to the underlying shm ref after startup?
- # _shm: Optional[ShmArray] = None # currently, may be filled in "later"
-
- # last read from shm (usually due to an update call)
- _last_read: Optional[np.ndarray] = None
-
- # cache of y-range values per x-range input.
- _mxmns: dict[tuple[int, int], tuple[float, float]] = {}
-
- @property
- def shm(self) -> ShmArray:
- return self._shm
-
- # TODO: remove this and only allow setting through
- # private ``._shm`` attr?
- @shm.setter
- def shm(self, shm: ShmArray) -> ShmArray:
- self._shm = shm
-
- def maxmin(
- self,
- lbar: int,
- rbar: int,
-
- ) -> Optional[tuple[float, float]]:
- '''
- Compute the cached max and min y-range values for a given
- x-range determined by ``lbar`` and ``rbar`` or ``None``
- if no range can be determined (yet).
-
- '''
- rkey = (lbar, rbar)
- cached_result = self._mxmns.get(rkey)
- if cached_result:
- return cached_result
-
- shm = self.shm
- if shm is None:
- return None
-
- arr = shm.array
-
- # build relative indexes into shm array
- # TODO: should we just add/use a method
- # on the shm to do this?
- ifirst = arr[0]['index']
- slice_view = arr[
- lbar - ifirst:
- (rbar - ifirst) + 1
- ]
-
- if not slice_view.size:
- return None
-
- elif self.yrange:
- mxmn = self.yrange
- # print(f'{self.name} M4 maxmin: {mxmn}')
-
- else:
- if self.is_ohlc:
- ylow = np.min(slice_view['low'])
- yhigh = np.max(slice_view['high'])
-
- else:
- view = slice_view[self.name]
- ylow = np.min(view)
- yhigh = np.max(view)
-
- mxmn = ylow, yhigh
- # print(f'{self.name} MANUAL maxmin: {mxmin}')
-
- # cache result for input range
- assert mxmn
- self._mxmns[rkey] = mxmn
-
- return mxmn
-
- def view_range(self) -> tuple[int, int]:
- '''
- Return the indexes in view for the associated
- plot displaying this flow's data.
-
- '''
- vr = self.plot.viewRect()
- return int(vr.left()), int(vr.right())
-
- def datums_range(self) -> tuple[
- int, int, int, int, int, int
- ]:
- '''
- Return a range tuple for the datums present in view.
-
- '''
- l, r = self.view_range()
-
- # TODO: avoid this and have shm passed
- # in earlier.
- if self.shm is None:
- # haven't initialized the flow yet
- return (0, l, 0, 0, r, 0)
-
- array = self.shm.array
- index = array['index']
- start = index[0]
- end = index[-1]
- lbar = max(l, start)
- rbar = min(r, end)
- return (
- start, l, lbar, rbar, r, end,
- )
-
- def read(
- self,
- array_field: Optional[str] = None,
-
- ) -> tuple[
- int, int, np.ndarray,
- int, int, np.ndarray,
- ]:
- # read call
- array = self.shm.array
-
- indexes = array['index']
- ifirst = indexes[0]
- ilast = indexes[-1]
-
- ifirst, l, lbar, rbar, r, ilast = self.datums_range()
-
- # get read-relative indices adjusting
- # for master shm index.
- lbar_i = max(l, ifirst) - ifirst
- rbar_i = min(r, ilast) - ifirst
-
- if array_field:
- array = array[array_field]
-
- # TODO: we could do it this way as well no?
- # to_draw = array[lbar - ifirst:(rbar - ifirst) + 1]
- in_view = array[lbar_i: rbar_i + 1]
-
- return (
- # abs indices + full data set
- ifirst, ilast, array,
-
- # relative indices + in view datums
- lbar_i, rbar_i, in_view,
- )
-
- def update_graphics(
- self,
- use_vr: bool = True,
- render: bool = True,
- array_key: Optional[str] = None,
-
- profiler: Optional[Profiler] = None,
- do_append: bool = True,
-
- **kwargs,
-
- ) -> pg.GraphicsObject:
- '''
- Read latest datums from shm and render to (incrementally)
- render to graphics.
-
- '''
- profiler = Profiler(
- msg=f'Flow.update_graphics() for {self.name}',
- disabled=not pg_profile_enabled(),
- ms_threshold=4,
- # ms_threshold=ms_slower_then,
- )
- # shm read and slice to view
- read = (
- xfirst, xlast, src_array,
- ivl, ivr, in_view,
- ) = self.read()
-
- profiler('read src shm data')
-
- graphics = self.graphics
-
- if (
- not in_view.size
- or not render
- ):
- # print('exiting early')
- return graphics
-
- slice_to_head: int = -1
- should_redraw: bool = False
- should_line: bool = False
- rkwargs = {}
-
- # TODO: probably specialize ``Renderer`` types instead of
- # these logic checks?
- # - put these blocks into a `.load_renderer()` meth?
- # - consider a OHLCRenderer, StepCurveRenderer, Renderer?
- r = self._src_r
- if isinstance(graphics, BarItems):
- # XXX: special case where we change out graphics
- # to a line after a certain uppx threshold.
- (
- graphics,
- r,
- rkwargs,
- should_line,
- changed_to_line,
- ) = render_baritems(
- self,
- graphics,
- read,
- profiler,
- **kwargs,
- )
- should_redraw = changed_to_line or not should_line
- self._in_ds = should_line
-
- elif not r:
- if isinstance(graphics, StepCurve):
-
- r = self._src_r = Renderer(
- flow=self,
- fmtr=StepCurveFmtr(
- shm=self.shm,
- flow=self,
- _last_read=read,
- ),
- )
-
- # TODO: append logic inside ``.render()`` isn't
- # correct yet for step curves.. remove this to see it.
- should_redraw = True
- slice_to_head = -2
-
- else:
- r = self._src_r
- if not r:
- # just using for ``.diff()`` atm..
- r = self._src_r = Renderer(
- flow=self,
- fmtr=IncrementalFormatter(
- shm=self.shm,
- flow=self,
- _last_read=read,
- ),
- )
-
- # ``Curve`` derivative case(s):
- array_key = array_key or self.name
- # print(array_key)
-
- # ds update config
- new_sample_rate: bool = False
- should_ds: bool = r._in_ds
- showing_src_data: bool = not r._in_ds
-
- # downsampling incremental state checking
- # check for and set std m4 downsample conditions
- uppx = graphics.x_uppx()
- uppx_diff = (uppx - self._last_uppx)
- profiler(f'diffed uppx {uppx}')
- if (
- uppx > 1
- and abs(uppx_diff) >= 1
- ):
- log.debug(
- f'{array_key} sampler change: {self._last_uppx} -> {uppx}'
- )
- self._last_uppx = uppx
-
- new_sample_rate = True
- showing_src_data = False
- should_ds = True
- should_redraw = True
-
- elif (
- uppx <= 2
- and self._in_ds
- ):
- # we should de-downsample back to our original
- # source data so we clear our path data in prep
- # to generate a new one from original source data.
- new_sample_rate = True
- should_ds = False
- should_redraw = True
-
- showing_src_data = True
- # reset yrange to be computed from source data
- self.yrange = None
-
- # MAIN RENDER LOGIC:
- # - determine in view data and redraw on range change
- # - determine downsampling ops if needed
- # - (incrementally) update ``QPainterPath``
-
- out = r.render(
- read,
- array_key,
- profiler,
- uppx=uppx,
- # use_vr=True,
-
- # TODO: better way to detect and pass this?
- # if we want to eventually cache renderers for a given uppx
- # we should probably use this as a key + state?
- should_redraw=should_redraw,
- new_sample_rate=new_sample_rate,
- should_ds=should_ds,
- showing_src_data=showing_src_data,
-
- slice_to_head=slice_to_head,
- do_append=do_append,
-
- **rkwargs,
- )
- if showing_src_data:
- # print(f"{self.name} SHOWING SOURCE")
- # reset yrange to be computed from source data
- self.yrange = None
-
- if not out:
- log.warning(f'{self.name} failed to render!?')
- return graphics
-
- path, data, reset = out
-
- # if self.yrange:
- # print(f'flow {self.name} yrange from m4: {self.yrange}')
-
- # XXX: SUPER UGGGHHH... without this we get stale cache
- # graphics that don't update until you downsampler again..
- # reset = False
- # if reset:
- # with graphics.reset_cache():
- # # assign output paths to graphicis obj
- # graphics.path = r.path
- # graphics.fast_path = r.fast_path
-
- # # XXX: we don't need this right?
- # # graphics.draw_last_datum(
- # # path,
- # # src_array,
- # # data,
- # # reset,
- # # array_key,
- # # )
- # # graphics.update()
- # # profiler('.update()')
- # else:
- # assign output paths to graphicis obj
- graphics.path = r.path
- graphics.fast_path = r.fast_path
-
- graphics.draw_last_datum(
- path,
- src_array,
- data,
- reset,
- array_key,
- )
- graphics.update()
- profiler('.update()')
-
- # TODO: does this actuallly help us in any way (prolly should
- # look at the source / ask ogi). I think it avoid artifacts on
- # wheel-scroll downsampling curve updates?
- # TODO: is this ever better?
- # graphics.prepareGeometryChange()
- # profiler('.prepareGeometryChange()')
-
- # track downsampled state
- self._in_ds = r._in_ds
-
- return graphics
-
- def draw_last(
- self,
- array_key: Optional[str] = None,
- only_last_uppx: bool = False,
-
- ) -> None:
-
- # shm read and slice to view
- (
- xfirst, xlast, src_array,
- ivl, ivr, in_view,
- ) = self.read()
-
- g = self.graphics
- array_key = array_key or self.name
- x, y = g.draw_last_datum(
- g.path,
- src_array,
- src_array,
- False, # never reset path
- array_key,
- )
-
- # the renderer is downsampling we choose
- # to always try and updadte a single (interpolating)
- # line segment that spans and tries to display
- # the las uppx's worth of datums.
- # we only care about the last pixel's
- # worth of data since that's all the screen
- # can represent on the last column where
- # the most recent datum is being drawn.
- if self._in_ds or only_last_uppx:
- dsg = self.ds_graphics or self.graphics
-
- # XXX: pretty sure we don't need this?
- # if isinstance(g, Curve):
- # with dsg.reset_cache():
- uppx = self._last_uppx
- y = y[-uppx:]
- ymn, ymx = y.min(), y.max()
- # print(f'drawing uppx={uppx} mxmn line: {ymn}, {ymx}')
- try:
- iuppx = x[-uppx]
- except IndexError:
- # we're less then an x-px wide so just grab the start
- # datum index.
- iuppx = x[0]
-
- dsg._last_line = QLineF(
- iuppx, ymn,
- x[-1], ymx,
- )
- # print(f'updating DS curve {self.name}')
- dsg.update()
-
- else:
- # print(f'updating NOT DS curve {self.name}')
- g.update()
-
-
-class Renderer(msgspec.Struct):
-
- flow: Flow
- fmtr: IncrementalFormatter
-
- # output graphics rendering, the main object
- # processed in ``QGraphicsObject.paint()``
- path: Optional[QPainterPath] = None
- fast_path: Optional[QPainterPath] = None
-
- # XXX: just ideas..
- # called on the final data (transform) output to convert
- # to "graphical data form" a format that can be passed to
- # the ``.draw()`` implementation.
- # graphics_t: Optional[Callable[ShmArray, np.ndarray]] = None
- # graphics_t_shm: Optional[ShmArray] = None
-
- # path graphics update implementation methods
- # prepend_fn: Optional[Callable[QPainterPath, QPainterPath]] = None
- # append_fn: Optional[Callable[QPainterPath, QPainterPath]] = None
-
- # downsampling state
- _last_uppx: float = 0
- _in_ds: bool = False
-
- def draw_path(
- self,
- x: np.ndarray,
- y: np.ndarray,
- connect: str | np.ndarray = 'all',
- path: Optional[QPainterPath] = None,
- redraw: bool = False,
-
- ) -> QPainterPath:
-
- path_was_none = path is None
-
- if redraw and path:
- path.clear()
-
- # TODO: avoid this?
- if self.fast_path:
- self.fast_path.clear()
-
- # profiler('cleared paths due to `should_redraw=True`')
-
- path = pg.functions.arrayToQPath(
- x,
- y,
- connect=connect,
- finiteCheck=False,
-
- # reserve mem allocs see:
- # - https://doc.qt.io/qt-5/qpainterpath.html#reserve
- # - https://doc.qt.io/qt-5/qpainterpath.html#capacity
- # - https://doc.qt.io/qt-5/qpainterpath.html#clear
- # XXX: right now this is based on had hoc checks on a
- # hidpi 3840x2160 4k monitor but we should optimize for
- # the target display(s) on the sys.
- # if no_path_yet:
- # graphics.path.reserve(int(500e3))
- # path=path, # path re-use / reserving
- )
-
- # avoid mem allocs if possible
- if path_was_none:
- path.reserve(path.capacity())
-
- return path
-
- def render(
- self,
-
- new_read,
- array_key: str,
- profiler: Profiler,
- uppx: float = 1,
-
- # redraw and ds flags
- should_redraw: bool = False,
- new_sample_rate: bool = False,
- should_ds: bool = False,
- showing_src_data: bool = True,
-
- do_append: bool = True,
- slice_to_head: int = -1,
- use_fpath: bool = True,
-
- # only render datums "in view" of the ``ChartView``
- use_vr: bool = True,
- read_from_key: bool = True,
-
- ) -> list[QPainterPath]:
- '''
- Render the current graphics path(s)
-
- There are (at least) 3 stages from source data to graphics data:
- - a data transform (which can be stored in additional shm)
- - a graphics transform which converts discrete basis data to
- a `float`-basis view-coords graphics basis. (eg. ``ohlc_flatten()``,
- ``step_path_arrays_from_1d()``, etc.)
-
- - blah blah blah (from notes)
-
- '''
- # TODO: can the renderer just call ``Flow.read()`` directly?
- # unpack latest source data read
- fmtr = self.fmtr
-
- (
- _,
- _,
- array,
- ivl,
- ivr,
- in_view,
- ) = new_read
-
- # xy-path data transform: convert source data to a format
- # able to be passed to a `QPainterPath` rendering routine.
- fmt_out = fmtr.format_to_1d(
- new_read,
- array_key,
- profiler,
-
- slice_to_head=slice_to_head,
- read_src_from_key=read_from_key,
- slice_to_inview=use_vr,
- )
-
- # no history in view case
- if not fmt_out:
- # XXX: this might be why the profiler only has exits?
- return
-
- (
- x_1d,
- y_1d,
- connect,
- prepend_length,
- append_length,
- view_changed,
- # append_tres,
-
- ) = fmt_out
-
- # redraw conditions
- if (
- prepend_length > 0
- or new_sample_rate
- or view_changed
-
- # NOTE: comment this to try and make "append paths"
- # work below..
- or append_length > 0
- ):
- should_redraw = True
-
- path = self.path
- fast_path = self.fast_path
- reset = False
-
- # redraw the entire source data if we have either of:
- # - no prior path graphic rendered or,
- # - we always intend to re-render the data only in view
- if (
- path is None
- or should_redraw
- ):
- # print(f"{self.flow.name} -> REDRAWING BRUH")
- if new_sample_rate and showing_src_data:
- log.info(f'DEDOWN -> {array_key}')
- self._in_ds = False
-
- elif should_ds and uppx > 1:
-
- x_1d, y_1d, ymn, ymx = xy_downsample(
- x_1d,
- y_1d,
- uppx,
- )
- self.flow.yrange = ymn, ymx
- # print(f'{self.flow.name} post ds: ymn, ymx: {ymn},{ymx}')
-
- reset = True
- profiler(f'FULL PATH downsample redraw={should_ds}')
- self._in_ds = True
-
- path = self.draw_path(
- x=x_1d,
- y=y_1d,
- connect=connect,
- path=path,
- redraw=True,
- )
-
- profiler(
- 'generated fresh path. '
- f'(should_redraw: {should_redraw} '
- f'should_ds: {should_ds} new_sample_rate: {new_sample_rate})'
- )
-
- # TODO: get this piecewise prepend working - right now it's
- # giving heck on vwap...
- # elif prepend_length:
-
- # prepend_path = pg.functions.arrayToQPath(
- # x[0:prepend_length],
- # y[0:prepend_length],
- # connect='all'
- # )
-
- # # swap prepend path in "front"
- # old_path = graphics.path
- # graphics.path = prepend_path
- # # graphics.path.moveTo(new_x[0], new_y[0])
- # graphics.path.connectPath(old_path)
-
- elif (
- append_length > 0
- and do_append
- ):
- print(f'{array_key} append len: {append_length}')
- # new_x = x_1d[-append_length - 2:] # slice_to_head]
- # new_y = y_1d[-append_length - 2:] # slice_to_head]
- profiler('sliced append path')
- # (
- # x_1d,
- # y_1d,
- # connect,
- # ) = append_tres
-
- profiler(
- f'diffed array input, append_length={append_length}'
- )
-
- # if should_ds and uppx > 1:
- # new_x, new_y = xy_downsample(
- # new_x,
- # new_y,
- # uppx,
- # )
- # profiler(f'fast path downsample redraw={should_ds}')
-
- append_path = self.draw_path(
- x=x_1d,
- y=y_1d,
- connect=connect,
- path=fast_path,
- )
- profiler('generated append qpath')
-
- if use_fpath:
- # print(f'{self.flow.name}: FAST PATH')
- # an attempt at trying to make append-updates faster..
- if fast_path is None:
- fast_path = append_path
- # fast_path.reserve(int(6e3))
- else:
- fast_path.connectPath(append_path)
- size = fast_path.capacity()
- profiler(f'connected fast path w size: {size}')
-
- print(
- f"append_path br: {append_path.boundingRect()}\n"
- f"path size: {size}\n"
- f"append_path len: {append_path.length()}\n"
- f"fast_path len: {fast_path.length()}\n"
- )
- # graphics.path.moveTo(new_x[0], new_y[0])
- # path.connectPath(append_path)
-
- # XXX: lol this causes a hang..
- # graphics.path = graphics.path.simplified()
- else:
- size = path.capacity()
- profiler(f'connected history path w size: {size}')
- path.connectPath(append_path)
-
- self.path = path
- self.fast_path = fast_path
-
- return self.path, array, reset
diff --git a/piker/ui/_fsp.py b/piker/ui/_fsp.py
index 9e05f5454..6da93b718 100644
--- a/piker/ui/_fsp.py
+++ b/piker/ui/_fsp.py
@@ -42,6 +42,8 @@
_Token,
try_read,
)
+from ..data.feed import Flume
+from ..data._source import Symbol
from ._chart import (
ChartPlotWidget,
LinkedSplits,
@@ -76,15 +78,14 @@ def has_vlm(ohlcv: ShmArray) -> bool:
def update_fsp_chart(
- chart: ChartPlotWidget,
- flow,
+ viz,
graphics_name: str,
array_key: Optional[str],
**kwargs,
) -> None:
- shm = flow.shm
+ shm = viz.shm
if not shm:
return
@@ -99,18 +100,15 @@ def update_fsp_chart(
# update graphics
# NOTE: this does a length check internally which allows it
# staying above the last row check below..
- chart.update_graphics_from_flow(
- graphics_name,
- array_key=array_key or graphics_name,
- **kwargs,
- )
+ viz.update_graphics()
# XXX: re: ``array_key``: fsp func names must be unique meaning we
# can't have duplicates of the underlying data even if multiple
# sub-charts reference it under different 'named charts'.
# read from last calculated value and update any label
- last_val_sticky = chart._ysticks.get(graphics_name)
+ last_val_sticky = viz.plot.getAxis(
+ 'right')._stickies.get(graphics_name)
if last_val_sticky:
last = last_row[array_key]
last_val_sticky.update_from_data(-1, last)
@@ -211,7 +209,7 @@ async def open_fsp_actor_cluster(
async def run_fsp_ui(
linkedsplits: LinkedSplits,
- shm: ShmArray,
+ flume: Flume,
started: trio.Event,
target: Fsp,
conf: dict[str, dict],
@@ -248,9 +246,11 @@ async def run_fsp_ui(
else:
chart = linkedsplits.subplots[overlay_with]
+ shm = flume.rt_shm
chart.draw_curve(
- name=name,
- shm=shm,
+ name,
+ shm,
+ flume,
overlay=True,
color='default_light',
array_key=name,
@@ -260,8 +260,9 @@ async def run_fsp_ui(
else:
# create a new sub-chart widget for this fsp
chart = linkedsplits.add_plot(
- name=name,
- shm=shm,
+ name,
+ shm,
+ flume,
array_key=name,
sidepane=sidepane,
@@ -281,9 +282,10 @@ async def run_fsp_ui(
# profiler(f'fsp:{name} chart created')
# first UI update, usually from shm pushed history
+ viz = chart.get_viz(array_key)
update_fsp_chart(
chart,
- chart._flows[array_key],
+ viz,
name,
array_key=array_key,
)
@@ -310,7 +312,7 @@ async def run_fsp_ui(
# level_line(chart, 70, orient_v='bottom')
# level_line(chart, 80, orient_v='top')
- chart.view._set_yrange()
+ chart.view._set_yrange(viz=viz)
# done() # status updates
# profiler(f'fsp:{func_name} starting update loop')
@@ -351,6 +353,9 @@ async def run_fsp_ui(
# last = time.time()
+# TODO: maybe this should be our ``Viz`` type since it maps
+# one flume to the next? The machinery for task/actor mgmt should
+# be part of the instantiation API?
class FspAdmin:
'''
Client API for orchestrating FSP actors and displaying
@@ -362,7 +367,7 @@ def __init__(
tn: trio.Nursery,
cluster: dict[str, tractor.Portal],
linked: LinkedSplits,
- src_shm: ShmArray,
+ flume: Flume,
) -> None:
self.tn = tn
@@ -374,7 +379,11 @@ def __init__(
tuple[tractor.MsgStream, ShmArray]
] = {}
self._flow_registry: dict[_Token, str] = {}
- self.src_shm = src_shm
+
+ # TODO: make this a `.src_flume` and add
+ # a `dst_flume`?
+ # (=> but then wouldn't this be the most basic `Viz`?)
+ self.flume = flume
def rr_next_portal(self) -> tractor.Portal:
name, portal = next(self._rr_next_actor)
@@ -387,7 +396,7 @@ async def open_chain(
complete: trio.Event,
started: trio.Event,
fqsn: str,
- dst_shm: ShmArray,
+ dst_fsp_flume: Flume,
conf: dict,
target: Fsp,
loglevel: str,
@@ -408,9 +417,10 @@ async def open_chain(
# data feed key
fqsn=fqsn,
+ # TODO: pass `Flume.to_msg()`s here?
# mems
- src_shm_token=self.src_shm.token,
- dst_shm_token=dst_shm.token,
+ src_shm_token=self.flume.rt_shm.token,
+ dst_shm_token=dst_fsp_flume.rt_shm.token,
# target
ns_path=ns_path,
@@ -427,12 +437,14 @@ async def open_chain(
ctx.open_stream() as stream,
):
+ dst_fsp_flume.stream: tractor.MsgStream = stream
+
# register output data
self._registry[
(fqsn, ns_path)
] = (
stream,
- dst_shm,
+ dst_fsp_flume.rt_shm,
complete
)
@@ -467,9 +479,9 @@ async def start_engine_task(
worker_name: Optional[str] = None,
loglevel: str = 'info',
- ) -> (ShmArray, trio.Event):
+ ) -> (Flume, trio.Event):
- fqsn = self.linked.symbol.front_fqsn()
+ fqsn = self.flume.symbol.fqsn
# allocate an output shm array
key, dst_shm, opened = maybe_mk_fsp_shm(
@@ -477,8 +489,28 @@ async def start_engine_task(
target=target,
readonly=True,
)
+
+ portal = self.cluster.get(worker_name) or self.rr_next_portal()
+ provider_tag = portal.channel.uid
+
+ symbol = Symbol(
+ key=key,
+ broker_info={
+ provider_tag: {'asset_type': 'fsp'},
+ },
+ )
+ dst_fsp_flume = Flume(
+ symbol=symbol,
+ _rt_shm_token=dst_shm.token,
+ first_quote={},
+
+ # set to 0 presuming for now that we can't load
+ # FSP history (though we should eventually).
+ izero_hist=0,
+ izero_rt=0,
+ )
self._flow_registry[(
- self.src_shm._token,
+ self.flume.rt_shm._token,
target.name
)] = dst_shm._token
@@ -487,7 +519,6 @@ async def start_engine_task(
# f'Already started FSP `{fqsn}:{func_name}`'
# )
- portal = self.cluster.get(worker_name) or self.rr_next_portal()
complete = trio.Event()
started = trio.Event()
self.tn.start_soon(
@@ -496,13 +527,13 @@ async def start_engine_task(
complete,
started,
fqsn,
- dst_shm,
+ dst_fsp_flume,
conf,
target,
loglevel,
)
- return dst_shm, started
+ return dst_fsp_flume, started
async def open_fsp_chart(
self,
@@ -514,7 +545,7 @@ async def open_fsp_chart(
) -> (trio.Event, ChartPlotWidget):
- shm, started = await self.start_engine_task(
+ flume, started = await self.start_engine_task(
target,
conf,
loglevel,
@@ -526,7 +557,7 @@ async def open_fsp_chart(
run_fsp_ui,
self.linked,
- shm,
+ flume,
started,
target,
@@ -540,7 +571,7 @@ async def open_fsp_chart(
@acm
async def open_fsp_admin(
linked: LinkedSplits,
- src_shm: ShmArray,
+ flume: Flume,
**kwargs,
) -> AsyncGenerator[dict, dict[str, tractor.Portal]]:
@@ -561,7 +592,7 @@ async def open_fsp_admin(
tn,
cluster_map,
linked,
- src_shm,
+ flume,
)
try:
yield admin
@@ -575,8 +606,9 @@ async def open_fsp_admin(
async def open_vlm_displays(
linked: LinkedSplits,
- ohlcv: ShmArray,
+ flume: Flume,
dvlm: bool = True,
+ loglevel: str = 'info',
task_status: TaskStatus[ChartPlotWidget] = trio.TASK_STATUS_IGNORED,
@@ -597,6 +629,8 @@ async def open_vlm_displays(
sig = inspect.signature(flow_rates.func)
params = sig.parameters
+ ohlcv: ShmArray = flume.rt_shm
+
async with (
open_fsp_sidepane(
linked, {
@@ -616,7 +650,7 @@ async def open_vlm_displays(
}
},
) as sidepane,
- open_fsp_admin(linked, ohlcv) as admin,
+ open_fsp_admin(linked, flume) as admin,
):
# TODO: support updates
# period_field = sidepane.fields['period']
@@ -624,14 +658,21 @@ async def open_vlm_displays(
# str(period_param.default)
# )
+ # use slightly less light (than bracket) gray
+ # for volume from "main exchange" and a more "bluey"
+ # gray for "dark" vlm.
+ vlm_color = 'i3'
+ dark_vlm_color = 'charcoal'
+
# built-in vlm which we plot ASAP since it's
# usually data provided directly with OHLC history.
shm = ohlcv
- ohlc_chart = linked.chart
+ # ohlc_chart = linked.chart
- chart = linked.add_plot(
+ vlm_chart = linked.add_plot(
name='volume',
shm=shm,
+ flume=flume,
array_key='volume',
sidepane=sidepane,
@@ -644,71 +685,45 @@ async def open_vlm_displays(
# the curve item internals are pretty convoluted.
style='step',
)
- ohlc_chart.view.enable_auto_yrange(
- src_vb=chart.view,
- )
-
- # force 0 to always be in view
- def multi_maxmin(
- names: list[str],
-
- ) -> tuple[float, float]:
- '''
- Flows "group" maxmin loop; assumes all named flows
- are in the same co-domain and thus can be sorted
- as one set.
-
- Iterates all the named flows and calls the chart
- api to find their range values and return.
-
- TODO: really we should probably have a more built-in API
- for this?
-
- '''
- mx = 0
- for name in names:
- ymn, ymx = chart.maxmin(name=name)
- mx = max(mx, ymx)
-
- return 0, mx
+ vlm_viz = vlm_chart._vizs['volume']
# TODO: fix the x-axis label issue where if you put
# the axis on the left it's totally not lined up...
# show volume units value on LHS (for dinkus)
- # chart.hideAxis('right')
- # chart.showAxis('left')
+ # vlm_chart.hideAxis('right')
+ vlm_chart.hideAxis('left')
# send back new chart to caller
- task_status.started(chart)
+ task_status.started(vlm_chart)
# should **not** be the same sub-chart widget
- assert chart.name != linked.chart.name
+ assert vlm_chart.name != linked.chart.name
# sticky only on sub-charts atm
- last_val_sticky = chart._ysticks[chart.name]
+ last_val_sticky = vlm_chart.plotItem.getAxis(
+ 'right')._stickies.get(vlm_chart.name)
# read from last calculated value
value = shm.array['volume'][-1]
last_val_sticky.update_from_data(-1, value)
- vlm_curve = chart.update_graphics_from_flow(
- 'volume',
- # shm.array,
- )
+ _, _, vlm_curve = vlm_viz.update_graphics()
# size view to data once at outset
- chart.view._set_yrange()
+ # vlm_chart.view._set_yrange(
+ # viz=vlm_viz
+ # )
# add axis title
- axis = chart.getAxis('right')
+ axis = vlm_chart.getAxis('right')
axis.set_title(' vlm')
if dvlm:
tasks_ready = []
# spawn and overlay $ vlm on the same subchart
- dvlm_shm, started = await admin.start_engine_task(
+ dvlm_flume, started = await admin.start_engine_task(
dolla_vlm,
{ # fsp engine conf
@@ -720,45 +735,32 @@ def multi_maxmin(
},
},
},
- # loglevel,
+ loglevel,
)
- tasks_ready.append(started)
-
- # FIXME: we should error on starting the same fsp right
- # since it might collide with existing shm.. or wait we
- # had this before??
- # dolla_vlm,
-
- tasks_ready.append(started)
- # profiler(f'created shm for fsp actor: {display_name}')
-
- # wait for all engine tasks to startup
- async with trio.open_nursery() as n:
- for event in tasks_ready:
- n.start_soon(event.wait)
# dolla vlm overlay
# XXX: the main chart already contains a vlm "units" axis
# so here we add an overlay wth a y-range in
# $ liquidity-value units (normally a fiat like USD).
- dvlm_pi = chart.overlay_plotitem(
+ dvlm_pi = vlm_chart.overlay_plotitem(
'dolla_vlm',
index=0, # place axis on inside (nearest to chart)
+
axis_title=' $vlm',
- axis_side='right',
+ axis_side='left',
+
axis_kwargs={
'typical_max_str': ' 100.0 M ',
'formatter': partial(
humanize,
digits=2,
),
+ 'text_color': vlm_color,
},
)
- dvlm_pi.hideAxis('left')
- dvlm_pi.hideAxis('bottom')
# all to be overlayed curve names
- fields = [
+ dvlm_fields = [
'dolla_vlm',
'dark_vlm',
]
@@ -771,27 +773,12 @@ def multi_maxmin(
'dark_trade_rate',
]
- group_mxmn = partial(
- multi_maxmin,
- # keep both regular and dark vlm in view
- names=fields,
- # names=fields + dvlm_rate_fields,
- )
-
- # add custom auto range handler
- dvlm_pi.vb._maxmin = group_mxmn
-
- # use slightly less light (then bracket) gray
- # for volume from "main exchange" and a more "bluey"
- # gray for "dark" vlm.
- vlm_color = 'i3'
- dark_vlm_color = 'charcoal'
-
# add dvlm (step) curves to common view
def chart_curves(
names: list[str],
pi: pg.PlotItem,
shm: ShmArray,
+ flume: Flume,
step_mode: bool = False,
style: str = 'solid',
@@ -805,9 +792,13 @@ def chart_curves(
else:
color = 'bracket'
- curve, _ = chart.draw_curve(
- name=name,
- shm=shm,
+ assert isinstance(shm, ShmArray)
+ assert isinstance(flume, Flume)
+
+ viz = vlm_chart.draw_curve(
+ name,
+ shm,
+ flume,
array_key=name,
overlay=pi,
color=color,
@@ -815,38 +806,32 @@ def chart_curves(
style=style,
pi=pi,
)
+ assert viz.plot is pi
- # TODO: we need a better API to do this..
- # specially store ref to shm for lookup in display loop
- # since only a placeholder of `None` is entered in
- # ``.draw_curve()``.
- flow = chart._flows[name]
- assert flow.plot is pi
-
+ await started.wait()
chart_curves(
- fields,
+ dvlm_fields,
dvlm_pi,
- dvlm_shm,
+ dvlm_flume.rt_shm,
+ dvlm_flume,
step_mode=True,
)
- # spawn flow rates fsp **ONLY AFTER** the 'dolla_vlm' fsp is
- # up since this one depends on it.
-
- fr_shm, started = await admin.start_engine_task(
+ # NOTE: spawn flow rates fsp **ONLY AFTER** the 'dolla_vlm' fsp is
+ # up since calculating vlm "rates" obvs first requires the
+ # underlying vlm event feed ;)
+ fr_flume, started = await admin.start_engine_task(
flow_rates,
{ # fsp engine conf
'func_name': 'flow_rates',
- 'zero_on_step': False,
+ 'zero_on_step': True,
},
- # loglevel,
+ loglevel,
)
- await started.wait()
-
# chart_curves(
# dvlm_rate_fields,
# dvlm_pi,
- # fr_shm,
+ # fr_flume.rt_shm,
# )
# TODO: is there a way to "sync" the dual axes such that only
@@ -855,24 +840,26 @@ def chart_curves(
# displayed and the curves are effectively the same minus
# liquidity events (well at least on low OHLC periods - 1s).
vlm_curve.hide()
- chart.removeItem(vlm_curve)
- vflow = chart._flows['volume']
- vflow.render = False
+ vlm_chart.removeItem(vlm_curve)
+ # vlm_chart.plotItem.layout.setMinimumWidth(0)
+ # vlm_chart.removeAxis('left')
+ vlm_viz = vlm_chart._vizs['volume']
+ vlm_viz.render = False
# avoid range sorting on volume once disabled
- chart.view.disable_auto_yrange()
+ vlm_chart.view.disable_auto_yrange()
# Trade rate overlay
# XXX: requires an additional overlay for
# a trades-per-period (time) y-range.
- tr_pi = chart.overlay_plotitem(
+ tr_pi = vlm_chart.overlay_plotitem(
'trade_rates',
# TODO: dynamically update period (and thus this axis?)
# title from user input.
axis_title='clears',
-
axis_side='left',
+
axis_kwargs={
'typical_max_str': ' 10.0 M ',
'formatter': partial(
@@ -883,18 +870,13 @@ def chart_curves(
},
)
- # add custom auto range handler
- tr_pi.vb.maxmin = partial(
- multi_maxmin,
- # keep both regular and dark vlm in view
- names=trade_rate_fields,
- )
- tr_pi.hideAxis('bottom')
+ await started.wait()
chart_curves(
trade_rate_fields,
tr_pi,
- fr_shm,
+ fr_flume.rt_shm,
+ fr_flume,
# step_mode=True,
# dashed line to represent "individual trades" being
@@ -928,7 +910,7 @@ def chart_curves(
async def start_fsp_displays(
linked: LinkedSplits,
- ohlcv: ShmArray,
+ flume: Flume,
group_status_key: str,
loglevel: str,
@@ -971,7 +953,10 @@ async def start_fsp_displays(
async with (
# NOTE: this admin internally opens an actor cluster
- open_fsp_admin(linked, ohlcv) as admin,
+ open_fsp_admin(
+ linked,
+ flume,
+ ) as admin,
):
statuses = []
for target, conf in fsp_conf.items():
diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py
index e17e662ef..dc0e47ed3 100644
--- a/piker/ui/_interaction.py
+++ b/piker/ui/_interaction.py
@@ -20,8 +20,15 @@
"""
from __future__ import annotations
from contextlib import asynccontextmanager
+from math import (
+ isinf,
+)
import time
-from typing import Optional, Callable
+from typing import (
+ Optional,
+ Callable,
+ TYPE_CHECKING,
+)
import pyqtgraph as pg
# from pyqtgraph.GraphicsScene import mouseEvents
@@ -35,10 +42,17 @@
from ..log import get_logger
from .._profile import Profiler
from .._profile import pg_profile_enabled, ms_slower_then
+from ..data.types import Struct
+from ..data._pathops import slice_from_time
# from ._style import _min_points_to_show
from ._editors import SelectRect
from . import _event
+if TYPE_CHECKING:
+ from ._chart import ChartPlotWidget
+ from ._dataviz import Viz
+ # from ._overlay import PlotItemOverlay
+
log = get_logger(__name__)
@@ -76,7 +90,6 @@ async def handle_viewmode_kb_inputs(
pressed: set[str] = set()
last = time.time()
- trigger_mode: str
action: str
on_next_release: Optional[Callable] = None
@@ -332,6 +345,49 @@ async def handle_viewmode_mouse(
view.order_mode.submit_order()
+class OverlayT(Struct):
+ '''
+ An overlay co-domain range transformer.
+
+ Used to translate and apply a range from one y-range
+ to another based on a returns logarithm:
+
+ R(ymn, ymx, yref) = (ymx - yref)/yref
+
+ which gives the log-scale multiplier, and
+
+ ymx_t = yref * (1 + R)
+
+ which gives the inverse to translate to the same value
+ in the target co-domain.
+
+ '''
+ viz: Viz # viz with largest measured dispersion
+
+ mx: float = 0
+ mn: float = float('inf')
+
+ up_swing: float = 0
+ down_swing: float = 0
+ disp: float = 0
+
+ def loglin_from_range(
+ self,
+
+ y_ref: float, # reference value for dispersion metric
+ mn: float, # min y in target log-lin range
+ mx: float, # max y in target log-lin range
+ offset: float, # y-offset to start log-scaling from
+
+ ) -> tuple[float, float]:
+ r_up = (mx - y_ref) / y_ref
+ r_down = (mn - y_ref) / y_ref
+ ymn = offset * (1 + r_down)
+ ymx = offset * (1 + r_up)
+
+ return ymn, ymx
+
+
class ChartView(ViewBox):
'''
Price chart view box with interaction behaviors you'd expect from
@@ -366,7 +422,6 @@ def __init__(
)
# for "known y-range style"
self._static_yrange = static_yrange
- self._maxmin = None
# disable vertical scrolling
self.setMouseEnabled(
@@ -375,7 +430,7 @@ def __init__(
)
self.linked = None
- self._chart: 'ChartPlotWidget' = None # noqa
+ self._chart: ChartPlotWidget | None = None # noqa
# add our selection box annotator
self.select_box = SelectRect(self)
@@ -387,6 +442,10 @@ def __init__(
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self._ic = None
+ # TODO: probably just assign this whenever a new `PlotItem` is
+ # allocated since they're 1to1 with views..
+ self._viz: Viz | None = None
+
def start_ic(
self,
) -> None:
@@ -446,29 +505,18 @@ async def open_async_input_handler(
yield self
@property
- def chart(self) -> 'ChartPlotWidget': # type: ignore # noqa
+ def chart(self) -> ChartPlotWidget: # type: ignore # noqa
return self._chart
@chart.setter
- def chart(self, chart: 'ChartPlotWidget') -> None: # type: ignore # noqa
+ def chart(self, chart: ChartPlotWidget) -> None: # type: ignore # noqa
self._chart = chart
self.select_box.chart = chart
- if self._maxmin is None:
- self._maxmin = chart.maxmin
-
- @property
- def maxmin(self) -> Callable:
- return self._maxmin
-
- @maxmin.setter
- def maxmin(self, callback: Callable) -> None:
- self._maxmin = callback
def wheelEvent(
self,
ev,
axis=None,
- # relayed_from: ChartView = None,
):
'''
Override "center-point" location for scrolling.
@@ -483,7 +531,6 @@ def wheelEvent(
if (
not linked
):
- # print(f'{self.name} not linked but relay from {relayed_from.name}')
return
if axis in (0, 1):
@@ -495,22 +542,23 @@ def wheelEvent(
chart = self.linked.chart
# don't zoom more then the min points setting
- l, lbar, rbar, r = chart.bars_range()
- # vl = r - l
+ viz = chart.get_viz(chart.name)
+ vl, lbar, rbar, vr = viz.bars_range()
- # if ev.delta() > 0 and vl <= _min_points_to_show:
- # log.debug("Max zoom bruh...")
+ # TODO: max/min zoom limits incorporating time step size.
+ # rl = vr - vl
+ # if ev.delta() > 0 and rl <= _min_points_to_show:
+ # log.warning("Max zoom bruh...")
# return
-
# if (
# ev.delta() < 0
- # and vl >= len(chart._flows[chart.name].shm.array) + 666
+ # and rl >= len(chart._vizs[chart.name].shm.array) + 666
# ):
- # log.debug("Min zoom bruh...")
+ # log.warning("Min zoom bruh...")
# return
# actual scaling factor
- s = 1.015 ** (ev.delta() * -1 / 20) # self.state['wheelScaleFactor'])
+ s = 1.016 ** (ev.delta() * -1 / 20) # self.state['wheelScaleFactor'])
s = [(None if m is False else s) for m in mask]
if (
@@ -536,50 +584,19 @@ def wheelEvent(
# scale_y = 1.3 ** (center.y() * -1 / 20)
self.scaleBy(s, center)
+ # zoom in view-box area
else:
-
- # center = pg.Point(
- # fn.invertQTransform(self.childGroup.transform()).map(ev.pos())
- # )
-
- # XXX: scroll "around" the right most element in the view
- # which stays "pinned" in place.
-
- # furthest_right_coord = self.boundingRect().topRight()
-
- # yaxis = pg.Point(
- # fn.invertQTransform(
- # self.childGroup.transform()
- # ).map(furthest_right_coord)
- # )
-
- # This seems like the most "intuitive option, a hybrid of
- # tws and tv styles
- last_bar = pg.Point(int(rbar)) + 1
-
- ryaxis = chart.getAxis('right')
- r_axis_x = ryaxis.pos().x()
-
- end_of_l1 = pg.Point(
- round(
- chart.cv.mapToView(
- pg.Point(r_axis_x - chart._max_l1_line_len)
- # QPointF(chart._max_l1_line_len, 0)
- ).x()
- )
- ) # .x()
-
- # self.state['viewRange'][0][1] = end_of_l1
- # focal = pg.Point((last_bar.x() + end_of_l1)/2)
-
+ # use right-most point of current curve graphic
+ xl = viz.graphics.x_last()
focal = min(
- last_bar,
- end_of_l1,
- key=lambda p: p.x()
+ xl,
+ vr,
)
- # focal = pg.Point(last_bar.x() + end_of_l1)
self._resetTarget()
+
+ # NOTE: scroll "around" the right most datum-element in view
+ # gives the feeling of staying "pinned" in place.
self.scaleBy(s, focal)
# XXX: the order of the next 2 lines i'm pretty sure
@@ -587,7 +604,7 @@ def wheelEvent(
# update, but i gotta feelin that because this one is signal
# based (and thus not necessarily sync invoked right away)
# that calling the resize method manually might work better.
- self.sigRangeChangedManually.emit(mask)
+ # self.sigRangeChangedManually.emit(mask)
# XXX: without this is seems as though sometimes
# when zooming in from far out (and maybe vice versa?)
@@ -597,7 +614,8 @@ def wheelEvent(
# that never seems to happen? Only question is how much this
# "double work" is causing latency when these missing event
# fires don't happen?
- self.maybe_downsample_graphics()
+ self.interact_graphics_cycle()
+ self.interact_graphics_cycle()
ev.accept()
@@ -605,21 +623,8 @@ def mouseDragEvent(
self,
ev,
axis: Optional[int] = None,
- # relayed_from: ChartView = None,
) -> None:
- # if relayed_from:
- # print(f'PAN: {self.name} -> RELAYED FROM: {relayed_from.name}')
-
- # NOTE since in the overlay case axes are already
- # "linked" any x-range change will already be mirrored
- # in all overlaid ``PlotItems``, so we need to simply
- # ignore the signal here since otherwise we get N-calls
- # from N-overlays resulting in an "accelerated" feeling
- # panning motion instead of the expect linear shift.
- # if relayed_from:
- # return
-
pos = ev.pos()
lastPos = ev.lastPos()
dif = pos - lastPos
@@ -629,7 +634,10 @@ def mouseDragEvent(
button = ev.button()
# Ignore axes if mouse is disabled
- mouseEnabled = np.array(self.state['mouseEnabled'], dtype=np.float)
+ mouseEnabled = np.array(
+ self.state['mouseEnabled'],
+ dtype=np.float,
+ )
mask = mouseEnabled.copy()
if axis is not None:
mask[1-axis] = 0.0
@@ -689,9 +697,6 @@ def mouseDragEvent(
# PANNING MODE
else:
- # XXX: WHY
- ev.accept()
-
try:
self.start_ic()
except RuntimeError:
@@ -715,7 +720,10 @@ def mouseDragEvent(
if x is not None or y is not None:
self.translateBy(x=x, y=y)
- self.sigRangeChangedManually.emit(self.state['mouseEnabled'])
+ # self.sigRangeChangedManually.emit(mask)
+ # self.state['mouseEnabled']
+ # )
+ self.interact_graphics_cycle()
if ev.isFinish():
self.signal_ic()
@@ -723,6 +731,9 @@ def mouseDragEvent(
# self._ic = None
# self.chart.resume_all_feeds()
+ # # XXX: WHY
+ # ev.accept()
+
# WEIRD "RIGHT-CLICK CENTER ZOOM" MODE
elif button & QtCore.Qt.RightButton:
@@ -743,10 +754,12 @@ def mouseDragEvent(
center = Point(tr.map(ev.buttonDownPos(QtCore.Qt.RightButton)))
self._resetTarget()
self.scaleBy(x=x, y=y, center=center)
- self.sigRangeChangedManually.emit(self.state['mouseEnabled'])
- # XXX: WHY
- ev.accept()
+ # self.sigRangeChangedManually.emit(self.state['mouseEnabled'])
+ self.interact_graphics_cycle()
+
+ # XXX: WHY
+ ev.accept()
# def mouseClickEvent(self, event: QtCore.QEvent) -> None:
# '''This routine is rerouted to an async handler.
@@ -768,7 +781,12 @@ def _set_yrange(
*,
yrange: Optional[tuple[float, float]] = None,
- range_margin: float = 0.06,
+ viz: Viz | None = None,
+
+ # NOTE: this value pairs (more or less) with L1 label text
+ # height offset from from the bid/ask lines.
+ range_margin: float | None = 0.09,
+
bars_range: Optional[tuple[int, int, int, int]] = None,
# flag to prevent triggering sibling charts from the same linked
@@ -786,14 +804,13 @@ def _set_yrange(
'''
name = self.name
- # print(f'YRANGE ON {name}')
+ # print(f'YRANGE ON {name} -> yrange{yrange}')
profiler = Profiler(
msg=f'`ChartView._set_yrange()`: `{name}`',
disabled=not pg_profile_enabled(),
ms_threshold=ms_slower_then,
delayed=True,
)
- set_range = True
chart = self._chart
# view has been set in 'axis' mode
@@ -802,8 +819,8 @@ def _set_yrange(
# - disable autoranging
# - remove any y range limits
if chart._static_yrange == 'axis':
- set_range = False
self.setLimits(yMin=None, yMax=None)
+ return
# static y-range has been set likely by
# a specialized FSP configuration.
@@ -816,42 +833,70 @@ def _set_yrange(
elif yrange is not None:
ylow, yhigh = yrange
- if set_range:
+ # XXX: only compute the mxmn range
+ # if none is provided as input!
+ if not yrange:
- # XXX: only compute the mxmn range
- # if none is provided as input!
- if not yrange:
- # flow = chart._flows[name]
- yrange = self._maxmin()
+ if not viz:
+ breakpoint()
- if yrange is None:
- log.warning(f'No yrange provided for {name}!?')
- print(f"WTF NO YRANGE {name}")
- return
+ out = viz.maxmin()
+ if out is None:
+ log.warning(f'No yrange provided for {name}!?')
+ return
+ (
+ ixrng,
+ _,
+ yrange
+ ) = out
- ylow, yhigh = yrange
+ profiler(f'`{self.name}:Viz.maxmin()` -> {ixrng}=>{yrange}')
+
+ if yrange is None:
+ log.warning(f'No yrange provided for {name}!?')
+ return
- profiler(f'callback ._maxmin(): {yrange}')
+ ylow, yhigh = yrange
- # view margins: stay within a % of the "true range"
+ # view margins: stay within a % of the "true range"
+ if range_margin is not None:
diff = yhigh - ylow
- ylow = ylow - (diff * range_margin)
- yhigh = yhigh + (diff * range_margin)
-
- # XXX: this often needs to be unset
- # to get different view modes to operate
- # correctly!
- self.setLimits(
- yMin=ylow,
- yMax=yhigh,
+ ylow = max(
+ ylow - (diff * range_margin),
+ 0,
+ )
+ yhigh = min(
+ yhigh + (diff * range_margin),
+ yhigh * (1 + range_margin),
)
- self.setYRange(ylow, yhigh)
- profiler(f'set limits: {(ylow, yhigh)}')
+ # XXX: this often needs to be unset
+ # to get different view modes to operate
+ # correctly!
+
+ # print(
+ # f'set limits {self.name}:\n'
+ # f'ylow: {ylow}\n'
+ # f'yhigh: {yhigh}\n'
+ # )
+ self.setYRange(
+ ylow,
+ yhigh,
+ padding=0,
+ )
+ self.setLimits(
+ yMin=ylow,
+ yMax=yhigh,
+ )
+ self.update()
+
+        # LOL: yet another pg bug..
+ # can't use `msg=f'setYRange({ylow}, {yhigh}')`
profiler.finish()
def enable_auto_yrange(
self,
+ viz: Viz,
src_vb: Optional[ChartView] = None,
) -> None:
@@ -863,9 +908,6 @@ def enable_auto_yrange(
if src_vb is None:
src_vb = self
- # widget-UIs/splitter(s) resizing
- src_vb.sigResized.connect(self._set_yrange)
-
# re-sampling trigger:
# TODO: a smarter way to avoid calling this needlessly?
# 2 things i can think of:
@@ -873,38 +915,22 @@ def enable_auto_yrange(
# iterate those.
# - only register this when certain downsample-able graphics are
# "added to scene".
- src_vb.sigRangeChangedManually.connect(
- self.maybe_downsample_graphics
+ # src_vb.sigRangeChangedManually.connect(
+ # self.interact_graphics_cycle
+ # )
+
+ # widget-UIs/splitter(s) resizing
+ src_vb.sigResized.connect(
+ self.interact_graphics_cycle
)
- # mouse wheel doesn't emit XRangeChanged
- src_vb.sigRangeChangedManually.connect(self._set_yrange)
-
- # XXX: enabling these will cause "jittery"-ness
- # on zoom where sharp diffs in the y-range will
- # not re-size right away until a new sample update?
- # if src_vb is not self:
- # src_vb.sigXRangeChanged.connect(self._set_yrange)
- # src_vb.sigXRangeChanged.connect(
- # self.maybe_downsample_graphics
- # )
def disable_auto_yrange(self) -> None:
+ # XXX: not entirely sure why we can't de-reg this..
self.sigResized.disconnect(
- self._set_yrange,
- )
- self.sigRangeChangedManually.disconnect(
- self.maybe_downsample_graphics
- )
- self.sigRangeChangedManually.disconnect(
- self._set_yrange,
+ self.interact_graphics_cycle
)
- # self.sigXRangeChanged.disconnect(self._set_yrange)
- # self.sigXRangeChanged.disconnect(
- # self.maybe_downsample_graphics
- # )
-
def x_uppx(self) -> float:
'''
Return the "number of x units" within a single
@@ -912,7 +938,7 @@ def x_uppx(self) -> float:
graphics items which are our children.
'''
- graphics = [f.graphics for f in self._chart._flows.values()]
+ graphics = [f.graphics for f in self._chart._vizs.values()]
if not graphics:
return 0
@@ -923,59 +949,439 @@ def x_uppx(self) -> float:
else:
return 0
- def maybe_downsample_graphics(
+ def interact_graphics_cycle(
self,
- autoscale_overlays: bool = True,
+ *args, # capture signal-handler related shit
+
+ debug_print: bool = False,
+ do_overlay_scaling: bool = True,
+ do_linked_charts: bool = True,
):
profiler = Profiler(
- msg=f'ChartView.maybe_downsample_graphics() for {self.name}',
+ msg=f'ChartView.interact_graphics_cycle() for {self.name}',
disabled=not pg_profile_enabled(),
+ ms_threshold=ms_slower_then,
# XXX: important to avoid not seeing underlying
- # ``.update_graphics_from_flow()`` nested profiling likely
+ # ``Viz.update_graphics()`` nested profiling likely
# due to the way delaying works and garbage collection of
# the profiler in the delegated method calls.
- ms_threshold=6,
- # ms_threshold=ms_slower_then,
+ delayed=True,
+
+ # for hardcore latency checking, comment these flags above.
+ # disabled=False,
+ # ms_threshold=4,
)
- # TODO: a faster single-loop-iterator way of doing this XD
chart = self._chart
- plots = {chart.name: chart}
-
linked = self.linked
- if linked:
+ if (
+ do_linked_charts
+ and linked
+ ):
+ plots = {linked.chart.name: linked.chart}
plots |= linked.subplots
+ else:
+ plots = {chart.name: chart}
+ # TODO: a faster single-loop-iterator way of doing this?
for chart_name, chart in plots.items():
- for name, flow in chart._flows.items():
-
- if (
- not flow.render
- # XXX: super important to be aware of this.
- # or not flow.graphics.isVisible()
- ):
+ # Common `PlotItem` maxmin table; presumes that some path
+ # graphics (and thus their backing data sets) are in the
+            # same co-domain and view box (since they were added
+            # as separate graphics objects to a common plot) and thus can
+ # be sorted as one set per plot.
+ mxmns_by_common_pi: dict[
+ pg.PlotItem,
+ tuple[float, float],
+ ] = {}
+
+ # proportional group auto-scaling per overlay set.
+ # -> loop through overlays on each multi-chart widget
+ # and scale all y-ranges based on autoscale config.
+ # -> for any "group" overlay we want to dispersion normalize
+ # and scale minor charts onto the major chart: the chart
+ # with the most dispersion in the set.
+ major_viz: Viz = None
+ major_mx: float = 0
+ major_mn: float = float('inf')
+ # mx_up_rng: float = 0
+ # mn_down_rng: float = 0
+ mx_disp: float = 0
+
+        # collect certain flows that have graphics objects **in separate
+ # plots/viewboxes** into groups and do a common calc to
+ # determine auto-ranging input for `._set_yrange()`.
+        # this is primarily used for our so called "log-linearized
+ # multi-plot" overlay technique.
+ start_datums: dict[
+ ViewBox,
+ tuple[
+ Viz,
+ float, # y start
+ float, # y min
+ float, # y max
+ float, # y median
+ slice, # in-view array slice
+ np.ndarray, # in-view array
+ ],
+ ] = {}
+ major_in_view: np.ndarray = None
+
+ for name, viz in chart._vizs.items():
+
+ if not viz.render:
# print(f'skipping {flow.name}')
continue
# pass in no array which will read and render from the last
# passed array (normally provided by the display loop.)
- chart.update_graphics_from_flow(
- name,
- use_vr=True,
+ in_view, i_read_range, _ = viz.update_graphics()
+
+ if not in_view:
+ continue
+
+ profiler(f'{viz.name}@{chart_name} `Viz.update_graphics()`')
+
+ out = viz.maxmin(i_read_range=i_read_range)
+ if out is None:
+ log.warning(f'No yrange provided for {name}!?')
+ return
+ (
+ ixrng,
+ read_slc,
+ yrange
+ ) = out
+ profiler(f'{viz.name}@{chart_name} `Viz.maxmin()`')
+
+ pi = viz.plot
+
+ # handle multiple graphics-objs per viewbox cases
+ mxmn = mxmns_by_common_pi.get(pi)
+ if mxmn:
+ yrange = mxmns_by_common_pi[pi] = (
+ min(yrange[0], mxmn[0]),
+ max(yrange[1], mxmn[1]),
+ )
+
+ else:
+ mxmns_by_common_pi[pi] = yrange
+
+ profiler(f'{viz.name}@{chart_name} common pi sort')
+
+ # handle overlay log-linearized group scaling cases
+ # TODO: a better predicate here, likely something
+ # to do with overlays and their settings..
+ if (
+ viz.is_ohlc
+ ):
+ ymn, ymx = yrange
+ # print(f'adding {viz.name} to overlay')
+ # mxmn_groups[viz.name] = out
+ # viz = chart._vizs[viz_name]
+
+ # determine start datum in view
+ arr = viz.shm.array
+ in_view = arr[read_slc]
+ row_start = arr[read_slc.start - 1]
+
+ # y_med = (ymx - ymn) / 2
+ # y_med = viz.median_from_range(
+ # read_slc.start,
+ # read_slc.stop,
+ # )
+ if viz.is_ohlc:
+ y_start = row_start['open']
+ else:
+ y_start = row_start[viz.name]
+
+ profiler(f'{viz.name}@{chart_name} MINOR curve median')
+
+ start_datums[viz.plot.vb] = (
+ viz,
+ y_start,
+ ymn,
+ ymx,
+ # y_med,
+ read_slc,
+ in_view,
+ )
+
+ # find curve with max dispersion
+ disp = abs(ymx - ymn) / y_start
+
+ # track the "major" curve as the curve with most
+ # dispersion.
+ if disp > mx_disp:
+ major_viz = viz
+ mx_disp = disp
+ major_mn = ymn
+ major_mx = ymx
+ major_in_view = in_view
+ profiler(f'{viz.name}@{chart_name} set new major')
+
+ # compute directional (up/down) y-range % swing/dispersion
+ # y_ref = y_med
+ # up_rng = (ymx - y_ref) / y_ref
+ # down_rng = (ymn - y_ref) / y_ref
+
+ # mx_up_rng = max(mx_up_rng, up_rng)
+ # mn_down_rng = min(mn_down_rng, down_rng)
+
+ # print(
+ # f'{viz.name}@{chart_name} group mxmn calc\n'
+ # '--------------------\n'
+ # f'y_start: {y_start}\n'
+ # f'ymn: {ymn}\n'
+ # f'ymx: {ymx}\n'
+ # f'mx_disp: {mx_disp}\n'
+ # f'up %: {up_rng * 100}\n'
+ # f'down %: {down_rng * 100}\n'
+ # f'mx up %: {mx_up_rng * 100}\n'
+ # f'mn down %: {mn_down_rng * 100}\n'
+ # )
+ profiler(f'{viz.name}@{chart_name} MINOR curve scale')
+
+ # non-overlay group case
+ else:
+ pi.vb._set_yrange(yrange=yrange)
+ profiler(
+ f'{viz.name}@{chart_name} simple std `._set_yrange()`'
+ )
+
+ profiler(f'<{chart_name}>.interact_graphics_cycle({name})')
+ if not start_datums:
+ return
+
+ # if no overlays, set lone chart's yrange and short circuit
+ if (
+ len(start_datums) < 2
+ or not do_overlay_scaling
+ ):
+ if not major_viz:
+ major_viz = viz
+
+ # print(f'ONLY ranging major: {viz.name}')
+ major_viz.plot.vb._set_yrange(
+ yrange=yrange,
+ )
+ profiler(f'{viz.name}@{chart_name} single curve yrange')
+ return
+
+ # conduct "log-linearized multi-plot" scalings for all groups
+ for (
+ view,
+ (
+ viz,
+ y_start,
+ y_min,
+ y_max,
+ # y_med,
+ read_slc,
+ minor_in_view,
)
+ ) in start_datums.items():
+
+ # we use the ymn/mx verbatim from the major curve
+ # (i.e. the curve measured to have the highest
+ # dispersion in view).
+ if viz is major_viz:
+ ymn = y_min
+ ymx = y_max
+ continue
- # for each overlay on this chart auto-scale the
- # y-range to max-min values.
- if autoscale_overlays:
- overlay = chart.pi_overlay
- if overlay:
- for pi in overlay.overlays:
- pi.vb._set_yrange(
- # TODO: get the range once up front...
- # bars_range=br,
+ else:
+ key = 'open' if viz.is_ohlc else viz.name
+
+ # handle case where major and minor curve(s) have
+ # a disjoint x-domain (one curve is smaller in
+ # length then the other):
+ # - find the highest (time) index common to both
+ # curves.
+ # - slice out the first "intersecting" y-value from
+ # both curves for use in log-linear scaling such
+ # that the intersecting y-value is used as the
+ # reference point for scaling minor curve's
+ # y-range based on the major curves y-range.
+
+ # get intersection point y-values for both curves
+ minor_in_view_start = minor_in_view[0]
+ minor_i_start = minor_in_view_start['index']
+ minor_i_start_t = minor_in_view_start['time']
+
+ major_in_view_start = major_in_view[0]
+ major_i_start = major_in_view_start['index']
+ major_i_start_t = major_in_view_start['time']
+
+ y_major_intersect = major_in_view_start[key]
+ y_minor_intersect = minor_in_view_start[key]
+
+ profiler(f'{viz.name}@{chart_name} intersect detection')
+
+ tdiff = (major_i_start_t - minor_i_start_t)
+ if debug_print:
+ print(
+ f'{major_viz.name} time diff with minor:\n'
+ f'maj:{major_i_start_t}\n'
+ '-\n'
+ f'min:{minor_i_start_t}\n'
+ f'=> {tdiff}\n'
+ )
+
+ # major has later timestamp adjust minor
+ if tdiff > 0:
+ slc = slice_from_time(
+ arr=minor_in_view,
+ start_t=major_i_start_t,
+ stop_t=major_i_start_t,
+ )
+ y_minor_intersect = minor_in_view[slc.start][key]
+ profiler(f'{viz.name}@{chart_name} intersect by t')
+
+ # minor has later timestamp adjust major
+ elif tdiff < 0:
+ slc = slice_from_time(
+ arr=major_in_view,
+ start_t=minor_i_start_t,
+ stop_t=minor_i_start_t,
+ )
+ y_major_intersect = major_in_view[slc.start][key]
+
+ profiler(f'{viz.name}@{chart_name} intersect by t')
+
+ if debug_print:
+ print(
+ f'major_i_start: {major_i_start}\n'
+ f'major_i_start_t: {major_i_start_t}\n'
+ f'minor_i_start: {minor_i_start}\n'
+ f'minor_i_start_t: {minor_i_start_t}\n'
+ )
+
+ # TODO: probably write this as a compile cpython or
+ # numba func.
+
+ # compute directional (up/down) y-range
+ # % swing/dispersion starting at the reference index
+ # determined by the above indexing arithmetic.
+ y_ref = y_major_intersect
+ if not y_ref:
+ log.warning(
+ f'BAD y_major_intersect?!: {y_major_intersect}'
+ )
+ # breakpoint()
+
+ r_up = (major_mx - y_ref) / y_ref
+ r_down = (major_mn - y_ref) / y_ref
+
+ minor_y_start = y_minor_intersect
+ ymn = minor_y_start * (1 + r_down)
+ ymx = minor_y_start * (1 + r_up)
+
+ profiler(f'{viz.name}@{chart_name} SCALE minor')
+
+ # XXX: handle out of view cases where minor curve
+ # now is outside the range of the major curve. in
+ # this case we then re-scale the major curve to
+ # include the range missing now enforced by the
+ # minor (now new major for this *side*). Note this
+ # is side (up/down) specific.
+ new_maj_mxmn: None | tuple[float, float] = None
+ if y_max > ymx:
+
+ y_ref = y_minor_intersect
+ r_up_minor = (y_max - y_ref) / y_ref
+
+ y_maj_ref = y_major_intersect
+ new_maj_ymx = y_maj_ref * (1 + r_up_minor)
+ new_maj_mxmn = (major_mn, new_maj_ymx)
+ if debug_print:
+ print(
+ f'{view.name} OUT OF RANGE:\n'
+ '--------------------\n'
+ f'y_max:{y_max} > ymx:{ymx}\n'
+ )
+ ymx = y_max
+ profiler(f'{viz.name}@{chart_name} re-SCALE major UP')
+
+ if y_min < ymn:
+
+ y_ref = y_minor_intersect
+ r_down_minor = (y_min - y_ref) / y_ref
+
+ y_maj_ref = y_major_intersect
+ new_maj_ymn = y_maj_ref * (1 + r_down_minor)
+ new_maj_mxmn = (
+ new_maj_ymn,
+ new_maj_mxmn[1] if new_maj_mxmn else major_mx
+ )
+ if debug_print:
+ print(
+ f'{view.name} OUT OF RANGE:\n'
+ '--------------------\n'
+ f'y_min:{y_min} < ymn:{ymn}\n'
+ )
+ ymn = y_min
+
+ profiler(
+ f'{viz.name}@{chart_name} re-SCALE major DOWN'
+ )
+
+ if new_maj_mxmn:
+ if debug_print:
+ print(
+ f'RESCALE MAJOR {major_viz.name}:\n'
+ f'previous: {(major_mn, major_mx)}\n'
+ f'new: {new_maj_mxmn}\n'
)
- profiler('autoscaled linked plots')
+ major_mn, major_mx = new_maj_mxmn
+
+ if debug_print:
+ print(
+ f'{view.name} APPLY group mxmn\n'
+ '--------------------\n'
+ f'y_minor_intersect: {y_minor_intersect}\n'
+ f'y_major_intersect: {y_major_intersect}\n'
+ # f'mn_down_rng: {mn_down_rng * 100}\n'
+ # f'mx_up_rng: {mx_up_rng * 100}\n'
+ f'scaled ymn: {ymn}\n'
+ f'scaled ymx: {ymx}\n'
+ f'scaled mx_disp: {mx_disp}\n'
+ )
+
+ if (
+ isinf(ymx)
+ or isinf(ymn)
+ ):
+ log.warning(
+ f'BAD ymx/ymn: {(ymn, ymx)}'
+ )
+ continue
- profiler(f'<{chart_name}>.update_graphics_from_flow({name})')
+ view._set_yrange(
+ yrange=(ymn, ymx),
+ )
+ profiler(f'{viz.name}@{chart_name} log-SCALE minor')
+
+ # NOTE XXX: we have to set the major curve's range once (and
+ # only once) here since we're doing this entire routine
+ # inside of a single render cycle (and apparently calling
+ # `ViewBox.setYRange()` multiple times within one only takes
+ # the first call as serious...) XD
+ if debug_print:
+ print(
+ f'Scale MAJOR {major_viz.name}:\n'
+ f'scaled mx_disp: {mx_disp}\n'
+ f'previous: {(major_mn, major_mx)}\n'
+ f'new: {new_maj_mxmn}\n'
+ )
+ major_viz.plot.vb._set_yrange(
+ yrange=(major_mn, major_mx),
+ )
+ profiler(f'{viz.name}@{chart_name} log-SCALE major')
+ # major_mx, major_mn = new_maj_mxmn
+ # vrs = major_viz.plot.vb.viewRange()
+ # if vrs[1][0] > major_mn:
+ # breakpoint()
+
+ profiler.finish()
diff --git a/piker/ui/_l1.py b/piker/ui/_l1.py
index bfa0551ef..23162c70b 100644
--- a/piker/ui/_l1.py
+++ b/piker/ui/_l1.py
@@ -26,22 +26,24 @@
from ._axes import YAxisLabel
from ._style import hcolor
+from ._pg_overrides import PlotItem
class LevelLabel(YAxisLabel):
- """Y-axis (vertically) oriented, horizontal label that sticks to
+ '''
+ Y-axis (vertically) oriented, horizontal label that sticks to
where it's placed despite chart resizing and supports displaying
multiple fields.
TODO: replace the rectangle-text part with our new ``Label`` type.
- """
- _x_margin = 0
- _y_margin = 0
+ '''
+ _x_br_offset: float = -16
+ _y_txt_h_scaling: float = 2
# adjustment "further away from" anchor point
- _x_offset = 9
+ _x_offset = 0
_y_offset = 0
# fields to be displayed in the label string
@@ -57,12 +59,12 @@ def __init__(
chart,
parent,
- color: str = 'bracket',
+ color: str = 'default_light',
orient_v: str = 'bottom',
- orient_h: str = 'left',
+ orient_h: str = 'right',
- opacity: float = 0,
+ opacity: float = 1,
# makes order line labels offset from their parent axis
# such that they don't collide with the L1/L2 lines/prices
@@ -98,13 +100,15 @@ def __init__(
self._h_shift = {
'left': -1.,
- 'right': 0.
+ 'right': 0.,
}[orient_h]
self.fields = self._fields.copy()
# ensure default format fields are in correct
self.set_fmt_str(self._fmt_str, self.fields)
+ self.setZValue(10)
+
@property
def color(self):
return self._hcolor
@@ -112,7 +116,10 @@ def color(self):
@color.setter
def color(self, color: str) -> None:
self._hcolor = color
- self._pen = self.pen = pg.mkPen(hcolor(color))
+ self._pen = self.pen = pg.mkPen(
+ hcolor(color),
+ width=3,
+ )
def update_on_resize(self, vr, r):
"""Tiis is a ``.sigRangeChanged()`` handler.
@@ -124,15 +131,16 @@ def update_fields(
self,
fields: dict = None,
) -> None:
- """Update the label's text contents **and** position from
+ '''
+ Update the label's text contents **and** position from
a view box coordinate datum.
- """
+ '''
self.fields.update(fields)
level = self.fields['level']
# map "level" to local coords
- abs_xy = self._chart.mapFromView(QPointF(0, level))
+ abs_xy = self._pi.mapFromView(QPointF(0, level))
self.update_label(
abs_xy,
@@ -149,7 +157,7 @@ def update_label(
h, w = self.set_label_str(fields)
if self._adjust_to_l1:
- self._x_offset = self._chart._max_l1_line_len
+ self._x_offset = self._pi.chart_widget._max_l1_line_len
self.setPos(QPointF(
self._h_shift * (w + self._x_offset),
@@ -174,7 +182,8 @@ def set_label_str(
fields: dict,
):
# use space as e3 delim
- self.label_str = self._fmt_str.format(**fields).replace(',', ' ')
+ self.label_str = self._fmt_str.format(
+ **fields).replace(',', ' ')
br = self.boundingRect()
h, w = br.height(), br.width()
@@ -187,14 +196,14 @@ def draw(
self,
p: QtGui.QPainter,
rect: QtCore.QRectF
+
) -> None:
- p.setPen(self._pen)
+ p.setPen(self._pen)
rect = self.rect
if self._orient_v == 'bottom':
lp, rp = rect.topLeft(), rect.topRight()
- # p.drawLine(rect.topLeft(), rect.topRight())
elif self._orient_v == 'top':
lp, rp = rect.bottomLeft(), rect.bottomRight()
@@ -208,6 +217,11 @@ def draw(
])
)
+ p.fillRect(
+ self.rect,
+ self.bg_color,
+ )
+
def highlight(self, pen) -> None:
self._pen = pen
self.update()
@@ -236,43 +250,46 @@ def set_label_str(
# Set a global "max L1 label length" so we can
# look it up on order lines and adjust their
# labels not to overlap with it.
- chart = self._chart
+ chart = self._pi.chart_widget
chart._max_l1_line_len: float = max(
chart._max_l1_line_len,
- w
+ w,
)
return h, w
class L1Labels:
- """Level 1 bid ask labels for dynamic update on price-axis.
+ '''
+ Level 1 bid ask labels for dynamic update on price-axis.
- """
+ '''
def __init__(
self,
- chart: 'ChartPlotWidget', # noqa
+ plotitem: PlotItem,
digits: int = 2,
size_digits: int = 3,
font_size: str = 'small',
) -> None:
- self.chart = chart
+ chart = self.chart = plotitem.chart_widget
- raxis = chart.getAxis('right')
+ raxis = plotitem.getAxis('right')
kwargs = {
- 'chart': chart,
+ 'chart': plotitem,
'parent': raxis,
- 'opacity': 1,
+ 'opacity': .9,
'font_size': font_size,
- 'fg_color': chart.pen_color,
- 'bg_color': chart.view_color,
+ 'fg_color': 'default_light',
+ 'bg_color': chart.view_color, # normally 'papas_special'
}
+ # TODO: add humanized source-asset
+ # info format.
fmt_str = (
- ' {size:.{size_digits}f} x '
- '{level:,.{level_digits}f} '
+ ' {size:.{size_digits}f} u'
+ # '{level:,.{level_digits}f} '
)
fields = {
'level': 0,
@@ -285,12 +302,17 @@ def __init__(
orient_v='bottom',
**kwargs,
)
- bid.set_fmt_str(fmt_str=fmt_str, fields=fields)
+ bid.set_fmt_str(
+ fmt_str='\n' + fmt_str,
+ fields=fields,
+ )
bid.show()
ask = self.ask_label = L1Label(
orient_v='top',
**kwargs,
)
- ask.set_fmt_str(fmt_str=fmt_str, fields=fields)
+ ask.set_fmt_str(
+ fmt_str=fmt_str,
+ fields=fields)
ask.show()
diff --git a/piker/ui/_label.py b/piker/ui/_label.py
index 699a81ae9..247b4cc09 100644
--- a/piker/ui/_label.py
+++ b/piker/ui/_label.py
@@ -233,6 +233,36 @@ def hide(self) -> None:
def delete(self) -> None:
self.vb.scene().removeItem(self.txt)
+ # NOTE: pulled out from ``ChartPlotWidget`` from way way old code.
+ # def _label_h(self, yhigh: float, ylow: float) -> float:
+ # # compute contents label "height" in view terms
+ # # to avoid having data "contents" overlap with them
+ # if self._labels:
+ # label = self._labels[self.name][0]
+
+ # rect = label.itemRect()
+ # tl, br = rect.topLeft(), rect.bottomRight()
+ # vb = self.plotItem.vb
+
+ # try:
+ # # on startup labels might not yet be rendered
+ # top, bottom = (vb.mapToView(tl).y(), vb.mapToView(br).y())
+
+ # # XXX: magic hack, how do we compute exactly?
+ # label_h = (top - bottom) * 0.42
+
+ # except np.linalg.LinAlgError:
+ # label_h = 0
+ # else:
+ # label_h = 0
+
+ # # print(f'label height {self.name}: {label_h}')
+
+ # if label_h > yhigh - ylow:
+ # label_h = 0
+
+ # print(f"bounds (ylow, yhigh): {(ylow, yhigh)}")
+
class FormatLabel(QLabel):
'''
diff --git a/piker/ui/_ohlc.py b/piker/ui/_ohlc.py
index b2ff6e103..104b860cf 100644
--- a/piker/ui/_ohlc.py
+++ b/piker/ui/_ohlc.py
@@ -18,13 +18,8 @@
"""
from __future__ import annotations
-from typing import (
- Optional,
- TYPE_CHECKING,
-)
import numpy as np
-import pyqtgraph as pg
from PyQt5 import (
QtGui,
QtWidgets,
@@ -33,17 +28,14 @@
QLineF,
QRectF,
)
-
+from PyQt5.QtWidgets import QGraphicsItem
from PyQt5.QtGui import QPainterPath
+from ._curve import FlowGraphic
from .._profile import pg_profile_enabled, ms_slower_then
-from ._style import hcolor
from ..log import get_logger
from .._profile import Profiler
-if TYPE_CHECKING:
- from ._chart import LinkedSplits
-
log = get_logger(__name__)
@@ -51,7 +43,8 @@
def bar_from_ohlc_row(
row: np.ndarray,
# 0.5 is no overlap between arms, 1.0 is full overlap
- w: float = 0.43
+ bar_w: float,
+ bar_gap: float = 0.16
) -> tuple[QLineF]:
'''
@@ -59,8 +52,7 @@ def bar_from_ohlc_row(
OHLC "bar" for use in the "last datum" of a series.
'''
- open, high, low, close, index = row[
- ['open', 'high', 'low', 'close', 'index']]
+ open, high, low, close, index = row
# TODO: maybe consider using `QGraphicsLineItem` ??
# gives us a ``.boundingRect()`` on the objects which may make
@@ -68,9 +60,11 @@ def bar_from_ohlc_row(
# history path faster since it's done in C++:
# https://doc.qt.io/qt-5/qgraphicslineitem.html
+ mid: float = (bar_w / 2) + index
+
# high -> low vertical (body) line
if low != high:
- hl = QLineF(index, low, index, high)
+ hl = QLineF(mid, low, mid, high)
else:
# XXX: if we don't do it renders a weird rectangle?
# see below for filtering this later...
@@ -81,45 +75,47 @@ def bar_from_ohlc_row(
# the index's range according to the view mapping coordinates.
# open line
- o = QLineF(index - w, open, index, open)
+ o = QLineF(index + bar_gap, open, mid, open)
# close line
- c = QLineF(index, close, index + w, close)
+ c = QLineF(
+ mid, close,
+ index + bar_w - bar_gap, close,
+ )
return [hl, o, c]
-class BarItems(pg.GraphicsObject):
+class BarItems(FlowGraphic):
'''
"Price range" bars graphics rendered from a OHLC sampled sequence.
'''
+ # XXX: causes this weird jitter bug when click-drag panning
+ # where the path curve will awkwardly flicker back and forth?
+ cache_mode: int = QGraphicsItem.NoCache
+
def __init__(
self,
- linked: LinkedSplits,
- plotitem: 'pg.PlotItem', # noqa
- pen_color: str = 'bracket',
- last_bar_color: str = 'bracket',
-
- name: Optional[str] = None,
+ *args,
+ **kwargs,
) -> None:
- super().__init__()
- self.linked = linked
- # XXX: for the mega-lulz increasing width here increases draw
- # latency... so probably don't do it until we figure that out.
- self._color = pen_color
- self.bars_pen = pg.mkPen(hcolor(pen_color), width=1)
- self.last_bar_pen = pg.mkPen(hcolor(last_bar_color), width=2)
- self._name = name
-
- self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
- self.path = QPainterPath()
- self._last_bar_lines: Optional[tuple[QLineF, ...]] = None
-
- def x_uppx(self) -> int:
- # we expect the downsample curve report this.
- return 0
+
+ super().__init__(*args, **kwargs)
+ self._last_bar_lines: tuple[QLineF, ...] | None = None
+
+ def x_last(self) -> None | float:
+ '''
+ Return the last most x value of the close line segment
+ or if not drawn yet, ``None``.
+
+ '''
+ if self._last_bar_lines:
+ close_arm_line = self._last_bar_lines[-1]
+ return close_arm_line.x2() if close_arm_line else None
+ else:
+ return None
# Qt docs: https://doc.qt.io/qt-5/qgraphicsitem.html#boundingRect
def boundingRect(self):
@@ -201,12 +197,12 @@ def paint(
# as is necesarry for what's in "view". Not sure if this will
# lead to any perf gains other then when zoomed in to less bars
# in view.
- p.setPen(self.last_bar_pen)
+ p.setPen(self.last_step_pen)
if self._last_bar_lines:
p.drawLines(*tuple(filter(bool, self._last_bar_lines)))
profiler('draw last bar')
- p.setPen(self.bars_pen)
+ p.setPen(self._pen)
p.drawPath(self.path)
profiler(f'draw history path: {self.path.capacity()}')
@@ -214,33 +210,40 @@ def draw_last_datum(
self,
path: QPainterPath,
src_data: np.ndarray,
- render_data: np.ndarray,
reset: bool,
array_key: str,
+ index_field: str,
+ ) -> None:
+
+ # relevant fields
fields: list[str] = [
- 'index',
'open',
'high',
'low',
'close',
- ],
-
- ) -> None:
-
- # relevant fields
+ index_field,
+ ]
ohlc = src_data[fields]
# last_row = ohlc[-1:]
# individual values
- last_row = i, o, h, l, last = ohlc[-1]
+ last_row = o, h, l, last, i = ohlc[-1]
# times = src_data['time']
# if times[-1] - times[-2]:
# breakpoint()
+ index = src_data[index_field]
+ step_size = index[-1] - index[-2]
+
# generate new lines objects for updatable "current bar"
- self._last_bar_lines = bar_from_ohlc_row(last_row)
+ bg: float = 0.16 * step_size
+ self._last_bar_lines = bar_from_ohlc_row(
+ last_row,
+ bar_w=step_size,
+ bar_gap=bg,
+ )
# assert i == graphics.start_index - 1
# assert i == last_index
@@ -255,10 +258,16 @@ def draw_last_datum(
if l != h: # noqa
if body is None:
- body = self._last_bar_lines[0] = QLineF(i, l, i, h)
+ body = self._last_bar_lines[0] = QLineF(
+ i + bg, l,
+ i + step_size - bg, h,
+ )
else:
# update body
- body.setLine(i, l, i, h)
+ body.setLine(
+ body.x1(), l,
+ body.x2(), h,
+ )
# XXX: pretty sure this is causing an issue where the
# bar has a large upward move right before the next
@@ -269,5 +278,4 @@ def draw_last_datum(
# date / from some previous sample. It's weird though
# because i've seen it do this to bars i - 3 back?
- # return ohlc['time'], ohlc['close']
- return ohlc['index'], ohlc['close']
+ return ohlc[index_field], ohlc['close']
diff --git a/piker/ui/_overlay.py b/piker/ui/_overlay.py
index ac15a9dc2..ad11c5e49 100644
--- a/piker/ui/_overlay.py
+++ b/piker/ui/_overlay.py
@@ -22,7 +22,6 @@
from functools import partial
from typing import (
Callable,
- Optional,
)
from pyqtgraph.graphicsItems.AxisItem import AxisItem
@@ -92,11 +91,11 @@ class ComposedGridLayout:
'''
def __init__(
self,
- item: PlotItem,
+ pi: PlotItem,
) -> None:
- self.items: list[PlotItem] = []
+ self.pitems: list[PlotItem] = []
self._pi2axes: dict[ # TODO: use a ``bidict`` here?
int,
dict[str, AxisItem],
@@ -116,6 +115,7 @@ def __init__(
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
+ layout.setMinimumWidth(0)
if name in ('top', 'bottom'):
orient = Qt.Vertical
@@ -125,7 +125,11 @@ def __init__(
layout.setOrientation(orient)
- self.insert_plotitem(0, item)
+ self.insert_plotitem(
+ 0,
+ pi,
+ remove_axes=False,
+ )
# insert surrounding linear layouts into the parent pi's layout
# such that additional axes can be appended arbitrarily without
@@ -135,13 +139,16 @@ def __init__(
# TODO: do we need this?
# axis should have been removed during insert above
index = _axes_layout_indices[name]
- axis = item.layout.itemAt(*index)
+ axis = pi.layout.itemAt(*index)
if axis and axis.isVisible():
assert linlayout.itemAt(0) is axis
- # item.layout.removeItem(axis)
- item.layout.addItem(linlayout, *index)
- layout = item.layout.itemAt(*index)
+ # XXX: see comment in ``.insert_plotitem()``...
+ # our `PlotItem.removeAxis()` does this internally.
+ # pi.layout.removeItem(axis)
+
+ pi.layout.addItem(linlayout, *index)
+ layout = pi.layout.itemAt(*index)
assert layout is linlayout
def _register_item(
@@ -157,14 +164,16 @@ def _register_item(
self._pi2axes.setdefault(name, {})[index] = axis
# enter plot into list for index tracking
- self.items.insert(index, plotitem)
+ self.pitems.insert(index, plotitem)
def insert_plotitem(
self,
index: int,
plotitem: PlotItem,
- ) -> (int, int):
+ remove_axes: bool = False,
+
+ ) -> tuple[int, list[AxisItem]]:
'''
Place item at index by inserting all axes into the grid
at list-order appropriate position.
@@ -175,11 +184,14 @@ def insert_plotitem(
'`.insert_plotitem()` only supports an index >= 0'
)
+ inserted_axes: list[AxisItem] = []
+
# add plot's axes in sequence to the embedded linear layouts
# for each "side" thus avoiding graphics collisions.
for name, axis_info in plotitem.axes.copy().items():
linlayout, axes = self.sides[name]
axis = axis_info['item']
+ inserted_axes.append(axis)
if axis in axes:
# TODO: re-order using ``.pop()`` ?
@@ -192,22 +204,16 @@ def insert_plotitem(
if (
not axis.isVisible()
- # XXX: we never skip moving the axes for the *first*
+ # XXX: we never skip moving the axes for the *root*
# plotitem inserted (even if not shown) since we need to
# move all the hidden axes into linear sub-layouts for
# that "central" plot in the overlay. Also if we don't
# do it there's weird geomoetry calc offsets that make
# view coords slightly off somehow .. smh
- and not len(self.items) == 0
+ and not len(self.pitems) == 0
):
continue
- # XXX: Remove old axis? No, turns out we don't need this?
- # DON'T unlink it since we the original ``ViewBox``
- # to still drive it B)
- # popped = plotitem.removeAxis(name, unlink=False)
- # assert axis is popped
-
# invert insert index for layouts which are
# not-left-to-right, top-to-bottom insert oriented
insert_index = index
@@ -220,7 +226,17 @@ def insert_plotitem(
self._register_item(index, plotitem)
- return index
+ if remove_axes:
+ for name, axis_info in plotitem.axes.copy().items():
+ axis = axis_info['item']
+ # XXX: Remove old axis?
+ # No, turns out we don't need this?
+ # DON'T UNLINK IT since we need the original ``ViewBox`` to
+ # still drive it with events/handlers B)
+ popped = plotitem.removeAxis(name, unlink=False)
+ assert axis is popped
+
+ return (index, inserted_axes)
def append_plotitem(
self,
@@ -234,20 +250,20 @@ def append_plotitem(
'''
# for left and bottom axes we have to first remove
# items and re-insert to maintain a list-order.
- return self.insert_plotitem(len(self.items), item)
+ return self.insert_plotitem(len(self.pitems), item)
def get_axis(
self,
plot: PlotItem,
name: str,
- ) -> Optional[AxisItem]:
+ ) -> AxisItem | None:
'''
Retrieve the named axis for overlayed ``plot`` or ``None``
if axis for that name is not shown.
'''
- index = self.items.index(plot)
+ index = self.pitems.index(plot)
named = self._pi2axes[name]
return named.get(index)
@@ -306,14 +322,17 @@ def __init__(
# events/signals.
root_plotitem.vb.setZValue(10)
- self.overlays: list[PlotItem] = []
self.layout = ComposedGridLayout(root_plotitem)
self._relays: dict[str, Signal] = {}
+ @property
+ def overlays(self) -> list[PlotItem]:
+ return self.layout.pitems
+
def add_plotitem(
self,
plotitem: PlotItem,
- index: Optional[int] = None,
+ index: int | None = None,
# event/signal names which will be broadcasted to all added
# (relayee) ``PlotItem``s (eg. ``ViewBox.mouseDragEvent``).
@@ -324,11 +343,9 @@ def add_plotitem(
# (0, 1), # link both
link_axes: tuple[int] = (),
- ) -> None:
+ ) -> tuple[int, list[AxisItem]]:
- index = index or len(self.overlays)
root = self.root_plotitem
- self.overlays.insert(index, plotitem)
vb: ViewBox = plotitem.vb
# TODO: some sane way to allow menu event broadcast XD
@@ -361,8 +378,8 @@ def add_plotitem(
if not sub_handlers:
src_handler = getattr(
- root.vb,
- ev_name,
+ root.vb,
+ ev_name,
)
def broadcast(
@@ -370,7 +387,7 @@ def broadcast(
# TODO: drop this viewbox specific input and
# allow a predicate to be passed in by user.
- axis: 'Optional[int]' = None,
+ axis: int | None = None,
*,
@@ -476,7 +493,10 @@ def broadcast(
# ``PlotItem`` dynamically.
# append-compose into the layout all axes from this plot
- self.layout.insert_plotitem(index, plotitem)
+ if index is None:
+ insert_index, axes = self.layout.append_plotitem(plotitem)
+ else:
+ insert_index, axes = self.layout.insert_plotitem(index, plotitem)
plotitem.setGeometry(root.vb.sceneBoundingRect())
@@ -496,6 +516,11 @@ def size_to_viewbox(vb: 'ViewBox'):
vb.setZValue(100)
+ return (
+ index,
+ axes,
+ )
+
def get_axis(
self,
plot: PlotItem,
diff --git a/piker/ui/_pg_overrides.py b/piker/ui/_pg_overrides.py
index a961e5675..53ed5405c 100644
--- a/piker/ui/_pg_overrides.py
+++ b/piker/ui/_pg_overrides.py
@@ -26,6 +26,8 @@
import pyqtgraph as pg
+from ._axes import Axis
+
def invertQTransform(tr):
"""Return a QTransform that is the inverse of *tr*.
@@ -52,6 +54,10 @@ def _do_overrides() -> None:
pg.functions.invertQTransform = invertQTransform
pg.PlotItem = PlotItem
+ # enable "QPainterPathPrivate for faster arrayToQPath" from
+ # https://github.com/pyqtgraph/pyqtgraph/pull/2324
+ pg.setConfigOption('enableExperimental', True)
+
# NOTE: the below customized type contains all our changes on a method
# by method basis as per the diff:
@@ -62,6 +68,20 @@ class PlotItem(pg.PlotItem):
Overrides for the core plot object mostly pertaining to overlayed
multi-view management as it relates to multi-axis managment.
+ This object is the combination of a ``ViewBox`` and multiple
+ ``AxisItem``s and so far we've added additional functionality and
+ APIs for:
+ - removal of axes
+
+ ---
+
+ From ``pyqtgraph`` super type docs:
+ - Manage placement of ViewBox, AxisItems, and LabelItems
+ - Create and manage a list of PlotDataItems displayed inside the
+ ViewBox
+ - Implement a context menu with commonly used display and analysis
+ options
+
'''
def __init__(
self,
@@ -71,7 +91,7 @@ def __init__(
title=None,
viewBox=None,
axisItems=None,
- default_axes=['left', 'bottom'],
+ default_axes=['right', 'bottom'],
enableMenu=True,
**kargs
):
@@ -86,6 +106,8 @@ def __init__(
enableMenu=enableMenu,
kargs=kargs,
)
+ self.name = name
+ self.chart_widget = None
# self.setAxisItems(
# axisItems,
# default_axes=default_axes,
@@ -108,7 +130,7 @@ def removeAxis(
If the ``unlink: bool`` is set to ``False`` then the axis will
stay linked to its view and will only be removed from the
- layoutonly be removed from the layout.
+ layout.
If no axis with ``name: str`` is found then this is a noop.
@@ -122,7 +144,10 @@ def removeAxis(
axis = entry['item']
self.layout.removeItem(axis)
- axis.scene().removeItem(axis)
+ scn = axis.scene()
+ if scn:
+ scn.removeItem(axis)
+
if unlink:
axis.unlinkFromView()
@@ -209,7 +234,12 @@ def setAxisItems(
# adding this is without it there's some weird
# ``ViewBox`` geometry bug.. where a gap for the
# 'bottom' axis is somehow left in?
- axis = pg.AxisItem(orientation=name, parent=self)
+ # axis = pg.AxisItem(orientation=name, parent=self)
+ axis = Axis(
+ self,
+ orientation=name,
+ parent=self,
+ )
axis.linkToView(self.vb)
diff --git a/piker/ui/_position.py b/piker/ui/_position.py
index 985841619..9baca8ee6 100644
--- a/piker/ui/_position.py
+++ b/piker/ui/_position.py
@@ -41,7 +41,11 @@
pp_tight_and_right, # wanna keep it straight in the long run
gpath_pin,
)
-from ..calc import humanize, pnl, puterize
+from ..calc import (
+ humanize,
+ pnl,
+ puterize,
+)
from ..clearing._allocate import Allocator
from ..pp import Position
from ..data._normalize import iterticks
@@ -80,9 +84,9 @@ async def update_pnl_from_feed(
'''
global _pnl_tasks
- pp = order_mode.current_pp
- live = pp.live_pp
- key = live.symbol.front_fqsn()
+ pp: PositionTracker = order_mode.current_pp
+ live: Position = pp.live_pp
+ key: str = live.symbol.front_fqsn()
log.info(f'Starting pnl display for {pp.alloc.account}')
@@ -101,11 +105,22 @@ async def update_pnl_from_feed(
async with flume.stream.subscribe() as bstream:
# last_tick = time.time()
async for quotes in bstream:
-
# now = time.time()
# period = now - last_tick
for sym, quote in quotes.items():
+ # print(f'{key} PnL: sym:{sym}')
+
+ # TODO: uggggh we probably want a better state
+                # management than this since we want to enable
+ # updating whatever the current symbol is in
+ # real-time right?
+ if sym != key:
+ continue
+
+ # watch out for wrong quote msg-data if you muck
+ # with backend feed subs code..
+ # assert sym == quote['fqsn']
for tick in iterticks(quote, types):
# print(f'{1/period} Hz')
@@ -119,13 +134,17 @@ async def update_pnl_from_feed(
else:
# compute and display pnl status
- order_mode.pane.pnl_label.format(
- pnl=copysign(1, size) * pnl(
- # live.ppu,
- order_mode.current_pp.live_pp.ppu,
- tick['price'],
- ),
- )
+ pnl_val = (
+ copysign(1, size)
+ *
+ pnl(
+ # live.ppu,
+ order_mode.current_pp.live_pp.ppu,
+ tick['price'],
+ )
+ )
+ # print(f'formatting PNL {sym} => {pnl_val}')
+ order_mode.pane.pnl_label.format(pnl=pnl_val)
# last_tick = time.time()
finally:
diff --git a/piker/ui/_render.py b/piker/ui/_render.py
new file mode 100644
index 000000000..dc162834c
--- /dev/null
+++ b/piker/ui/_render.py
@@ -0,0 +1,320 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+'''
+High level streaming graphics primitives.
+
+This is an intermediate layer which associates real-time low latency
+graphics primitives with underlying stream/flow related data structures
+for fast incremental update.
+
+'''
+from __future__ import annotations
+from typing import (
+ TYPE_CHECKING,
+)
+
+import msgspec
+import numpy as np
+import pyqtgraph as pg
+from PyQt5.QtGui import QPainterPath
+
+from ..data._formatters import (
+ IncrementalFormatter,
+)
+from ..data._pathops import (
+ xy_downsample,
+)
+from ..log import get_logger
+from .._profile import (
+ Profiler,
+)
+
+if TYPE_CHECKING:
+ from ._dataviz import Viz
+
+
+log = get_logger(__name__)
+
+
+class Renderer(msgspec.Struct):
+
+ viz: Viz
+ fmtr: IncrementalFormatter
+
+ # output graphics rendering, the main object
+ # processed in ``QGraphicsObject.paint()``
+ path: QPainterPath | None = None
+ fast_path: QPainterPath | None = None
+
+ # downsampling state
+ _last_uppx: float = 0
+ _in_ds: bool = False
+
+ def draw_path(
+ self,
+ x: np.ndarray,
+ y: np.ndarray,
+ connect: str | np.ndarray = 'all',
+ path: QPainterPath | None = None,
+ redraw: bool = False,
+
+ ) -> QPainterPath:
+
+ path_was_none = path is None
+
+ if redraw and path:
+ path.clear()
+
+ # TODO: avoid this?
+ if self.fast_path:
+ self.fast_path.clear()
+
+ path = pg.functions.arrayToQPath(
+ x,
+ y,
+ connect=connect,
+ finiteCheck=False,
+
+ # reserve mem allocs see:
+ # - https://doc.qt.io/qt-5/qpainterpath.html#reserve
+ # - https://doc.qt.io/qt-5/qpainterpath.html#capacity
+ # - https://doc.qt.io/qt-5/qpainterpath.html#clear
+ # XXX: right now this is based on ad-hoc checks on a
+ # hidpi 3840x2160 4k monitor but we should optimize for
+ # the target display(s) on the sys.
+ # if no_path_yet:
+ # graphics.path.reserve(int(500e3))
+ # path=path, # path re-use / reserving
+ )
+
+ # avoid mem allocs if possible
+ if path_was_none:
+ path.reserve(path.capacity())
+
+ return path
+
+ def render(
+ self,
+
+ new_read,
+ array_key: str,
+ profiler: Profiler,
+ uppx: float = 1,
+
+ # redraw and ds flags
+ should_redraw: bool = False,
+ new_sample_rate: bool = False,
+ should_ds: bool = False,
+ showing_src_data: bool = True,
+
+ do_append: bool = True,
+ use_fpath: bool = True,
+
+ # only render datums "in view" of the ``ChartView``
+ use_vr: bool = True,
+
+ ) -> tuple[QPainterPath, bool]:
+ '''
+ Render the current graphics path(s)
+
+ There are (at least) 3 stages from source data to graphics data:
+ - a data transform (which can be stored in additional shm)
+ - a graphics transform which converts discrete basis data to
+ a `float`-basis view-coords graphics basis. (eg. ``ohlc_flatten()``,
+ ``step_path_arrays_from_1d()``, etc.)
+
+ - blah blah blah (from notes)
+
+ '''
+ # TODO: can the renderer just call ``Viz.read()`` directly?
+ # unpack latest source data read
+ fmtr = self.fmtr
+
+ (
+ _,
+ _,
+ array,
+ ivl,
+ ivr,
+ in_view,
+ ) = new_read
+
+ # xy-path data transform: convert source data to a format
+ # able to be passed to a `QPainterPath` rendering routine.
+ fmt_out = fmtr.format_to_1d(
+ new_read,
+ array_key,
+ profiler,
+
+ slice_to_inview=use_vr,
+ )
+
+ # no history in view case
+ if not fmt_out:
+ # XXX: this might be why the profiler only has exits?
+ return
+
+ (
+ x_1d,
+ y_1d,
+ connect,
+ prepend_length,
+ append_length,
+ view_changed,
+ # append_tres,
+
+ ) = fmt_out
+
+ # redraw conditions
+ if (
+ prepend_length > 0
+ or new_sample_rate
+ or view_changed
+
+ # NOTE: comment this to try and make "append paths"
+ # work below..
+ or append_length > 0
+ ):
+ should_redraw = True
+
+ path: QPainterPath = self.path
+ fast_path: QPainterPath = self.fast_path
+ reset: bool = False
+
+ self.viz.yrange = None
+
+ # redraw the entire source data if we have either of:
+ # - no prior path graphic rendered or,
+ # - we always intend to re-render the data only in view
+ if (
+ path is None
+ or should_redraw
+ ):
+ # print(f"{self.viz.name} -> REDRAWING BRUH")
+ if new_sample_rate and showing_src_data:
+ log.info(f'DE-downsampling -> {array_key}')
+ self._in_ds = False
+
+ elif should_ds and uppx > 1:
+
+ ds_out = xy_downsample(
+ x_1d,
+ y_1d,
+ uppx,
+ )
+ if ds_out is not None:
+ x_1d, y_1d, ymn, ymx = ds_out
+ self.viz.yrange = ymn, ymx
+ # print(f'{self.viz.name} post ds: ymn, ymx: {ymn},{ymx}')
+
+ reset = True
+ profiler(f'FULL PATH downsample redraw={should_ds}')
+ self._in_ds = True
+
+ path = self.draw_path(
+ x=x_1d,
+ y=y_1d,
+ connect=connect,
+ path=path,
+ redraw=True,
+ )
+
+ profiler(
+ 'generated fresh path. '
+ f'(should_redraw: {should_redraw} '
+ f'should_ds: {should_ds} new_sample_rate: {new_sample_rate})'
+ )
+
+ # TODO: get this piecewise prepend working - right now it's
+ # giving heck on vwap...
+ # elif prepend_length:
+
+ # prepend_path = pg.functions.arrayToQPath(
+ # x[0:prepend_length],
+ # y[0:prepend_length],
+ # connect='all'
+ # )
+
+ # # swap prepend path in "front"
+ # old_path = graphics.path
+ # graphics.path = prepend_path
+ # # graphics.path.moveTo(new_x[0], new_y[0])
+ # graphics.path.connectPath(old_path)
+
+ elif (
+ append_length > 0
+ and do_append
+ ):
+ profiler(f'sliced append path {append_length}')
+ # (
+ # x_1d,
+ # y_1d,
+ # connect,
+ # ) = append_tres
+
+ profiler(
+ f'diffed array input, append_length={append_length}'
+ )
+
+ # if should_ds and uppx > 1:
+ # new_x, new_y = xy_downsample(
+ # new_x,
+ # new_y,
+ # uppx,
+ # )
+ # profiler(f'fast path downsample redraw={should_ds}')
+
+ append_path = self.draw_path(
+ x=x_1d,
+ y=y_1d,
+ connect=connect,
+ path=fast_path,
+ )
+ profiler('generated append qpath')
+
+ if use_fpath:
+ # an attempt at trying to make append-updates faster..
+ if fast_path is None:
+ fast_path = append_path
+ # fast_path.reserve(int(6e3))
+ else:
+ # print(
+ # f'{self.viz.name}: FAST PATH\n'
+ # f"append_path br: {append_path.boundingRect()}\n"
+ # f"path size: {size}\n"
+ # f"append_path len: {append_path.length()}\n"
+ # f"fast_path len: {fast_path.length()}\n"
+ # )
+
+ fast_path.connectPath(append_path)
+ size = fast_path.capacity()
+ profiler(f'connected fast path w size: {size}')
+
+ # graphics.path.moveTo(new_x[0], new_y[0])
+ # path.connectPath(append_path)
+
+ # XXX: lol this causes a hang..
+ # graphics.path = graphics.path.simplified()
+ else:
+ size = path.capacity()
+ profiler(f'connected history path w size: {size}')
+ path.connectPath(append_path)
+
+ self.path = path
+ self.fast_path = fast_path
+
+ return self.path, reset
diff --git a/piker/ui/_search.py b/piker/ui/_search.py
index 6c7c6fd8a..ef0cca80d 100644
--- a/piker/ui/_search.py
+++ b/piker/ui/_search.py
@@ -144,15 +144,29 @@ def __init__(
self._font_size: int = 0 # pixels
self._init: bool = False
- async def on_pressed(self, idx: QModelIndex) -> None:
+ async def on_pressed(
+ self,
+ idx: QModelIndex,
+ ) -> None:
'''
Mouse pressed on view handler.
'''
search = self.parent()
- await search.chart_current_item()
+
+ await search.chart_current_item(
+ clear_to_cache=True,
+ )
+
+ # XXX: this causes Qt to hang and segfault..lovely
+ # self.show_cache_entries(
+ # only=True,
+ # keep_current_item_selected=True,
+ # )
+
search.focus()
+
def set_font_size(self, size: int = 18):
# print(size)
if size < 0:
@@ -288,7 +302,7 @@ def select_from_idx(
def select_first(self) -> QStandardItem:
'''
Select the first depth >= 2 entry from the completer tree and
- return it's item.
+ return its item.
'''
# ensure we're **not** selecting the first level parent node and
@@ -416,12 +430,26 @@ def set_section_entries(
section: str,
values: Sequence[str],
clear_all: bool = False,
+ reverse: bool = False,
) -> None:
'''
Set result-rows for depth = 1 tree section ``section``.
'''
+ if (
+ values
+ and not isinstance(values[0], str)
+ ):
+ flattened: list[str] = []
+ for val in values:
+ flattened.extend(val)
+
+ values = flattened
+
+ if reverse:
+ values = reversed(values)
+
model = self.model()
if clear_all:
# XXX: rewrite the model from scratch if caller requests it
@@ -598,22 +626,50 @@ def focus(self) -> None:
self.show()
self.bar.focus()
- def show_only_cache_entries(self) -> None:
+ def show_cache_entries(
+ self,
+ only: bool = False,
+ keep_current_item_selected: bool = False,
+
+ ) -> None:
'''
Clear the search results view and show only cached (aka recently
loaded with active data) feeds in the results section.
'''
godw = self.godwidget
+
+ # first entry in the cache is the current symbol(s)
+ fqsns = set()
+ for multi_fqsns in list(godw._chart_cache):
+ for fqsn in set(multi_fqsns):
+ fqsns.add(fqsn)
+
+ if keep_current_item_selected:
+ sel = self.view.selectionModel()
+ cidx = sel.currentIndex()
+
self.view.set_section_entries(
'cache',
- list(reversed(godw._chart_cache)),
+ list(fqsns),
# remove all other completion results except for cache
- clear_all=True,
+ clear_all=only,
+ reverse=True,
)
- def get_current_item(self) -> Optional[tuple[str, str]]:
- '''Return the current completer tree selection as
+ if (
+ keep_current_item_selected
+ and cidx.isValid()
+ ):
+ # set current selection back to what it was before filling out
+ # the view results.
+ self.view.select_from_idx(cidx)
+ else:
+ self.view.select_first()
+
+ def get_current_item(self) -> tuple[QModelIndex, str, str] | None:
+ '''
+ Return the current completer tree selection as
a tuple ``(parent: str, child: str)`` if valid, else ``None``.
'''
@@ -639,7 +695,11 @@ def get_current_item(self) -> Optional[tuple[str, str]]:
if provider == 'cache':
symbol, _, provider = symbol.rpartition('.')
- return provider, symbol
+ return (
+ cidx,
+ provider,
+ symbol,
+ )
else:
return None
@@ -660,15 +720,16 @@ async def chart_current_item(
if value is None:
return None
- provider, symbol = value
+ cidx, provider, symbol = value
godw = self.godwidget
- log.info(f'Requesting symbol: {symbol}.{provider}')
+ fqsn = f'{symbol}.{provider}'
+ log.info(f'Requesting symbol: {fqsn}')
+ # assert provider in symbol
await godw.load_symbols(
- provider,
- [symbol],
- 'info',
+ fqsns=[fqsn],
+ loglevel='info',
)
# fully qualified symbol name (SNS i guess is what we're
@@ -682,13 +743,15 @@ async def chart_current_item(
# Re-order the symbol cache on the chart to display in
# LIFO order. this is normally only done internally by
# the chart on new symbols being loaded into memory
- godw.set_chart_symbol(
- fqsn, (
+ godw.set_chart_symbols(
+ (fqsn,), (
godw.hist_linked,
godw.rt_linked,
)
)
- self.show_only_cache_entries()
+ self.show_cache_entries(
+ only=True,
+ )
self.bar.focus()
return fqsn
@@ -757,9 +820,10 @@ async def pack_matches(
with trio.CancelScope() as cs:
task_status.started(cs)
# ensure ^ status is updated
- results = await search(pattern)
+ results = list(await search(pattern))
- if provider != 'cache': # XXX: don't cache the cache results xD
+ # XXX: don't cache the cache results xD
+ if provider != 'cache':
matches[(provider, pattern)] = results
# print(f'results from {provider}: {results}')
@@ -806,7 +870,7 @@ async def fill_results(
has_results: defaultdict[str, set[str]] = defaultdict(set)
# show cached feed list at startup
- search.show_only_cache_entries()
+ search.show_cache_entries()
search.on_resize()
while True:
@@ -860,8 +924,9 @@ async def fill_results(
# it hasn't already been searched with the current
# input pattern (in which case just look up the old
# results).
- if (period >= pause) and (
- provider not in already_has_results
+ if (
+ period >= pause
+ and provider not in already_has_results
):
# TODO: it may make more sense TO NOT search the
@@ -869,7 +934,9 @@ async def fill_results(
# cpu-bound.
if provider != 'cache':
view.clear_section(
- provider, status_field='-> searchin..')
+ provider,
+ status_field='-> searchin..',
+ )
await n.start(
pack_matches,
@@ -890,11 +957,20 @@ async def fill_results(
# re-searching it's ``dict`` since it's easier
# but it also causes it to be slower then cached
# results from other providers on occasion.
- if results and provider != 'cache':
- view.set_section_entries(
- section=provider,
- values=results,
- )
+ if (
+ results
+ ):
+ if provider != 'cache':
+ view.set_section_entries(
+ section=provider,
+ values=results,
+ )
+ else:
+ # if provider == 'cache':
+ # for the cache just show what we got
+ # that matches
+ search.show_cache_entries()
+
else:
view.clear_section(provider)
@@ -916,11 +992,10 @@ async def handle_keyboard_input(
global _search_active, _search_enabled
# startup
- bar = searchbar
- search = searchbar.parent()
- godwidget = search.godwidget
- view = bar.view
- view.set_font_size(bar.dpi_font.px_size)
+ searchw = searchbar.parent()
+ godwidget = searchw.godwidget
+ view = searchbar.view
+ view.set_font_size(searchbar.dpi_font.px_size)
send, recv = trio.open_memory_channel(616)
async with trio.open_nursery() as n:
@@ -931,13 +1006,13 @@ async def handle_keyboard_input(
n.start_soon(
partial(
fill_results,
- search,
+ searchw,
recv,
)
)
- bar.focus()
- search.show_only_cache_entries()
+ searchbar.focus()
+ searchw.show_cache_entries()
await trio.sleep(0)
async for kbmsg in recv_chan:
@@ -949,20 +1024,29 @@ async def handle_keyboard_input(
if mods == Qt.ControlModifier:
ctl = True
- if key in (Qt.Key_Enter, Qt.Key_Return):
+ if key in (
+ Qt.Key_Enter,
+ Qt.Key_Return
+ ):
_search_enabled = False
- await search.chart_current_item(clear_to_cache=True)
- search.show_only_cache_entries()
+ await searchw.chart_current_item(clear_to_cache=True)
+
+ # XXX: causes hang and segfault..
+ # searchw.show_cache_entries(
+ # only=True,
+ # keep_current_item_selected=True,
+ # )
+
view.show_matches()
- search.focus()
-
- elif not ctl and not bar.text():
- # if nothing in search text show the cache
- view.set_section_entries(
- 'cache',
- list(reversed(godwidget._chart_cache)),
- clear_all=True,
- )
+ searchw.focus()
+
+ elif (
+ not ctl
+ and not searchbar.text()
+ ):
+ # TODO: really should factor this somewhere..bc
+ # we're doin it in another spot as well..
+ searchw.show_cache_entries(only=True)
continue
# cancel and close
@@ -971,7 +1055,7 @@ async def handle_keyboard_input(
Qt.Key_Space, # i feel like this is the "native" one
Qt.Key_Alt,
}:
- bar.unfocus()
+ searchbar.unfocus()
# kill the search and focus back on main chart
if godwidget:
@@ -979,41 +1063,54 @@ async def handle_keyboard_input(
continue
- if ctl and key in {
- Qt.Key_L,
- }:
+ if (
+ ctl
+ and key in {Qt.Key_L}
+ ):
# like url (link) highlight in a web browser
- bar.focus()
+ searchbar.focus()
# selection navigation controls
- elif ctl and key in {
- Qt.Key_D,
- }:
+ elif (
+ ctl
+ and key in {Qt.Key_D}
+ ):
view.next_section(direction='down')
_search_enabled = False
- elif ctl and key in {
- Qt.Key_U,
- }:
+ elif (
+ ctl
+ and key in {Qt.Key_U}
+ ):
view.next_section(direction='up')
_search_enabled = False
# selection navigation controls
- elif (ctl and key in {
-
- Qt.Key_K,
- Qt.Key_J,
-
- }) or key in {
-
- Qt.Key_Up,
- Qt.Key_Down,
- }:
+ elif (
+ ctl and (
+ key in {
+ Qt.Key_K,
+ Qt.Key_J,
+ }
+
+ or key in {
+ Qt.Key_Up,
+ Qt.Key_Down,
+ }
+ )
+ ):
_search_enabled = False
- if key in {Qt.Key_K, Qt.Key_Up}:
+
+ if key in {
+ Qt.Key_K,
+ Qt.Key_Up
+ }:
item = view.select_previous()
- elif key in {Qt.Key_J, Qt.Key_Down}:
+ elif key in {
+ Qt.Key_J,
+ Qt.Key_Down,
+ }:
item = view.select_next()
if item:
@@ -1022,26 +1119,39 @@ async def handle_keyboard_input(
# if we're in the cache section and thus the next
# selection is a cache item, switch and show it
# immediately since it should be very fast.
- if parent_item and parent_item.text() == 'cache':
- await search.chart_current_item(clear_to_cache=False)
+ if (
+ parent_item
+ and parent_item.text() == 'cache'
+ ):
+ await searchw.chart_current_item(clear_to_cache=False)
+ # ACTUAL SEARCH BLOCK #
+ # where we fuzzy complete and fill out sections.
elif not ctl:
# relay to completer task
_search_enabled = True
- send.send_nowait(search.bar.text())
+ send.send_nowait(searchw.bar.text())
_search_active.set()
async def search_simple_dict(
text: str,
source: dict,
+
) -> dict[str, Any]:
+ tokens = []
+ for key in source:
+ if not isinstance(key, str):
+ tokens.extend(key)
+ else:
+ tokens.append(key)
+
# search routine can be specified as a function such
# as in the case of the current app's local symbol cache
matches = fuzzy.extractBests(
text,
- source.keys(),
+ tokens,
score_cutoff=90,
)
diff --git a/piker/ui/_style.py b/piker/ui/_style.py
index 227310933..52ac753a6 100644
--- a/piker/ui/_style.py
+++ b/piker/ui/_style.py
@@ -240,12 +240,12 @@ def hcolor(name: str) -> str:
'gunmetal': '#91A3B0',
'battleship': '#848482',
+ # default ohlc-bars/curve gray
+ 'bracket': '#666666', # like the logo
+
# bluish
'charcoal': '#36454F',
- # default bars
- 'bracket': '#666666', # like the logo
-
# work well for filled polygons which want a 'bracket' feel
# going light to dark
'davies': '#555555',
diff --git a/piker/ui/order_mode.py b/piker/ui/order_mode.py
index 7e4ae0663..e339739e8 100644
--- a/piker/ui/order_mode.py
+++ b/piker/ui/order_mode.py
@@ -88,7 +88,7 @@ class Dialog(Struct):
# TODO: use ``pydantic.UUID4`` field
uuid: str
order: Order
- symbol: Symbol
+ symbol: str
lines: list[LevelLine]
last_status_close: Callable = lambda: None
msgs: dict[str, dict] = {}
@@ -349,7 +349,7 @@ def submit_order(
'''
if not order:
- staged = self._staged_order
+ staged: Order = self._staged_order
# apply order fields for ems
oid = str(uuid.uuid4())
order = staged.copy()
@@ -379,7 +379,7 @@ def submit_order(
dialog = Dialog(
uuid=order.oid,
order=order,
- symbol=order.symbol,
+ symbol=order.symbol, # XXX: always a str?
lines=lines,
last_status_close=self.multistatus.open_status(
f'submitting {order.exec_mode}-{order.action}',
@@ -494,7 +494,7 @@ def on_fill(
uuid: str,
price: float,
- arrow_index: float,
+ time_s: float,
pointing: Optional[str] = None,
@@ -513,22 +513,32 @@ def on_fill(
'''
dialog = self.dialogs[uuid]
lines = dialog.lines
+ chart = self.chart
+
# XXX: seems to fail on certain types of races?
# assert len(lines) == 2
if lines:
- flume: Flume = self.feed.flumes[self.chart.linked.symbol.fqsn]
+ flume: Flume = self.feed.flumes[chart.linked.symbol.fqsn]
_, _, ratio = flume.get_ds_info()
- for i, chart in [
- (arrow_index, self.chart),
- (flume.izero_hist
- +
- round((arrow_index - flume.izero_rt)/ratio),
- self.hist_chart)
+
+ for chart, shm in [
+ (self.chart, flume.rt_shm),
+ (self.hist_chart, flume.hist_shm),
]:
+ viz = chart.get_viz(chart.name)
+ index_field = viz.index_field
+ arr = shm.array
+
+ # TODO: borked for int index based..
+ index = flume.get_index(time_s, arr)
+
+ # get absolute index for arrow placement
+ arrow_index = arr[index_field][index]
+
self.arrows.add(
chart.plotItem,
uuid,
- i,
+ arrow_index,
price,
pointing=pointing,
color=lines[0].color
@@ -693,7 +703,6 @@ async def open_order_mode(
# symbol id
symbol = chart.linked.symbol
- symkey = symbol.front_fqsn()
# map of per-provider account keys to position tracker instances
trackers: dict[str, PositionTracker] = {}
@@ -854,7 +863,7 @@ async def open_order_mode(
# the expected symbol key in its positions msg.
for (broker, acctid), msgs in position_msgs.items():
for msg in msgs:
- log.info(f'Loading pp for {symkey}:\n{pformat(msg)}')
+ log.info(f'Loading pp for {acctid}@{broker}:\n{pformat(msg)}')
await process_trade_msg(
mode,
book,
@@ -930,7 +939,6 @@ async def process_trade_msg(
) -> tuple[Dialog, Status]:
- get_index = mode.chart.get_index
fmsg = pformat(msg)
log.debug(f'Received order msg:\n{fmsg}')
name = msg['name']
@@ -965,6 +973,9 @@ async def process_trade_msg(
oid = msg.oid
dialog: Dialog = mode.dialogs.get(oid)
+ if dialog:
+ fqsn = dialog.symbol
+
match msg:
case Status(
resp='dark_open' | 'open',
@@ -1034,10 +1045,11 @@ async def process_trade_msg(
# should only be one "fill" for an alert
# add a triangle and remove the level line
req = Order(**req)
+ tm = time.time()
mode.on_fill(
oid,
price=req.price,
- arrow_index=get_index(time.time()),
+ time_s=tm,
)
mode.lines.remove_line(uuid=oid)
msg.req = req
@@ -1065,26 +1077,25 @@ async def process_trade_msg(
action = order.action
details = msg.brokerd_msg
- # TODO: some kinda progress system
+ # TODO: put the actual exchange timestamp?
+ # TODO: some kinda progress system?
+
+ # NOTE: currently the ``kraken`` openOrders sub
+ # doesn't deliver their engine timestamp as part of
+                    # its schema, so this value is **not** from them
+ # (see our backend code). We should probably either
+ # include all provider-engine timestamps in the
+ # summary 'closed' status msg and/or figure out
+ # a way to indicate what is a `brokerd` stamp versus
+ # a true backend one? This will require finagling
+ # with how each backend tracks/summarizes time
+ # stamps for the downstream API.
+ tm = details['broker_time']
mode.on_fill(
oid,
price=details['price'],
+ time_s=tm,
pointing='up' if action == 'buy' else 'down',
-
- # TODO: put the actual exchange timestamp
- arrow_index=get_index(
- # TODO: note currently the ``kraken`` openOrders sub
- # doesn't deliver their engine timestamp as part of
- # it's schema, so this value is **not** from them
- # (see our backend code). We should probably either
- # include all provider-engine timestamps in the
- # summary 'closed' status msg and/or figure out
- # a way to indicate what is a `brokerd` stamp versus
- # a true backend one? This will require finagling
- # with how each backend tracks/summarizes time
- # stamps for the downstream API.
- details['broker_time']
- ),
)
# TODO: append these fill events to the position's clear
diff --git a/piker/ui/qt/__init__.py b/piker/ui/qt/__init__.py
deleted file mode 100644
index 8513b317d..000000000
--- a/piker/ui/qt/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""
-Super hawt Qt UI components
-"""
diff --git a/piker/ui/qt/stackof_candle.py b/piker/ui/qt/stackof_candle.py
deleted file mode 100644
index 0bcd37e40..000000000
--- a/piker/ui/qt/stackof_candle.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import sys
-
-from PySide2.QtCharts import QtCharts
-from PySide2.QtWidgets import QApplication, QMainWindow
-from PySide2.QtCore import Qt, QPointF
-from PySide2 import QtGui
-import qdarkstyle
-
-data = ((1, 7380, 7520, 7380, 7510, 7324),
- (2, 7520, 7580, 7410, 7440, 7372),
- (3, 7440, 7650, 7310, 7520, 7434),
- (4, 7450, 7640, 7450, 7550, 7480),
- (5, 7510, 7590, 7460, 7490, 7502),
- (6, 7500, 7590, 7480, 7560, 7512),
- (7, 7560, 7830, 7540, 7800, 7584))
-
-
-app = QApplication([])
-# set dark stylesheet
-# import pdb; pdb.set_trace()
-app.setStyleSheet(qdarkstyle.load_stylesheet_pyside())
-
-series = QtCharts.QCandlestickSeries()
-series.setDecreasingColor(Qt.darkRed)
-series.setIncreasingColor(Qt.darkGreen)
-
-ma5 = QtCharts.QLineSeries() # 5-days average data line
-tm = [] # stores str type data
-
-# in a loop, series and ma5 append corresponding data
-for num, o, h, l, c, m in data:
- candle = QtCharts.QCandlestickSet(o, h, l, c)
- series.append(candle)
- ma5.append(QPointF(num, m))
- tm.append(str(num))
-
-pen = candle.pen()
-# import pdb; pdb.set_trace()
-
-chart = QtCharts.QChart()
-
-# import pdb; pdb.set_trace()
-series.setBodyOutlineVisible(False)
-series.setCapsVisible(False)
-# brush = QtGui.QBrush()
-# brush.setColor(Qt.green)
-# series.setBrush(brush)
-chart.addSeries(series) # candle
-chart.addSeries(ma5) # ma5 line
-
-chart.setAnimationOptions(QtCharts.QChart.SeriesAnimations)
-chart.createDefaultAxes()
-chart.legend().hide()
-
-chart.axisX(series).setCategories(tm)
-chart.axisX(ma5).setVisible(False)
-
-view = QtCharts.QChartView(chart)
-view.chart().setTheme(QtCharts.QChart.ChartTheme.ChartThemeDark)
-view.setRubberBand(QtCharts.QChartView.HorizontalRubberBand)
-# chartview.chart().setTheme(QtCharts.QChart.ChartTheme.ChartThemeBlueCerulean)
-
-ui = QMainWindow()
-# ui.setGeometry(50, 50, 500, 300)
-ui.setCentralWidget(view)
-ui.show()
-sys.exit(app.exec_())
diff --git a/setup.py b/setup.py
index 44d360fa6..fc84909f9 100755
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,7 @@
# 'cryptofeed',
# brokers
- 'asks==2.4.8',
+ 'asks',
'ib_insync',
# numerics
diff --git a/snippets/qt_screen_info.py b/snippets/qt_screen_info.py
index 238367c05..93e6f441a 100644
--- a/snippets/qt_screen_info.py
+++ b/snippets/qt_screen_info.py
@@ -1,22 +1,26 @@
"""
-Resource list for mucking with DPIs on multiple screens:
-
-- https://stackoverflow.com/questions/42141354/convert-pixel-size-to-point-size-for-fonts-on-multiple-platforms
-- https://stackoverflow.com/questions/25761556/qt5-font-rendering-different-on-various-platforms/25929628#25929628
-- https://doc.qt.io/qt-5/highdpi.html
-- https://stackoverflow.com/questions/20464814/changing-dpi-scaling-size-of-display-make-qt-applications-font-size-get-rendere
-- https://stackoverflow.com/a/20465247
-- https://doc.qt.io/archives/qt-4.8/qfontmetrics.html#width
-- https://forum.qt.io/topic/54136/how-do-i-get-the-qscreen-my-widget-is-on-qapplication-desktop-screen-returns-a-qwidget-and-qobject_cast-qscreen-returns-null/3
-- https://forum.qt.io/topic/43625/point-sizes-are-they-reliable/4
-- https://stackoverflow.com/questions/16561879/what-is-the-difference-between-logicaldpix-and-physicaldpix-in-qt
-- https://doc.qt.io/qt-5/qguiapplication.html#screenAt
-
+Helper script for querying display DPI and related screen metrics.
"""
-from pyqtgraph import QtGui
+# Resource list for mucking with DPIs on multiple screens:
+# https://stackoverflow.com/questions/42141354/convert-pixel-size-to-point-size-for-fonts-on-multiple-platforms
+# https://stackoverflow.com/questions/25761556/qt5-font-rendering-different-on-various-platforms/25929628#25929628
+# https://doc.qt.io/qt-5/highdpi.html
+# https://stackoverflow.com/questions/20464814/changing-dpi-scaling-size-of-display-make-qt-applications-font-size-get-rendere
+# https://stackoverflow.com/a/20465247
+# https://doc.qt.io/archives/qt-4.8/qfontmetrics.html#width
+# https://forum.qt.io/topic/54136/how-do-i-get-the-qscreen-my-widget-is-on-qapplication-desktop-screen-returns-a-qwidget-and-qobject_cast-qscreen-returns-null/3
+# https://forum.qt.io/topic/43625/point-sizes-are-they-reliable/4
+# https://stackoverflow.com/questions/16561879/what-is-the-difference-between-logicaldpix-and-physicaldpix-in-qt
+# https://doc.qt.io/qt-5/qguiapplication.html#screenAt
+
+from pyqtgraph import (
+ QtGui,
+ QtWidgets,
+)
from PyQt5.QtCore import (
- Qt, QCoreApplication
+ Qt,
+ QCoreApplication,
)
# Proper high DPI scaling is available in Qt >= 5.6.0. This attibute
@@ -28,55 +32,47 @@
QCoreApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True)
-app = QtGui.QApplication([])
-window = QtGui.QMainWindow()
-main_widget = QtGui.QWidget()
+app = QtWidgets.QApplication([])
+window = QtWidgets.QMainWindow()
+main_widget = QtWidgets.QWidget()
window.setCentralWidget(main_widget)
window.show()
+# TODO: move widget through multiple displays and auto-detect the pixel
+# ratio? (probably will require calls to i3ipc on linux)..
pxr = main_widget.devicePixelRatioF()
-# screen_num = app.desktop().screenNumber()
+# TODO: how to detect the list of displays from the API?
# screen = app.screens()[screen_num]
-screen = app.screenAt(main_widget.geometry().center())
-
-name = screen.name()
-size = screen.size()
-geo = screen.availableGeometry()
-phydpi = screen.physicalDotsPerInch()
-logdpi = screen.logicalDotsPerInch()
-print(
- # f'screen number: {screen_num}\n',
- f'screen name: {name}\n'
- f'screen size: {size}\n'
- f'screen geometry: {geo}\n\n'
- f'devicePixelRationF(): {pxr}\n'
- f'physical dpi: {phydpi}\n'
- f'logical dpi: {logdpi}\n'
-)
+def ppscreeninfo(screen: 'QScreen') -> None:
+ # screen_num = app.desktop().screenNumber()
+ name = screen.name()
+ size = screen.size()
+ geo = screen.availableGeometry()
+ phydpi = screen.physicalDotsPerInch()
+ logdpi = screen.logicalDotsPerInch()
+ rr = screen.refreshRate()
+
+ print(
+ # f'screen number: {screen_num}\n',
+ f'screen: {name}\n'
+ f' size: {size}\n'
+ f' geometry: {geo}\n'
+ f' logical dpi: {logdpi}\n'
+ f' devicePixelRationF(): {pxr}\n'
+ f' physical dpi: {phydpi}\n'
+ f' refresh rate: {rr}\n'
+ )
+
+ print('-'*50 + '\n')
-print('-'*50)
+screen = app.screenAt(main_widget.geometry().center())
+ppscreeninfo(screen)
screen = app.primaryScreen()
-
-name = screen.name()
-size = screen.size()
-geo = screen.availableGeometry()
-phydpi = screen.physicalDotsPerInch()
-logdpi = screen.logicalDotsPerInch()
-
-print(
- # f'screen number: {screen_num}\n',
- f'screen name: {name}\n'
- f'screen size: {size}\n'
- f'screen geometry: {geo}\n\n'
- f'devicePixelRationF(): {pxr}\n'
- f'physical dpi: {phydpi}\n'
- f'logical dpi: {logdpi}\n'
-)
-
+ppscreeninfo(screen)
# app-wide font
font = QtGui.QFont("Hack")