Remove unnecessary arguments to some pikerd functions; fix container init error

by switching from log reading to querying the es health endpoint; fix the
install on CI and add more logging.
explicit_write_pps_on_exit
Guillermo Rodriguez 2023-02-21 13:58:04 -03:00
parent 4122c482ba
commit acc6249d88
GPG Key ID: EC3AB66D5D83B392
7 changed files with 77 additions and 46 deletions

View File

@@ -42,13 +42,16 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v3

+      - name: Build DB container
+        run: docker build -t piker:elastic dockering/elastic
+
       - name: Setup python
         uses: actions/setup-python@v3
         with:
           python-version: '3.10'

       - name: Install dependencies
-        run: pip install -U . -r requirements-test.txt -r requirements.txt --upgrade-strategy eager
+        run: pip install -U .[es] -r requirements-test.txt -r requirements.txt --upgrade-strategy eager

       - name: Test suite
         run: pytest tests -rs
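
The install step now pulls an `es` extra. A minimal sketch of what such an optional-dependency group could look like in `setup.py` (the package names and pins here are assumptions for illustration, not taken from the repo):

# setup.py (sketch) -- an `es` extra that `pip install -U .[es]` would resolve
from setuptools import setup, find_packages

setup(
    name='piker',
    packages=find_packages(),
    extras_require={
        'es': [
            # hypothetical deps; the real extra lives in the repo's setup.py
            'elasticsearch',
            'asks',
        ],
    },
)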

View File

@@ -316,8 +316,6 @@ async def open_piker_runtime(
 @acm
 async def open_pikerd(
-    tsdb: bool,
-    es: bool,
     loglevel: str | None = None,

@@ -326,6 +324,10 @@ async def open_pikerd(
     debug_mode: bool = False,
     registry_addr: None | tuple[str, int] = None,

+    # db init flags
+    tsdb: bool = False,
+    es: bool = False,
+
 ) -> Services:
     '''
     Start a root piker daemon who's lifetime extends indefinitely until

@@ -383,7 +385,7 @@ async def open_pikerd(
                 start_ahab,
                 'elasticsearch',
                 start_elasticsearch,
-                start_timeout=30.0
+                start_timeout=240.0 # high cause ci
             )
         )

@@ -436,10 +438,10 @@ async def maybe_open_runtime(
 @acm
 async def maybe_open_pikerd(
-    tsdb: bool = False,
-    es: bool = False,
     loglevel: Optional[str] = None,
     registry_addr: None | tuple = None,
+    tsdb: bool = False,
+    es: bool = False,

     **kwargs,

@@ -486,11 +488,11 @@ async def maybe_open_pikerd(
         # presume pikerd role since no daemon could be found at
         # configured address
         async with open_pikerd(
-            tsdb=tsdb,
-            es=es,
             loglevel=loglevel,
             debug_mode=kwargs.get('debug_mode', False),
             registry_addr=registry_addr,
+            tsdb=tsdb,
+            es=es,

         ) as service_manager:
             # in the case where we're starting up the
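
With these changes `tsdb` and `es` become optional keyword flags rather than required positional arguments. A usage sketch under that assumption (the import path follows the test's `piker._daemon` module; everything else is illustrative):

# sketch: start a root pikerd with both db containers enabled
import trio
from piker._daemon import open_pikerd

async def main():
    async with open_pikerd(
        loglevel='info',
        # db init flags now default to False and are passed by keyword
        tsdb=True,
        es=True,
    ) as services:
        # the daemon (and any docker supervisors) live for this block
        await trio.sleep(1)

trio.run(main)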

View File

@@ -137,7 +137,13 @@ class Container:
        seen_so_far = self.seen_so_far

        while True:
-           logs = self.cntr.logs()
+           try:
+               logs = self.cntr.logs()
+           except docker.errors.NotFound:
+               return False
+           except docker.errors.APIError:
+               return False
+
            entries = logs.decode().split('\n')
            for entry in entries:

@@ -159,9 +165,6 @@ class Container:
                    level = record['level']

                except json.JSONDecodeError:
-                   # if 'Error' in entry:
-                   #     raise RuntimeError(entry)
-                   # raise
                    msg = entry
                    level = 'error'

@@ -175,11 +178,11 @@ class Container:
                if level == 'fatal':
                    raise ApplicationLogError(msg)

-               if patt_matcher(msg):
+               if await patt_matcher(msg):
                    return True

            # do a checkpoint so we don't block if cancelled B)
-           await trio.sleep(0.01)
+           await trio.sleep(0.1)

        return False

@@ -321,10 +324,13 @@ async def open_ahabd(
        with trio.move_on_after(start_timeout):
            found = await cntr.process_logs_until(start_lambda)

-       if not found and cntr not in client.containers.list():
-           raise RuntimeError(
-               'Failed to start `marketstore` check logs deats'
-           )
+       if not found and dcntr not in client.containers.list():
+           for entry in cntr.seen_so_far:
+               log.info(entry)
+
+           raise RuntimeError(
+               f'Failed to start {dcntr.id} check logs deats'
+           )

        await ctx.started((
            cntr.cntr.id,
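
`open_ahabd` bounds the startup wait with `trio.move_on_after(start_timeout)` and treats a timeout plus a missing container as a failed start, dumping the buffered log lines first. A generic sketch of that timeout-bounded polling pattern (the `wait_until` helper is hypothetical, not part of the diff):

# sketch: await an async predicate until it passes or a timeout expires
import trio
from typing import Awaitable, Callable

async def wait_until(
    predicate: Callable[[], Awaitable[bool]],
    timeout: float,
    poll_period: float = 0.1,
) -> bool:
    with trio.move_on_after(timeout):
        while True:
            if await predicate():
                return True
            # checkpoint so the timeout cancellation can land here
            await trio.sleep(poll_period)
    # cancelled by the timeout scope -> report "not found"
    return False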

View File

@@ -47,7 +47,7 @@ from piker.log import (
     get_console_log
 )

-from elasticsearch import Elasticsearch
+import asks

 log = get_logger(__name__)

@@ -88,15 +88,33 @@ def start_elasticsearch(
     dcntr: DockerContainer = client.containers.run(
         'piker:elastic',
+        name='piker-elastic',
         network='host',
         detach=True,
-        remove=True,
+        remove=True
     )

+    async def start_matcher(msg: str):
+        try:
+            health = (await asks.get(
+                f'http://localhost:19200/_cat/health',
+                params={'format': 'json'}
+            )).json()
+
+        except OSError:
+            log.error('couldnt reach elastic container')
+            return False
+
+        log.info(health)
+        return health[0]['status'] == 'green'
+
+    async def stop_matcher(msg: str):
+        return msg == 'closed'
+
     return (
         dcntr,
         {},
         # expected startup and stop msgs
-        lambda start_msg: start_msg == "started",
-        lambda stop_msg: stop_msg == "closed",
+        start_matcher,
+        stop_matcher,
     )
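
The new `start_matcher` ignores the log line it is handed and instead polls elasticsearch's cluster health API until it reports green. A standalone sketch of the same readiness check, mirroring the diff's use of `asks` under trio (the port and endpoint are the ones used above; the retry count is arbitrary):

# sketch: poll /_cat/health until the cluster is green or we give up
import asks
import trio

async def wait_for_es_green(
    port: int = 19200,
    tries: int = 60,
) -> bool:
    for _ in range(tries):
        try:
            resp = await asks.get(
                f'http://localhost:{port}/_cat/health',
                params={'format': 'json'},
            )
            if resp.json()[0]['status'] == 'green':
                return True
        except OSError:
            # container not accepting connections yet
            pass
        await trio.sleep(1)
    return False

if __name__ == '__main__':
    print(trio.run(wait_for_es_green))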

View File

@@ -189,13 +189,20 @@ def start_marketstore(
         init=True,
         # remove=True,
     )

+    async def start_matcher(msg: str):
+        return "launching tcp listener for all services..." in msg
+
+    async def stop_matcher(msg: str):
+        return "exiting..." in msg
+
     return (
         dcntr,
         _config,
         # expected startup and stop msgs
-        lambda start_msg: "launching tcp listener for all services..." in start_msg,
-        lambda stop_msg: "exiting..." in stop_msg,
+        start_matcher,
+        stop_matcher,
     )
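
Marketstore keeps matching on container log output; the lambdas are only lifted into named async functions so `process_logs_until` can `await` every matcher uniformly. A small hypothetical helper (not in the diff) that would build such substring matchers:

# hypothetical factory for async substring matchers like the ones above
from typing import Awaitable, Callable

def substr_matcher(needle: str) -> Callable[[str], Awaitable[bool]]:
    async def matcher(msg: str) -> bool:
        return needle in msg
    return matcher

start_matcher = substr_matcher("launching tcp listener for all services...")
stop_matcher = substr_matcher("exiting...")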

View File

@@ -119,8 +119,6 @@ def cse_symbols():
 @acm
 async def _open_test_pikerd(
-    tsdb: bool = False,
-    es: bool = False,
     reg_addr: tuple[str, int] | None = None,

     **kwargs,

@@ -145,8 +143,6 @@ async def _open_test_pikerd(
     # try:
     async with (
         maybe_open_pikerd(
-            tsdb=tsdb,
-            es=es,
             registry_addr=reg_addr,
             **kwargs,
         ) as service_manager,

View File

@@ -6,6 +6,8 @@ from typing import AsyncContextManager
 from piker._daemon import Services
 from piker.log import get_logger

+from elasticsearch import Elasticsearch
+

 # def test_marketstore( open_test_pikerd: AsyncContextManager):

@@ -16,30 +18,27 @@ Verify marketstore starts and closes correctly
 def test_elasticsearch(
     open_test_pikerd: AsyncContextManager,
 ):
     '''
     Verify elasticsearch starts and closes correctly

     '''
-    # log = get_logger(__name__)
+    log = get_logger(__name__)
     # log.info('#################### Starting test ####################')

     async def main():
         port = 19200
-        daemon_addr = ('127.0.0.1', port)

-        async with (
-            open_test_pikerd(
-                tsdb=False,
-                es=True,
-                reg_addr=daemon_addr,
-            ) as (s, i, pikerd_portal, services),
-            # pikerd(),
-        ):
-            assert pikerd_portal.channel.raddr == daemon_addr
+        async with open_test_pikerd(
+            loglevel='info',
+            es=True
+        ) as (s, i, pikerd_portal, services):
+
+            es = Elasticsearch(hosts=[f'http://localhost:{port}'])
+            assert es.info()['version']['number'] == '7.17.4'

     trio.run(main)