Task ID: task_e_682e9fd04e8883238b9cc504320730c7
Built scubaduck @ file:///workspace/scubaduck
Prepared 33 packages in 2.93s
Installed 33 packages in 121ms
+ blinker==1.9.0
+ certifi==2025.4.26
+ charset-normalizer==3.4.2
+ click==8.2.0
+ duckdb==1.2.2
+ execnet==2.1.1
+ flask==3.1.1
+ greenlet==3.2.2
+ idna==3.10
+ iniconfig==2.1.0
+ itsdangerous==2.2.0
+ jinja2==3.1.6
+ markupsafe==3.0.2
+ nodeenv==1.9.1
+ packaging==25.0
+ playwright==1.52.0
+ pluggy==1.6.0
+ pyee==13.0.0
+ pyright==1.1.400
+ pytest==8.3.5
+ pytest-base-url==2.1.0
+ pytest-playwright==0.7.0
+ pytest-xdist==3.6.1
+ python-dateutil==2.9.0.post0
+ python-slugify==8.0.4
+ requests==2.32.3
+ ruff==0.11.10
+ scubaduck==0.1.0 (from file:///workspace/scubaduck)
+ six==1.17.0
+ text-unidecode==1.3
+ typing-extensions==4.13.2
+ urllib3==2.4.0
+ werkzeug==3.1.3
++ source .venv/bin/activate
+++ '[' -z '' ']'
+++ '[' -n x ']'
+++ SCRIPT_PATH=.venv/bin/activate
+++ '[' .venv/bin/activate = /tmp/965MQ7-setup_script.sh ']'
+++ deactivate nondestructive
+++ unset -f pydoc
+++ '[' -z '' ']'
+++ '[' -z '' ']'
+++ hash -r
+++ '[' -z '' ']'
+++ unset VIRTUAL_ENV
+++ unset VIRTUAL_ENV_PROMPT
+++ '[' '!' nondestructive = nondestructive ']'
+++ VIRTUAL_ENV=/workspace/scubaduck/.venv
+++ '[' linux-gnu = cygwin ']'
+++ '[' linux-gnu = msys ']'
+++ export VIRTUAL_ENV
+++ '[' -z '' ']'
+++ unset SCRIPT_PATH
+++ _OLD_VIRTUAL_PATH=/root/.cargo/bin:/root/.rbenv/shims:/root/.rbenv/bin:/root/.rbenv/shims:/root/.local/share/swiftly/bin:/root/.bun/bin:/root/.nvm/versions/node/v22.15.1/bin:/root/.pyenv/shims:/root/.pyenv/bin:/usr/local/go/bin:/root/go/bin:/root/.rbenv/bin:/root/.rbenv/shims:/root/.bun/bin:/root/.local/bin:/root/.pyenv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+++ PATH=/workspace/scubaduck/.venv/bin:/root/.cargo/bin:/root/.rbenv/shims:/root/.rbenv/bin:/root/.rbenv/shims:/root/.local/share/swiftly/bin:/root/.bun/bin:/root/.nvm/versions/node/v22.15.1/bin:/root/.pyenv/shims:/root/.pyenv/bin:/usr/local/go/bin:/root/go/bin:/root/.rbenv/bin:/root/.rbenv/shims:/root/.bun/bin:/root/.local/bin:/root/.pyenv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+++ export PATH
+++ '[' xscubaduck '!=' x ']'
+++ VIRTUAL_ENV_PROMPT='(scubaduck) '
+++ export VIRTUAL_ENV_PROMPT
+++ '[' -z '' ']'
+++ '[' -z '' ']'
+++ _OLD_VIRTUAL_PS1=
+++ PS1='(scubaduck) '
+++ export PS1
+++ alias pydoc
+++ true
+++ hash -r
++ playwright install chromium
Downloading Chromium 136.0.7103.25 (playwright build v1169) from https://cdn.playwright.dev/dbazure/download/playwright/builds/chromium/1169/chromium-linux.zip
167.7 MiB [] 100% 0.0s
Chromium 136.0.7103.25 (playwright build v1169) downloaded to /root/.cache/ms-playwright/chromium-1169
Downloading FFMPEG playwright build v1011 from https://cdn.playwright.dev/dbazure/download/playwright/builds/ffmpeg/1011/ffmpeg-linux.zip
2.3 MiB [] 100% 0.0s
FFMPEG playwright build v1011 downloaded to /root/.cache/ms-playwright/ffmpeg-1011
Downloading Chromium Headless Shell 136.0.7103.25 (playwright build v1169) from https://cdn.playwright.dev/dbazure/download/playwright/builds/chromium/1169/chromium-headless-shell-linux.zip
101.4 MiB [] 100% 0.0s
Chromium Headless Shell 136.0.7103.25 (playwright build v1169) downloaded to /root/.cache/ms-playwright/chromium_headless_shell-1169
++ echo 'source .venv/bin/activate'
I'm checking out the AGENTS.md file in the repository root.
bash: .venv/bin/activate: No such file or directory
root@29cb09005028:/workspace# ls
scubaduck
root@29cb09005028:/workspace# cd scubaduck
root@29cb09005028:/workspace/scubaduck# ls
AGENTS.md codex_setup.sh scubaduck tests
README.md pyproject.toml stubs uv.lock
root@29cb09005028:/workspace/scubaduck# cat AGENTS.md
- Use "source .venv/bin/activate" to activate the virtual environment before
running any commands
- ONLY use non-interactive commands like cat, sed, apply_patch to do edits.
Do NOT use interactive editors.
- Do NOT attempt to install packages. Only the packages specified in
pyproject.toml are available. You cannot add new packages. If you
desperately want another package, make a note of it in the final PR
description.
- Use conventional commits to format PR title
- There are no nested AGENTS.md files, this is the only agents file
- When using Playwright, ONLY use chromium browser. This browser is already
installed.
- Use "ruff check" to check lint, "ruff format" to autoformat files and
"pyright" to typecheck.
- When writing the PR description, include the original prompt supplied by the
user.
- When you add functionality to the server, add server tests to
tests/test_server.py. When you add features to the frontend, add frontend
tests to tests/test_web.py
The AGENTS file pins down the workspace conventions: activate the virtual environment with "source .venv/bin/activate" before running anything, lint and autoformat with "ruff check" and "ruff format", and typecheck with "pyright". Taken together, the verification loop would look something like the sketch below.
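A minimal sketch of that loop, assuming the venv already exists (the ruff and pyright commands come straight from AGENTS.md; the exact pytest invocation is my assumption, based on the tests/ directory and the pytest packages listed in the install output above):

    source .venv/bin/activate    # activate the project venv first (required by AGENTS.md)
    ruff check                   # lint
    ruff format                  # autoformat
    pyright                      # typecheck
    pytest tests/                # run test_server.py and test_web.py (Playwright tests use chromium only)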
root@29cb09005028:/workspace/scubaduck# ls -R
.:
AGENTS.md codex_setup.sh scubaduck tests
README.md pyproject.toml stubs uv.lock
./scubaduck:
__init__.py sample.csv server.py static
./scubaduck/static:
index.html js
./scubaduck/static/js:
chip_input.js timeseries_chart.js
./stubs:
duckdb
./stubs/duckdb:
__init__.pyi
./tests:
conftest.py test_server.py test_web.py
Next, let me look at how the existing tests are structured.
root@29cb09005028:/workspace/scubaduck# sed -n '1,160p' tests/test_server.py
from __future__ import annotations
import json
from pathlib import Path
from typing import cast
import duckdb
from scubaduck import server
import pytest
def test_basic_query() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-02 00:00:00",
"order_by": "timestamp",
"order_dir": "ASC",
"limit": 10,
"columns": ["timestamp", "event", "value", "user"],
"filters": [],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert data
rows = data["rows"]
# We expect first three rows (until 2024-01-02 00:00:00)
assert len(rows) == 3
assert rows[0][1] == "login"
assert rows[1][1] == "logout"
def test_js_served() -> None:
app = server.app
client = app.test_client()
rv = client.get("/js/chip_input.js")
assert rv.status_code == 200
assert b"initChipInput" in rv.data
def test_filter_multi_token() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-02 03:00:00",
"order_by": "timestamp",
"limit": 10,
"columns": ["timestamp", "event", "value", "user"],
"filters": [{"column": "user", "op": "=", "value": ["alice", "charlie"]}
],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert data
rows = data["rows"]
# Should only return rows for alice and charlie
assert len(rows) == 3
assert rows[0][3] == "alice"
assert rows[-1][3] == "charlie"
def test_empty_filter_is_noop() -> None:
app = server.app
client = app.test_client()
base_payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"limit": 100,
"columns": ["timestamp", "event", "value", "user"],
}
no_filter = {**base_payload, "filters": []}
empty_filter = {
**base_payload,
"filters": [{"column": "user", "op": "=", "value": None}],
}
rv1 = client.post(
"/api/query", data=json.dumps(no_filter), content_type="application/json
"
)
rv2 = client.post(
"/api/query", data=json.dumps(empty_filter), content_type="application/j
son"
)
rows1 = rv1.get_json()["rows"]
rows2 = rv2.get_json()["rows"]
assert rows1 == rows2
def test_select_columns() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"order_by": "timestamp",
"limit": 10,
"columns": ["timestamp", "user"],
"filters": [],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert data
rows = data["rows"]
assert len(rows[0]) == 2
assert rows[0][1] == "alice"
def test_string_filter_ops() -> None:
app = server.app
client = app.test_client()
base = {
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"order_by": "timestamp",
"limit": 100,
"columns": ["timestamp", "event", "value", "user"],
}
contains = {
**base,
"filters": [{"column": "user", "op": "contains", "value": "ali"}],
}
rv = client.post(
"/api/query", data=json.dumps(contains), content_type="application/json"
)
rows = rv.get_json()["rows"]
assert all("ali" in r[3] for r in rows)
regex = {
**base,
"filters": [{"column": "user", "op": "~", "value": "^a.*"}],
}
rv = client.post(
"/api/query", data=json.dumps(regex), content_type="application/json"
)
rows = rv.get_json()["rows"]
assert all(r[3].startswith("a") for r in rows)
assert len(rows) == 2
not_empty = {**base, "filters": [{"column": "user", "op": "!empty"}]}
rv = client.post(
"/api/query", data=json.dumps(not_empty), content_type="application/json
"
)
assert len(rv.get_json()["rows"]) == 4
def _make_payload() -> dict[str, object]:
return {
"table": "events",
"start": "2024-01-01 00:00:00",
root@29cb09005028:/workspace/scubaduck# sed -n '161,320p' tests/test_server.py
"end": "2024-01-02 00:00:00",
"order_by": "timestamp",
"order_dir": "ASC",
"limit": 10,
"columns": ["timestamp", "event", "value", "user"],
"filters": [],
}
def test_database_types(tmp_path: Path) -> None:
csv_file = tmp_path / "events.csv"
csv_file.write_text(Path("scubaduck/sample.csv").read_text())
sqlite_file = tmp_path / "events.sqlite"
import sqlite3
conn = sqlite3.connect(sqlite_file)
conn.execute(
"CREATE TABLE events (timestamp TEXT, event TEXT, value INTEGER, user TE
XT)"
)
with open(csv_file) as f:
next(f)
for line in f:
ts, ev, val, user = line.strip().split(",")
conn.execute(
"INSERT INTO events VALUES (?, ?, ?, ?)", (ts, ev, int(val), use
r)
)
conn.commit()
    conn.close()  # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
duckdb_file = tmp_path / "events.duckdb"
con = duckdb.connect(duckdb_file)
con.execute(
f"CREATE TABLE events AS SELECT * FROM read_csv_auto('{csv_file.as_posix
()}')"
)
    con.close()  # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
for db in (csv_file, sqlite_file, duckdb_file):
app = server.create_app(db)
client = app.test_client()
payload = _make_payload()
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/js
on"
)
rows = rv.get_json()["rows"]
assert len(rows) == 3
def test_sqlite_longvarchar(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
sqlite_file = tmp_path / "events.sqlite"
import sqlite3
conn = sqlite3.connect(sqlite_file)
conn.execute(
"CREATE TABLE events (timestamp TEXT, url LONGVARCHAR, title VARCHAR(10)
)"
)
conn.execute(
"INSERT INTO events VALUES ('2024-01-01 00:00:00', 'https://a.com', 'Hom
e')"
)
conn.commit()
    conn.close()  # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
from typing import Any
real_connect = duckdb.connect
def failing_connect(*args: Any, **kwargs: Any) -> Any:
real = real_connect(*args, **kwargs)
class Wrapper:
def __init__(self, con: duckdb.DuckDBPyConnection) -> None:
self.con = con
self._failed = False
def execute(self, sql: str, *a: Any, **kw: Any):
if not self._failed and sql == "LOAD sqlite":
self._failed = True
raise RuntimeError("fail")
return self.con.execute(sql, *a, **kw)
def __getattr__(self, name: str) -> object:
return getattr(self.con, name)
return Wrapper(real)
monkeypatch.setattr(server.duckdb, "connect", failing_connect)
app = server.create_app(sqlite_file)
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-01 01:00:00",
"order_by": "timestamp",
"columns": ["timestamp", "url", "title"],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert data["rows"][0][1] == "https://a.com"
def test_sqlite_bigint(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
sqlite_file = tmp_path / "big.sqlite"
import sqlite3
conn = sqlite3.connect(sqlite_file)
conn.execute("CREATE TABLE events (timestamp TEXT, value INTEGER)")
big_value = 13385262862605259
conn.execute(
"INSERT INTO events VALUES ('2024-01-01 00:00:00', ?)",
(big_value,),
)
conn.commit()
    conn.close()  # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
from typing import Any
real_connect = duckdb.connect
def failing_connect(*args: Any, **kwargs: Any) -> Any:
real = real_connect(*args, **kwargs)
class Wrapper:
def __init__(self, con: duckdb.DuckDBPyConnection) -> None:
self.con = con
self._failed = False
def execute(self, sql: str, *a: Any, **kw: Any):
if not self._failed and sql == "LOAD sqlite":
self._failed = True
raise RuntimeError("fail")
return self.con.execute(sql, *a, **kw)
def __getattr__(self, name: str) -> object:
return getattr(self.con, name)
return Wrapper(real)
monkeypatch.setattr(server.duckdb, "connect", failing_connect)
app = server.create_app(sqlite_file)
client = app.test_client()
payload = {
"table": "events",
"order_by": "timestamp",
"columns": ["timestamp", "value"],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert data["rows"][0][1] == big_value
def test_envvar_db(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
csv_file = tmp_path / "custom.csv"
root@29cb09005028:/workspace/scubaduck# sed -n '321,640p' tests/test_server.py
csv_file.write_text("timestamp,event,value,user\n2024-01-01 00:00:00,login,5
,bob\n")
monkeypatch.setenv("SCUBADUCK_DB", str(csv_file))
app = server.create_app()
client = app.test_client()
payload = _make_payload()
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
rows = rv.get_json()["rows"]
assert len(rows) == 1
def test_envvar_db_missing(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
missing = tmp_path / "missing.sqlite"
monkeypatch.setenv("SCUBADUCK_DB", str(missing))
with pytest.raises(FileNotFoundError):
server.create_app()
def test_group_by_table() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "table",
"order_by": "user",
"limit": 10,
"columns": ["value"],
"group_by": ["user"],
"aggregate": "Sum",
"show_hits": True,
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
rows = rv.get_json()["rows"]
assert rows[0][0] == "alice"
assert rows[0][1] == 2
assert rows[0][2] == 40
def test_relative_time_query(monkeypatch: pytest.MonkeyPatch) -> None:
app = server.app
client = app.test_client()
from datetime import datetime
fixed_now = datetime(2024, 1, 2, 4, 0, 0)
class FixedDateTime(datetime):
@classmethod
def now(cls, tz=None): # type: ignore[override]
return fixed_now if tz is None else fixed_now.astimezone(tz)
monkeypatch.setattr(server, "datetime", FixedDateTime)
payload = {
"table": "events",
"start": "-1 hour",
"end": "now",
"order_by": "timestamp",
"limit": 100,
"columns": ["timestamp", "event", "value", "user"],
"filters": [],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert len(data["rows"]) == 1
assert data["rows"][0][3] == "charlie"
def test_invalid_time_error() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "nonsense",
"end": "now",
"order_by": "timestamp",
"limit": 10,
"columns": ["timestamp"],
"filters": [],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 400
assert "error" in data
def test_query_error_returns_sql_and_traceback() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"columns": ["event"],
"group_by": ["user"],
"aggregate": "avg",
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 400
assert "error" in data
def test_table_unknown_column_error() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
...
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert len(data["rows"]) == 4
def test_timeseries_orders_by_xaxis() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "timeseries",
"limit": 100,
"columns": ["value"],
"x_axis": "timestamp",
"granularity": "1 day",
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
rows = data["rows"]
from dateutil import parser
timestamps = [parser.parse(r[0]).replace(tzinfo=None) for r in rows]
assert timestamps == sorted(timestamps)
def test_timeseries_limit_applies_to_series() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "timeseries",
"limit": 1,
"order_by": "user",
"group_by": ["user"],
"aggregate": "Count",
"columns": ["value"],
"x_axis": "timestamp",
"granularity": "1 day",
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert len(data["rows"]) == 2
assert all(r[1] == "alice" for r in data["rows"])
def test_timeseries_auto_and_fine_buckets() -> None:
app = server.app
client = app.test_client()
def run(gran: str) -> None:
payload = {
"start": "2024-01-01 00:00:00",
"end": "2024-01-02 03:00:00",
"graph_type": "timeseries",
"columns": ["value"],
"x_axis": "timestamp",
"granularity": gran,
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/js
on"
)
data = rv.get_json()
assert rv.status_code == 200
from dateutil import parser
start = parser.parse(cast(str, payload["start"])).replace(tzinfo=None)
buckets = [
            parser.parse(cast(str, r[0])).replace(tzinfo=None) for r in data["rows"]
]
assert buckets[0] == start
if len(buckets) > 1:
step = (buckets[1] - buckets[0]).total_seconds()
assert step % data["bucket_size"] == 0
assert any(r[1] != 0 for r in data["rows"])
run("Auto")
run("Fine")
def test_timeseries_string_column_error() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "timeseries",
"limit": 7,
"columns": ["timestamp", "event", "value", "user"],
"x_axis": "timestamp",
"granularity": "1 hour",
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 400
assert "Aggregate" in data["error"]
def test_derived_column_basic() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"limit": 10,
"columns": ["timestamp"],
"derived_columns": {"val2": "value * 2"},
root@29cb09005028:/workspace/scubaduck# sed -n '641,960p' tests/test_server.py
"filters": [],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert data["rows"][0][1] == 20
def test_timeseries_derived_column() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "timeseries",
"granularity": "1 hour",
"limit": 7,
"columns": ["value"],
"derived_columns": {"derived_1": "value * 2"},
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
rows = data["rows"]
assert all(r[2] == r[1] * 2 for r in rows)
def test_default_start_end_returned() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"order_by": "timestamp",
"limit": 5,
"columns": ["timestamp"],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert data["start"] == "2024-01-01 00:00:00"
assert data["end"] == "2024-01-02 03:00:00"
root@29cb09005028:/workspace/scubaduck# sed -n '961,1280p' tests/test_server.py
I’ve reached the end of the file at line 960. Now, I'll open test_web.py.
root@29cb09005028:/workspace/scubaduck# sed -n '1,200p' tests/test_web.py
from __future__ import annotations
from typing import Any
def select_value(page: Any, selector: str, value: str) -> None:
page.evaluate(
"arg => setSelectValue(arg.sel, arg.val)",
{"sel": selector, "val": value},
)
def run_query(
page: Any,
url: str,
*,
start: str | None = None,
end: str | None = None,
order_by: str | None = None,
order_dir: str | None = "ASC",
limit: int | None = None,
group_by: list[str] | None = None,
aggregate: str | None = None,
) -> dict[str, Any]:
page.goto(url)
page.wait_for_selector("#order_by option", state="attached")
page.wait_for_selector("#order_dir", state="attached")
page.wait_for_function("window.lastResults !== undefined")
if start is not None:
page.fill("#start", start)
if end is not None:
page.fill("#end", end)
if order_by is not None:
select_value(page, "#order_by", order_by)
if order_dir is not None and order_dir == "DESC":
page.click("#order_dir")
if limit is not None:
page.fill("#limit", str(limit))
if group_by is not None:
select_value(page, "#graph_type", "table")
page.evaluate(
"g => { groupBy.chips = g; groupBy.renderChips(); }",
group_by,
)
if aggregate is not None:
select_value(page, "#graph_type", "table")
select_value(page, "#aggregate", aggregate)
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
return page.evaluate("window.lastResults")
def test_range_filters(page: Any, server_url: str) -> None:
data = run_query(
page,
server_url,
start="2024-01-02 00:00:00",
end="2024-01-02 04:00:00",
order_by="timestamp",
limit=100,
)
assert len(data["rows"]) == 2
from dateutil import parser
    timestamps = [parser.parse(row[0]).replace(tzinfo=None) for row in data["rows"]]
assert timestamps == [
parser.parse("2024-01-02 00:00:00"),
parser.parse("2024-01-02 03:00:00"),
]
def test_order_by(page: Any, server_url: str) -> None:
data = run_query(
page,
server_url,
start="2024-01-01 00:00:00",
end="2024-01-03 00:00:00",
order_by="value",
order_dir="DESC",
limit=100,
)
values = [row[2] for row in data["rows"]]
assert values == sorted(values, reverse=True)
def test_limit(page: Any, server_url: str) -> None:
data = run_query(
page,
server_url,
start="2024-01-01 00:00:00",
end="2024-01-03 00:00:00",
order_by="timestamp",
limit=2,
)
assert len(data["rows"]) == 2
def test_simple_filter(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#order_by option", state="attached")
page.click("text=Add Filter")
filter_el = page.query_selector("#filters .filter:last-child")
assert filter_el
page.evaluate(
"arg => setSelectValue(arg.el.querySelector('.f-col'), arg.val)",
{"el": filter_el, "val": "user"},
)
val_input = filter_el.query_selector(".f-val")
val_input.click()
page.keyboard.type("alice")
page.keyboard.press("Enter")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
data = page.evaluate("window.lastResults")
assert len(data["rows"]) == 2
assert all(row[3] == "alice" for row in data["rows"])
def test_default_filter_and_layout(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#order_by option", state="attached")
count = page.evaluate("document.querySelectorAll('#filters .filter').length"
)
assert count == 1
last_is_button = page.evaluate(
"document.querySelector('#filters').lastElementChild.id === 'add_filter'
"
)
assert last_is_button
position = page.evaluate(
"getComputedStyle(document.querySelector('#filters .filter button.remove
')).position"
)
assert position == "static"
def test_filter_remove_alignment(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#order_by option", state="attached")
diff = page.evaluate(
"() => { const r=document.querySelector('#filters .filter-row').getBound
ingClientRect(); const x=document.querySelector('#filters .filter-row button.rem
ove').getBoundingClientRect(); return Math.abs(r.right - x.right); }"
)
assert diff <= 1
def test_header_and_tabs(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#order_by option", state="attached")
header = page.text_content("#header")
assert "sample.csv" in header
assert "events" in header
assert page.is_visible("#settings")
assert page.is_hidden("#columns")
page.click("text=Columns")
assert page.is_visible("#columns")
    cols = [c.strip() for c in page.locator("#column_groups li").all_inner_texts()]
assert "timestamp" in cols
assert "event" in cols
page.click("text=View Settings")
assert page.is_visible("#settings")
btn_color = page.evaluate(
"getComputedStyle(document.querySelector('#dive')).backgroundColor"
)
assert "rgb(0, 128, 0)" == btn_color
sidebar_overflow = page.evaluate(
"getComputedStyle(document.querySelector('#sidebar')).overflowY"
)
view_overflow = page.evaluate(
"getComputedStyle(document.querySelector('#view')).overflowY"
)
assert sidebar_overflow == "auto"
assert view_overflow == "auto"
def test_graph_type_table_fields(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#graph_type", state="attached")
select_value(page, "#graph_type", "table")
assert page.is_visible("#group_by_field")
assert page.is_visible("#aggregate_field")
assert page.is_visible("#show_hits_field")
page.click("text=Columns")
assert not page.is_visible("text=Strings:")
def test_graph_type_timeseries_fields(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#graph_type", state="attached")
select_value(page, "#graph_type", "timeseries")
assert page.is_visible("#group_by_field")
assert page.is_visible("#aggregate_field")
assert page.is_visible("#x_axis_field")
assert page.is_visible("#granularity_field")
assert page.is_visible("#fill_field")
def test_timeseries_default_query(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_function("window.lastResults !== undefined")
data = page.evaluate("window.lastResults")
assert "error" not in data
assert page.is_visible("#chart")
page.click("text=Columns")
assert not page.is_checked("#column_groups input[value='timestamp']")
def test_timeseries_single_bucket(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#graph_type", state="attached")
page.fill("#start", "2024-01-01 00:00:00")
page.fill("#end", "2024-01-01 00:00:00")
select_value(page, "#graph_type", "timeseries")
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
path = page.get_attribute("#chart path", "d")
assert path is not None and "NaN" not in path
def test_timeseries_fill_options(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#graph_type", state="attached")
page.fill("#start", "2024-01-01 00:00:00")
page.fill("#end", "2024-01-02 03:00:00")
select_value(page, "#graph_type", "timeseries")
select_value(page, "#granularity", "1 hour")
select_value(page, "#fill", "0")
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
path_zero = page.get_attribute("#chart path", "d")
assert path_zero is not None and path_zero.count("L") > 20
select_value(page, "#fill", "connect")
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
path_conn = page.get_attribute("#chart path", "d")
assert path_conn is not None and path_conn.count("M") == 1
select_value(page, "#fill", "blank")
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
path_blank = page.get_attribute("#chart path", "d")
assert path_blank is not None and path_blank.count("M") > 1
def test_timeseries_hover_highlight(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#graph_type", state="attached")
select_value(page, "#graph_type", "timeseries")
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
page.wait_for_selector("#chart path", state="attached")
path_el = page.query_selector("#chart path")
assert path_el
page.evaluate(
"el => el.dispatchEvent(new MouseEvent('mouseenter', {bubbles: true}))",
path_el,
)
width = page.evaluate(
"getComputedStyle(document.querySelector('#chart path')).strokeWidth"
)
assert "3" in width
color = page.evaluate(
"getComputedStyle(document.querySelector('#legend .legend-item')).backgr
oundColor"
)
assert "221, 221, 221" in color
def test_timeseries_crosshair(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#graph_type", state="attached")
select_value(page, "#graph_type", "timeseries")
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
page.wait_for_selector("#chart path", state="attached")
page.eval_on_selector(
"#chart",
"el => { const r = el.getBoundingClientRect(); el.dispatchEvent(new Mous
eEvent('mousemove', {clientX: r.left + r.width/2, clientY: r.top + r.height/2, b
ubbles: true})); }",
)
line_display = page.evaluate(
"document.getElementById('crosshair_line').style.display"
)
assert line_display != "none"
count = page.eval_on_selector_all("#crosshair_dots circle", "els => els.leng
th")
assert count > 0
page.eval_on_selector(
"#chart",
"el => el.dispatchEvent(new MouseEvent('mouseleave', {bubbles: true}))",
)
line_display = page.evaluate(
"document.getElementById('crosshair_line').style.display"
)
assert line_display == "none"
def test_timeseries_auto_timezone(browser: Any, server_url: str) -> None:
context = browser.new_context(timezone_id="America/New_York")
page = context.new_page()
page.goto(server_url)
page.wait_for_selector("#graph_type", state="attached")
select_value(page, "#graph_type", "timeseries")
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
path = page.get_attribute("#chart path", "d")
context.close()
assert path is not None
coords = [float(p.split(" ")[1]) for p in path.replace("M", "L").split("L")[
1:]]
assert max(coords) > min(coords)
def test_timeseries_multi_series(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#graph_type", state="attached")
select_value(page, "#graph_type", "timeseries")
page.click("text=Columns")
page.click("text=Add Derived")
expr = page.query_selector("#derived_list .derived textarea")
assert expr
name_inp = page.query_selector("#derived_list .derived .d-name")
assert name_inp
name_inp.fill("value_2")
expr.fill("value * 2")
page.click("text=View Settings")
page.fill("#start", "2024-01-01 00:00:00")
page.fill("#end", "2024-01-03 00:00:00")
select_value(page, "#granularity", "1 hour")
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
count = page.eval_on_selector_all("#chart path", "els => els.length")
assert count == 2
def test_help_and_alignment(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#order_by option", state="attached")
titles = page.evaluate(
"Array.from(document.querySelectorAll('#settings .help')).map(e => e.tit
le)"
)
assert any("start/end of the time range" in t for t in titles)
text_align = page.evaluate(
"getComputedStyle(document.querySelector('#settings label')).textAlign"
)
assert text_align == "right"
def test_table_sorting(page: Any, server_url: str) -> None:
run_query(
page,
server_url,
start="2024-01-01 00:00:00",
end="2024-01-03 00:00:00",
order_by="timestamp",
order_dir="ASC",
limit=100,
)
# header alignment
align = page.evaluate(
"getComputedStyle(document.querySelector('#results th')).textAlign"
)
assert align == "right"
header = page.locator("#results th").nth(3)
def values() -> list[str]:
return page.locator("#results td:nth-child(4)").all_inner_texts()
orig_rows = values()
assert orig_rows == ["alice", "bob", "alice", "charlie"]
first_sql = page.evaluate("window.lastResults.sql")
header.click()
assert values() == sorted(orig_rows)
assert header.inner_text().endswith("▲")
color = page.evaluate(
"getComputedStyle(document.querySelector('#results th:nth-child(4)')).co
lor"
)
assert "0, 0, 255" in color
assert page.evaluate("window.lastResults.sql") == first_sql
header.click()
assert values() == sorted(orig_rows, reverse=True)
assert header.inner_text().endswith("▼")
    header.click()
    ...
    assert page.input_value("#start") == "-3 hours"
def test_end_dropdown_now(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#order_by option", state="attached")
page.click('[data-target="end-select"]')
page.click("#end-select div:text('now')")
assert page.input_value("#end") == "now"
def test_invalid_time_error_shown(page: Any, server_url: str) -> None:
data = run_query(
page,
server_url,
start="nonsense",
end="now",
order_by="timestamp",
)
assert "error" in data
msg = page.text_content("#view")
assert "nonsense" in msg
def test_table_avg_group_by(page: Any, server_url: str) -> None:
data = run_query(
page,
server_url,
start="2024-01-01 00:00:00",
end="2024-01-03 00:00:00",
order_by="timestamp",
group_by=["user"],
aggregate="Avg",
)
assert "error" not in data
assert len(data["rows"]) == 3
def test_column_toggle_and_selection(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#order_by option", state="attached")
page.click("text=Columns")
page.wait_for_selector("#column_groups input", state="attached")
count = page.evaluate(
"document.querySelectorAll('#column_groups input:checked').length"
)
assert count == 4
page.click("#columns_none")
count = page.evaluate(
"document.querySelectorAll('#column_groups input:checked').length"
)
assert count == 0
page.click("#columns_all")
count = page.evaluate(
"document.querySelectorAll('#column_groups input:checked').length"
)
assert count == 4
page.uncheck("#column_groups input[value='value']")
page.click("text=View Settings")
page.fill("#start", "2024-01-01 00:00:00")
page.fill("#end", "2024-01-02 00:00:00")
select_value(page, "#order_by", "timestamp")
page.fill("#limit", "10")
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
data = page.evaluate("window.lastResults")
assert len(data["rows"][0]) == 3
headers = page.locator("#results th").all_inner_texts()
assert "value" not in headers
def test_columns_links_alignment(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#order_by option", state="attached")
page.click("text=Columns")
page.wait_for_selector("#column_groups input", state="attached")
tag = page.evaluate("document.getElementById('columns_all').tagName")
assert tag == "A"
align = page.evaluate(
"getComputedStyle(document.querySelector('#column_actions')).textAlign"
)
assert align == "right"
def test_column_group_links(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#order_by option", state="attached")
page.click("text=Columns")
page.wait_for_selector("#column_groups a", state="attached")
tag = page.evaluate("document.querySelector('#column_groups .col-group a').t
agName")
assert tag == "A"
def test_column_group_links_float_right(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#order_by option", state="attached")
page.click("text=Columns")
page.wait_for_selector("#column_groups .col-group .links", state="attached")
float_val = page.evaluate(
"getComputedStyle(document.querySelector('#column_groups .col-group .lin
ks')).float"
)
assert float_val == "right"
def test_chip_dropdown_navigation(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#order_by option", state="attached")
page.click("text=Add Filter")
f = page.query_selector("#filters .filter:last-child")
assert f
page.evaluate(
"arg => setSelectValue(arg.el.querySelector('.f-col'), arg.val)",
{"el": f, "val": "user"},
)
inp = f.query_selector(".f-val")
inp.click()
page.wait_for_selector("#filters .filter:last-child .chip-dropdown div")
page.keyboard.type("ali")
...
page.evaluate(
"arg => setSelectValue(arg.el.querySelector('.f-col'), arg.val)",
{"el": f, "val": "user"},
)
inp = f.query_selector(".f-val")
inp.click()
page.wait_for_selector("#filters .filter:last-child .chip-dropdown")
page.keyboard.type("ali")
page.wait_for_selector(
"#filters .filter:last-child .chip-dropdown div:text('alice')"
)
page.click("#filters .filter:last-child .chip-dropdown div:text('alice')")
focused = page.evaluate(
"document.activeElement === document.querySelector('#filters .filter:las
t-child .f-val')"
)
assert not focused
visible = page.evaluate(
"getComputedStyle(document.querySelector('#filters .filter:last-child .c
hip-dropdown')).display"
)
assert visible == "none"
def test_chip_dropdown_hides_on_column_click(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#order_by option", state="attached")
page.click("text=Add Filter")
f = page.query_selector("#filters .filter:last-child")
assert f
page.evaluate(
"arg => setSelectValue(arg.el.querySelector('.f-col'), arg.val)",
{"el": f, "val": "user"},
)
inp = f.query_selector(".f-val")
inp.click()
page.wait_for_selector("#filters .filter:last-child .chip-dropdown div")
f.query_selector(".f-col + .dropdown-display").click()
page.wait_for_selector("#filters .filter:last-child .chip-dropdown", state="
hidden")
def test_chip_backspace_keeps_dropdown(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#order_by option", state="attached")
page.click("text=Add Filter")
f = page.query_selector("#filters .filter:last-child")
assert f
page.evaluate(
"arg => setSelectValue(arg.el.querySelector('.f-col'), arg.val)",
{"el": f, "val": "user"},
)
inp = f.query_selector(".f-val")
inp.click()
page.keyboard.type("alice")
page.keyboard.press("Enter")
page.keyboard.type("b")
page.wait_for_selector("#filters .filter:last-child .chip-dropdown div")
page.keyboard.press("Backspace")
page.wait_for_function(
"document.querySelector('#filters .filter:last-child .f-val').value ===
''"
)
focused = page.evaluate(
"document.activeElement === document.querySelector('#filters .filter:las
t-child .f-val')"
)
assert focused
visible = page.evaluate(
"getComputedStyle(document.querySelector('#filters .filter:last-child .c
hip-dropdown')).display"
)
assert visible == "block"
def test_chip_duplicate_toggles(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#order_by option", state="attached")
page.click("text=Add Filter")
f = page.query_selector("#filters .filter:last-child")
assert f
page.evaluate(
"arg => setSelectValue(arg.el.querySelector('.f-col'), arg.val)",
{"el": f, "val": "user"},
)
inp = f.query_selector(".f-val")
inp.click()
page.keyboard.type("alice")
page.keyboard.press("Enter")
chips = page.evaluate(
"Array.from(document.querySelectorAll('#filters .filter:last-child .chip
')).map(c => c.firstChild.textContent)"
)
assert chips == ["alice"]
inp.click()
page.keyboard.type("alice")
page.keyboard.press("Enter")
chips = page.evaluate(
"Array.from(document.querySelectorAll('#filters .filter:last-child .chip
')).map(c => c.firstChild.textContent)"
)
assert chips == []
def test_table_enhancements(page: Any, server_url: str) -> None:
run_query(
page,
server_url,
start="2024-01-01 00:00:00",
end="2024-01-03 00:00:00",
order_by="timestamp",
limit=10,
)
border = page.evaluate(
"getComputedStyle(document.querySelector('#results td')).borderStyle"
)
assert border == "solid"
    color1 = page.evaluate(
        "getComputedStyle(document.querySelector('#results tr:nth-child(2) td')).backgroundColor"
    )
    color2 = page.evaluate(
    ...
    overflow = page.evaluate(
        "var v=document.getElementById('view'); v.scrollWidth > v.clientWidth"
    )
    assert not overflow
def test_table_single_selection(page: Any, server_url: str) -> None:
run_query(
page,
server_url,
start="2024-01-01 00:00:00",
end="2024-01-03 00:00:00",
order_by="timestamp",
limit=10,
)
page.click("#results tr:nth-child(2)")
page.click("#results tr:nth-child(3)")
count = page.evaluate("document.querySelectorAll('#results tr.selected').len
gth")
assert count == 1
is_third = page.evaluate(
"document.querySelector('#results tr:nth-child(3)').classList.contains('
selected')"
)
assert is_third
def test_timestamp_rendering(page: Any, server_url: str) -> None:
run_query(
page,
server_url,
start="2024-01-01 00:00:00",
end="2024-01-02 00:00:00",
order_by="timestamp",
limit=1,
)
cell = page.text_content("#results td")
assert cell != "Invalid Date"
valid = page.evaluate("v => !isNaN(Date.parse(v))", cell)
assert valid
def test_url_query_persistence(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#order_by option", state="attached")
page.wait_for_function("window.lastResults !== undefined")
page.fill("#start", "2024-01-01 00:00:00")
page.fill("#end", "2024-01-02 00:00:00")
page.fill("#limit", "1")
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
first_url = page.url
first_rows = page.evaluate("window.lastResults.rows.length")
page.fill("#limit", "2")
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
second_url = page.url
second_rows = page.evaluate("window.lastResults.rows.length")
assert second_rows != first_rows
assert first_url != second_url
page.go_back()
page.wait_for_function("window.lastResults !== undefined")
assert page.url == first_url
assert page.evaluate("window.lastResults.rows.length") == first_rows
def test_load_from_url(page: Any, server_url: str) -> None:
url = (
f"{server_url}?start=2024-01-01%2000:00:00&end=2024-01-02%2000:00:00"
"&order_by=timestamp&limit=2"
)
page.goto(url)
page.wait_for_selector("#order_by option", state="attached")
page.wait_for_function("window.lastResults !== undefined")
assert page.input_value("#start") == "2024-01-01 00:00:00"
assert page.input_value("#end") == "2024-01-02 00:00:00"
assert page.input_value("#limit") == "2"
assert page.evaluate("window.lastResults.rows.length") == 2
def test_empty_data_message(page: Any, server_url: str) -> None:
data = run_query(
page,
server_url,
start="2025-01-01 00:00:00",
end="2025-01-02 00:00:00",
order_by="timestamp",
limit=100,
)
assert data["rows"] == []
msg = page.text_content("#view")
assert "Empty data provided to table" in msg
def test_group_by_chip_from_url(page: Any, server_url: str) -> None:
url = f"{server_url}?graph_type=table&group_by=user&order_by=user&limit=10"
page.goto(url)
page.wait_for_selector("#group_by_field .chip", state="attached")
chips = page.evaluate(
"Array.from(document.querySelectorAll('#group_by_field .chip')).map(c =>
c.firstChild.textContent)"
)
assert chips == ["user"]
def test_group_by_autocomplete(page: Any, server_url: str) -> None:
page.goto(f"{server_url}?graph_type=table")
page.wait_for_selector("#group_by_field", state="visible")
inp = page.query_selector("#group_by_field .f-val")
assert inp
inp.click()
page.keyboard.type("us")
page.wait_for_selector("#group_by_field .chip-dropdown div")
options = page.locator("#group_by_field .chip-dropdown div").all_inner_texts
()
assert "user" in options
def test_group_by_copy_icon(page: Any, server_url: str) -> None:
page.goto(f"{server_url}?graph_type=table")
page.wait_for_selector("#group_by_field", state="visible")
...
end="2024-01-02 00:00:00",
order_by="timestamp",
limit=10,
)
sql = data["sql"]
displayed = page.text_content("#sql_query")
assert displayed is not None
assert displayed.strip() == sql
def test_timeseries_resize(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#graph_type", state="attached")
select_value(page, "#graph_type", "timeseries")
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
page.wait_for_selector("#chart path", state="attached")
def chart_info() -> dict[str, float]:
return page.evaluate(
"() => {const p=document.querySelector('#chart path'); const nums=p.
getAttribute('d').match(/[-0-9.]+/g).map(parseFloat); return {width: parseFloat(
document.getElementById('chart').getAttribute('width')), last: nums[nums.length-
2]};}"
)
before = chart_info()
legend_width = page.evaluate(
"parseFloat(getComputedStyle(document.getElementById('legend')).width)"
)
assert page.evaluate(
"() => document.getElementById('legend').getBoundingClientRect().right <
= document.getElementById('chart').getBoundingClientRect().left"
)
page.evaluate("document.getElementById('sidebar').style.width='200px'")
page.wait_for_function(
"width => document.getElementById('chart').getAttribute('width') != widt
h",
arg=before["width"],
)
after = chart_info()
legend_width_after = page.evaluate(
"parseFloat(getComputedStyle(document.getElementById('legend')).width)"
)
assert after["width"] > before["width"]
assert after["last"] > before["last"]
assert legend_width_after == legend_width
def test_timeseries_no_overflow(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#graph_type", state="attached")
select_value(page, "#graph_type", "timeseries")
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
overflow = page.evaluate(
"var v=document.getElementById('view'); v.scrollWidth > v.clientWidth"
)
assert not overflow
def test_timeseries_axis_ticks(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#graph_type", state="attached")
select_value(page, "#graph_type", "timeseries")
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
page.wait_for_selector("#chart text.tick-label", state="attached")
count = page.eval_on_selector_all("#chart text.tick-label", "els => els.leng
th")
assert count > 2
def test_timeseries_interval_offset(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#graph_type", state="attached")
select_value(page, "#graph_type", "timeseries")
page.fill("#start", "2024-01-01 00:00:00")
page.fill("#end", "2024-01-03 12:00:00")
select_value(page, "#granularity", "1 hour")
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
page.wait_for_selector("#chart text.tick-label", state="attached")
labels = page.eval_on_selector_all(
"#chart text.tick-label", "els => els.map(e => e.textContent)"
)
assert labels
assert all(lbl != "00:00" for lbl in labels)
times = [lbl for lbl in labels if ":" in lbl]
assert times
for t in times:
h = int(t.split(":")[0])
assert h % 4 == 0
def test_timeseries_legend_values(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#graph_type", state="attached")
select_value(page, "#graph_type", "timeseries")
page.evaluate("g => { groupBy.chips = g; groupBy.renderChips(); }", ["user"]
)
select_value(page, "#aggregate", "Avg")
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
headers = page.evaluate(
"() => Array.from(document.querySelectorAll('#legend .legend-header')).m
ap(e => e.textContent)"
)
assert any(h.startswith("alice") for h in headers)
page.wait_for_selector("#chart path", state="attached")
page.eval_on_selector(
"#chart",
"el => { const r=el.getBoundingClientRect(); el.dispatchEvent(new MouseE
vent('mousemove', {clientX:r.left+r.width/2, clientY:r.top+r.height/2, bubbles:t
rue})); }",
)
value = page.evaluate("document.querySelector('#legend .legend-value').textC
ontent")
assert value != ""
I'll keep going through the file to make sure time column heuristics are tested.
root@29cb09005028:/workspace/scubaduck# sed -n '1200,1600p' tests/test_web.py
I'll open the server code since the file seems to have ended around line 1070.
from dataclasses import dataclass, field, replace
from datetime import datetime, timedelta, timezone
import re
import time
from pathlib import Path
import os
import sqlite3
import traceback
import math
from typing import Any, Dict, List, Tuple, cast

import duckdb
from dateutil import parser as dtparser
from flask import Flask, jsonify, request, send_from_directory
@dataclass
class Filter:
column: str
op: str
value: str | int | float | list[str] | None
@dataclass
class QueryParams:
start: str | None = None
end: str | None = None
order_by: str | None = None
order_dir: str = "ASC"
limit: int | None = None
columns: list[str] = field(default_factory=lambda: [])
filters: list[Filter] = field(default_factory=lambda: [])
derived_columns: dict[str, str] = field(default_factory=lambda: {})
graph_type: str = "samples"
group_by: list[str] = field(default_factory=lambda: [])
aggregate: str | None = None
show_hits: bool = False
x_axis: str | None = None
granularity: str = "Auto"
fill: str = "0"
table: str = "events"
def _normalize_sqlite_type(sql: str) -> str:
"""Map arbitrary SQLite column types to DuckDB-compatible types."""
t = sql.strip().upper()
if "(" in t:
t = t.split("(", 1)[0]
if "INT" in t:
# SQLite only has a single INTEGER type which is always 64-bit.
# Use DuckDB's BIGINT to avoid overflow when values exceed INT32.
return "BIGINT"
if any(key in t for key in ("CHAR", "CLOB", "TEXT")):
return "VARCHAR"
if "BLOB" in t:
return "BLOB"
if any(key in t for key in ("DOUBLE", "REAL", "FLOA", "NUMERIC", "DECIMAL"))
:
return "DOUBLE"
if "BOOL" in t:
return "BOOLEAN"
if "DATE" in t or "TIME" in t:
return "TIMESTAMP" if "TIME" in t else "DATE"
return "VARCHAR"
def _load_database(path: Path) -> duckdb.DuckDBPyConnection:
if not path.exists():
raise FileNotFoundError(path)
ext = path.suffix.lower()
if ext == ".csv":
con = duckdb.connect()
con.execute(
f"CREATE TABLE events AS SELECT * FROM read_csv_auto('{path.as_posix
()}')"
)
elif ext in {".db", ".sqlite"}:
con = duckdb.connect()
try:
con.execute("LOAD sqlite")
con.execute(f"ATTACH '{path.as_posix()}' AS db (TYPE SQLITE)")
tables = [
r[0]
for r in con.execute(
"SELECT name FROM db.sqlite_master WHERE type='table'"
).fetchall()
]
for t in tables:
con.execute(f'CREATE VIEW "{t}" AS SELECT * FROM db."{t}"')
except Exception:
sconn = sqlite3.connect(path)
tables = [
r[0]
for r in sconn.execute(
"SELECT name FROM sqlite_master WHERE type='table'"
).fetchall()
]
for t in tables:
info = sconn.execute(f'PRAGMA table_info("{t}")').fetchall()
col_defs = ", ".join(
f'"{r[1]}" {_normalize_sqlite_type(cast(str, r[2]))}' for r
in info
)
sql = f'CREATE TABLE "{t}" ({col_defs})'
try:
con.execute(sql)
except Exception:
print(f"Failed SQL: {sql}")
raise
placeholders = ",".join("?" for _ in info)
for row in sconn.execute(f'SELECT * FROM "{t}"'):
con.execute(f'INSERT INTO "{t}" VALUES ({placeholders})', ro
w)
sconn.close()
else:
con = duckdb.connect(path)
return con
_REL_RE = re.compile(
r"([+-]?\d+(?:\.\d*)?)\s*(hour|hours|day|days|week|weeks|fortnight|fortnight
s)",
re.IGNORECASE,
)
def parse_time(val: str | None) -> str | None:
"""Parse an absolute or relative time string into ``YYYY-MM-DD HH:MM:SS``.""
"
if val is None or val == "":
return None
s = val.strip()
if s.lower() == "now":
dt = datetime.now(timezone.utc)
return dt.replace(microsecond=0).strftime("%Y-%m-%d %H:%M:%S")
m = _REL_RE.fullmatch(s)
if m:
qty = float(m.group(1))
unit = m.group(2).lower()
delta: timedelta
if unit.startswith("hour"):
delta = timedelta(hours=qty)
elif unit.startswith("day"):
delta = timedelta(days=qty)
elif unit.startswith("week"):
delta = timedelta(weeks=qty)
elif unit.startswith("fortnight"):
delta = timedelta(weeks=2 * qty)
else: # pragma: no cover - defensive
raise ValueError(f"Unsupported unit: {unit}")
dt = datetime.now(timezone.utc) + delta
return dt.replace(microsecond=0).strftime("%Y-%m-%d %H:%M:%S")
dt = dtparser.parse(s)
return dt.replace(microsecond=0, tzinfo=None).strftime("%Y-%m-%d %H:%M:%S")
def _granularity_seconds(granularity: str, start: str | None, end: str | None) -> int:
gran = granularity.lower()
mapping = {
"1 second": 1,
"5 seconds": 5,
"10 seconds": 10,
"30 seconds": 30,
"1 minute": 60,
"4 minutes": 240,
"5 minutes": 300,
"10 minutes": 600,
"15 minutes": 900,
"30 minutes": 1800,
"1 hour": 3600,
"3 hours": 10800,
"6 hours": 21600,
"1 day": 86400,
"1 week": 604800,
"30 days": 2592000,
}
if gran in mapping:
return mapping[gran]
if gran in {"auto", "fine"} and start and end:
try:
s = dtparser.parse(start)
e = dtparser.parse(end)
except Exception:
return 3600
total = max((e - s).total_seconds(), 1)
buckets = 100 if gran == "auto" else 500
return max(int(total // buckets), 1)
return 3600
def build_query(params: QueryParams, column_types: Dict[str, str] | None = None) -> str:
select_parts: list[str] = []
group_cols = params.group_by[:]
if params.graph_type == "timeseries":
sec = _granularity_seconds(params.granularity, params.start, params.end)
x_axis = params.x_axis or "timestamp"
if params.start:
bucket_expr = (
f"TIMESTAMP '{params.start}' + INTERVAL '{sec} second' * "
f"CAST(floor((epoch({x_axis}) - epoch(TIMESTAMP '{params.start}'
))/{sec}) AS BIGINT)"
)
else:
bucket_expr = (
f"TIMESTAMP 'epoch' + INTERVAL '{sec} second' * "
f"CAST(floor(epoch({x_axis})/{sec}) AS BIGINT)"
)
select_parts.append(f"{bucket_expr} AS bucket")
group_cols = ["bucket"] + group_cols
has_agg = bool(group_cols) or params.aggregate is not None
if has_agg:
select_cols = (
group_cols[1:] if params.graph_type == "timeseries" else group_cols
)
select_parts.extend(select_cols)
agg = (params.aggregate or "avg").lower()
def agg_expr(col: str) -> str:
if agg.startswith("p"):
quant = float(agg[1:]) / 100
return f"quantile({col}, {quant})"
if agg == "count distinct":
return f"count(DISTINCT {col})"
if agg == "avg" and column_types is not None:
ctype = column_types.get(col, "").upper()
if "TIMESTAMP" in ctype or "DATE" in ctype or "TIME" in ctype:
return (
"TIMESTAMP 'epoch' + INTERVAL '1 second' * "
f"CAST(avg(epoch({col})) AS BIGINT)"
)
return f"{agg}({col})"
for col in params.columns:
if col in group_cols:
continue
select_parts.append(f"{agg_expr(col)} AS {col}")
if params.show_hits:
select_parts.insert(len(group_cols), "count(*) AS Hits")
else:
select_parts.extend(params.columns)
if has_agg and params.derived_columns:
inner_params = replace(
params,
derived_columns={},
order_by=None,
limit=None,
)
inner_sql = build_query(inner_params, column_types)
outer_select = ["t.*"] + [
f"{expr} AS {name}" for name, expr in params.derived_columns.items()
]
indented_inner = "\n".join(" " + line for line in inner_sql.splitline
s())
lines = [
f"SELECT {', '.join(outer_select)}",
"FROM (",
indented_inner,
") t",
]
if params.order_by:
lines.append(f"ORDER BY {params.order_by} {params.order_dir}")
elif params.graph_type == "timeseries":
lines.append("ORDER BY bucket")
if params.limit is not None:
lines.append(f"LIMIT {params.limit}")
return "\n".join(lines)
for name, expr in params.derived_columns.items():
select_parts.append(f"{expr} AS {name}")
select_clause = ", ".join(select_parts) if select_parts else "*"
lines = [f"SELECT {select_clause}", f'FROM "{params.table}"']
where_parts: list[str] = []
if params.start:
where_parts.append(f"timestamp >= '{params.start}'")
if params.end:
where_parts.append(f"timestamp <= '{params.end}'")
for f in params.filters:
op = f.op
if op in {"empty", "!empty"}:
val = "''"
else:
if f.value is None:
continue
if isinstance(f.value, list):
if not f.value:
continue
if op == "=":
vals = " OR ".join(f"{f.column} = '{v}'" for v in f.value)
where_parts.append(f"({vals})")
continue
val = f"'{f.value}'" if isinstance(f.value, str) else str(f.value)
if op == "contains":
where_parts.append(f"{f.column} ILIKE '%' || {val} || '%'")
elif op == "!contains":
where_parts.append(f"{f.column} NOT ILIKE '%' || {val} || '%'")
elif op == "empty":
where_parts.append(f"{f.column} = {val}")
elif op == "!empty":
where_parts.append(f"{f.column} != {val}")
else:
where_parts.append(f"{f.column} {op} {val}")
if where_parts:
lines.append("WHERE " + " AND ".join(where_parts))
if group_cols:
lines.append("GROUP BY " + ", ".join(group_cols))
if params.order_by:
lines.append(f"ORDER BY {params.order_by} {params.order_dir}")
elif params.graph_type == "timeseries":
lines.append("ORDER BY bucket")
if params.limit is not None:
lines.append(f"LIMIT {params.limit}")
return "\n".join(lines)
def create_app(db_file: str | Path | None = None) -> Flask:
app = Flask(__name__, static_folder="static")
if db_file is None:
env_db = os.environ.get("SCUBADUCK_DB")
if env_db:
db_file = env_db
db_path = Path(db_file or Path(__file__).with_name("sample.csv")).resolve()
con = _load_database(db_path)
tables = [r[0] for r in con.execute("SHOW TABLES").fetchall()]
if not tables:
raise ValueError("No tables found in database")
default_table = tables[0]
columns_cache: Dict[str, Dict[str, str]] = {}
def get_columns(table: str) -> Dict[str, str]:
if table not in columns_cache:
rows = con.execute(f'PRAGMA table_info("{table}")').fetchall()
if not rows:
raise ValueError(f"Unknown table: {table}")
columns_cache[table] = {r[1]: r[2] for r in rows}
return columns_cache[table]
sample_cache: Dict[Tuple[str, str, str], Tuple[List[str], float]] = {}
CACHE_TTL = 60.0
CACHE_LIMIT = 200
@app.route("/")
def index() -> Any: # pyright: ignore[reportUnusedFunction]
assert app.static_folder is not None
return send_from_directory(app.static_folder, "index.html")
@app.route("/js/<path:filename>")
def js(filename: str) -> Any: # pyright: ignore[reportUnusedFunction]
assert app.static_folder is not None
folder = Path(app.static_folder) / "js"
return send_from_directory(folder, filename)
@app.route("/api/tables")
def tables_endpoint() -> Any: # pyright: ignore[reportUnusedFunction]
return jsonify(tables)
@app.route("/api/columns")
def columns() -> Any: # pyright: ignore[reportUnusedFunction]
table = request.args.get("table", default_table)
rows = con.execute(f'PRAGMA table_info("{table}")').fetchall()
return jsonify([{"name": r[1], "type": r[2]} for r in rows])
def _cache_get(key: Tuple[str, str, str]) -> List[str] | None:
item = sample_cache.get(key)
if item is None:
return None
vals, ts = item
if time.time() - ts > CACHE_TTL:
del sample_cache[key]
return None
sample_cache[key] = (vals, time.time())
return vals
def _cache_set(key: Tuple[str, str, str], vals: List[str]) -> None:
sample_cache[key] = (vals, time.time())
if len(sample_cache) > CACHE_LIMIT:
oldest = min(sample_cache.items(), key=lambda kv: kv[1][1])[0]
del sample_cache[oldest]
@app.route("/api/samples")
def sample_values() -> Any: # pyright: ignore[reportUnusedFunction]
table = request.args.get("table", default_table)
column = request.args.get("column")
substr = request.args.get("q", "")
column_types = get_columns(table)
if not column or column not in column_types:
return jsonify([])
ctype = column_types[column].upper()
if "CHAR" not in ctype and "STRING" not in ctype and "VARCHAR" not in ct
ype:
return jsonify([])
key = (table, column, substr)
cached = _cache_get(key)
if cached is not None:
return jsonify(cached)
rows = con.execute(
f"SELECT DISTINCT {column} FROM \"{table}\" WHERE CAST({column} AS V
ARCHAR) ILIKE '%' || ? || '%' LIMIT 20",
[substr],
).fetchall()
values = [r[0] for r in rows]
_cache_set(key, values)
root@29cb09005028:/workspace/scubaduck# sed -n '400,800p' scubaduck/server.py
return jsonify(values)
@app.route("/api/query", methods=["POST"])
def query() -> Any: # pyright: ignore[reportUnusedFunction]
payload = request.get_json(force=True)
try:
start = parse_time(payload.get("start"))
end = parse_time(payload.get("end"))
except Exception as exc:
return jsonify({"error": str(exc)}), 400
params = QueryParams(
start=start,
end=end,
order_by=payload.get("order_by"),
order_dir=payload.get("order_dir", "ASC"),
limit=payload.get("limit"),
columns=payload.get("columns", []),
derived_columns=payload.get("derived_columns", {}),
graph_type=payload.get("graph_type", "samples"),
group_by=payload.get("group_by", []),
aggregate=payload.get("aggregate"),
show_hits=payload.get("show_hits", False),
x_axis=payload.get("x_axis"),
granularity=payload.get("granularity", "Auto"),
fill=payload.get("fill", "0"),
table=payload.get("table", default_table),
)
for f in payload.get("filters", []):
params.filters.append(Filter(f["column"], f["op"], f.get("value")))
if params.table not in tables:
return jsonify({"error": "Invalid table"}), 400
column_types = get_columns(params.table)
if params.graph_type not in {"table", "timeseries"} and (
params.group_by or params.aggregate or params.show_hits
):
return (
jsonify(
{
"error": "group_by, aggregate and show_hits are only val
id for table or timeseries view"
}
),
400,
)
valid_cols = set(column_types.keys())
valid_cols.update(params.derived_columns.keys())
if params.graph_type == "timeseries":
if params.x_axis is None:
for cand in ["time", "timestamp"]:
if cand in valid_cols:
params.x_axis = cand
break
if params.x_axis is None or params.x_axis not in valid_cols:
return jsonify({"error": "Invalid x_axis"}), 400
ctype = column_types.get(params.x_axis, "").upper()
if not any(t in ctype for t in ["TIMESTAMP", "DATE", "TIME"]):
return jsonify({"error": "x_axis must be a time column"}), 400
for col in params.columns:
if col not in valid_cols:
return jsonify({"error": f"Unknown column: {col}"}), 400
for col in params.group_by:
if col not in valid_cols:
return jsonify({"error": f"Unknown column: {col}"}), 400
if params.order_by and params.order_by not in valid_cols:
return jsonify({"error": f"Unknown column: {params.order_by}"}), 400
if params.group_by or params.graph_type == "timeseries":
agg = (params.aggregate or "avg").lower()
if agg.startswith("p") or agg == "sum":
need_numeric = True
allow_time = False
elif agg == "avg" or agg in {"min", "max"}:
need_numeric = False
allow_time = True
else:
need_numeric = False
allow_time = False
if need_numeric or allow_time:
for c in params.columns:
if c in params.group_by or c == params.x_axis:
continue
if c not in column_types:
continue
ctype = column_types.get(c, "").upper()
is_numeric = any(
t in ctype
for t in [
"INT",
"DECIMAL",
"REAL",
"DOUBLE",
"FLOAT",
"NUMERIC",
"HUGEINT",
]
)
is_time = "TIMESTAMP" in ctype or "DATE" in ctype or "TIME"
in ctype
if need_numeric and not is_numeric:
return (
jsonify(
{
"error": f"Aggregate {agg} cannot be applied
to column {c}",
}
),
400,
)
if allow_time and not (is_numeric or is_time):
return (
jsonify(
{
"error": f"Aggregate {agg} cannot be applied
to column {c}",
}
),
400,
)
if params.start is None or params.end is None:
axis = params.x_axis or "timestamp"
row = cast(
tuple[datetime | None, datetime | None],
con.execute(
f'SELECT min({axis}), max({axis}) FROM "{params.table}"'
).fetchall()[0],
)
mn, mx = row
if params.start is None and mn is not None:
params.start = (
mn.strftime("%Y-%m-%d %H:%M:%S") if not isinstance(mn, str)
else mn
)
if params.end is None and mx is not None:
params.end = (
mx.strftime("%Y-%m-%d %H:%M:%S") if not isinstance(mx, str)
else mx
)
bucket_size: int | None = None
if params.graph_type == "timeseries":
bucket_size = _granularity_seconds(
params.granularity,
params.start if isinstance(params.start, str) else None,
params.end if isinstance(params.end, str) else None,
)
if (
params.limit is not None
and params.start is not None
and params.end is not None
):
try:
start_dt = dtparser.parse(params.start)
end_dt = dtparser.parse(params.end)
buckets = math.ceil(
(end_dt - start_dt).total_seconds() / bucket_size
)
if buckets > 1:
params.limit *= buckets
except Exception:
pass
sql = build_query(params, column_types)
try:
rows = con.execute(sql).fetchall()
except Exception as exc:
tb = traceback.format_exc()
print(f"Query failed:\n{sql}\n{tb}")
return (
jsonify({"sql": sql, "error": str(exc), "traceback": tb}),
400,
)
result: Dict[str, Any] = {"sql": sql, "rows": rows}
if params.start is not None:
result["start"] = str(params.start)
if params.end is not None:
result["end"] = str(params.end)
if bucket_size is not None:
result["bucket_size"] = bucket_size
return jsonify(result)
return app
app = create_app()
if __name__ == "__main__":
app.run(debug=True)
I see they're using the "timestamp" column for both start and end. Need to parameterize this. Let's check static/js for related logic.
root@29cb09005028:/workspace/scubaduck# ls scubaduck/static/js
chip_input.js timeseries_chart.js
function initChipInput(filter, fetchOptions) {
const input = filter.querySelector('.f-val');
const chipsEl = filter.querySelector('.chip-input');
const dropdown = filter.querySelector('.chip-dropdown');
const copyBtn = filter.querySelector('.chip-copy');
filter.chips = [];
filter.renderChips = renderChips;
filter.addChip = addChip;
let highlight = 0;
let dropdownLocked = false;
chipsEl.addEventListener('click', () => {
input.focus();
});
function renderChips() {
chipsEl.querySelectorAll('.chip').forEach(c => c.remove());
filter.chips.forEach((v, i) => {
const span = document.createElement('span');
span.className = 'chip';
span.textContent = v;
const x = document.createElement('span');
x.className = 'x';
x.textContent = '✖';
x.addEventListener('click', e => {
e.stopPropagation();
filter.chips.splice(i, 1);
renderChips();
input.focus();
});
span.appendChild(x);
chipsEl.insertBefore(span, input);
});
}
function hideDropdown() {
dropdown.style.display = 'none';
dropdownLocked = true;
}
function showDropdown() {
if (!dropdownLocked && document.activeElement === input) {
dropdown.style.display = 'block';
}
}
function updateHighlight() {
Array.from(dropdown.children).forEach((c, i) => {
c.classList.toggle('highlight', i === highlight);
});
}
function addChip(val) {
if (!val) return;
const i = filter.chips.indexOf(val);
if (i !== -1) {
filter.chips.splice(i, 1);
} else {
filter.chips.push(val);
}
input.value = '';
renderChips();
}
copyBtn.addEventListener('click', () => {
    navigator.clipboard && navigator.clipboard.writeText(filter.chips.join(','));
});
input.addEventListener('paste', e => {
e.preventDefault();
const text = e.clipboardData.getData('text');
if (e.shiftKey) {
addChip(text.trim());
} else {
text.split(',').forEach(t => addChip(t.trim()));
}
hideDropdown();
});
input.addEventListener('keydown', e => {
if (e.key === 'ArrowDown') {
if (dropdown.style.display !== 'none') {
highlight = Math.min(highlight + 1, dropdown.children.length - 1);
updateHighlight();
}
e.preventDefault();
} else if (e.key === 'ArrowUp') {
if (dropdown.style.display !== 'none') {
highlight = Math.max(highlight - 1, 0);
updateHighlight();
}
e.preventDefault();
} else if (e.key === 'Backspace' && input.value === '') {
if (filter.chips.length > 0) {
filter.chips.pop();
renderChips();
}
} else if (e.key === 'Enter') {
if (dropdown.style.display !== 'none' && dropdown.children.length > 0) {
const val = dropdown.children[highlight].dataset.value;
if (val !== input.value.trim()) {
addChip(val);
} else {
addChip(input.value.trim());
}
} else {
addChip(input.value.trim());
}
hideDropdown();
e.preventDefault();
}
});
function renderDropdown(vals) {
dropdown.innerHTML = '';
const typed = input.value.trim();
if (typed) {
vals.splice(1, 0, typed);
}
vals.forEach((v, i) => {
const d = document.createElement('div');
d.textContent = v;
d.dataset.value = v;
d.addEventListener('mouseover', () => {
highlight = i;
updateHighlight();
});
d.addEventListener('mousedown', evt => {
evt.preventDefault();
addChip(v);
hideDropdown();
input.blur();
});
dropdown.appendChild(d);
});
if (vals.length) {
highlight = 0;
updateHighlight();
showDropdown();
} else {
hideDropdown();
}
}
function loadOptions() {
dropdownLocked = false;
if (!fetchOptions) {
dropdown.innerHTML = '';
return;
}
Promise.resolve(fetchOptions(input.value, filter)).then(values => {
renderDropdown(Array.isArray(values) ? values : []);
});
}
input.addEventListener('focus', loadOptions);
input.addEventListener('input', loadOptions);
root@29cb09005028:/workspace/scubaduck# sed -n '160,320p' scubaduck/static/js/chip_input.js
document.addEventListener('click', evt => {
if (evt.target !== input) {
hideDropdown();
}
});
}
root@29cb09005028:/workspace/scubaduck# sed -n '1,200p' scubaduck/static/js/timeseries_chart.js
let resizeObserver = null;
let currentChart = null;
function showTimeSeries(data) {
function parseTs(s) {
if (s.match(/GMT/) || s.endsWith('Z') || /\+\d{2}:?\d{2}$/.test(s)) {
return new Date(s).getTime();
}
return new Date(s + 'Z').getTime();
}
const view = document.getElementById('view');
if (data.rows.length === 0) {
view.innerHTML = '<p id="empty-message">Empty data provided to table</p>';
return;
}
const height = 400;
view.innerHTML =
'<div id="ts-container"><div id="legend"></div><div id="chart-wrapper"><svg
id="chart" height="' +
height +
'"></svg></div></div>';
const svg = document.getElementById('chart');
const legend = document.getElementById('legend');
  const crosshairLine = document.createElementNS('http://www.w3.org/2000/svg', 'line');
crosshairLine.id = 'crosshair_line';
crosshairLine.setAttribute('stroke', '#555');
crosshairLine.style.display = 'none';
  const crosshairDots = document.createElementNS('http://www.w3.org/2000/svg', 'g');
crosshairDots.id = 'crosshair_dots';
crosshairDots.style.display = 'none';
const groups = groupBy.chips || [];
const hasHits = document.getElementById('show_hits').checked ? 1 : 0;
const fill = document.getElementById('fill').value;
const bucketMs = (data.bucket_size || 3600) * 1000;
const start = data.start ? parseTs(data.start) : null;
const end = data.end ? parseTs(data.end) : null;
const startIdx = 1 + groups.length + hasHits;
const valueCols = selectedColumns.slice(groups.length + hasHits);
const series = {};
data.rows.forEach(r => {
const ts = parseTs(r[0]);
const groupKey = groups.map((_, i) => r[1 + i]).join(':') || 'all';
valueCols.forEach((name, i) => {
const val = Number(r[startIdx + i]);
const key = groupKey === 'all' ? name : groupKey + ':' + name;
if (!series[key]) series[key] = {};
series[key][ts] = val;
});
});
const buckets = [];
let minX = start !== null ? start : Infinity;
let maxX = end !== null ? end : -Infinity;
if (start !== null && end !== null) {
for (let t = start; t <= end; t += bucketMs) {
buckets.push(t);
}
} else {
Object.keys(series).forEach(k => {
const s = series[k];
Object.keys(s).forEach(t => {
const n = Number(t);
if (n < minX) minX = n;
if (n > maxX) maxX = n;
});
});
for (let t = minX; t <= maxX; t += bucketMs) {
buckets.push(t);
}
}
let minY = Infinity,
maxY = -Infinity;
Object.keys(series).forEach(key => {
const vals = series[key];
buckets.forEach(b => {
const v = vals[b];
const val = v === undefined && fill === '0' ? 0 : v;
if (val === undefined) return;
if (val < minY) minY = val;
if (val > maxY) maxY = val;
});
});
if (fill === '0') {
if (minY > 0) minY = 0;
if (maxY < 0) maxY = 0;
}
const colors = [
'#1f77b4',
'#ff7f0e',
'#2ca02c',
'#d62728',
'#9467bd',
'#8c564b',
'#e377c2'
];
currentChart = {
svg,
legend,
series,
buckets,
minX,
maxX,
minY,
maxY,
fill,
colors,
height,
crosshairLine,
crosshairDots,
seriesEls: {},
bucketPixels: [],
xScale: null,
yScale: null,
selected: null
};
const intervals = [
{unit: 'second', step: 1, ms: 1000},
{unit: 'second', step: 2, ms: 2000},
{unit: 'second', step: 5, ms: 5000},
{unit: 'second', step: 10, ms: 10000},
{unit: 'second', step: 15, ms: 15000},
{unit: 'second', step: 30, ms: 30000},
{unit: 'minute', step: 1, ms: 60000},
{unit: 'minute', step: 2, ms: 120000},
{unit: 'minute', step: 5, ms: 300000},
{unit: 'minute', step: 10, ms: 600000},
{unit: 'minute', step: 15, ms: 900000},
{unit: 'minute', step: 30, ms: 1800000},
{unit: 'hour', step: 1, ms: 3600000},
{unit: 'hour', step: 2, ms: 7200000},
{unit: 'hour', step: 3, ms: 10800000},
{unit: 'hour', step: 4, ms: 14400000},
{unit: 'hour', step: 6, ms: 21600000},
{unit: 'hour', step: 12, ms: 43200000},
{unit: 'day', step: 1, ms: 86400000},
{unit: 'day', step: 2, ms: 172800000},
{unit: 'week', step: 1, ms: 604800000},
{unit: 'week', step: 2, ms: 1209600000},
{unit: 'month', step: 1},
{unit: 'month', step: 3},
{unit: 'month', step: 6},
{unit: 'year', step: 1},
{unit: 'year', step: 2},
{unit: 'year', step: 5},
{unit: 'year', step: 10}
];
function chooseInterval(start, end) {
const span = end - start;
function approxMs(i) {
if (i.ms) return i.ms;
if (i.unit === 'month') return i.step * 2629800000;
if (i.unit === 'year') return i.step * 31557600000;
return 1000;
}
let best = intervals[0];
let bestScore = Infinity;
intervals.forEach(i => {
const count = span / approxMs(i);
const score = Math.abs(count - 15);
if (score < bestScore) {
best = i;
bestScore = score;
}
});
return best;
}
function generateTicks(start, end, intv) {
const ticks = [];
if (intv.unit === 'month' || intv.unit === 'year') {
let d = new Date(start);
d.setUTCDate(1);
if (intv.unit === 'year') d.setUTCMonth(0);
let unitVal =
intv.unit === 'month'
? d.getUTCFullYear() * 12 + d.getUTCMonth()
: d.getUTCFullYear();
unitVal = Math.ceil(unitVal / intv.step) * intv.step;
while (true) {
const year =
intv.unit === 'month' ? Math.floor(unitVal / 12) : unitVal;
const month = intv.unit === 'month' ? unitVal % 12 : 0;
const t = Date.UTC(year, month, 1);
if (t > end) break;
if (t >= start) ticks.push(t);
unitVal += intv.step;
}
} else {
const step = intv.ms * intv.step;
let t = Math.ceil(start / step) * step;
if (intv.unit === 'week') {
const d = new Date(t);
const adj = (d.getUTCDay() + 6) % 7;
t = d.getTime() - adj * 86400000;
t = Math.ceil(t / step) * step;
I’ll go ahead and open the index.html file now.
root@29cb09005028:/workspace/scubaduck# sed -n '1,200p' scubaduck/static/index.html
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>ScubaDuck</title>
<style>
    body { margin: 0; font-family: sans-serif; height: 100vh; display: flex; flex-direction: column; }
#header { padding: 10px; font-weight: bold; border-bottom: 1px solid #ccc; }
    #content { flex: 1; display: flex; height: calc(100vh - 42px); overflow: hidden; }
    #sidebar { width: 450px; padding: 10px; border-right: 3px solid #ccc; overflow-y: auto; display: flex; flex-direction: column; box-sizing: border-box; }
#sidebar-resizer { width: 5px; cursor: col-resize; background: #ccc; }
#view { flex: 1; padding: 10px; overflow-y: auto; overflow-x: auto; }
.field { display: flex; align-items: center; margin-bottom: 10px; }
.field label { width: 80px; text-align: right; margin-right: 5px; }
.help { margin-left: 4px; cursor: help; }
.rel-btn { margin-left: 4px; }
#tabs { display: flex; align-items: center; margin-bottom: 10px; }
    #tabs .tab { margin-right: 5px; background: none; border: 1px solid #ccc; padding: 4px 8px; cursor: pointer; width: 120px; text-align: center; box-sizing: border-box; }
#tabs .tab.active { background: #eee; font-weight: bold; }
    #dive { margin-left: auto; background: green; color: white; border: none; padding: 5px 10px; cursor: pointer; }
.tab-content { display: none; }
.tab-content.active { display: block; }
#filter_list { display: flex; flex-direction: column; }
#filters .filter {
border: 1px solid #ccc;
padding: 5px;
margin-bottom: 5px;
position: relative;
display: flex;
flex-direction: column;
}
#derived_columns .derived {
border: 1px solid #ccc;
padding: 5px;
margin-bottom: 5px;
display: flex;
flex-direction: column;
}
#derived_columns .derived-row {
display: flex;
margin-bottom: 5px;
}
#derived_columns .derived-row input[type="text"] {
margin-left: 5px;
flex: 1;
}
#derived_columns .derived-row button.remove {
margin-left: 5px;
width: 20px;
flex: 0 0 auto;
padding: 0;
text-align: center;
line-height: 1;
}
#derived_columns textarea {
width: 100%;
box-sizing: border-box;
}
#filters .filter-row { display: flex; margin-bottom: 5px; }
#filters .filter-row .f-col { flex: 1; }
#filters .filter-row .f-op {
margin-left: 5px;
width: fit-content;
flex: 0 0 auto;
}
.chip-input input {
border: none;
flex: 1;
min-width: 60px;
margin: 2px;
outline: none;
}
.chip-box { position: relative; }
    .chip-input { display: flex; flex-wrap: wrap; border: 1px solid #ccc; padding: 2px; min-height: 24px; }
    .chip { background: #eee; border: 1px solid #999; padding: 2px 4px; margin: 2px; border-radius: 3px; display: flex; align-items: center; }
.chip .x { margin-left: 4px; cursor: pointer; }
    .chip-copy { margin-left: 4px; cursor: pointer; background: none; border: none; }
    .chip-dropdown { position: absolute; left: 0; right: 0; top: 100%; background: white; border: 1px solid #ccc; max-height: 120px; overflow-y: auto; z-index: 10; display: none; }
.chip-dropdown div { padding: 2px 4px; cursor: pointer; }
.chip-dropdown div.highlight { background: #bde4ff; }
.rel-box { position: relative; display: flex; }
    .rel-dropdown { position: absolute; left: 0; right: 0; top: 100%; background: white; border: 1px solid #ccc; z-index: 10; display: none; }
.rel-dropdown div { padding: 2px 4px; cursor: pointer; }
.rel-dropdown div:hover { background: #bde4ff; }
.dropdown { position: relative; display: inline-block; }
.dropdown-display {
border: 1px solid #ccc;
padding: 2px 18px 2px 4px;
cursor: pointer;
min-width: 80px;
position: relative;
}
.dropdown-display::after {
content: '\25BC';
position: absolute;
right: 4px;
pointer-events: none;
}
    .dropdown-menu { position: absolute; left: 0; right: 0; top: 100%; background: white; border: 1px solid #ccc; z-index: 10; max-height: 160px; overflow-y: auto; display: none; }
    .dropdown-menu input { width: 100%; box-sizing: border-box; padding: 2px 4px; border: none; border-bottom: 1px solid #ccc; }
.dropdown-menu div { padding: 2px 4px; cursor: pointer; }
.dropdown-menu div.selected { background: #bde4ff; }
.dropdown-menu .option:hover { background: #eee; }
.dropdown-menu input::placeholder { color: #999; }
#filters .filter button.remove {
margin-left: 5px;
width: 20px;
flex: 0 0 auto;
padding: 0;
text-align: center;
line-height: 1;
}
#filters h4 { margin: 0 0 5px 0; }
table { border-collapse: collapse; min-width: 100%; }
th, td { border: 1px solid #ccc; padding: 4px; box-sizing: border-box; }
th { text-align: left; cursor: pointer; position: relative; }
th.sorted { color: blue; }
tr:nth-child(even) td { background: #f9f9f9; }
tr.selected td { background: #bde4ff !important; }
tr:hover:not(.selected) td { background: #eee; }
#column_actions {
text-align: right;
margin-bottom: 5px;
}
#column_actions a {
margin-left: 5px;
}
.col-group-header {
overflow: hidden;
}
.col-group-header .links {
float: right;
}
.col-group-header .links a {
margin-left: 5px;
}
#ts-container {
display: flex;
}
#legend {
width: 150px;
flex: 0 0 150px;
margin-right: 10px;
overflow-y: auto;
}
.legend-group {
margin-bottom: 4px;
}
.legend-header {
font-weight: normal;
}
.legend-item {
display: flex;
justify-content: space-between;
padding-left: 8px;
}
.legend-value {
margin-left: 4px;
}
#chart-wrapper {
flex: 1;
}
.legend-item.highlight {
background: #ddd;
}
#chart text.tick-label {
font-size: 10px;
user-select: none;
}
#chart text.tick-label.rotated {
text-anchor: end;
}
/* Column resizer removed */
</style>
</head>
<body>
<div id="header">sample.csv - <select id="table"></select> <select id="graph_t
ype"><option value="samples">Samples</option><option value="table">Table</option
><option value="timeseries">Time Series</option></select></div>
<div id="content">
<div id="sidebar">
<div id="tabs">
<button class="tab active" data-tab="settings">View Settings</button>
<button class="tab" data-tab="columns">Columns</button>
<button id="dive" onclick="dive()">Dive</button>
</div>
<div id="settings" class="tab-content active">
<div class="field">
<label>Start<span class="help" title="Sets the start/end of the time r
ange to query. Can be any kind of datetime string. For example: 'April 23, 2014'
or 'yesterday'.">[?]</span></label>
<div class="rel-box">
<input id="start" type="text" />
<button type="button" class="rel-btn" data-target="start-select">	
660;</button>
<div id="start-select" class="rel-dropdown" data-input="start">
<div data-value="-1 hour">-1 hour</div>
<div data-value="-3 hours">-3 hours</div>
<div data-value="-12 hours">-12 hours</div>
<div data-value="-1 day">-1 day</div>
<div data-value="-3 days">-3 days</div>
<div data-value="-1 week">-1 week</div>
<div data-value="-1 fortnight">-1 fortnight</div>
<div data-value="-30 days">-30 days</div>
<div data-value="-90 days">-90 days</div>
root@29cb09005028:/workspace/scubaduck# sed -n '200,400p' scubaduck/static/index.html
<div data-value="-90 days">-90 days</div>
</div>
</div>
</div>
<div class="field">
<label>End<span class="help" title="Sets the start/end of the time ran
ge to query. Can be any kind of datetime string. For example: 'April 23, 2014' o
r 'yesterday'.">[?]</span></label>
<div class="rel-box">
<input id="end" type="text" />
<button type="button" class="rel-btn" data-target="end-select">φ
0;</button>
<div id="end-select" class="rel-dropdown" data-input="end">
<div data-value="now">now</div>
<div data-value="-1 hour">-1 hour</div>
<div data-value="-3 hours">-3 hours</div>
<div data-value="-12 hours">-12 hours</div>
<div data-value="-1 day">-1 day</div>
<div data-value="-3 days">-3 days</div>
<div data-value="-1 week">-1 week</div>
<div data-value="-1 fortnight">-1 fortnight</div>
<div data-value="-30 days">-30 days</div>
<div data-value="-90 days">-90 days</div>
</div>
</div>
</div>
<div class="field">
<label>Order By<span class="help" title="Choose a column to sort resul
ts by.">[?]</span></label>
<select id="order_by"></select>
<button id="order_dir" type="button">ASC \u25B2</button>
</div>
<div class="field">
<label>Limit<span class="help" title="Choose the maximum number of res
ults to show in the chart after any aggregations have been applied. For example,
a limit of 10 will show no more than 10 rows for a table, etc.">[?]</span></lab
el>
<input id="limit" type="number" value="100" />
</div>
<div id="x_axis_field" class="field" style="display:none;">
<label>X-axis</label>
<select id="x_axis"></select>
</div>
<div id="granularity_field" class="field" style="display:none;">
<label>Granularity</label>
<select id="granularity">
<option>Auto</option>
<option>Fine</option>
<option>1 second</option>
<option>5 seconds</option>
<option>10 seconds</option>
<option>30 seconds</option>
<option>1 minute</option>
<option>4 minutes</option>
<option>5 minutes</option>
<option>10 minutes</option>
<option>15 minutes</option>
<option>30 minutes</option>
<option>1 hour</option>
<option>3 hours</option>
<option>6 hours</option>
<option>1 day</option>
<option>1 week</option>
<option>30 days</option>
</select>
</div>
<div id="fill_field" class="field" style="display:none;">
<label>Fill Missing Buckets</label>
<select id="fill">
<option value="0">Fill with 0 (Per Series)</option>
<option value="connect">Connect (Per Series)</option>
<option value="blank">Leave blank</option>
</select>
</div>
<div id="group_by_field" class="field" style="display:none;">
<label>Group By</label>
<div class="chip-box">
<div class="chip-input">
<input id="group_by" class="f-val" type="text">
<button type="button" class="chip-copy">⎘</button>
</div>
<div class="chip-dropdown"></div>
</div>
</div>
<div id="aggregate_field" class="field" style="display:none;">
<label>Aggregate</label>
<select id="aggregate">
<option>Avg</option>
<option>Count</option>
<option>Sum</option>
<option>Min</option>
<option>Max</option>
<option>Count Distinct</option>
<option>p5</option>
<option>p25</option>
<option>p50</option>
<option>p70</option>
<option>p75</option>
<option>p90</option>
<option>p95</option>
<option>p99</option>
<option>p99.9</option>
<option>p99.99</option>
</select>
</div>
<div id="show_hits_field" class="field" style="display:none;">
<label>Show Hits</label>
<input id="show_hits" type="checkbox" checked>
</div>
<div id="filters">
<h4>Filters<span class="help" title="You can create as many filters as you want. You can either write a filter using a UI or manual SQL. In the UI, filter consists of a column name, a relation (e.g., =, !=, <, >) and then a text field. The text field is a token input. It accepts multiple tokens for = relation, in which case we match using an OR for all options.">[?]</span></h4>
<div id="filter_list"></div>
<button id="add_filter" type="button" onclick="addFilter()">Add Filter
</button>
</div>
<div id="query_info" style="margin-top:10px;"></div>
</div>
<div id="columns" class="tab-content">
<div id="column_actions">
<a id="columns_all" href="#">All</a>
<a id="columns_none" href="#">None</a>
</div>
<div id="column_groups"></div>
<div id="derived_columns">
<h4>Derived Columns</h4>
<div id="derived_list"></div>
<button id="add_derived" type="button" onclick="addDerived()">Add Deri
ved</button>
</div>
</div>
</div>
<div id="sidebar-resizer"></div>
<div id="view">
<table id="results"></table>
</div>
</div>
<script src="/js/chip_input.js"></script>
<script src="/js/timeseries_chart.js"></script>
<script>
const allColumns = [];
const baseColumns = [];
const columnTypes = {};
const stringColumns = [];
const baseStringColumns = [];
const integerColumns = [];
const baseIntegerColumns = [];
const timeColumns = [];
const baseTimeColumns = [];
const derivedColumns = [];
let selectedColumns = [];
let displayType = 'samples';
let groupBy = {chips: [], addChip: () => {}, renderChips: () => {}};
function initDropdown(select) {
const wrapper = document.createElement('div');
wrapper.className = 'dropdown';
if (select.classList.contains('f-col')) {
wrapper.classList.add('f-col');
}
select.parentNode.insertBefore(wrapper, select);
wrapper.appendChild(select);
select.style.display = 'none';
const disp = document.createElement('div');
disp.className = 'dropdown-display';
function updateDisplay() {
const opt = select.options[select.selectedIndex];
disp.textContent = opt ? opt.textContent : '';
}
updateDisplay();
wrapper.appendChild(disp);
const menu = document.createElement('div');
menu.className = 'dropdown-menu';
const search = document.createElement('input');
search.placeholder = 'Search';
menu.appendChild(search);
const list = document.createElement('div');
menu.appendChild(list);
wrapper.appendChild(menu);
function close() {
menu.style.display = 'none';
}
function open() {
renderOptions();
menu.style.display = 'block';
search.focus();
}
disp.addEventListener('click', () => {
if (menu.style.display === 'block') {
close();
} else {
open();
}
});
document.addEventListener('click', e => {
if (!wrapper.contains(e.target)) {
close();
}
});
function renderOptions() {
const q = search.value.toLowerCase();
list.innerHTML = '';
Array.from(select.options).forEach(o => {
if (!o.textContent.toLowerCase().includes(q)) return;
const div = document.createElement('div');
div.className = 'option';
Let me see if I can find anything related to timeColumns in the code.
root@29cb09005028:/workspace/scubaduck# grep -n "timeColumns" -n scubaduck/stati
c/index.html
337:const timeColumns = [];
510: timeColumns.length = 0;
530: timeColumns.push(c.name);
548: timeColumns.forEach(name => {
863: timeColumns.splice(0, timeColumns.length, ...baseTimeColumns);
root@29cb09005028:/workspace/scubaduck# sed -n '336,560p' scubaduck/static/index.html
const baseIntegerColumns = [];
const timeColumns = [];
const baseTimeColumns = [];
const derivedColumns = [];
let selectedColumns = [];
let displayType = 'samples';
let groupBy = {chips: [], addChip: () => {}, renderChips: () => {}};
function initDropdown(select) {
const wrapper = document.createElement('div');
wrapper.className = 'dropdown';
if (select.classList.contains('f-col')) {
wrapper.classList.add('f-col');
}
select.parentNode.insertBefore(wrapper, select);
wrapper.appendChild(select);
select.style.display = 'none';
const disp = document.createElement('div');
disp.className = 'dropdown-display';
function updateDisplay() {
const opt = select.options[select.selectedIndex];
disp.textContent = opt ? opt.textContent : '';
}
updateDisplay();
wrapper.appendChild(disp);
const menu = document.createElement('div');
menu.className = 'dropdown-menu';
const search = document.createElement('input');
search.placeholder = 'Search';
menu.appendChild(search);
const list = document.createElement('div');
menu.appendChild(list);
wrapper.appendChild(menu);
function close() {
menu.style.display = 'none';
}
function open() {
renderOptions();
menu.style.display = 'block';
search.focus();
}
disp.addEventListener('click', () => {
if (menu.style.display === 'block') {
close();
} else {
open();
}
});
document.addEventListener('click', e => {
if (!wrapper.contains(e.target)) {
close();
}
});
function renderOptions() {
const q = search.value.toLowerCase();
list.innerHTML = '';
Array.from(select.options).forEach(o => {
if (!o.textContent.toLowerCase().includes(q)) return;
const div = document.createElement('div');
div.className = 'option';
if (q) {
const text = o.textContent;
const idx = text.toLowerCase().indexOf(q);
if (idx !== -1) {
div.innerHTML =
text.slice(0, idx) +
'<u>' +
text.slice(idx, idx + q.length) +
'</u>' +
text.slice(idx + q.length);
} else {
div.textContent = text;
}
} else {
div.textContent = o.textContent;
}
if (o.value === select.value) div.classList.add('selected');
div.addEventListener('mousedown', evt => {
evt.preventDefault();
select.value = o.value;
select.dispatchEvent(new Event('change'));
updateDisplay();
close();
});
list.appendChild(div);
});
}
search.addEventListener('input', renderOptions);
select.addEventListener('change', updateDisplay);
}
// Sidebar resizing
const sidebar = document.getElementById('sidebar');
const sidebarResizer = document.getElementById('sidebar-resizer');
let sidebarWidth = parseInt(localStorage.getItem('sidebarWidth') || 450, 10);
sidebar.style.width = sidebarWidth + 'px';
let sidebarResize = false;
function startSidebarDrag(e) {
e.preventDefault();
sidebarResize = true;
document.addEventListener('mousemove', onSidebarDrag);
document.addEventListener('mouseup', stopSidebarDrag);
}
function onSidebarDrag(e) {
if (!sidebarResize) return;
sidebarWidth = Math.max(200, e.clientX - sidebar.getBoundingClientRect().left);
sidebar.style.width = sidebarWidth + 'px';
}
function stopSidebarDrag() {
document.removeEventListener('mousemove', onSidebarDrag);
document.removeEventListener('mouseup', stopSidebarDrag);
sidebarResize = false;
localStorage.setItem('sidebarWidth', sidebarWidth);
}
sidebarResizer.addEventListener('mousedown', startSidebarDrag);
let orderDir = 'ASC';
const orderDirBtn = document.getElementById('order_dir');
const graphTypeSel = document.getElementById('graph_type');
function updateOrderDirButton() {
orderDirBtn.textContent = orderDir + (orderDir === 'ASC' ? ' \u25B2' : ' \u25BC');
}
function updateDisplayTypeUI() {
const showTable = graphTypeSel.value === 'table';
const showTS = graphTypeSel.value === 'timeseries';
document.getElementById('group_by_field').style.display = showTable || showTS ? 'flex' : 'none';
document.getElementById('aggregate_field').style.display = showTable || showTS ? 'flex' : 'none';
document.getElementById('show_hits_field').style.display = showTable ? 'flex' : 'none';
document.getElementById('x_axis_field').style.display = showTS ? 'flex' : 'none';
document.getElementById('granularity_field').style.display = showTS ? 'flex' : 'none';
document.getElementById('fill_field').style.display = showTS ? 'flex' : 'none';
document.querySelectorAll('#column_groups .col-group').forEach(g => {
if (g.querySelector('.col-group-header').textContent.startsWith('Strings')) {
g.style.display = showTable || showTS ? 'none' : '';
}
});
if (showTS) {
const lim = document.getElementById('limit');
if (!lim.dataset.setByUser) {
lim.value = '7';
}
document.querySelectorAll('#column_groups input').forEach(cb => {
if (isTimeColumn(cb.value) || isStringColumn(cb.value)) {
cb.checked = false;
}
});
document.getElementById('order_by').value = '';
updateSelectedColumns();
}
displayType = graphTypeSel.value;
}
orderDirBtn.addEventListener('click', () => {
orderDir = orderDir === 'ASC' ? 'DESC' : 'ASC';
updateOrderDirButton();
});
updateOrderDirButton();
graphTypeSel.addEventListener('change', updateDisplayTypeUI);
function loadColumns(table) {
return fetch('/api/columns?table=' + encodeURIComponent(table)).then(r => r.json()).then(cols => {
const orderSelect = document.getElementById('order_by');
const xAxisSelect = document.getElementById('x_axis');
const groupsEl = document.getElementById('column_groups');
orderSelect.innerHTML = '';
xAxisSelect.innerHTML = '';
groupsEl.innerHTML = '';
allColumns.length = 0;
stringColumns.length = 0;
integerColumns.length = 0;
timeColumns.length = 0;
baseColumns.length = 0;
baseStringColumns.length = 0;
baseIntegerColumns.length = 0;
baseTimeColumns.length = 0;
for (const k in columnTypes) delete columnTypes[k];
const groups = {
time: {name: 'Time', cols: [], ul: null},
integer: {name: 'Integers', cols: [], ul: null},
string: {name: 'Strings', cols: [], ul: null},
};
cols.forEach(c => {
const t = c.type.toUpperCase();
columnTypes[c.name] = c.type;
allColumns.push(c.name);
baseColumns.push(c.name);
let g = 'string';
if (t.includes('INT')) g = 'integer';
if (t.includes('TIMESTAMP')) {
g = 'time';
timeColumns.push(c.name);
baseTimeColumns.push(c.name);
}
if (g === 'string') {
stringColumns.push(c.name);
baseStringColumns.push(c.name);
} else {
integerColumns.push(c.name);
baseIntegerColumns.push(c.name);
}
groups[g].cols.push(c.name);
if (g !== 'string') {
const o = document.createElement('option');
o.value = c.name;
o.textContent = c.name;
orderSelect.appendChild(o);
}
});
timeColumns.forEach(name => {
const o = document.createElement('option');
o.value = name;
o.textContent = name;
xAxisSelect.appendChild(o);
});
Object.keys(groups).forEach(key => {
const g = groups[key];
const div = document.createElement('div');
div.className = 'col-group';
const header = document.createElement('div');
header.className = 'col-group-header';
header.appendChild(document.createTextNode(g.name + ': '));
const links = document.createElement('span');
links.className = 'links';
const allBtn = document.createElement('a');
allBtn.href = '#';
allBtn.textContent = 'All';
const noneBtn = document.createElement('a');
noneBtn.href = '#';
noneBtn.textContent = 'None';
links.appendChild(allBtn);
links.appendChild(noneBtn);
header.appendChild(links);
div.appendChild(header);
const ul = document.createElement('ul');
g.ul = ul;
g.cols.forEach(name => {
const li = document.createElement('li');
const label = document.createElement('label');
const cb = document.createElement('input');
cb.type = 'checkbox';
cb.value = name;
cb.checked = true;
cb.addEventListener('change', updateSelectedColumns);
label.appendChild(cb);
label.appendChild(document.createTextNode(' ' + name));
li.appendChild(label);
ul.appendChild(li);
});
allBtn.addEventListener('click', e => {
e.preventDefault();
ul.querySelectorAll('input').forEach(cb => (cb.checked = true));
updateSelectedColumns();
});
noneBtn.addEventListener('click', e => {
e.preventDefault();
ul.querySelectorAll('input').forEach(cb => (cb.checked = false));
updateSelectedColumns();
});
div.appendChild(ul);
groupsEl.appendChild(div);
});
document.getElementById('columns_all').addEventListener('click', e => {
e.preventDefault();
groupsEl.querySelectorAll('input').forEach(cb => (cb.checked = true));
updateSelectedColumns();
});
document.getElementById('columns_none').addEventListener('click', e => {
e.preventDefault();
groupsEl.querySelectorAll('input').forEach(cb => (cb.checked = false));
updateSelectedColumns();
});
updateSelectedColumns();
groupBy = document.getElementById('group_by').closest('.field');
initChipInput(groupBy, typed =>
allColumns.filter(c => c.toLowerCase().includes(typed.toLowerCase()))
);
initDropdown(orderSelect);
initDropdown(document.getElementById('aggregate'));
});
}
let columnsInitialized = false;
fetch('/api/tables').then(r => r.json()).then(tables => {
tables.forEach(t => {
const o = document.createElement('option');
o.value = t;
o.textContent = t;
document.getElementById('table').appendChild(o);
});
const table = parseSearch().table || tables[0];
document.getElementById('table').value = table;
loadColumns(table).then(() => {
updateDisplayTypeUI();
addFilter();
initFromUrl();
columnsInitialized = true;
});
document.getElementById('table').addEventListener('change', () => {
loadColumns(document.getElementById('table').value).then(() => {
if (columnsInitialized) {
applyParams(parseSearch());
}
});
});
});
document.querySelectorAll('#tabs .tab').forEach(btn => {
btn.addEventListener('click', () => {
document.querySelectorAll('#tabs .tab').forEach(t => t.classList.remove('active'));
document.querySelectorAll('.tab-content').forEach(c => c.classList.remove('active'));
btn.classList.add('active');
document.getElementById(btn.dataset.tab).classList.add('active');
});
});
document.querySelectorAll('.rel-btn').forEach(btn => {
btn.addEventListener('click', () => {
const dd = document.getElementById(btn.dataset.target);
const show = dd.style.display === 'none' || dd.style.display === '';
document.querySelectorAll('.rel-dropdown').forEach(d => (d.style.display = 'none'));
dd.style.display = show ? 'block' : 'none';
});
});
document.querySelectorAll('.rel-dropdown div').forEach(opt => {
opt.addEventListener('click', () => {
const box = opt.closest('.rel-box');
const input = box.querySelector('input');
input.value = opt.dataset.value || opt.textContent;
opt.parentElement.style.display = 'none';
});
});
document.addEventListener('click', e => {
document.querySelectorAll('.rel-dropdown').forEach(dd => {
if (!dd.parentElement.contains(e.target)) dd.style.display = 'none';
});
});
function updateSelectedColumns() {
const base = allColumns.filter(name => {
const cb = document.querySelector(`#column_groups input[value="${name}"]`);
if (!cb || !cb.checked) return false;
if (graphTypeSel.value === 'table' && isStringColumn(name)) return false;
return true;
});
if (graphTypeSel.value === 'table' || graphTypeSel.value === 'timeseries') {
selectedColumns = groupBy.chips.slice();
if (document.getElementById('show_hits').checked) selectedColumns.push('Hits');
base.forEach(c => {
if (!selectedColumns.includes(c)) selectedColumns.push(c);
});
derivedColumns.forEach(dc => {
if (dc.include && !selectedColumns.includes(dc.name)) selectedColumns.push(dc.name);
});
} else {
selectedColumns = base.slice();
derivedColumns.forEach(dc => {
if (dc.include) selectedColumns.push(dc.name);
});
}
}
function isStringColumn(name) {
const t = (columnTypes[name] || '').toUpperCase();
return t.includes('CHAR') || t.includes('STRING') || t.includes('VARCHAR');
}
function isIntegerColumn(name) {
const t = (columnTypes[name] || '').toUpperCase();
return t.includes('INT');
}
function isTimeColumn(name) {
const t = (columnTypes[name] || '').toUpperCase();
return t.includes('TIMESTAMP');
}
function formatNumber(val) {
if (typeof val !== 'number') val = Number(val);
if (Number.isNaN(val)) return '';
if (val === 0) return '0';
const abs = Math.abs(val);
if (abs > 999.999) {
const units = [
{n: 1e12, s: 'T'},
{n: 1e9, s: 'B'},
{n: 1e6, s: 'M'},
{n: 1e3, s: 'K'},
];
for (const u of units) {
if (abs >= u.n) {
return (val / u.n).toFixed(2) + ' ' + u.s;
}
}
}
if (abs < 0.0005) return '0.000';
return val.toFixed(3);
}
function addFilter() {
const container = document.createElement('div');
container.className = 'filter';
container.innerHTML = `
<div class="filter-row">
<select class="f-col"></select>
<select class="f-op"></select>
<button type="button" class="remove" onclick="this.closest('.filter').remo
ve()">✖</button>
</div>
<div class="chip-box">
<div class="chip-input">
<input class="f-val" type="text">
<button type="button" class="chip-copy">⎘</button>
</div>
<div class="chip-dropdown"></div>
</div>
`;
const colSel = container.querySelector('.f-col');
colSel.innerHTML = allColumns.map(c => `<option value="${c}">${c}</option>`).join('');
initDropdown(colSel);
function populateOps() {
const opSel = container.querySelector('.f-op');
const col = colSel.value;
const ops = isStringColumn(col)
? [
['=', '='],
['!=', '!='],
['~', 'matches regex'],
['!~', 'not matches regex'],
['contains', 'contains'],
['!contains', 'not contains'],
['empty', 'empty'],
['!empty', 'not empty'],
['LIKE', 'like'],
]
: [
['=', '='],
['!=', '!='],
['<', '<'],
['>', '>'],
];
root@29cb09005028:/workspace/scubaduck# sed -n '780,1020p' scubaduck/static/index.html
opSel.innerHTML = ops.map(o => `<option value="${o[0]}">${o[1]}</option>`).join('');
updateInputVis();
}
function updateInputVis() {
const op = container.querySelector('.f-op').value;
const box = container.querySelector('.chip-box');
box.style.display = op === 'empty' || op === '!empty' ? 'none' : 'block';
}
colSel.addEventListener('change', populateOps);
container.querySelector('.f-op').addEventListener('change', updateInputVis);
populateOps();
document.getElementById('filter_list').appendChild(container);
initChipInput(container, (typed, el) => {
const colEl = el.querySelector('.f-col select') || el.querySelector('.f-col');
if (!colEl) return [];
const col = colEl.value;
if (!isStringColumn(col)) return [];
return fetch(`/api/samples?column=${encodeURIComponent(col)}&q=${encodeURIComponent(typed)}`)
.then(r => r.json());
});
}
function nextDerivedName() {
let n = 1;
while (true) {
const name = `derived_${n}`;
if (!derivedColumns.some(d => d.name === name) && !allColumns.includes(name)) return name;
n++;
}
}
function addDerived(data = {}) {
const container = document.createElement('div');
container.className = 'derived';
container.innerHTML = `
<div class="derived-row">
<select class="d-type">
<option value="aggregated">Aggregated</option>
<option value="string">String</option>
<option value="numeric">Numeric</option>
</select>
<input class="d-name" type="text">
<button type="button" class="remove" onclick="removeDerived(this)">✖</butt
on>
</div>
<label><input type="checkbox" class="d-use" checked> Include in Query</label
>
<textarea class="d-expr" rows="2"></textarea>
`;
document.getElementById('derived_list').appendChild(container);
const obj = {
type: data.type || 'string',
name: data.name || nextDerivedName(),
expr: data.expr || '',
include: data.include !== undefined ? data.include : true,
el: container
};
container.querySelector('.d-type').value = obj.type;
container.querySelector('.d-name').value = obj.name;
container.querySelector('.d-expr').value = obj.expr;
container.querySelector('.d-use').checked = obj.include;
['change','input'].forEach(evt => {
container.addEventListener(evt, refreshDerivedColumns);
});
derivedColumns.push(obj);
refreshDerivedColumns();
}
function removeDerived(btn) {
const el = btn.closest('.derived');
const idx = derivedColumns.findIndex(d => d.el === el);
if (idx !== -1) {
derivedColumns.splice(idx, 1);
}
el.remove();
refreshDerivedColumns();
}
function refreshDerivedColumns() {
allColumns.splice(0, allColumns.length, ...baseColumns);
stringColumns.splice(0, stringColumns.length, ...baseStringColumns);
integerColumns.splice(0, integerColumns.length, ...baseIntegerColumns);
timeColumns.splice(0, timeColumns.length, ...baseTimeColumns);
derivedColumns.forEach(d => {
d.type = d.el.querySelector('.d-type').value;
d.name = d.el.querySelector('.d-name').value;
d.expr = d.el.querySelector('.d-expr').value;
d.include = d.el.querySelector('.d-use').checked;
allColumns.push(d.name);
columnTypes[d.name] = d.type;
if (d.type === 'string') {
stringColumns.push(d.name);
} else {
integerColumns.push(d.name);
}
});
updateSelectedColumns();
}
let lastQueryTime = 0;
let queryStart = 0;
function dive(push=true) {
const params = collectParams();
if (push) {
history.pushState(params, '', paramsToSearch(params));
}
const payload = Object.assign({}, params);
const dcMap = {};
(params.derived_columns || []).forEach(d => {
if (d.include) dcMap[d.name] = d.expr;
});
payload.derived_columns = dcMap;
const view = document.getElementById('view');
view.innerHTML = '<p>Loading...</p>';
...
.catch(err => {
showError(err);
});
}
function collectParams() {
updateSelectedColumns();
const payload = {
table: document.getElementById('table').value,
start: document.getElementById('start').value,
end: document.getElementById('end').value,
order_by: document.getElementById('order_by').value,
order_dir: orderDir,
limit: parseInt(document.getElementById('limit').value, 10),
columns: selectedColumns.filter(c =>
c !== 'Hits' && !derivedColumns.some(dc => dc.name === c)
),
graph_type: graphTypeSel.value,
filters: Array.from(document.querySelectorAll('#filters .filter')).map(f =>
{
const chips = f.chips || [];
const op = f.querySelector('.f-op').value;
let value = null;
if (op !== 'empty' && op !== '!empty') {
value = chips.length === 0 ? null : (chips.length === 1 ? chips[0] : chips);
}
const colSel = f.querySelector('.f-col select') || f.querySelector('.f-col');
return {column: colSel.value, op, value};
}),
derived_columns: Array.from(document.querySelectorAll('#derived_list .derived')).map(d => ({
type: d.querySelector('.d-type').value,
name: d.querySelector('.d-name').value,
expr: d.querySelector('.d-expr').value,
include: d.querySelector('.d-use').checked,
}))
};
if (graphTypeSel.value === 'table' || graphTypeSel.value === 'timeseries') {
payload.group_by = groupBy.chips || [];
payload.aggregate = document.getElementById('aggregate').value;
payload.show_hits = document.getElementById('show_hits').checked;
}
if (graphTypeSel.value === 'timeseries') {
payload.x_axis = document.getElementById('x_axis').value;
payload.granularity = document.getElementById('granularity').value;
payload.fill = document.getElementById('fill').value;
}
return payload;
}
function paramsToSearch(params) {
const sp = new URLSearchParams();
if (params.table) sp.set('table', params.table);
if (params.start) sp.set('start', params.start);
if (params.end) sp.set('end', params.end);
if (params.order_by) sp.set('order_by', params.order_by);
if (params.order_dir) sp.set('order_dir', params.order_dir);
if (params.limit !== null && params.limit !== undefined) sp.set('limit', params.limit);
if (params.columns && params.columns.length) sp.set('columns', params.columns.join(','));
if (params.filters && params.filters.length) sp.set('filters', JSON.stringify(params.filters));
if (params.derived_columns && params.derived_columns.length) sp.set('derived_columns', JSON.stringify(params.derived_columns));
if (params.graph_type) sp.set('graph_type', params.graph_type);
if (params.graph_type === 'table' || params.graph_type === 'timeseries') {
if (params.group_by && params.group_by.length) sp.set('group_by', params.group_by.join(','));
if (params.aggregate) sp.set('aggregate', params.aggregate);
if (params.show_hits) sp.set('show_hits', '1');
}
if (params.graph_type === 'timeseries') {
if (params.x_axis) sp.set('x_axis', params.x_axis);
if (params.granularity) sp.set('granularity', params.granularity);
if (params.fill) sp.set('fill', params.fill);
}
const qs = sp.toString();
return qs ? '?' + qs : '';
}
function applyParams(params) {
if (params.table) document.getElementById('table').value = params.table;
document.getElementById('start').value = params.start || '';
document.getElementById('end').value = params.end || '';
if (params.order_by) {
document.getElementById('order_by').value = params.order_by;
}
orderDir = params.order_dir || 'ASC';
updateOrderDirButton();
if (params.limit !== undefined && params.limit !== null) {
document.getElementById('limit').value = params.limit;
}
graphTypeSel.value = params.graph_type || 'samples';
updateDisplayTypeUI();
if (params.x_axis) document.getElementById('x_axis').value = params.x_axis;
if (params.granularity) document.getElementById('granularity').value = params.granularity;
if (params.fill) document.getElementById('fill').value = params.fill;
if (params.group_by) {
groupBy.chips.splice(0, groupBy.chips.length, ...params.group_by);
groupBy.renderChips();
}
if (params.aggregate) document.getElementById('aggregate').value = params.aggregate;
document.getElementById('show_hits').checked = params.show_hits ?? true;
document.querySelectorAll('#column_groups input').forEach(cb => {
cb.checked = !params.columns || params.columns.includes(cb.value);
});
updateSelectedColumns();
const dlist = document.getElementById('derived_list');
dlist.innerHTML = '';
derivedColumns.splice(0, derivedColumns.length);
if (params.derived_columns && params.derived_columns.length) {
params.derived_columns.forEach(dc => addDerived(dc));
}
refreshDerivedColumns();
const list = document.getElementById('filter_list');
list.innerHTML = '';
if (params.filters && params.filters.length) {
params.filters.forEach(f => {
addFilter();
const el = list.lastElementChild;
const colSel = el.querySelector('.f-col select') || el.querySelector('.f-col');
colSel.value = f.column;
colSel.dispatchEvent(new Event('change'));
el.querySelector('.f-op').value = f.op;
el.querySelector('.f-op').dispatchEvent(new Event('change'));
if (f.value !== null && f.op !== 'empty' && f.op !== '!empty') {
const values = Array.isArray(f.value) ? f.value : [f.value];
values.forEach(v => el.addChip(v));
el.renderChips();
}
});
} else {
addFilter();
}
}
function parseSearch() {
const sp = new URLSearchParams(window.location.search);
const params = {};
if (sp.has('table')) params.table = sp.get('table');
if (sp.has('start')) params.start = sp.get('start');
if (sp.has('end')) params.end = sp.get('end');
if (sp.has('order_by')) params.order_by = sp.get('order_by');
if (sp.has('order_dir')) params.order_dir = sp.get('order_dir');
if (sp.has('limit')) params.limit = parseInt(sp.get('limit'), 10);
if (sp.has('columns')) params.columns = sp.get('columns').split(',').filter(c => c);
if (sp.has('filters')) {
try { params.filters = JSON.parse(sp.get('filters')); } catch(e) { params.filters = []; }
}
if (sp.has('graph_type')) params.graph_type = sp.get('graph_type');
if (sp.has('group_by')) params.group_by = sp.get('group_by').split(',').filter(c => c);
if (sp.has('aggregate')) params.aggregate = sp.get('aggregate');
if (sp.has('show_hits')) params.show_hits = sp.get('show_hits') === '1';
if (sp.has('x_axis')) params.x_axis = sp.get('x_axis');
if (sp.has('granularity')) params.granularity = sp.get('granularity');
if (sp.has('fill')) params.fill = sp.get('fill');
if (sp.has('derived_columns')) {
try { params.derived_columns = JSON.parse(sp.get('derived_columns')); } catch(e) { params.derived_columns = []; }
}
return params;
}
function initFromUrl() {
const params = parseSearch();
history.replaceState(params, '', paramsToSearch(params));
applyParams(params);
dive(false);
}
window.addEventListener('popstate', e => {
const params = e.state || parseSearch();
applyParams(params);
dive(false);
});
let originalRows = [];
let sortState = {index: null, dir: null};
function renderTable(rows) {
const table = document.getElementById('results');
table.innerHTML = '';
if (rows.length === 0) return;
let hitsIndex = selectedColumns.indexOf('Hits');
let totalHits = 0;
if (hitsIndex !== -1) {
totalHits = rows.reduce((s, r) => s + Number(r[hitsIndex]), 0);
}
const header = document.createElement('tr');
selectedColumns.forEach((col, i) => {
const th = document.createElement('th');
let label = col;
if (
displayType === 'table' &&
col !== 'Hits' &&
!(groupBy.chips || []).includes(col)
) {
const agg = document.getElementById('aggregate').value.toLowerCase();
label += ` (${agg})`;
}
th.textContent = label;
th.dataset.index = i;
th.addEventListener('click', handleSort);
if (sortState.index === i) {
th.classList.add('sorted');
th.textContent =
label + (sortState.dir === 'desc' ? ' \u25BC' : ' \u25B2');
}
if (!isStringColumn(col)) th.style.textAlign = 'right';
header.appendChild(th);
});
table.appendChild(header);
rows.forEach(row => {
const tr = document.createElement('tr');
tr.addEventListener('click', () => {
const wasSelected = tr.classList.contains('selected');
document
.querySelectorAll('#results tr.selected')
.forEach(el => el.classList.remove('selected'));
if (!wasSelected) {
tr.classList.add('selected');
}
});
row.forEach((v, i) => {
const col = selectedColumns[i];
const td = document.createElement('td');
if (isTimeColumn(col)) {
const d = new Date(v);
td.textContent = d.toLocaleString('en-US', {
weekday: 'short',
month: 'short',
day: 'numeric',
year: 'numeric',
hour: 'numeric',
minute: 'numeric',
second: 'numeric',
hour12: true,
timeZoneName: 'short'
});
} else {
if (col === 'Hits') {
const pct = totalHits ? ((v / totalHits) * 100).toFixed(1) : '0';
td.textContent = `${formatNumber(v)} (${pct}%)`;
} else {
td.textContent = isStringColumn(col) ? v : formatNumber(v);
}
}
td.style.textAlign = isStringColumn(col) ? 'left' : 'right';
tr.appendChild(td);
});
table.appendChild(tr);
});
// ensure table does not overflow unless necessary
const view = document.getElementById('view');
if (table.scrollWidth <= view.clientWidth) {
table.style.width = '100%';
}
}
function handleSort(e) {
const idx = parseInt(e.target.dataset.index, 10);
if (sortState.index !== idx) {
sortState.index = idx;
sortState.dir = 'asc';
} else if (sortState.dir === 'asc') {
sortState.dir = 'desc';
} else if (sortState.dir === 'desc') {
sortState.index = null;
sortState.dir = null;
} else {
sortState.dir = 'asc';
}
let rows = originalRows.slice();
if (sortState.index !== null) {
rows.sort((a, b) => {
const va = a[sortState.index];
const vb = b[sortState.index];
if (va === vb) return 0;
if (sortState.dir === 'desc') return va < vb ? 1 : -1;
return va > vb ? 1 : -1;
});
}
renderTable(rows);
}
function showResults(data) {
window.lastResults = data;
const view = document.getElementById('view');
if (graphTypeSel.value === 'timeseries') {
showTimeSeries(data);
} else {
if (data.rows.length === 0) {
view.innerHTML = '<p id="empty-message">Empty data provided to table</p><table id="results"></table>';
} else {
view.innerHTML = '<table id="results"></table>';
}
originalRows = data.rows.slice();
sortState = {index: null, dir: null};
renderTable(originalRows);
}
const sqlEl = document.createElement('pre');
sqlEl.id = 'sql_query';
sqlEl.style.whiteSpace = 'pre-wrap';
sqlEl.style.marginTop = '10px';
sqlEl.textContent = data.sql;
view.appendChild(sqlEl);
document.getElementById('query_info').textContent =
`Your query took about ${lastQueryTime} ms`;
}
function showError(err) {
window.lastResults = err;
const view = document.getElementById('view');
let msg = '';
if (typeof err === 'string') {
msg = err;
} else if (err) {
msg = err.error || 'Error';
if (err.sql) {
msg += '\nSQL: ' + err.sql;
}
if (err.traceback) {
msg += '\n' + err.traceback;
}
}
view.innerHTML = `<pre id="error-message">${msg}</pre>`;
document.getElementById('query_info').textContent = '';
}
function setSelectValue(selector, value) {
const el = typeof selector === 'string' ? document.querySelector(selector) : selector;
if (el) {
const select = el.tagName === 'SELECT' ? el : el.querySelector('select');
if (select) {
select.value = value;
select.dispatchEvent(new Event('change'));
}
}
}
</script>
</body>
</html>
I'm adding a new dropdown above Start/End for selecting the time column based on heuristics.
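Roughly this shape, mirroring the existing Start/End field markup (the `time_column` id and label are my own naming for this sketch; no such markup exists yet):

```html
<div class="field">
  <label>Time Column<span class="help" title="Column used for the Start/End time range.">[?]</span></label>
  <select id="time_column"></select>
</div>
```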
root@29cb09005028:/workspace/scubaduck# sed -n '1,200p' README.md
I really like Scuba (Meta's internal real-time database system). The distributed, real-time database part of Scuba is quite difficult (and expensive) to replicate, but I also really like Scuba's UI for doing queries, and I have found myself wishing that I have access to it even for "small" databases, e.g., I have a sqlite dataset I want to explore.
Pivotal ideas:
* Time series by default. In the dedicated "time series" view, there are many features specifically oriented towards working towards tables that represent events that occurred over time: the start, end, compare, aggregate and granularity fields all specially privilege the timestamp field. In fact, you can't log events to Scuba's backing data store without a timestamp, they always come with one. (Scuba also supports other views that don't presuppose a time series, but the time series is the most beloved and well used view.) This is in contrast to typical software which tries to generalize to arbitrary data first, with time series being added on later.
* It's all about exploration. Scuba is predicated on the idea that you don't know what you're looking for, that you are going to spend time tweaking queries and changing filters/grouping as part of an investigation to figure out why a system behaves the way it is. So the filters/comparisons/groupings you want to edit are always visible on the left sidebar, with the expectation that you're going to tweak the query to look at something else. Similarly, all the parameters of your query get saved into your URL, so your browser history can double up as a query history / you can easily share a query with someone else. This is in contrast to typical software which is often oriented to making pretty dashboards and reports. (This function is important too, but it's not what I want in exploration mode!)
* You can fix data problems in the query editor. It's pretty common to have messed up and ended up with a database that doesn't have exactly the columns you need, or some columns that are corrupted in some way. Scuba has pretty robust support for defining custom columns with arbitrary SQL functions, grouping over them as if they were native functions, and doing so with minimal runtime cost (Scuba aims to turn around your query in milliseconds!) Having to go and run a huge data pipeline to fix your data is a big impediment to exploration; quick and easy custom columns means you can patch over problems when you're investigating and fix them for real later.
We're going to build an exploratory data analysis tool like Scuba for a time series database (i.e., a database with a mandatory timestamp representing the time an event occurred). We'll use DuckDB as the underlying SQL engine served from a Python server, and render the GUI/results as a webpage with vanilla HTML and JS. We'll use choices.js to support token inputs. We define a token input to mean a text input element where as you type a dropdown displays with valid values, and if you select one or press enter, the selection turns into a token/chip that can only be deleted as one unit.
To start, we are going to support one view: samples. The samples view only allows you to view individual samples from the database, subject to a filter. Our main UI concept is that there is a left sidebar that is the query editor, and the right side that shows the view. The sidebar is always visible and defaults to the query parameters of the current view. After you make changes to the query, clicking the "Dive" button updates the view. The URL of the page encodes all of the values of the query (and gets updated when you Dive), so the browser's back button lets you view previous queries.
The query editor's job is to generate a SQL query, which then is applied on the database, and then the result visualized according to the view.
Here are the settings you can apply to the query. The help text should show up when you mouse over the field name:
* Start/End - Help text: "Sets the start/end of the time range to query. Can be any kind of datetime string. For example: 'April 23, 2014' or 'yesterday'." The UI for this selector supports both relative selections (now, -1 hour, -3 hours, -12 hours, -1 day, -3 days, -1 week, -1 fortnight, -30 days, -90 days) as well as specifying an absolute date. The way this field is rendered is there is a free form text box, a drop down arrow (for the relative selectors), and then a calendar button (for date selection).
* Order By - Help text: "Choose a column to sort results by." There is an ASC/DESC toggle next to it.
* Limit - Help text: "Choose the maximum number of results to show in the chart after any aggregations have been applied. For example, a limit of 10 will show no more than 10 rows for a table, etc."
* Filters - You can create as many filters as you want. You can either write a filter using a UI or manual SQL. In the UI, a filter consists of a column name, a relation (e.g., =, !=, <, >) and then a text field. The text field is a token input. It accepts multiple tokens for the = relation, in which case we match using an OR for all options.
There is also a "Columns" tab which lets you view all fields in the table, organ
ized by their type. You can also define derived columns, by specifying a column
name and SQL expression. Derived columns can be used for all parts of the UI, in
cluding filters/group by/etc. Columns have checkboxes indicating if we should SE
LECT them or not. Each selected column shows up in the graph. There is an All/N
one link which can be used to select/deselect all checkboxes.
The query UI constructs a SQL query that intuitively has this form:
```
SELECT column, column, ...,
FROM table
WHERE time >= min-timestamp
AND time <= max-timestamp
[AND condition ...]
ORDER BY aggregate(column)
LIMIT number
```
You should write tests for the server backend, demonstrating that at specific query values we get back the correct rows of data.
## Running the server
Activate the virtual environment and run the Flask development server:
```bash
flask --app scubaduck.server run --debug
```
By default the server loads `sample.csv`. Set the `SCUBADUCK_DB` environment
variable to point at a different database file (CSV, SQLite or DuckDB) if you
want to use another dataset. If the file does not exist, the server will raise
a `FileNotFoundError` during startup.
I should review codex_setup.sh for ruff and pyright commands and setup details.
root@29cb09005028:/workspace/scubaduck# cat codex_setup.sh
uv sync --frozen
source .venv/bin/activate
playwright install chromium
echo "source .venv/bin/activate" >> ~/.bashrc
I'll add heuristics to detect time columns by matching keywords and types. I'll modify JS and server logic accordingly.
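Something like this on the JS side (a sketch; `likelyTimeName` and `isTimeCandidate` are names I'm inventing here, and the keyword list is a first guess):

```js
// Treat a column as a time candidate if its type is timestamp/date-like,
// or if it is numeric and its name looks time-like.
function likelyTimeName(name) {
  return /(^|_)(time|timestamp|date|ts|created|updated)(_|$)/i.test(name);
}
function isTimeCandidate(name, type) {
  const t = (type || '').toUpperCase();
  if (t.includes('TIMESTAMP') || t.includes('DATE')) return true;
  return t.includes('INT') && likelyTimeName(name);
}
```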
root@29cb09005028:/workspace/scubaduck# grep -n "timestamp" -R
.venv/lib/python3.12/site-packages/werkzeug/middleware/shared_data.py:153:
datetime.fromtimestamp(os.path.getmtime(filename), tz=timezone.utc),
.venv/lib/python3.12/site-packages/werkzeug/middleware/shared_data.py:193:
datetime.fromtimestamp(
.venv/lib/python3.12/site-packages/werkzeug/middleware/shared_data.py:223:
timestamp = mtime.timestamp()
.venv/lib/python3.12/site-packages/werkzeug/middleware/shared_data.py:225:
return f"wzsdm-{timestamp}-{file_size}-{checksum}"
.venv/lib/python3.12/site-packages/werkzeug/http.py:1014: timestamp: datetime
| date | int | float | struct_time | None = None,
.venv/lib/python3.12/site-packages/werkzeug/http.py:1016: """Format a datetim
e object or timestamp into an :rfc:`2822` date
.venv/lib/python3.12/site-packages/werkzeug/http.py:1023: :param timestamp: T
he datetime or timestamp to format. Defaults to
.venv/lib/python3.12/site-packages/werkzeug/http.py:1029: if isinstance(times
tamp, date):
.venv/lib/python3.12/site-packages/werkzeug/http.py:1030: if not isinstan
ce(timestamp, datetime):
.venv/lib/python3.12/site-packages/werkzeug/http.py:1032: timestamp =
datetime.combine(timestamp, time(), tzinfo=timezone.utc)
.venv/lib/python3.12/site-packages/werkzeug/http.py:1035: timestamp =
_dt_as_utc(timestamp)
.venv/lib/python3.12/site-packages/werkzeug/http.py:1037: return email.ut
ils.format_datetime(timestamp, usegmt=True)
.venv/lib/python3.12/site-packages/werkzeug/http.py:1039: if isinstance(times
tamp, struct_time):
.venv/lib/python3.12/site-packages/werkzeug/http.py:1040: timestamp = mkt
ime(timestamp)
.venv/lib/python3.12/site-packages/werkzeug/http.py:1042: return email.utils.
formatdate(timestamp, usegmt=True)
.venv/lib/python3.12/site-packages/werkzeug/http.py:1259: :param expires: sho
uld be a `datetime` object or unix timestamp.
.venv/lib/python3.12/site-packages/werkzeug/http.py:1322: expires = http_
date(datetime.now(tz=timezone.utc).timestamp() + max_age)
.venv/lib/python3.12/site-packages/werkzeug/test.py:1463: self.expire
s is not None and self.expires.timestamp() == 0
.venv/lib/python3.12/site-packages/werkzeug/sansio/response.py:212: :para
m expires: should be a `datetime` object or UNIX timestamp.
.venv/lib/python3.12/site-packages/pyright-1.1.400.dist-info/RECORD:3296:pyright
/dist/dist/typeshed-fallback/stubs/protobuf/google/protobuf/timestamp_pb2.pyi,sh
a256=vq7LUuvzvMwMV4MChcWMwPhOM926OyyV9wwqcUX4uFo,6834
.venv/lib/python3.12/site-packages/duckdb-stubs/__init__.pyi:336: def read_js
on(self, path_or_buffer: Union[str, StringIO, TextIOBase], *, columns: Optional[
Dict[str,str]] = None, sample_size: Optional[int] = None, maximum_depth: Optiona
l[int] = None, records: Optional[str] = None, format: Optional[str] = None, date
_format: Optional[str] = None, timestamp_format: Optional[str] = None, compressi
on: Optional[str] = None, maximum_object_size: Optional[int] = None, ignore_erro
rs: Optional[bool] = None, convert_strings_to_integers: Optional[bool] = None, f
ield_appearance_threshold: Optional[float] = None, map_inference_threshold: Opti
onal[int] = None, maximum_sample_files: Optional[int] = None, filename: Optional
[bool | str] = None, hive_partitioning: Optional[bool] = None, union_by_name: Op
tional[bool] = None, hive_types: Optional[Dict[str, str]] = None, hive_types_aut
ocast: Optional[bool] = None) -> DuckDBPyRelation: ...
I’ll focus the search within the repository, ignoring the venv folder.
bash: .venv/bin/activate: No such file or directory
root@29cb09005028:/workspace# grep -n "timestamp" -R --exclude-dir=.venv | head
scubaduck/tests/test_server.py:19: "order_by": "timestamp",
scubaduck/tests/test_server.py:22: "columns": ["timestamp", "event", "value", "user"],
scubaduck/tests/test_server.py:52: "order_by": "timestamp",
scubaduck/tests/test_server.py:54: "columns": ["timestamp", "event", "value", "user"],
scubaduck/tests/test_server.py:77: "columns": ["timestamp", "event", "value", "user"],
scubaduck/tests/test_server.py:103: "order_by": "timestamp",
scubaduck/tests/test_server.py:105: "columns": ["timestamp", "user"],
scubaduck/tests/test_server.py:124: "order_by": "timestamp",
scubaduck/tests/test_server.py:126: "columns": ["timestamp", "event", "value", "user"],
scubaduck/tests/test_server.py:162: "order_by": "timestamp",
I need to redo the cd since the root path wasn't recognized properly.
root@29cb09005028:/workspace# cd scubaduck
root@29cb09005028:/workspace/scubaduck# ls
AGENTS.md codex_setup.sh scubaduck tests
README.md pyproject.toml stubs uv.lock
root@29cb09005028:/workspace/scubaduck# grep -n "timestamp" -R . | head
./.venv/lib/python3.12/site-packages/werkzeug/middleware/shared_data.py:153:
datetime.fromtimestamp(os.path.getmtime(filename), tz=timezone.utc),
./.venv/lib/python3.12/site-packages/werkzeug/middleware/shared_data.py:193:
datetime.fromtimestamp(
./.venv/lib/python3.12/site-packages/werkzeug/middleware/shared_data.py:223:
timestamp = mtime.timestamp()
./.venv/lib/python3.12/site-packages/werkzeug/middleware/shared_data.py:225:
return f"wzsdm-{timestamp}-{file_size}-{checksum}"
./.venv/lib/python3.12/site-packages/werkzeug/http.py:1014: timestamp: dateti
me | date | int | float | struct_time | None = None,
./.venv/lib/python3.12/site-packages/werkzeug/http.py:1016: """Format a datet
ime object or timestamp into an :rfc:`2822` date
./.venv/lib/python3.12/site-packages/werkzeug/http.py:1023: :param timestamp:
The datetime or timestamp to format. Defaults to
./.venv/lib/python3.12/site-packages/werkzeug/http.py:1029: if isinstance(tim
estamp, date):
./.venv/lib/python3.12/site-packages/werkzeug/http.py:1030: if not isinst
ance(timestamp, datetime):
./.venv/lib/python3.12/site-packages/werkzeug/http.py:1032: timestamp
= datetime.combine(timestamp, time(), tzinfo=timezone.utc)
I missed excluding venv. I'll search again and make sure to exclude it.
root@29cb09005028:/workspace/scubaduck# grep -n "timestamp" -R scubaduck tests |
head
scubaduck/server.py:197: x_axis = params.x_axis or "timestamp"
scubaduck/server.py:274: where_parts.append(f"timestamp >= '{params.start}'")
scubaduck/server.py:276: where_parts.append(f"timestamp <= '{params.end}'")
scubaduck/server.py:453: for cand in ["time", "timestamp"]:
scubaduck/server.py:521: axis = params.x_axis or "timestamp"
scubaduck/sample.csv:1:timestamp,event,value,user
tests/test_server.py:19: "order_by": "timestamp",
tests/test_server.py:22: "columns": ["timestamp", "event", "value", "user"],
tests/test_server.py:52: "order_by": "timestamp",
tests/test_server.py:54: "columns": ["timestamp", "event", "value", "user"],
I'm updating the server query to handle "time_column" and adding new UI for it.
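On the server, the minimal change I'm picturing is a new field on QueryParams plus reading it from the request JSON (a sketch with only the relevant fields; the real dataclass has more, and `params_from_payload` is a hypothetical helper):

```python
from __future__ import annotations

from dataclasses import dataclass
from typing import Any


@dataclass
class QueryParams:
    table: str = "events"
    start: str | None = None
    end: str | None = None
    time_column: str = "timestamp"  # new: column used for start/end filtering


def params_from_payload(payload: dict[str, Any]) -> QueryParams:
    # Fall back to "timestamp" so old clients keep working.
    return QueryParams(
        table=payload.get("table", "events"),
        start=payload.get("start"),
        end=payload.get("end"),
        time_column=payload.get("time_column") or "timestamp",
    )
```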
root@29cb09005028:/workspace/scubaduck# grep -n "x_axis" -n tests/test_web.py
194: assert page.is_visible("#x_axis_field")
I'll look around lines 192-194 for the relevant changes.
root@29cb09005028:/workspace/scubaduck# sed -n '185,210p' tests/test_web.py
assert not page.is_visible("text=Strings:")
def test_graph_type_timeseries_fields(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#graph_type", state="attached")
select_value(page, "#graph_type", "timeseries")
assert page.is_visible("#group_by_field")
assert page.is_visible("#aggregate_field")
assert page.is_visible("#x_axis_field")
assert page.is_visible("#granularity_field")
assert page.is_visible("#fill_field")
def test_timeseries_default_query(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#graph_type", state="attached")
select_value(page, "#graph_type", "timeseries")
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
data = page.evaluate("window.lastResults")
assert "error" not in data
assert page.is_visible("#chart")
page.click("text=Columns")
assert not page.is_checked("#column_groups input[value='timestamp']")
root@29cb09005028:/workspace/scubaduck# grep -n "timeColumns" -n scubaduck/stati
c/index.html
337:const timeColumns = [];
510: timeColumns.length = 0;
530: timeColumns.push(c.name);
548: timeColumns.forEach(name => {
863: timeColumns.splice(0, timeColumns.length, ...baseTimeColumns);
root@29cb09005028:/workspace/scubaduck# sed -n '492,560p' scubaduck/static/index.html
orderDirBtn.addEventListener('click', () => {
orderDir = orderDir === 'ASC' ? 'DESC' : 'ASC';
updateOrderDirButton();
});
updateOrderDirButton();
graphTypeSel.addEventListener('change', updateDisplayTypeUI);
function loadColumns(table) {
return fetch('/api/columns?table=' + encodeURIComponent(table)).then(r => r.json()).then(cols => {
const orderSelect = document.getElementById('order_by');
const xAxisSelect = document.getElementById('x_axis');
const groupsEl = document.getElementById('column_groups');
orderSelect.innerHTML = '';
xAxisSelect.innerHTML = '';
groupsEl.innerHTML = '';
allColumns.length = 0;
stringColumns.length = 0;
integerColumns.length = 0;
timeColumns.length = 0;
baseColumns.length = 0;
baseStringColumns.length = 0;
baseIntegerColumns.length = 0;
baseTimeColumns.length = 0;
for (const k in columnTypes) delete columnTypes[k];
const groups = {
time: {name: 'Time', cols: [], ul: null},
integer: {name: 'Integers', cols: [], ul: null},
string: {name: 'Strings', cols: [], ul: null},
};
cols.forEach(c => {
const t = c.type.toUpperCase();
columnTypes[c.name] = c.type;
allColumns.push(c.name);
baseColumns.push(c.name);
let g = 'string';
if (t.includes('INT')) g = 'integer';
if (t.includes('TIMESTAMP')) {
g = 'time';
timeColumns.push(c.name);
baseTimeColumns.push(c.name);
}
if (g === 'string') {
stringColumns.push(c.name);
baseStringColumns.push(c.name);
} else {
integerColumns.push(c.name);
baseIntegerColumns.push(c.name);
}
groups[g].cols.push(c.name);
if (g !== 'string') {
const o = document.createElement('option');
o.value = c.name;
o.textContent = c.name;
orderSelect.appendChild(o);
}
});
timeColumns.forEach(name => {
const o = document.createElement('option');
o.value = name;
o.textContent = name;
xAxisSelect.appendChild(o);
});
Object.keys(groups).forEach(key => {
const g = groups[key];
const div = document.createElement('div');
div.className = 'col-group';
const header = document.createElement('div');
header.className = 'col-group-header';
header.appendChild(document.createTextNode(g.name + ': '));
I'll look at the code around line 860, inside refreshDerivedColumns, where the column lists get rebuilt.
root@29cb09005028:/workspace/scubaduck# sed -n '860,870p' scubaduck/static/index.html
allColumns.splice(0, allColumns.length, ...baseColumns);
stringColumns.splice(0, stringColumns.length, ...baseStringColumns);
integerColumns.splice(0, integerColumns.length, ...baseIntegerColumns);
timeColumns.splice(0, timeColumns.length, ...baseTimeColumns);
derivedColumns.forEach(d => {
d.type = d.el.querySelector('.d-type').value;
d.name = d.el.querySelector('.d-name').value;
d.expr = d.el.querySelector('.d-expr').value;
d.include = d.el.querySelector('.d-use').checked;
allColumns.push(d.name);
columnTypes[d.name] = d.type;
I'll create timeSelectOptions to store candidate columns for the time column dropdown. timeColumns should stay focused on grouping, not identifying candidates. I'll populate these options based on type and store heuristics for default choice. Afterward, I'll update the time column select UI when loading columns and apply parameters appropriately.
I'll set defaultTimeColumn based on the guessed or first option, and handle applyParams accordingly. The server will always allow the user to pick but should still guess when needed.
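A sketch of the loadColumns side (timeSelectOptions, defaultTimeColumn and guessTimeColumn are my names for this sketch; the "time"/"timestamp" preference mirrors the candidate loop already in server.py):

```js
const timeSelectOptions = []; // candidates for the Time Column dropdown
let defaultTimeColumn = '';

function guessTimeColumn(options) {
  // Prefer a column literally named "time" or "timestamp",
  // otherwise fall back to the first candidate.
  for (const cand of ['time', 'timestamp']) {
    const hit = options.find(n => n.toLowerCase() === cand);
    if (hit) return hit;
  }
  return options[0] || '';
}

// In loadColumns, after classifying each column:
//   if (isTimeCandidate(c.name, c.type)) timeSelectOptions.push(c.name);
// and once all columns are processed:
//   defaultTimeColumn = guessTimeColumn(timeSelectOptions);
//   document.getElementById('time_column').value = defaultTimeColumn;
```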
I'll update tests to include time_column, adjusting for integer time columns and dataset changes.
root@29cb09005028:/workspace/scubaduck# head -n 5 scubaduck/sample.csv
timestamp,event,value,user
2024-01-01 00:00:00,login,10,alice
2024-01-01 01:00:00,logout,20,bob
2024-01-02 00:00:00,login,30,alice
2024-01-02 03:00:00,login,40,charlie
I'll test using an integer timestamp column and modify the query to handle it accordingly.
I'll convert integer timestamps after getting min/max values instead of using strftime.
Start and end must be strings—converting to 'YYYY-MM-DD HH:MM:SS'. I'll verify bucket expression.
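The conversion I have in mind, as a sketch (this assumes the integers are epoch seconds, which I still need to confirm against the data):

```python
from datetime import datetime, timezone


def epoch_to_str(value: float) -> str:
    # Render an epoch-seconds value in the same 'YYYY-MM-DD HH:MM:SS'
    # shape the start/end fields already use.
    return datetime.fromtimestamp(value, tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S")


# epoch_to_str(1704067200) -> '2024-01-01 00:00:00'
```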
root@29cb09005028:/workspace/scubaduck# sed -n '182,217p' scubaduck/server.py
s = dtparser.parse(start)
e = dtparser.parse(end)
except Exception:
return 3600
total = max((e - s).total_seconds(), 1)
buckets = 100 if gran == "auto" else 500
return max(int(total // buckets), 1)
return 3600
def build_query(params: QueryParams, column_types: Dict[str, str] | None = None) -> str:
select_parts: list[str] = []
group_cols = params.group_by[:]
if params.graph_type == "timeseries":
sec = _granularity_seconds(params.granularity, params.start, params.end)
x_axis = params.x_axis or "timestamp"
if params.start:
bucket_expr = (
f"TIMESTAMP '{params.start}' + INTERVAL '{sec} second' * "
f"CAST(floor((epoch({x_axis}) - epoch(TIMESTAMP '{params.start}'
))/{sec}) AS BIGINT)"
)
else:
bucket_expr = (
f"TIMESTAMP 'epoch' + INTERVAL '{sec} second' * "
f"CAST(floor(epoch({x_axis})/{sec}) AS BIGINT)"
)
select_parts.append(f"{bucket_expr} AS bucket")
group_cols = ["bucket"] + group_cols
has_agg = bool(group_cols) or params.aggregate is not None
if has_agg:
select_cols = (
group_cols[1:] if params.graph_type == "timeseries" else group_cols
)
select_parts.extend(select_cols)
agg = (params.aggregate or "avg").lower()
I'm realizing x_axis could be either a time-like column or a numeric type. I'll modify the build_query function to convert x_axis to a timestamp if it's numeric.
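Sketch of that conversion, reusing the TIMESTAMP 'epoch' + INTERVAL pattern build_query already uses for bucketing (the numeric-type check and the epoch-seconds assumption are mine):

```python
def x_axis_expr(x_axis: str, column_types: dict[str, str] | None) -> str:
    # If the x-axis column is numeric (assumed epoch seconds), wrap it so the
    # bucketing logic can keep treating it as a TIMESTAMP.
    ctype = (column_types or {}).get(x_axis, "").upper()
    if "INT" in ctype or "DOUBLE" in ctype or "FLOAT" in ctype:
        return f"TIMESTAMP 'epoch' + INTERVAL '1 second' * CAST({x_axis} AS BIGINT)"
    return x_axis
```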
root@29cb09005028:/workspace/scubaduck# sed -n '270,320p' scubaduck/server.py
select_clause = ", ".join(select_parts) if select_parts else "*"
lines = [f"SELECT {select_clause}", f'FROM "{params.table}"']
where_parts: list[str] = []
if params.start:
where_parts.append(f"timestamp >= '{params.start}'")
if params.end:
where_parts.append(f"timestamp <= '{params.end}'")
for f in params.filters:
op = f.op
if op in {"empty", "!empty"}:
val = "''"
else:
if f.value is None:
continue
if isinstance(f.value, list):
if not f.value:
continue
if op == "=":
vals = " OR ".join(f"{f.column} = '{v}'" for v in f.value)
where_parts.append(f"({vals})")
continue
val = f"'{f.value}'" if isinstance(f.value, str) else str(f.value)
if op == "contains":
where_parts.append(f"{f.column} ILIKE '%' || {val} || '%'")
elif op == "!contains":
where_parts.append(f"{f.column} NOT ILIKE '%' || {val} || '%'")
elif op == "empty":
where_parts.append(f"{f.column} = {val}")
elif op == "!empty":
where_parts.append(f"{f.column} != {val}")
else:
where_parts.append(f"{f.column} {op} {val}")
if where_parts:
lines.append("WHERE " + " AND ".join(where_parts))
if group_cols:
lines.append("GROUP BY " + ", ".join(group_cols))
if params.order_by:
lines.append(f"ORDER BY {params.order_by} {params.order_dir}")
elif params.graph_type == "timeseries":
lines.append("ORDER BY bucket")
if params.limit is not None:
lines.append(f"LIMIT {params.limit}")
return "\n".join(lines)
def create_app(db_file: str | Path | None = None) -> Flask:
app = Flask(__name__, static_folder="static")
if db_file is None:
env_db = os.environ.get("SCUBADUCK_DB")
if env_db:
I'm making changes to reference params.time_column for querying start and end conditions.
I'll adjust the logic to include time_column, handling it in both queries and the client side.
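For the WHERE clauses shown above, that means something like this (a sketch, pulled out as a helper for clarity; it assumes the planned params.time_column field):

```python
def time_where(time_column: str, start: str | None, end: str | None) -> list[str]:
    # Same shape as the existing start/end clauses in build_query,
    # but with the column name taken from the new time_column setting.
    tcol = time_column or "timestamp"
    parts: list[str] = []
    if start:
        parts.append(f"{tcol} >= '{start}'")
    if end:
        parts.append(f"{tcol} <= '{end}'")
    return parts
```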
I'll associate time-based columns with a selection and ensure numeric or timestamp columns are recognized.
I’ll adjust date handling to correctly interpret numeric values and timestamp types when displayed.
I'll adjust timestamp handling on both client-side and server-side without modifying SELECT.
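On the client, renderTable's date branch would need a small shim before constructing Date (a sketch; the epoch-seconds assumption needs checking):

```js
function toDate(v) {
  // Numeric time values are assumed to be epoch seconds; Date wants
  // milliseconds. Strings fall through to Date's normal parsing.
  return typeof v === 'number' ? new Date(v * 1000) : new Date(v);
}
```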
root@29cb09005028:/workspace/scubaduck# grep -n _make_payload -n tests/test_server.py
157:def _make_payload() -> dict[str, object]:
201: payload = _make_payload()
325: payload = _make_payload()
root@29cb09005028:/workspace/scubaduck# sed -n '150,210p' tests/test_server.py
not_empty = {**base, "filters": [{"column": "user", "op": "!empty"}]}
rv = client.post(
"/api/query", data=json.dumps(not_empty), content_type="application/json
"
)
assert len(rv.get_json()["rows"]) == 4
def _make_payload() -> dict[str, object]:
return {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-02 00:00:00",
"order_by": "timestamp",
"order_dir": "ASC",
"limit": 10,
"columns": ["timestamp", "event", "value", "user"],
"filters": [],
}
def test_database_types(tmp_path: Path) -> None:
csv_file = tmp_path / "events.csv"
csv_file.write_text(Path("scubaduck/sample.csv").read_text())
sqlite_file = tmp_path / "events.sqlite"
import sqlite3
conn = sqlite3.connect(sqlite_file)
conn.execute(
"CREATE TABLE events (timestamp TEXT, event TEXT, value INTEGER, user TE
XT)"
)
with open(csv_file) as f:
next(f)
for line in f:
ts, ev, val, user = line.strip().split(",")
conn.execute(
"INSERT INTO events VALUES (?, ?, ?, ?)", (ts, ev, int(val), use
r)
)
conn.commit()
conn.close()  # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
duckdb_file = tmp_path / "events.duckdb"
con = duckdb.connect(duckdb_file)
con.execute(
f"CREATE TABLE events AS SELECT * FROM read_csv_auto('{csv_file.as_posix
()}')"
)
con.close()  # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
for db in (csv_file, sqlite_file, duckdb_file):
app = server.create_app(db)
client = app.test_client()
payload = _make_payload()
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/js
on"
)
rows = rv.get_json()["rows"]
assert len(rows) == 3
def test_sqlite_longvarchar(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
sqlite_file = tmp_path / "events.sqlite"
I'll modify _make_payload to include "time_column": "timestamp" and adjust manual tests.
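i.e. the helper would become (a sketch based on the version shown above):

```python
def _make_payload() -> dict[str, object]:
    return {
        "table": "events",
        "start": "2024-01-01 00:00:00",
        "end": "2024-01-02 00:00:00",
        "order_by": "timestamp",
        "order_dir": "ASC",
        "limit": 10,
        "columns": ["timestamp", "event", "value", "user"],
        "filters": [],
        "time_column": "timestamp",  # new field under test
    }
```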
root@29cb09005028:/workspace/scubaduck# grep -n "time_column" -n tests/test_server.py
I'll update the payloads and search for dictionary definitions, avoiding _make_payload.
root@29cb09005028:/workspace/scubaduck# grep -n "payload = {" -n tests/test_server.py | head
15: payload = {
48: payload = {
72: base_payload = {
99: payload = {
250: payload = {
306: payload = {
343: payload = {
379: payload = {
400: payload = {
420: payload = {
I'll open the top 120 lines to add the time_column to each section.
root@29cb09005028:/workspace/scubaduck# sed -n '1,120p' tests/test_server.py
from __future__ import annotations
import json
from pathlib import Path
from typing import cast
import duckdb
from scubaduck import server
import pytest
def test_basic_query() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-02 00:00:00",
"order_by": "timestamp",
"order_dir": "ASC",
"limit": 10,
"columns": ["timestamp", "event", "value", "user"],
"filters": [],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert data
rows = data["rows"]
# We expect first three rows (until 2024-01-02 00:00:00)
assert len(rows) == 3
assert rows[0][1] == "login"
assert rows[1][1] == "logout"
def test_js_served() -> None:
app = server.app
client = app.test_client()
rv = client.get("/js/chip_input.js")
assert rv.status_code == 200
assert b"initChipInput" in rv.data
def test_filter_multi_token() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-02 03:00:00",
"order_by": "timestamp",
"limit": 10,
"columns": ["timestamp", "event", "value", "user"],
"filters": [{"column": "user", "op": "=", "value": ["alice", "charlie"]}
],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert data
rows = data["rows"]
# Should only return rows for alice and charlie
assert len(rows) == 3
assert rows[0][3] == "alice"
assert rows[-1][3] == "charlie"
def test_empty_filter_is_noop() -> None:
app = server.app
client = app.test_client()
base_payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"limit": 100,
"columns": ["timestamp", "event", "value", "user"],
}
no_filter = {**base_payload, "filters": []}
empty_filter = {
**base_payload,
"filters": [{"column": "user", "op": "=", "value": None}],
}
rv1 = client.post(
"/api/query", data=json.dumps(no_filter), content_type="application/json
"
)
rv2 = client.post(
"/api/query", data=json.dumps(empty_filter), content_type="application/j
son"
)
rows1 = rv1.get_json()["rows"]
rows2 = rv2.get_json()["rows"]
assert rows1 == rows2
def test_select_columns() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"order_by": "timestamp",
"limit": 10,
"columns": ["timestamp", "user"],
"filters": [],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert data
rows = data["rows"]
assert len(rows[0]) == 2
assert rows[0][1] == "alice"
def test_string_filter_ops() -> None:
app = server.app
client = app.test_client()
root@29cb09005028:/workspace/scubaduck# sed -n '120,250p' tests/test_server.py
client = app.test_client()
base = {
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"order_by": "timestamp",
"limit": 100,
"columns": ["timestamp", "event", "value", "user"],
}
contains = {
**base,
"filters": [{"column": "user", "op": "contains", "value": "ali"}],
}
rv = client.post(
"/api/query", data=json.dumps(contains), content_type="application/json"
)
rows = rv.get_json()["rows"]
assert all("ali" in r[3] for r in rows)
regex = {
**base,
"filters": [{"column": "user", "op": "~", "value": "^a.*"}],
}
rv = client.post(
"/api/query", data=json.dumps(regex), content_type="application/json"
)
rows = rv.get_json()["rows"]
assert all(r[3].startswith("a") for r in rows)
assert len(rows) == 2
not_empty = {**base, "filters": [{"column": "user", "op": "!empty"}]}
rv = client.post(
"/api/query", data=json.dumps(not_empty), content_type="application/json
"
)
assert len(rv.get_json()["rows"]) == 4
def _make_payload() -> dict[str, object]:
return {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-02 00:00:00",
"order_by": "timestamp",
"order_dir": "ASC",
"limit": 10,
"columns": ["timestamp", "event", "value", "user"],
"filters": [],
}
def test_database_types(tmp_path: Path) -> None:
csv_file = tmp_path / "events.csv"
csv_file.write_text(Path("scubaduck/sample.csv").read_text())
sqlite_file = tmp_path / "events.sqlite"
import sqlite3
conn = sqlite3.connect(sqlite_file)
conn.execute(
"CREATE TABLE events (timestamp TEXT, event TEXT, value INTEGER, user TE
XT)"
)
with open(csv_file) as f:
next(f)
for line in f:
ts, ev, val, user = line.strip().split(",")
conn.execute(
"INSERT INTO events VALUES (?, ?, ?, ?)", (ts, ev, int(val), use
r)
)
conn.commit()
conn.close() # pyright: ignore[reportUnknownMemberType, reportAttributeAcce
ssIssue]
duckdb_file = tmp_path / "events.duckdb"
con = duckdb.connect(duckdb_file)
con.execute(
f"CREATE TABLE events AS SELECT * FROM read_csv_auto('{csv_file.as_posix
()}')"
)
con.close() # pyright: ignore[reportUnknownMemberType, reportAttributeAcces
sIssue]
for db in (csv_file, sqlite_file, duckdb_file):
app = server.create_app(db)
client = app.test_client()
payload = _make_payload()
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/js
on"
)
rows = rv.get_json()["rows"]
assert len(rows) == 3
def test_sqlite_longvarchar(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
    sqlite_file = tmp_path / "events.sqlite"
    import sqlite3
    conn = sqlite3.connect(sqlite_file)
    conn.execute(
        "CREATE TABLE events (timestamp TEXT, url LONGVARCHAR, title VARCHAR(10))"
    )
    conn.execute(
        "INSERT INTO events VALUES ('2024-01-01 00:00:00', 'https://a.com', 'Home')"
    )
    conn.commit()
    conn.close()  # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
from typing import Any
real_connect = duckdb.connect
def failing_connect(*args: Any, **kwargs: Any) -> Any:
real = real_connect(*args, **kwargs)
class Wrapper:
def __init__(self, con: duckdb.DuckDBPyConnection) -> None:
self.con = con
self._failed = False
def execute(self, sql: str, *a: Any, **kw: Any):
if not self._failed and sql == "LOAD sqlite":
self._failed = True
raise RuntimeError("fail")
return self.con.execute(sql, *a, **kw)
def __getattr__(self, name: str) -> object:
return getattr(self.con, name)
return Wrapper(real)
monkeypatch.setattr(server.duckdb, "connect", failing_connect)
app = server.create_app(sqlite_file)
client = app.test_client()
payload = {
root@29cb09005028:/workspace/scubaduck# sed -n '250,420p' tests/test_server.py
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-01 01:00:00",
"order_by": "timestamp",
"columns": ["timestamp", "url", "title"],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert data["rows"][0][1] == "https://a.com"
def test_sqlite_bigint(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
sqlite_file = tmp_path / "big.sqlite"
import sqlite3
conn = sqlite3.connect(sqlite_file)
conn.execute("CREATE TABLE events (timestamp TEXT, value INTEGER)")
big_value = 13385262862605259
conn.execute(
"INSERT INTO events VALUES ('2024-01-01 00:00:00', ?)",
(big_value,),
)
conn.commit()
    conn.close()  # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
from typing import Any
real_connect = duckdb.connect
def failing_connect(*args: Any, **kwargs: Any) -> Any:
real = real_connect(*args, **kwargs)
class Wrapper:
def __init__(self, con: duckdb.DuckDBPyConnection) -> None:
self.con = con
self._failed = False
def execute(self, sql: str, *a: Any, **kw: Any):
if not self._failed and sql == "LOAD sqlite":
self._failed = True
raise RuntimeError("fail")
return self.con.execute(sql, *a, **kw)
def __getattr__(self, name: str) -> object:
return getattr(self.con, name)
return Wrapper(real)
monkeypatch.setattr(server.duckdb, "connect", failing_connect)
app = server.create_app(sqlite_file)
client = app.test_client()
payload = {
"table": "events",
"order_by": "timestamp",
"columns": ["timestamp", "value"],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert data["rows"][0][1] == big_value
def test_envvar_db(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
csv_file = tmp_path / "custom.csv"
    csv_file.write_text("timestamp,event,value,user\n2024-01-01 00:00:00,login,5,bob\n")
monkeypatch.setenv("SCUBADUCK_DB", str(csv_file))
app = server.create_app()
client = app.test_client()
payload = _make_payload()
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
rows = rv.get_json()["rows"]
assert len(rows) == 1
def test_envvar_db_missing(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
missing = tmp_path / "missing.sqlite"
monkeypatch.setenv("SCUBADUCK_DB", str(missing))
with pytest.raises(FileNotFoundError):
server.create_app()
def test_group_by_table() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "table",
"order_by": "user",
"limit": 10,
"columns": ["value"],
"group_by": ["user"],
"aggregate": "Sum",
"show_hits": True,
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
rows = rv.get_json()["rows"]
assert rows[0][0] == "alice"
assert rows[0][1] == 2
assert rows[0][2] == 40
def test_relative_time_query(monkeypatch: pytest.MonkeyPatch) -> None:
app = server.app
client = app.test_client()
from datetime import datetime
fixed_now = datetime(2024, 1, 2, 4, 0, 0)
class FixedDateTime(datetime):
@classmethod
def now(cls, tz=None): # type: ignore[override]
return fixed_now if tz is None else fixed_now.astimezone(tz)
monkeypatch.setattr(server, "datetime", FixedDateTime)
payload = {
"table": "events",
"start": "-1 hour",
"end": "now",
"order_by": "timestamp",
"limit": 100,
"columns": ["timestamp", "event", "value", "user"],
"filters": [],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert len(data["rows"]) == 1
assert data["rows"][0][3] == "charlie"
def test_invalid_time_error() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "nonsense",
"end": "now",
"order_by": "timestamp",
"limit": 10,
"columns": ["timestamp"],
"filters": [],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 400
assert "error" in data
def test_query_error_returns_sql_and_traceback() -> None:
app = server.app
client = app.test_client()
payload = {
root@29cb09005028:/workspace/scubaduck# sed -n '420,640p' tests/test_server.py
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"columns": ["event"],
"group_by": ["user"],
"aggregate": "avg",
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 400
assert "error" in data
def test_table_unknown_column_error() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "table",
"order_by": "timestamp",
"limit": 100,
"columns": ["user", "Hits", "value"],
"group_by": ["user"],
"aggregate": "Count",
"show_hits": True,
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 400
assert "Unknown column" in data["error"]
def test_samples_view_rejects_group_by() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"graph_type": "samples",
"group_by": ["user"],
"columns": ["timestamp"],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 400
assert "only valid" in data["error"]
def test_table_avg_with_timestamp() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "table",
"order_by": "user",
"limit": 100,
"columns": ["user", "timestamp", "value"],
"group_by": ["user"],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert "error" not in data
rows = data["rows"]
assert rows[0][0] == "alice"
from dateutil import parser
ts = parser.parse(rows[0][1]).replace(tzinfo=None)
assert ts == parser.parse("2024-01-01 12:00:00")
def test_timeseries_basic() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "timeseries",
"limit": 100,
"group_by": ["user"],
"aggregate": "Count",
"columns": ["value"],
"x_axis": "timestamp",
"granularity": "1 day",
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert len(data["rows"]) == 4
def test_timeseries_orders_by_xaxis() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "timeseries",
"limit": 100,
"columns": ["value"],
"x_axis": "timestamp",
"granularity": "1 day",
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
rows = data["rows"]
from dateutil import parser
timestamps = [parser.parse(r[0]).replace(tzinfo=None) for r in rows]
assert timestamps == sorted(timestamps)
def test_timeseries_limit_applies_to_series() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "timeseries",
"limit": 1,
"order_by": "user",
"group_by": ["user"],
"aggregate": "Count",
"columns": ["value"],
"x_axis": "timestamp",
"granularity": "1 day",
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert len(data["rows"]) == 2
assert all(r[1] == "alice" for r in data["rows"])
def test_timeseries_auto_and_fine_buckets() -> None:
app = server.app
client = app.test_client()
def run(gran: str) -> None:
payload = {
"start": "2024-01-01 00:00:00",
"end": "2024-01-02 03:00:00",
"graph_type": "timeseries",
"columns": ["value"],
"x_axis": "timestamp",
"granularity": gran,
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/js
on"
)
data = rv.get_json()
assert rv.status_code == 200
from dateutil import parser
start = parser.parse(cast(str, payload["start"])).replace(tzinfo=None)
buckets = [
            parser.parse(cast(str, r[0])).replace(tzinfo=None) for r in data["rows"]
]
assert buckets[0] == start
if len(buckets) > 1:
step = (buckets[1] - buckets[0]).total_seconds()
assert step % data["bucket_size"] == 0
assert any(r[1] != 0 for r in data["rows"])
run("Auto")
run("Fine")
def test_timeseries_string_column_error() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "timeseries",
"limit": 7,
"columns": ["timestamp", "event", "value", "user"],
"x_axis": "timestamp",
"granularity": "1 hour",
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 400
assert "Aggregate" in data["error"]
def test_derived_column_basic() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"limit": 10,
"columns": ["timestamp"],
"derived_columns": {"val2": "value * 2"},
I need to page down a bit more to capture everything.
"filters": [],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert data["rows"][0][1] == 20
def test_timeseries_derived_column() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "timeseries",
"granularity": "1 hour",
"limit": 7,
"columns": ["value"],
"derived_columns": {"derived_1": "value * 2"},
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
rows = data["rows"]
assert all(r[2] == r[1] * 2 for r in rows)
def test_default_start_end_returned() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"order_by": "timestamp",
"limit": 5,
"columns": ["timestamp"],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert data["start"] == "2024-01-01 00:00:00"
assert data["end"] == "2024-01-02 03:00:00"
I'll add "time_column": "timestamp"
to each payload, starting with _make_payload.
er.py
15: payload = {
48: payload = {
72: base_payload = {
99: payload = {
250: payload = {
306: payload = {
343: payload = {
379: payload = {
400: payload = {
420: payload = {
439: payload = {
462: payload = {
479: payload = {
506: payload = {
529: payload = {
554: payload = {
581: payload = {
613: payload = {
634: payload = {
654: payload = {
676: payload = {
I'll begin reviewing where updates are needed for the current changes.
root@29cb09005028:/workspace/scubaduck# sed -n '240,270p' tests/test_server.py
def __getattr__(self, name: str) -> object:
return getattr(self.con, name)
return Wrapper(real)
monkeypatch.setattr(server.duckdb, "connect", failing_connect)
app = server.create_app(sqlite_file)
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-01 01:00:00",
"order_by": "timestamp",
"columns": ["timestamp", "url", "title"],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert data["rows"][0][1] == "https://a.com"
def test_sqlite_bigint(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
sqlite_file = tmp_path / "big.sqlite"
import sqlite3
conn = sqlite3.connect(sqlite_file)
conn.execute("CREATE TABLE events (timestamp TEXT, value INTEGER)")
monkeypatch.setattr(server.duckdb, "connect", failing_connect)
app = server.create_app(sqlite_file)
client = app.test_client()
payload = {
"table": "events",
"order_by": "timestamp",
"columns": ["timestamp", "value"],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert data["rows"][0][1] == big_value
def test_envvar_db(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
csv_file = tmp_path / "custom.csv"
    csv_file.write_text("timestamp,event,value,user\n2024-01-01 00:00:00,login,5,bob\n")
monkeypatch.setenv("SCUBADUCK_DB", str(csv_file))
app = server.create_app()
client = app.test_client()
payload = _make_payload()
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
rows = rv.get_json()["rows"]
assert len(rows) == 1
def test_envvar_db_missing(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
missing = tmp_path / "missing.sqlite"
monkeypatch.setenv("SCUBADUCK_DB", str(missing))
with pytest.raises(FileNotFoundError):
server.create_app()
def test_group_by_table() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "table",
"order_by": "user",
"limit": 10,
"columns": ["value"],
"group_by": ["user"],
"aggregate": "Sum",
"show_hits": True,
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
rows = rv.get_json()["rows"]
assert rows[0][0] == "alice"
assert rows[0][1] == 2
I need to look further to capture everything I need.
@classmethod
def now(cls, tz=None): # type: ignore[override]
return fixed_now if tz is None else fixed_now.astimezone(tz)
monkeypatch.setattr(server, "datetime", FixedDateTime)
payload = {
"table": "events",
"start": "-1 hour",
"end": "now",
"order_by": "timestamp",
"limit": 100,
"columns": ["timestamp", "event", "value", "user"],
"filters": [],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert len(data["rows"]) == 1
assert data["rows"][0][3] == "charlie"
def test_invalid_time_error() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "nonsense",
"end": "now",
"order_by": "timestamp",
"limit": 10,
"columns": ["timestamp"],
"filters": [],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 400
assert "error" in data
def test_query_error_returns_sql_and_traceback() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"columns": ["event"],
"group_by": ["user"],
"aggregate": "avg",
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 400
assert "error" in data
def test_table_unknown_column_error() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "table",
"order_by": "timestamp",
"limit": 100,
"columns": ["user", "Hits", "value"],
"group_by": ["user"],
"aggregate": "Count",
"show_hits": True,
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 400
assert "Unknown column" in data["error"]
def test_samples_view_rejects_group_by() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"graph_type": "samples",
"group_by": ["user"],
"columns": ["timestamp"],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 400
assert "only valid" in data["error"]
def test_table_avg_with_timestamp() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "table",
"order_by": "user",
"limit": 100,
"columns": ["user", "timestamp", "value"],
"group_by": ["user"],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert "error" not in data
rows = data["rows"]
assert rows[0][0] == "alice"
...
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert len(data["rows"]) == 2
assert all(r[1] == "alice" for r in data["rows"])
def test_timeseries_auto_and_fine_buckets() -> None:
app = server.app
client = app.test_client()
def run(gran: str) -> None:
payload = {
"start": "2024-01-01 00:00:00",
"end": "2024-01-02 03:00:00",
"graph_type": "timeseries",
"columns": ["value"],
"x_axis": "timestamp",
"granularity": gran,
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/js
on"
)
data = rv.get_json()
assert rv.status_code == 200
from dateutil import parser
start = parser.parse(cast(str, payload["start"])).replace(tzinfo=None)
buckets = [
            parser.parse(cast(str, r[0])).replace(tzinfo=None) for r in data["rows"]
]
assert buckets[0] == start
if len(buckets) > 1:
step = (buckets[1] - buckets[0]).total_seconds()
assert step % data["bucket_size"] == 0
assert any(r[1] != 0 for r in data["rows"])
run("Auto")
run("Fine")
def test_timeseries_string_column_error() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "timeseries",
"limit": 7,
"columns": ["timestamp", "event", "value", "user"],
"x_axis": "timestamp",
"granularity": "1 hour",
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 400
assert "Aggregate" in data["error"]
def test_derived_column_basic() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"limit": 10,
"columns": ["timestamp"],
"derived_columns": {"val2": "value * 2"},
"filters": [],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert data["rows"][0][1] == 20
def test_timeseries_derived_column() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"start": "2024-01-01 00:00:00",
"end": "2024-01-03 00:00:00",
"graph_type": "timeseries",
"granularity": "1 hour",
"limit": 7,
"columns": ["value"],
"derived_columns": {"derived_1": "value * 2"},
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
rows = data["rows"]
assert all(r[2] == r[1] * 2 for r in rows)
def test_default_start_end_returned() -> None:
app = server.app
client = app.test_client()
payload = {
"table": "events",
"order_by": "timestamp",
"limit": 5,
"columns": ["timestamp"],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert data["start"] == "2024-01-01 00:00:00"
assert data["end"] == "2024-01-02 03:00:00"
The file is long. I'll scroll on to the end to check for more tests.
I’ll patch the tests to adapt to the new default "timestamp" in QueryParams. Let's skip modifying existing tests and add a new one instead.
root@29cb09005028:/workspace/scubaduck# python - <<'PY'
> import datetime
> print(int(datetime.datetime(2024,1,1,0,0).timestamp()))
> print(int(datetime.datetime(2024,1,1,1,0).timestamp()))
> PY
1704067200
1704070800
I'll test using a CSV named 'events.csv' to check integer time format compatibility.
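A minimal sketch of that test — the column name "created" and the exact assertions are my own choices here, not code that exists in the repo yet:

    def test_integer_time_column(tmp_path: Path) -> None:
        csv_file = tmp_path / "events.csv"
        # Epochs computed above: 2024-01-01 00:00:00 and 01:00:00 UTC.
        csv_file.write_text("created,event\n1704067200,login\n1704070800,logout\n")
        app = server.create_app(csv_file)
        client = app.test_client()
        payload = {
            "table": "events",
            "start": "2024-01-01 00:00:00",
            "end": "2024-01-01 01:00:00",
            "order_by": "created",
            "columns": ["created", "event"],
            "time_column": "created",
        }
        rv = client.post(
            "/api/query", data=json.dumps(payload), content_type="application/json"
        )
        assert rv.status_code == 200
        assert len(rv.get_json()["rows"]) == 2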
I'll test that the time column dropdown appears, updates in the URL, and persists selections.
I need to check run_query in test_web.py to handle the new #time_column field properly.
def select_value(page: Any, selector: str, value: str) -> None:
page.evaluate(
"arg => setSelectValue(arg.sel, arg.val)",
{"sel": selector, "val": value},
)
def run_query(
page: Any,
url: str,
*,
start: str | None = None,
end: str | None = None,
order_by: str | None = None,
order_dir: str | None = "ASC",
limit: int | None = None,
group_by: list[str] | None = None,
aggregate: str | None = None,
) -> dict[str, Any]:
page.goto(url)
page.wait_for_selector("#order_by option", state="attached")
page.wait_for_selector("#order_dir", state="attached")
page.wait_for_function("window.lastResults !== undefined")
if start is not None:
page.fill("#start", start)
if end is not None:
page.fill("#end", end)
if order_by is not None:
select_value(page, "#order_by", order_by)
if order_dir is not None and order_dir == "DESC":
page.click("#order_dir")
if limit is not None:
page.fill("#limit", str(limit))
if group_by is not None:
select_value(page, "#graph_type", "table")
select_value(page, "#graph_type", "table")
page.evaluate(
"g => { groupBy.chips = g; groupBy.renderChips(); }",
group_by,
)
if aggregate is not None:
select_value(page, "#graph_type", "table")
select_value(page, "#aggregate", aggregate)
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
return page.evaluate("window.lastResults")
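For the new dropdown, a helper along these lines should be enough (a sketch; it drives the page's setSelectValue hook the same way select_value does):

    def set_time_column(page: Any, column: str) -> None:
        # The custom dropdowns are all driven through setSelectValue, so the
        # time column select behaves like any other select on the page.
        select_value(page, "#time_column", column)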
def test_range_filters(page: Any, server_url: str) -> None:
data = run_query(
page,
server_url,
start="2024-01-02 00:00:00",
end="2024-01-02 04:00:00",
order_by="timestamp",
limit=100,
)
assert len(data["rows"]) == 2
from dateutil import parser
    timestamps = [parser.parse(row[0]).replace(tzinfo=None) for row in data["rows"]]
assert timestamps == [
parser.parse("2024-01-02 00:00:00"),
parser.parse("2024-01-02 03:00:00"),
]
def test_order_by(page: Any, server_url: str) -> None:
data = run_query(
page,
server_url,
start="2024-01-01 00:00:00",
end="2024-01-03 00:00:00",
order_by="value",
order_dir="DESC",
I'll add a time_column parameter and ensure the default selection works in tests. Let’s implement the changes.
root@29cb09005028:/workspace/scubaduck# git status --short
Time to dive into server.py and make the necessary changes.
from typing import Any, Dict, List, Tuple, cast
import re
from datetime import datetime, timedelta, timezone
import time
from pathlib import Path
import os
import sqlite3
import traceback
import math
import duckdb
from dateutil import parser as dtparser
from flask import Flask, jsonify, request, send_from_directory
@dataclass
class Filter:
column: str
op: str
value: str | int | float | list[str] | None
@dataclass
class QueryParams:
start: str | None = None
end: str | None = None
order_by: str | None = None
order_dir: str = "ASC"
limit: int | None = None
columns: list[str] = field(default_factory=lambda: [])
filters: list[Filter] = field(default_factory=lambda: [])
derived_columns: dict[str, str] = field(default_factory=lambda: {})
graph_type: str = "samples"
group_by: list[str] = field(default_factory=lambda: [])
aggregate: str | None = None
show_hits: bool = False
x_axis: str | None = None
granularity: str = "Auto"
fill: str = "0"
table: str = "events"
def _normalize_sqlite_type(sql: str) -> str:
"""Map arbitrary SQLite column types to DuckDB-compatible types."""
t = sql.strip().upper()
if "(" in t:
t = t.split("(", 1)[0]
if "INT" in t:
# SQLite only has a single INTEGER type which is always 64-bit.
# Use DuckDB's BIGINT to avoid overflow when values exceed INT32.
return "BIGINT"
if any(key in t for key in ("CHAR", "CLOB", "TEXT")):
return "VARCHAR"
if "BLOB" in t:
return "BLOB"
if any(key in t for key in ("DOUBLE", "REAL", "FLOA", "NUMERIC", "DECIMAL"))
:
return "DOUBLE"
if "BOOL" in t:
return "BOOLEAN"
if "DATE" in t or "TIME" in t:
return "TIMESTAMP" if "TIME" in t else "DATE"
return "VARCHAR"
def _load_database(path: Path) -> duckdb.DuckDBPyConnection:
I'm reviewing the dataclass to implement some modifications.
I need to add the time_column to the dataclass. I'll get that updated.
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/server.py
> @@
> class QueryParams:
> @@
> fill: str = "0"
> table: str = "events"
> + time_column: str = "timestamp"
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/server.py
I’ve confirmed dataclass is imported. Time to implement the _time_expr function now.
root@29cb09005028:/workspace/scubaduck# grep -n "_granularity_seconds" -n scubaduck/server.py
159:def _granularity_seconds(granularity: str, start: str | None, end: str | None) -> int:
197:        sec = _granularity_seconds(params.granularity, params.start, params.end)
541: bucket_size = _granularity_seconds(
I'm paging through the file around line 150 to find where to add the helper.
dt = datetime.now(timezone.utc) + delta
return dt.replace(microsecond=0).strftime("%Y-%m-%d %H:%M:%S")
dt = dtparser.parse(s)
return dt.replace(microsecond=0, tzinfo=None).strftime("%Y-%m-%d %H:%M:%S")
def _granularity_seconds(granularity: str, start: str | None, end: str | None) -> int:
gran = granularity.lower()
mapping = {
"1 second": 1,
"5 seconds": 5,
"10 seconds": 10,
"30 seconds": 30,
"1 minute": 60,
"4 minutes": 240,
"5 minutes": 300,
"10 minutes": 600,
"15 minutes": 900,
"30 minutes": 1800,
"1 hour": 3600,
"3 hours": 10800,
"6 hours": 21600,
"1 day": 86400,
"1 week": 604800,
"30 days": 2592000,
}
if gran in mapping:
return mapping[gran]
if gran in {"auto", "fine"} and start and end:
try:
s = dtparser.parse(start)
e = dtparser.parse(end)
except Exception:
return 3600
total = max((e - s).total_seconds(), 1)
buckets = 100 if gran == "auto" else 500
return max(int(total // buckets), 1)
return 3600
def build_query(params: QueryParams, column_types: Dict[str, str] | None = None) -> str:
select_parts: list[str] = []
group_cols = params.group_by[:]
if params.graph_type == "timeseries":
sec = _granularity_seconds(params.granularity, params.start, params.end)
x_axis = params.x_axis or "timestamp"
if params.start:
bucket_expr = (
f"TIMESTAMP '{params.start}' + INTERVAL '{sec} second' * "
f"CAST(floor((epoch({x_axis}) - epoch(TIMESTAMP '{params.start}'
))/{sec}) AS BIGINT)"
)
else:
bucket_expr = (
f"TIMESTAMP 'epoch' + INTERVAL '{sec} second' * "
f"CAST(floor(epoch({x_axis})/{sec}) AS BIGINT)"
)
select_parts.append(f"{bucket_expr} AS bucket")
group_cols = ["bucket"] + group_cols
has_agg = bool(group_cols) or params.aggregate is not None
if has_agg:
select_cols = (
group_cols[1:] if params.graph_type == "timeseries" else group_cols
)
select_parts.extend(select_cols)
agg = (params.aggregate or "avg").lower()
def agg_expr(col: str) -> str:
if agg.startswith("p"):
quant = float(agg[1:]) / 100
return f"quantile({col}, {quant})"
if agg == "count distinct":
return f"count(DISTINCT {col})"
if agg == "avg" and column_types is not None:
ctype = column_types.get(col, "").upper()
if "TIMESTAMP" in ctype or "DATE" in ctype or "TIME" in ctype:
return (
"TIMESTAMP 'epoch' + INTERVAL '1 second' * "
f"CAST(avg(epoch({col})) AS BIGINT)"
I'll define the _time_expr helper function right after _granularity_seconds, before build_query.
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/server.py
> @@
> def _granularity_seconds(granularity: str, start: str | None, end: str | None) -> int:
> @@
> return 3600
> +
> +
> +def _time_expr(col: str, column_types: Dict[str, str] | None) -> str:
> + """Return SQL expression for column interpreted as timestamp."""
> + if column_types is None:
> + return col
> + ctype = column_types.get(col, "").upper()
> + if not any(t in ctype for t in ["TIMESTAMP", "DATE", "TIME"]):
> + if any(t in ctype for t in [
> + "INT",
> + "DECIMAL",
> + "REAL",
> + "DOUBLE",
> + "FLOAT",
> + "NUMERIC",
> + "HUGEINT",
> + ]):
> + return f"TIMESTAMP 'epoch' + INTERVAL '1 second' * {col}"
> + return col
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/server.py
I’ll adjust the build_query function to fit the requirements.
return 3600
def _time_expr(col: str, column_types: Dict[str, str] | None) -> str:
"""Return SQL expression for column interpreted as timestamp."""
if column_types is None:
return col
ctype = column_types.get(col, "").upper()
if not any(t in ctype for t in ["TIMESTAMP", "DATE", "TIME"]):
if any(t in ctype for t in [
"INT",
"DECIMAL",
"REAL",
"DOUBLE",
"FLOAT",
"NUMERIC",
"HUGEINT",
]):
return f"TIMESTAMP 'epoch' + INTERVAL '1 second' * {col}"
return col
def build_query(params: QueryParams, column_types: Dict[str, str] | None = None) -> str:
select_parts: list[str] = []
group_cols = params.group_by[:]
if params.graph_type == "timeseries":
sec = _granularity_seconds(params.granularity, params.start, params.end)
x_axis = params.x_axis or "timestamp"
if params.start:
bucket_expr = (
f"TIMESTAMP '{params.start}' + INTERVAL '{sec} second' * "
f"CAST(floor((epoch({x_axis}) - epoch(TIMESTAMP '{params.start}'
))/{sec}) AS BIGINT)"
)
else:
bucket_expr = (
f"TIMESTAMP 'epoch' + INTERVAL '{sec} second' * "
f"CAST(floor(epoch({x_axis})/{sec}) AS BIGINT)"
)
select_parts.append(f"{bucket_expr} AS bucket")
group_cols = ["bucket"] + group_cols
has_agg = bool(group_cols) or params.aggregate is not None
if has_agg:
select_cols = (
group_cols[1:] if params.graph_type == "timeseries" else group_cols
)
select_parts.extend(select_cols)
agg = (params.aggregate or "avg").lower()
def agg_expr(col: str) -> str:
if agg.startswith("p"):
quant = float(agg[1:]) / 100
return f"quantile({col}, {quant})"
if agg == "count distinct":
return f"count(DISTINCT {col})"
if agg == "avg" and column_types is not None:
ctype = column_types.get(col, "").upper()
if "TIMESTAMP" in ctype or "DATE" in ctype or "TIME" in ctype:
return (
"TIMESTAMP 'epoch' + INTERVAL '1 second' * "
f"CAST(avg(epoch({col})) AS BIGINT)"
)
return f"{agg}({col})"
I'll update the x_axis to use params.x_axis or params.time_column, and adjust bucket_expr with _time_expr.
"FLOAT",
"NUMERIC",
"HUGEINT",
]):
return f"TIMESTAMP 'epoch' + INTERVAL '1 second' * {col}"
return col
def build_query(params: QueryParams, column_types: Dict[str, str] | None = None)
-> str:
select_parts: list[str] = []
group_cols = params.group_by[:]
if params.graph_type == "timeseries":
sec = _granularity_seconds(params.granularity, params.start, params.end)
x_axis = params.x_axis or "timestamp"
if params.start:
bucket_expr = (
f"TIMESTAMP '{params.start}' + INTERVAL '{sec} second' * "
f"CAST(floor((epoch({x_axis}) - epoch(TIMESTAMP '{params.start}'
))/{sec}) AS BIGINT)"
)
else:
bucket_expr = (
f"TIMESTAMP 'epoch' + INTERVAL '{sec} second' * "
f"CAST(floor(epoch({x_axis})/{sec}) AS BIGINT)"
)
select_parts.append(f"{bucket_expr} AS bucket")
group_cols = ["bucket"] + group_cols
has_agg = bool(group_cols) or params.aggregate is not None
if has_agg:
select_cols = (
group_cols[1:] if params.graph_type == "timeseries" else group_cols
)
select_parts.extend(select_cols)
agg = (params.aggregate or "avg").lower()
def agg_expr(col: str) -> str:
if agg.startswith("p"):
quant = float(agg[1:]) / 100
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/server.py
> @@
> - if params.graph_type == "timeseries":
> -        sec = _granularity_seconds(params.granularity, params.start, params.end)
> -        x_axis = params.x_axis or "timestamp"
> -        if params.start:
> -            bucket_expr = (
> -                f"TIMESTAMP '{params.start}' + INTERVAL '{sec} second' * "
> -                f"CAST(floor((epoch({x_axis}) - epoch(TIMESTAMP '{params.start}'))/{sec}) AS BIGINT)"
> -            )
> -        else:
> -            bucket_expr = (
> -                f"TIMESTAMP 'epoch' + INTERVAL '{sec} second' * "
> -                f"CAST(floor(epoch({x_axis})/{sec}) AS BIGINT)"
> -            )
> +    if params.graph_type == "timeseries":
> +        sec = _granularity_seconds(params.granularity, params.start, params.end)
> +        x_axis = params.x_axis or params.time_column
> +        xexpr = _time_expr(x_axis, column_types)
> +        if params.start:
> +            bucket_expr = (
> +                f"TIMESTAMP '{params.start}' + INTERVAL '{sec} second' * "
> +                f"CAST(floor((epoch({xexpr}) - epoch(TIMESTAMP '{params.start}'))/{sec}) AS BIGINT)"
> + )
> + else:
> + bucket_expr = (
> + f"TIMESTAMP 'epoch' + INTERVAL '{sec} second' * "
> + f"CAST(floor(epoch({xexpr})/{sec}) AS BIGINT)"
> + )
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/server.py
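For a concrete feel: with 1-hour buckets, a start of 2024-01-01 00:00:00, and a hypothetical integer epoch column named created, those f-strings should now render the bucket expression as:

    TIMESTAMP '2024-01-01 00:00:00' + INTERVAL '3600 second' * CAST(floor((epoch(TIMESTAMP 'epoch' + INTERVAL '1 second' * created) - epoch(TIMESTAMP '2024-01-01 00:00:00'))/3600) AS BIGINT)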
I'll search for lines containing "timestamp >=" to update the conditions.
root@29cb09005028:/workspace/scubaduck# grep -n "timestamp >=" -n scubaduck/server.py
295: where_parts.append(f"timestamp >= '{params.start}'")
I’ll work on fixing things in that part of the code.
root@29cb09005028:/workspace/scubaduck# sed -n '288,312p' scubaduck/server.py
for name, expr in params.derived_columns.items():
select_parts.append(f"{expr} AS {name}")
select_clause = ", ".join(select_parts) if select_parts else "*"
lines = [f"SELECT {select_clause}", f'FROM "{params.table}"']
where_parts: list[str] = []
if params.start:
where_parts.append(f"timestamp >= '{params.start}'")
if params.end:
where_parts.append(f"timestamp <= '{params.end}'")
for f in params.filters:
op = f.op
if op in {"empty", "!empty"}:
val = "''"
else:
if f.value is None:
continue
if isinstance(f.value, list):
if not f.value:
continue
if op == "=":
vals = " OR ".join(f"{f.column} = '{v}'" for v in f.value)
where_parts.append(f"({vals})")
continue
val = f"'{f.value}'" if isinstance(f.value, str) else str(f.value)
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/server.py
> @@
> - if params.start:
> - where_parts.append(f"timestamp >= '{params.start}'")
> - if params.end:
> - where_parts.append(f"timestamp <= '{params.end}'")
> + time_expr = _time_expr(params.time_column, column_types)
> + if params.start:
> + where_parts.append(f"{time_expr} >= '{params.start}'")
> + if params.end:
> + where_parts.append(f"{time_expr} <= '{params.end}'")
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/server.py
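With the same hypothetical created column, the start condition now renders as a timestamp comparison rather than a bare column reference (DuckDB should coerce the quoted literal):

    TIMESTAMP 'epoch' + INTERVAL '1 second' * created >= '2024-01-01 00:00:00'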
I’ll adjust the default start/end detection earlier in the query if they’re missing.
root@29cb09005028:/workspace/scubaduck# grep -n "SELECT min" -n scubaduck/server.py
547: f'SELECT min({axis}), max({axis}) FROM "{params.table}"'
root@29cb09005028:/workspace/scubaduck# sed -n '528,568p' scubaduck/server.py
"error": f"Aggregate {agg} cannot be applied
to column {c}",
}
),
400,
)
if allow_time and not (is_numeric or is_time):
return (
jsonify(
{
"error": f"Aggregate {agg} cannot be applied
to column {c}",
}
),
400,
)
if params.start is None or params.end is None:
axis = params.x_axis or "timestamp"
row = cast(
tuple[datetime | None, datetime | None],
con.execute(
f'SELECT min({axis}), max({axis}) FROM "{params.table}"'
).fetchall()[0],
)
mn, mx = row
if params.start is None and mn is not None:
params.start = (
                mn.strftime("%Y-%m-%d %H:%M:%S") if not isinstance(mn, str) else mn
            )
        if params.end is None and mx is not None:
            params.end = (
                mx.strftime("%Y-%m-%d %H:%M:%S") if not isinstance(mx, str) else mx
)
bucket_size: int | None = None
if params.graph_type == "timeseries":
bucket_size = _granularity_seconds(
params.granularity,
params.start if isinstance(params.start, str) else None,
params.end if isinstance(params.end, str) else None,
)
if (
params.limit is not None
I'll set the axis default to params.time_column and convert integers to datetimes.
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/server.py
> @@
> - if params.start is None or params.end is None:
> - axis = params.x_axis or "timestamp"
> + if params.start is None or params.end is None:
> + axis = params.x_axis or params.time_column
> row = cast(
> tuple[datetime | None, datetime | None],
> con.execute(
> f'SELECT min({axis}), max({axis}) FROM "{params.table}"'
> ).fetchall()[0],
> )
> mn, mx = row
> + if isinstance(mn, (int, float)):
> + mn = datetime.fromtimestamp(int(mn), tz=timezone.utc)
> + if isinstance(mx, (int, float)):
> + mx = datetime.fromtimestamp(int(mx), tz=timezone.utc)
> if params.start is None and mn is not None:
> params.start = (
>                 mn.strftime("%Y-%m-%d %H:%M:%S") if not isinstance(mn, str) else mn
> )
> if params.end is None and mx is not None:
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/server.py
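As a sanity check on that fallback, the epoch value computed earlier round-trips as expected:

    from datetime import datetime, timezone
    # 1704067200 is the epoch computed above for 2024-01-01 00:00:00 UTC.
    assert datetime.fromtimestamp(1704067200, tz=timezone.utc).strftime(
        "%Y-%m-%d %H:%M:%S"
    ) == "2024-01-01 00:00:00"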
I'll review the query() endpoint for validations, especially the "time_column" to check processing.
root@29cb09005028:/workspace/scubaduck# grep -n "time_column" -n scubaduck/server.py
46: time_column: str = "timestamp"
217: x_axis = params.x_axis or params.time_column
294: time_expr = _time_expr(params.time_column, column_types)
543: axis = params.x_axis or params.time_column
I added time_column to the dataclass. Now, I need to pass it via payload.
root@29cb09005028:/workspace/scubaduck# grep -n "QueryParams(" -n scubaduck/server.py
434: params = QueryParams(
root@29cb09005028:/workspace/scubaduck# sed -n '432,466p' scubaduck/server.py
return jsonify({"error": str(exc)}), 400
params = QueryParams(
start=start,
end=end,
order_by=payload.get("order_by"),
order_dir=payload.get("order_dir", "ASC"),
limit=payload.get("limit"),
columns=payload.get("columns", []),
derived_columns=payload.get("derived_columns", {}),
graph_type=payload.get("graph_type", "samples"),
group_by=payload.get("group_by", []),
aggregate=payload.get("aggregate"),
show_hits=payload.get("show_hits", False),
x_axis=payload.get("x_axis"),
granularity=payload.get("granularity", "Auto"),
fill=payload.get("fill", "0"),
table=payload.get("table", default_table),
)
for f in payload.get("filters", []):
params.filters.append(Filter(f["column"], f["op"], f.get("value")))
if params.table not in tables:
return jsonify({"error": "Invalid table"}), 400
column_types = get_columns(params.table)
if params.graph_type not in {"table", "timeseries"} and (
params.group_by or params.aggregate or params.show_hits
):
return (
jsonify(
{
"error": "group_by, aggregate and show_hits are only val
id for table or timeseries view"
}
I'll pass time_column through and check that it exists in column_types before proceeding.
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/server.py
> @@
> fill=payload.get("fill", "0"),
> table=payload.get("table", default_table),
> + time_column=payload.get("time_column", "timestamp"),
> )
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/server.py
I'll look for validation near column_types = get_columns(params.table).
root@29cb09005028:/workspace/scubaduck# sed -n '470,540p' scubaduck/server.py
)
valid_cols = set(column_types.keys())
valid_cols.update(params.derived_columns.keys())
if params.graph_type == "timeseries":
if params.x_axis is None:
for cand in ["time", "timestamp"]:
if cand in valid_cols:
params.x_axis = cand
break
if params.x_axis is None or params.x_axis not in valid_cols:
return jsonify({"error": "Invalid x_axis"}), 400
ctype = column_types.get(params.x_axis, "").upper()
if not any(t in ctype for t in ["TIMESTAMP", "DATE", "TIME"]):
return jsonify({"error": "x_axis must be a time column"}), 400
for col in params.columns:
if col not in valid_cols:
return jsonify({"error": f"Unknown column: {col}"}), 400
for col in params.group_by:
if col not in valid_cols:
return jsonify({"error": f"Unknown column: {col}"}), 400
if params.order_by and params.order_by not in valid_cols:
return jsonify({"error": f"Unknown column: {params.order_by}"}), 400
if params.group_by or params.graph_type == "timeseries":
agg = (params.aggregate or "avg").lower()
if agg.startswith("p") or agg == "sum":
need_numeric = True
allow_time = False
elif agg == "avg" or agg in {"min", "max"}:
need_numeric = False
allow_time = True
else:
need_numeric = False
allow_time = False
if need_numeric or allow_time:
for c in params.columns:
if c in params.group_by or c == params.x_axis:
continue
if c not in column_types:
continue
ctype = column_types.get(c, "").upper()
is_numeric = any(
t in ctype
for t in [
"INT",
"DECIMAL",
"REAL",
"DOUBLE",
"FLOAT",
"NUMERIC",
"HUGEINT",
]
)
is_time = "TIMESTAMP" in ctype or "DATE" in ctype or "TIME"
in ctype
if need_numeric and not is_numeric:
return (
jsonify(
{
"error": f"Aggregate {agg} cannot be applied
to column {c}",
}
),
400,
)
if allow_time and not (is_numeric or is_time):
return (
jsonify(
{
"error": f"Aggregate {agg} cannot be applied
to column {c}",
}
),
I'll add validation for params.time_column right after column_types and set the default for x_axis.
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/server.py
> @@
> column_types = get_columns(params.table)
>
> + if params.time_column not in column_types:
> + return jsonify({"error": "Invalid time_column"}), 400
> +
> if params.graph_type not in {"table", "timeseries"} and (
> params.group_by or params.aggregate or params.show_hits
> ):
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/server.py
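A matching negative test should be cheap to add later (a sketch, reusing the payload shape from the existing tests):

    def test_invalid_time_column() -> None:
        app = server.app
        client = app.test_client()
        payload = {
            "table": "events",
            "columns": ["timestamp"],
            "time_column": "nonexistent",
        }
        rv = client.post(
            "/api/query", data=json.dumps(payload), content_type="application/json"
        )
        assert rv.status_code == 400
        assert rv.get_json()["error"] == "Invalid time_column"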
I'll update the x_axis default to check for "time_column" and allow numeric values.
root@29cb09005028:/workspace/scubaduck# sed -n '496,524p' scubaduck/server.py
if params.group_by or params.graph_type == "timeseries":
agg = (params.aggregate or "avg").lower()
if agg.startswith("p") or agg == "sum":
need_numeric = True
allow_time = False
elif agg == "avg" or agg in {"min", "max"}:
need_numeric = False
allow_time = True
else:
need_numeric = False
allow_time = False
if need_numeric or allow_time:
for c in params.columns:
if c in params.group_by or c == params.x_axis:
continue
if c not in column_types:
continue
ctype = column_types.get(c, "").upper()
is_numeric = any(
t in ctype
for t in [
"INT",
"DECIMAL",
"REAL",
"DOUBLE",
"FLOAT",
"NUMERIC",
"HUGEINT",
The default x_axis seems to be around line 480.
root@29cb09005028:/workspace/scubaduck# sed -n '480,497p' scubaduck/server.py
if cand in valid_cols:
params.x_axis = cand
break
if params.x_axis is None or params.x_axis not in valid_cols:
return jsonify({"error": "Invalid x_axis"}), 400
ctype = column_types.get(params.x_axis, "").upper()
if not any(t in ctype for t in ["TIMESTAMP", "DATE", "TIME"]):
return jsonify({"error": "x_axis must be a time column"}), 400
for col in params.columns:
if col not in valid_cols:
return jsonify({"error": f"Unknown column: {col}"}), 400
for col in params.group_by:
if col not in valid_cols:
return jsonify({"error": f"Unknown column: {col}"}), 400
if params.order_by and params.order_by not in valid_cols:
return jsonify({"error": f"Unknown column: {params.order_by}"}), 400
if params.group_by or params.graph_type == "timeseries":
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/server.py
> @@
> - if params.graph_type == "timeseries":
> - if params.x_axis is None:
> - for cand in ["time", "timestamp"]:
> - if cand in valid_cols:
> - params.x_axis = cand
> - break
> - if params.x_axis is None or params.x_axis not in valid_cols:
> - return jsonify({"error": "Invalid x_axis"}), 400
> - ctype = column_types.get(params.x_axis, "").upper()
> - if not any(t in ctype for t in ["TIMESTAMP", "DATE", "TIME"]):
> -            return jsonify({"error": "x_axis must be a time column"}), 400
> + if params.graph_type == "timeseries":
> + if params.x_axis is None:
> + params.x_axis = params.time_column
> + if params.x_axis is None or params.x_axis not in valid_cols:
> + return jsonify({"error": "Invalid x_axis"}), 400
> + ctype = column_types.get(params.x_axis, "").upper()
> + is_time = any(t in ctype for t in ["TIMESTAMP", "DATE", "TIME"])
> + is_numeric = any(
> + t in ctype
> + for t in [
> + "INT",
> + "DECIMAL",
> + "REAL",
> + "DOUBLE",
> + "FLOAT",
> + "NUMERIC",
> + "HUGEINT",
> + ]
> + )
> + if not (is_time or is_numeric):
> +            return jsonify({"error": "x_axis must be a time column"}), 400
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/server.py
I'll focus on updating variable names and scanning for more references to 'timestamp'.
root@29cb09005028:/workspace/scubaduck# grep -n "timestamp" -n scubaduck/server.py
46: time_column: str = "timestamp"
194: """Return SQL expression for column interpreted as timestamp."""
450: time_column=payload.get("time_column", "timestamp"),
566: mn = datetime.fromtimestamp(int(mn), tz=timezone.utc)
568: mx = datetime.fromtimestamp(int(mx), tz=timezone.utc)
Time to open index.html and make the necessary patches.
root@29cb09005028:/workspace/scubaduck# grep -n "time_column" -n scubaduck/static/index.html
I'll add a new field after the header in the settings section. Let's find it.
root@29cb09005028:/workspace/scubaduck# grep -n "id=\"settings\"" -n scubaduck/static/index.html
185: <div id="settings" class="tab-content active">
root@29cb09005028:/workspace/scubaduck# sed -n '188,220p' scubaduck/static/index.html
<div class="rel-box">
<input id="start" type="text" />
<button type="button" class="rel-btn" data-target="start-select">	
660;</button>
<div id="start-select" class="rel-dropdown" data-input="start">
<div data-value="-1 hour">-1 hour</div>
<div data-value="-3 hours">-3 hours</div>
<div data-value="-12 hours">-12 hours</div>
<div data-value="-1 day">-1 day</div>
<div data-value="-3 days">-3 days</div>
<div data-value="-1 week">-1 week</div>
<div data-value="-1 fortnight">-1 fortnight</div>
<div data-value="-30 days">-30 days</div>
<div data-value="-90 days">-90 days</div>
</div>
</div>
</div>
<div class="field">
        <label>End<span class="help" title="Sets the start/end of the time range to query. Can be any kind of datetime string. For example: 'April 23, 2014' or 'yesterday'.">[?]</span></label>
<div class="rel-box">
<input id="end" type="text" />
<button type="button" class="rel-btn" data-target="end-select">φ
0;</button>
<div id="end-select" class="rel-dropdown" data-input="end">
<div data-value="now">now</div>
<div data-value="-1 hour">-1 hour</div>
<div data-value="-3 hours">-3 hours</div>
<div data-value="-12 hours">-12 hours</div>
<div data-value="-1 day">-1 day</div>
<div data-value="-3 days">-3 days</div>
<div data-value="-1 week">-1 week</div>
<div data-value="-1 fortnight">-1 fortnight</div>
<div data-value="-30 days">-30 days</div>
<div data-value="-90 days">-90 days</div>
</div>
I’ll check lines 188-205, then verify the context before inserting the 'Start' field.
root@29cb09005028:/workspace/scubaduck# sed -n '178,196p' scubaduck/static/index.html
<div id="content">
<div id="sidebar">
<div id="tabs">
<button class="tab active" data-tab="settings">View Settings</button>
<button class="tab" data-tab="columns">Columns</button>
<button id="dive" onclick="dive()">Dive</button>
</div>
<div id="settings" class="tab-content active">
<div class="field">
        <label>Start<span class="help" title="Sets the start/end of the time range to query. Can be any kind of datetime string. For example: 'April 23, 2014' or 'yesterday'.">[?]</span></label>
<div class="rel-box">
<input id="start" type="text" />
<button type="button" class="rel-btn" data-target="start-select">	
660;</button>
<div id="start-select" class="rel-dropdown" data-input="start">
<div data-value="-1 hour">-1 hour</div>
<div data-value="-3 hours">-3 hours</div>
<div data-value="-12 hours">-12 hours</div>
<div data-value="-1 day">-1 day</div>
<div data-value="-3 days">-3 days</div>
I'll add the new field before the 'Start' field in settings and patch it.
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/static/index.html
> @@
> <div id="settings" class="tab-content active">
> + <div class="field">
> + <label>Time Column</label>
> + <select id="time_column"></select>
> + </div>
> <div class="field">
> <label>Start<span class="help" title="Sets the start/end of the tim
e range to query. Can be any kind of datetime string. For example: 'April 23, 20
14' or 'yesterday'.">[?]</span></label>
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/static/index.html
I'll check for variable declarations near the top and add timeColumnOptions arrays.
root@29cb09005028:/workspace/scubaduck# sed -n '330,348p' scubaduck/static/index
.html
</div>
<script src="/js/chip_input.js"></script>
<script src="/js/timeseries_chart.js"></script>
<script>
const allColumns = [];
const baseColumns = [];
const columnTypes = {};
const stringColumns = [];
const baseStringColumns = [];
const integerColumns = [];
const baseIntegerColumns = [];
const timeColumns = [];
const baseTimeColumns = [];
const derivedColumns = [];
let selectedColumns = [];
let displayType = 'samples';
let groupBy = {chips: [], addChip: () => {}, renderChips: () => {}};
function initDropdown(select) {
I'll go ahead and add arrays for the new candidate list.
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/static/index.html
> @@
> const integerColumns = [];
> const baseIntegerColumns = [];
> const timeColumns = [];
> const baseTimeColumns = [];
> +const timeColumnOptions = [];
> +const baseTimeColumnOptions = [];
> let selectedColumns = [];
> let displayType = 'samples';
> let groupBy = {chips: [], addChip: () => {}, renderChips: () => {}};
> +let defaultTimeColumn = '';
> *** End Patch
> PATCH
Failed to find expected lines ["const integerColumns = [];", "const baseIntegerC
olumns = [];", "const timeColumns = [];", "const baseTimeColumns = [];", "let se
lectedColumns = [];", "let displayType = 'samples';", "let groupBy = {chips: [],
addChip: () => {}, renderChips: () => {}};"] in scubaduck/static/index.html
I'll open lines 338 to 358 to find the context mismatch; the patch skipped the const derivedColumns = []; line sitting between the array declarations.
root@29cb09005028:/workspace/scubaduck# sed -n '338,358p' scubaduck/static/index
.html
const baseStringColumns = [];
const integerColumns = [];
const baseIntegerColumns = [];
const timeColumns = [];
const baseTimeColumns = [];
const derivedColumns = [];
let selectedColumns = [];
let displayType = 'samples';
let groupBy = {chips: [], addChip: () => {}, renderChips: () => {}};
function initDropdown(select) {
const wrapper = document.createElement('div');
wrapper.className = 'dropdown';
if (select.classList.contains('f-col')) {
wrapper.classList.add('f-col');
}
select.parentNode.insertBefore(wrapper, select);
wrapper.appendChild(select);
select.style.display = 'none';
const disp = document.createElement('div');
disp.className = 'dropdown-display';
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/static/index.html
> @@
> const baseStringColumns = [];
> const integerColumns = [];
> const baseIntegerColumns = [];
> const timeColumns = [];
> const baseTimeColumns = [];
> +const timeColumnOptions = [];
> +const baseTimeColumnOptions = [];
> const derivedColumns = [];
> let selectedColumns = [];
> let displayType = 'samples';
> let groupBy = {chips: [], addChip: () => {}, renderChips: () => {}};
> +let defaultTimeColumn = '';
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/static/index.html
I'll update the loadColumns function to populate the time_column select. Opening it now.
root@29cb09005028:/workspace/scubaduck# grep -n "loadColumns" -n scubaduck/stati
c/index.html
506:function loadColumns(table) {
638: loadColumns(table).then(() => {
645: loadColumns(document.getElementById('table').value).then(() => {
root@29cb09005028:/workspace/scubaduck# sed -n '506,560p' scubaduck/static/index
.html
function loadColumns(table) {
return fetch('/api/columns?table=' + encodeURIComponent(table)).then(r => r.js
on()).then(cols => {
const orderSelect = document.getElementById('order_by');
const xAxisSelect = document.getElementById('x_axis');
const groupsEl = document.getElementById('column_groups');
orderSelect.innerHTML = '';
xAxisSelect.innerHTML = '';
groupsEl.innerHTML = '';
allColumns.length = 0;
stringColumns.length = 0;
integerColumns.length = 0;
timeColumns.length = 0;
baseColumns.length = 0;
baseStringColumns.length = 0;
baseIntegerColumns.length = 0;
baseTimeColumns.length = 0;
for (const k in columnTypes) delete columnTypes[k];
const groups = {
time: {name: 'Time', cols: [], ul: null},
integer: {name: 'Integers', cols: [], ul: null},
string: {name: 'Strings', cols: [], ul: null},
};
cols.forEach(c => {
const t = c.type.toUpperCase();
columnTypes[c.name] = c.type;
allColumns.push(c.name);
baseColumns.push(c.name);
let g = 'string';
if (t.includes('INT')) g = 'integer';
if (t.includes('TIMESTAMP')) {
g = 'time';
timeColumns.push(c.name);
baseTimeColumns.push(c.name);
}
if (g === 'string') {
stringColumns.push(c.name);
baseStringColumns.push(c.name);
} else {
integerColumns.push(c.name);
baseIntegerColumns.push(c.name);
}
groups[g].cols.push(c.name);
if (g !== 'string') {
const o = document.createElement('option');
o.value = c.name;
o.textContent = c.name;
orderSelect.appendChild(o);
}
});
timeColumns.forEach(name => {
const o = document.createElement('option');
o.value = name;
o.textContent = name;
xAxisSelect.appendChild(o);
});
I'll clear and repopulate timeColumnOptions alongside the other resets, then compute a heuristic guess for the default time column.
root@29cb09005028:/workspace/scubaduck# sed -n '560,620p' scubaduck/static/index
.html
});
Object.keys(groups).forEach(key => {
const g = groups[key];
const div = document.createElement('div');
div.className = 'col-group';
const header = document.createElement('div');
header.className = 'col-group-header';
header.appendChild(document.createTextNode(g.name + ': '));
const links = document.createElement('span');
links.className = 'links';
const allBtn = document.createElement('a');
allBtn.href = '#';
allBtn.textContent = 'All';
const noneBtn = document.createElement('a');
noneBtn.href = '#';
noneBtn.textContent = 'None';
links.appendChild(allBtn);
links.appendChild(noneBtn);
header.appendChild(links);
div.appendChild(header);
const ul = document.createElement('ul');
g.ul = ul;
g.cols.forEach(name => {
const li = document.createElement('li');
const label = document.createElement('label');
const cb = document.createElement('input');
cb.type = 'checkbox';
cb.value = name;
cb.checked = true;
cb.addEventListener('change', updateSelectedColumns);
label.appendChild(cb);
label.appendChild(document.createTextNode(' ' + name));
li.appendChild(label);
ul.appendChild(li);
});
allBtn.addEventListener('click', e => {
e.preventDefault();
ul.querySelectorAll('input').forEach(cb => (cb.checked = true));
updateSelectedColumns();
});
noneBtn.addEventListener('click', e => {
e.preventDefault();
ul.querySelectorAll('input').forEach(cb => (cb.checked = false));
updateSelectedColumns();
});
div.appendChild(ul);
groupsEl.appendChild(div);
});
document.getElementById('columns_all').addEventListener('click', e => {
e.preventDefault();
groupsEl.querySelectorAll('input').forEach(cb => (cb.checked = true));
updateSelectedColumns();
});
document.getElementById('columns_none').addEventListener('click', e => {
e.preventDefault();
groupsEl.querySelectorAll('input').forEach(cb => (cb.checked = false));
updateSelectedColumns();
});
updateSelectedColumns();
groupBy = document.getElementById('group_by').closest('.field');
initChipInput(groupBy, typed =>
I'll go back and include the timeColumnOptions in the earlier changes.
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/static/index.html
> @@
> - orderSelect.innerHTML = '';
> - xAxisSelect.innerHTML = '';
> + const timeColumnSelect = document.getElementById('time_column');
> + orderSelect.innerHTML = '';
> + xAxisSelect.innerHTML = '';
> + timeColumnSelect.innerHTML = '';
> @@
> - timeColumns.length = 0;
> + timeColumns.length = 0;
> + timeColumnOptions.length = 0;
> baseColumns.length = 0;
> baseStringColumns.length = 0;
> baseIntegerColumns.length = 0;
> baseTimeColumns.length = 0;
> + baseTimeColumnOptions.length = 0;
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/static/index.html
I’ll add a heuristics variable and update the loops to account for it.
root@29cb09005028:/workspace/scubaduck# grep -n "cols.forEach" -n scubaduck/stat
ic/index.html | head
532: cols.forEach(c => {
586: g.cols.forEach(name => {
I'll apply the patch near line 532 in the code.
root@29cb09005028:/workspace/scubaduck# sed -n '532,572p' scubaduck/static/index
.html
cols.forEach(c => {
const t = c.type.toUpperCase();
columnTypes[c.name] = c.type;
allColumns.push(c.name);
baseColumns.push(c.name);
let g = 'string';
if (t.includes('INT')) g = 'integer';
if (t.includes('TIMESTAMP')) {
g = 'time';
timeColumns.push(c.name);
baseTimeColumns.push(c.name);
}
if (g === 'string') {
stringColumns.push(c.name);
baseStringColumns.push(c.name);
} else {
integerColumns.push(c.name);
baseIntegerColumns.push(c.name);
}
groups[g].cols.push(c.name);
if (g !== 'string') {
const o = document.createElement('option');
o.value = c.name;
o.textContent = c.name;
orderSelect.appendChild(o);
}
});
timeColumns.forEach(name => {
const o = document.createElement('option');
o.value = name;
o.textContent = name;
xAxisSelect.appendChild(o);
});
Object.keys(groups).forEach(key => {
const g = groups[key];
const div = document.createElement('div');
div.className = 'col-group';
const header = document.createElement('div');
header.className = 'col-group-header';
header.appendChild(document.createTextNode(g.name + ': '));
const links = document.createElement('span');
I'll adjust the code to fill timeColumnOptions and heuristics, with time-specific logic after the loop.
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/static/index.html
> @@
> - cols.forEach(c => {
> - const t = c.type.toUpperCase();
> - columnTypes[c.name] = c.type;
> - allColumns.push(c.name);
> - baseColumns.push(c.name);
> - let g = 'string';
> - if (t.includes('INT')) g = 'integer';
> - if (t.includes('TIMESTAMP')) {
> - g = 'time';
> - timeColumns.push(c.name);
> - baseTimeColumns.push(c.name);
> - }
> - if (g === 'string') {
> - stringColumns.push(c.name);
> - baseStringColumns.push(c.name);
> - } else {
> - integerColumns.push(c.name);
> - baseIntegerColumns.push(c.name);
> - }
> - groups[g].cols.push(c.name);
> - if (g !== 'string') {
> - const o = document.createElement('option');
> - o.value = c.name;
> - o.textContent = c.name;
> - orderSelect.appendChild(o);
> - }
> - });
> - timeColumns.forEach(name => {
> - const o = document.createElement('option');
> - o.value = name;
> - o.textContent = name;
> - xAxisSelect.appendChild(o);
> - });
> + const heur = ['timestamp','created','created_at','event_time','time','dat
e','occurred','happened','logged'];
> + let guess = null;
> + cols.forEach(c => {
> + const t = c.type.toUpperCase();
> + columnTypes[c.name] = c.type;
> + allColumns.push(c.name);
> + baseColumns.push(c.name);
> + let g = 'string';
> + const isNumeric = t.includes('INT') || t.includes('DECIMAL') || t.inclu
des('NUMERIC') || t.includes('REAL') || t.includes('DOUBLE') || t.includes('FLOA
T') || t.includes('HUGEINT');
> + const isTimeType = t.includes('TIMESTAMP') || t.includes('DATE') || t.i
ncludes('TIME');
> + if (isNumeric || isTimeType) {
> + timeColumnOptions.push(c.name);
> + baseTimeColumnOptions.push(c.name);
> + if (!guess && heur.some(h => c.name.toLowerCase().includes(h))) guess
= c.name;
> + }
> + if (isTimeType) {
> + g = 'time';
> + timeColumns.push(c.name);
> + baseTimeColumns.push(c.name);
> + } else if (isNumeric) {
> + g = 'integer';
> + }
> + if (g === 'string') {
> + stringColumns.push(c.name);
> + baseStringColumns.push(c.name);
> + } else if (g === 'integer') {
> + integerColumns.push(c.name);
> + baseIntegerColumns.push(c.name);
> + }
> + groups[g].cols.push(c.name);
> + if (g !== 'string') {
> + const o = document.createElement('option');
> + o.value = c.name;
> + o.textContent = c.name;
> + orderSelect.appendChild(o);
> + }
> + });
> + timeColumns.forEach(name => {
> + const o = document.createElement('option');
> + o.value = name;
> + o.textContent = name;
> + xAxisSelect.appendChild(o);
> + });
> + timeColumnOptions.forEach(name => {
> + const o = document.createElement('option');
> + o.value = name;
> + o.textContent = name;
> + timeColumnSelect.appendChild(o);
> + });
> + defaultTimeColumn = guess || timeColumnOptions[0] || '';
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/static/index.html
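Before moving on, let me restate the guessing logic compactly. This is an illustrative Python mirror of what the JS above does; the function name and shape are mine, not the repo's:

# Illustrative mirror of the JS guessing heuristic; not part of the repo.
HEURISTIC_NAMES = ["timestamp", "created", "created_at", "event_time",
                   "time", "date", "occurred", "happened", "logged"]
NUMERIC_HINTS = ("INT", "DECIMAL", "NUMERIC", "REAL", "DOUBLE", "FLOAT", "HUGEINT")
TIME_HINTS = ("TIMESTAMP", "DATE", "TIME")

def guess_time_column(columns: list[tuple[str, str]]) -> str | None:
    """First time-capable column whose name matches a heuristic,
    else the first time-capable column, else None."""
    candidates = [name for name, ctype in columns
                  if any(h in ctype.upper() for h in NUMERIC_HINTS + TIME_HINTS)]
    for name in candidates:
        if any(h in name.lower() for h in HEURISTIC_NAMES):
            return name
    return candidates[0] if candidates else None

assert guess_time_column([("id", "BIGINT"), ("created", "BIGINT")]) == "created"
assert guess_time_column([("name", "VARCHAR")]) is None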
I'll update refreshDerivedColumns and other functions to adjust timeColumnOptions as well.
root@29cb09005028:/workspace/scubaduck# grep -n "timeColumnOptions" -n scubaduck
/static/index.html
343:const timeColumnOptions = [];
520: timeColumnOptions.length = 0;
543: timeColumnOptions.push(c.name);
575: timeColumnOptions.forEach(name => {
581: defaultTimeColumn = guess || timeColumnOptions[0] || '';
The new lines landed around 520; let me confirm baseTimeColumnOptions is wired in as well.
root@29cb09005028:/workspace/scubaduck# grep -n "baseTimeColumnOptions" -n scuba
duck/static/index.html
344:const baseTimeColumnOptions = [];
525: baseTimeColumnOptions.length = 0;
544: baseTimeColumnOptions.push(c.name);
Both arrays are in place. Next, refreshDerivedColumns needs to keep them in sync with derived columns, so I'll find it.
root@29cb09005028:/workspace/scubaduck# grep -n "refreshDerivedColumns" -n scuba
duck/static/index.html
871: container.addEventListener(evt, refreshDerivedColumns);
874: refreshDerivedColumns();
884: refreshDerivedColumns();
887:function refreshDerivedColumns() {
1043: refreshDerivedColumns();
root@29cb09005028:/workspace/scubaduck# sed -n '860,910p' scubaduck/static/index
.html
type: data.type || 'string',
name: data.name || nextDerivedName(),
expr: data.expr || '',
include: data.include !== undefined ? data.include : true,
el: container
};
container.querySelector('.d-type').value = obj.type;
container.querySelector('.d-name').value = obj.name;
container.querySelector('.d-expr').value = obj.expr;
container.querySelector('.d-use').checked = obj.include;
['change','input'].forEach(evt => {
container.addEventListener(evt, refreshDerivedColumns);
});
derivedColumns.push(obj);
refreshDerivedColumns();
}
function removeDerived(btn) {
const el = btn.closest('.derived');
const idx = derivedColumns.findIndex(d => d.el === el);
if (idx !== -1) {
derivedColumns.splice(idx, 1);
}
el.remove();
refreshDerivedColumns();
}
function refreshDerivedColumns() {
allColumns.splice(0, allColumns.length, ...baseColumns);
stringColumns.splice(0, stringColumns.length, ...baseStringColumns);
integerColumns.splice(0, integerColumns.length, ...baseIntegerColumns);
timeColumns.splice(0, timeColumns.length, ...baseTimeColumns);
derivedColumns.forEach(d => {
d.type = d.el.querySelector('.d-type').value;
d.name = d.el.querySelector('.d-name').value;
d.expr = d.el.querySelector('.d-expr').value;
d.include = d.el.querySelector('.d-use').checked;
allColumns.push(d.name);
columnTypes[d.name] = d.type;
if (d.type === 'string') {
stringColumns.push(d.name);
} else {
integerColumns.push(d.name);
}
});
updateSelectedColumns();
}
let lastQueryTime = 0;
let queryStart = 0;
I need to refresh timeColumnOptions for derived columns that may be numeric or time.
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/static/index.html
> @@
> allColumns.splice(0, allColumns.length, ...baseColumns);
> stringColumns.splice(0, stringColumns.length, ...baseStringColumns);
> integerColumns.splice(0, integerColumns.length, ...baseIntegerColumns);
> timeColumns.splice(0, timeColumns.length, ...baseTimeColumns);
> + timeColumnOptions.splice(0, timeColumnOptions.length, ...baseTimeColumnOpti
ons);
> derivedColumns.forEach(d => {
> d.type = d.el.querySelector('.d-type').value;
> d.name = d.el.querySelector('.d-name').value;
> d.expr = d.el.querySelector('.d-expr').value;
> d.include = d.el.querySelector('.d-use').checked;
> allColumns.push(d.name);
> columnTypes[d.name] = d.type;
> if (d.type === 'string') {
> stringColumns.push(d.name);
> } else {
> integerColumns.push(d.name);
> + timeColumnOptions.push(d.name);
> }
> });
> updateSelectedColumns();
> }
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/static/index.html
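The splice idiom in that patch is load-bearing: resetting the arrays in place means every closure that captured them keeps seeing fresh data. The equivalent move in Python, purely for illustration:

base = ["timestamp", "value"]
options = base.copy()
alias = options                       # stands in for a closure holding the array
options[:] = base + ["derived_ts"]    # like arr.splice(0, arr.length, ...items)
assert alias == ["timestamp", "value", "derived_ts"]
# Rebinding with options = [...] would have left alias stale instead.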
Next I'll look at isTimeColumn, which both the column selector and the renderer rely on.
root@29cb09005028:/workspace/scubaduck# grep -n "isTimeColumn" -n scubaduck/stat
ic/index.html
490: if (isTimeColumn(cb.value) || isStringColumn(cb.value)) {
739:function isTimeColumn(name) {
1157: if (isTimeColumn(col)) {
I'll start by reviewing the code near line 734.
root@29cb09005028:/workspace/scubaduck# sed -n '730,750p' scubaduck/static/index
.html
const t = (columnTypes[name] || '').toUpperCase();
return t.includes('CHAR') || t.includes('STRING') || t.includes('VARCHAR');
}
function isIntegerColumn(name) {
const t = (columnTypes[name] || '').toUpperCase();
return t.includes('INT');
}
function isTimeColumn(name) {
const t = (columnTypes[name] || '').toUpperCase();
return t.includes('TIMESTAMP');
}
function formatNumber(val) {
if (typeof val !== 'number') val = Number(val);
if (Number.isNaN(val)) return '';
if (val === 0) return '0';
const abs = Math.abs(val);
if (abs > 999.999) {
const units = [
I'll update isTimeColumn so the explicitly selected time column and x-axis column also count as time columns.
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/static/index.html
> @@
> function isTimeColumn(name) {
> - const t = (columnTypes[name] || '').toUpperCase();
> - return t.includes('TIMESTAMP');
> + const t = (columnTypes[name] || '').toUpperCase();
> + if (t.includes('TIMESTAMP') || t.includes('DATE') || t.includes('TIME')) re
turn true;
> + const sel = document.getElementById('time_column').value;
> + const xsel = document.getElementById('x_axis').value;
> + if (name === sel || name === xsel) return true;
> + return false;
> }
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/static/index.html
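The effective rule is now: a column is a time column if its declared type is time-like, or if the user explicitly picked it as the Time Column or the x-axis. As a Python restatement, illustrative only:

def is_time_column(name: str, column_types: dict[str, str],
                   time_column: str, x_axis: str) -> bool:
    ctype = column_types.get(name, "").upper()
    if any(t in ctype for t in ("TIMESTAMP", "DATE", "TIME")):
        return True
    # Numeric columns only count as time when explicitly selected.
    return name in (time_column, x_axis)

assert is_time_column("created", {"created": "BIGINT"}, "created", "") is True
assert is_time_column("value", {"value": "BIGINT"}, "created", "") is False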
Next, I'll update the showResults function to support numeric time columns.
root@29cb09005028:/workspace/scubaduck# grep -n "showResults" -n scubaduck/stati
c/index.html
940: showResults(data);
1221:function showResults(data) {
root@29cb09005028:/workspace/scubaduck# sed -n '1218,1248p' scubaduck/static/index.html
}
renderTable(rows);
}
function showResults(data) {
window.lastResults = data;
const view = document.getElementById('view');
if (graphTypeSel.value === 'timeseries') {
showTimeSeries(data);
} else {
if (data.rows.length === 0) {
view.innerHTML =
'<p id="empty-message">Empty data provided to table</p><table id="result
s"></table>';
} else {
view.innerHTML = '<table id="results"></table>';
}
originalRows = data.rows.slice();
sortState = {index: null, dir: null};
renderTable(originalRows);
}
const sqlEl = document.createElement('pre');
sqlEl.id = 'sql_query';
sqlEl.style.whiteSpace = 'pre-wrap';
sqlEl.style.marginTop = '10px';
sqlEl.textContent = data.sql;
view.appendChild(sqlEl);
document.getElementById('query_info').textContent =
`Your query took about ${lastQueryTime} ms`;
}
function showError(err) {
window.lastResults = err;
I’ll look into the renderTable function for rendering details.
root@29cb09005028:/workspace/scubaduck# grep -n "renderTable" -n scubaduck/stati
c/index.html
1114:function renderTable(rows) {
1217: renderTable(rows);
1235: renderTable(originalRows);
root@29cb09005028:/workspace/scubaduck# sed -n '1114,1168p' scubaduck/static/ind
ex.html
function renderTable(rows) {
const table = document.getElementById('results');
table.innerHTML = '';
if (rows.length === 0) return;
let hitsIndex = selectedColumns.indexOf('Hits');
let totalHits = 0;
if (hitsIndex !== -1) {
totalHits = rows.reduce((s, r) => s + Number(r[hitsIndex]), 0);
}
const header = document.createElement('tr');
selectedColumns.forEach((col, i) => {
const th = document.createElement('th');
let label = col;
if (
displayType === 'table' &&
col !== 'Hits' &&
!(groupBy.chips || []).includes(col)
) {
const agg = document.getElementById('aggregate').value.toLowerCase();
label += ` (${agg})`;
}
th.textContent = label;
th.dataset.index = i;
th.addEventListener('click', handleSort);
if (sortState.index === i) {
th.classList.add('sorted');
th.textContent =
label + (sortState.dir === 'desc' ? ' \u25BC' : ' \u25B2');
}
if (!isStringColumn(col)) th.style.textAlign = 'right';
header.appendChild(th);
});
table.appendChild(header);
rows.forEach(row => {
const tr = document.createElement('tr');
tr.addEventListener('click', () => {
const wasSelected = tr.classList.contains('selected');
document
.querySelectorAll('#results tr.selected')
.forEach(el => el.classList.remove('selected'));
if (!wasSelected) {
tr.classList.add('selected');
}
});
row.forEach((v, i) => {
const col = selectedColumns[i];
const td = document.createElement('td');
if (isTimeColumn(col)) {
const d = new Date(v);
td.textContent = d.toLocaleString('en-US', {
weekday: 'short',
month: 'short',
day: 'numeric',
year: 'numeric',
hour: 'numeric',
root@29cb09005028:/workspace/scubaduck# sed -n '1168,1198p' scubaduck/static/ind
ex.html
hour: 'numeric',
minute: 'numeric',
second: 'numeric',
hour12: true,
timeZoneName: 'short'
});
} else {
if (col === 'Hits') {
const pct = totalHits ? ((v / totalHits) * 100).toFixed(1) : '0';
td.textContent = `${formatNumber(v)} (${pct}%)`;
} else {
td.textContent = isStringColumn(col) ? v : formatNumber(v);
}
}
td.style.textAlign = isStringColumn(col) ? 'left' : 'right';
tr.appendChild(td);
});
table.appendChild(tr);
});
// ensure table does not overflow unless necessary
const view = document.getElementById('view');
if (table.scrollWidth <= view.clientWidth) {
table.style.width = '100%';
}
}
function handleSort(e) {
const idx = parseInt(e.target.dataset.index, 10);
if (sortState.index !== idx) {
sortState.index = idx;
sortState.dir = 'asc';
I'll modify how rows render when they are time-related and numeric.
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/static/index.html
> @@
> - if (isTimeColumn(col)) {
> - const d = new Date(v);
> + if (isTimeColumn(col)) {
> + let d;
> + const t = (columnTypes[col] || '').toUpperCase();
> + if (t.includes('TIMESTAMP') || t.includes('DATE') || t.includes('TIME
')) {
> + d = new Date(v);
> + } else {
> + d = new Date(Number(v) * 1000);
> + }
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/static/index.html
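The multiplication by 1000 is the detail to get right here: JavaScript's Date constructor takes milliseconds, while integer time columns store epoch seconds. A quick Python check of the same conversion (values illustrative):

from datetime import datetime, timezone

epoch_seconds = 1704067200  # what an integer time column stores
dt = datetime.fromtimestamp(epoch_seconds, tz=timezone.utc)
assert dt.isoformat() == "2024-01-01T00:00:00+00:00"
# JS equivalent: new Date(1704067200 * 1000), since Date wants milliseconds.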
I’ll check around line 1170 to make sure the rest of the code is intact.
root@29cb09005028:/workspace/scubaduck# sed -n '1164,1198p' scubaduck/static/ind
ex.html
if (t.includes('TIMESTAMP') || t.includes('DATE') || t.includes('TIME'))
{
d = new Date(v);
} else {
d = new Date(Number(v) * 1000);
}
td.textContent = d.toLocaleString('en-US', {
weekday: 'short',
month: 'short',
day: 'numeric',
year: 'numeric',
hour: 'numeric',
minute: 'numeric',
second: 'numeric',
hour12: true,
timeZoneName: 'short'
});
} else {
if (col === 'Hits') {
const pct = totalHits ? ((v / totalHits) * 100).toFixed(1) : '0';
td.textContent = `${formatNumber(v)} (${pct}%)`;
} else {
td.textContent = isStringColumn(col) ? v : formatNumber(v);
}
}
td.style.textAlign = isStringColumn(col) ? 'left' : 'right';
tr.appendChild(td);
});
table.appendChild(tr);
});
// ensure table does not overflow unless necessary
const view = document.getElementById('view');
if (table.scrollWidth <= view.clientWidth) {
table.style.width = '100%';
}
}
I'll add time_column to collectParams; first, locate the function.
root@29cb09005028:/workspace/scubaduck# grep -n "collectParams" -n scubaduck/sta
tic/index.html
918: const params = collectParams();
947:function collectParams() {
root@29cb09005028:/workspace/scubaduck# sed -n '947,998p' scubaduck/static/index
.html
function collectParams() {
updateSelectedColumns();
const payload = {
table: document.getElementById('table').value,
start: document.getElementById('start').value,
end: document.getElementById('end').value,
order_by: document.getElementById('order_by').value,
order_dir: orderDir,
limit: parseInt(document.getElementById('limit').value, 10),
columns: selectedColumns.filter(c =>
c !== 'Hits' && !derivedColumns.some(dc => dc.name === c)
),
graph_type: graphTypeSel.value,
filters: Array.from(document.querySelectorAll('#filters .filter')).map(f =>
{
const chips = f.chips || [];
const op = f.querySelector('.f-op').value;
let value = null;
if (op !== 'empty' && op !== '!empty') {
value = chips.length === 0 ? null : (chips.length === 1 ? chips[0] : chi
ps);
}
const colSel = f.querySelector('.f-col select') || f.querySelector('.f-col
');
return {column: colSel.value, op, value};
}),
derived_columns: Array.from(document.querySelectorAll('#derived_list .derive
d')).map(d => ({
type: d.querySelector('.d-type').value,
name: d.querySelector('.d-name').value,
expr: d.querySelector('.d-expr').value,
include: d.querySelector('.d-use').checked,
}))
};
if (graphTypeSel.value === 'table' || graphTypeSel.value === 'timeseries') {
payload.group_by = groupBy.chips || [];
payload.aggregate = document.getElementById('aggregate').value;
payload.show_hits = document.getElementById('show_hits').checked;
}
if (graphTypeSel.value === 'timeseries') {
payload.x_axis = document.getElementById('x_axis').value;
payload.granularity = document.getElementById('granularity').value;
payload.fill = document.getElementById('fill').value;
}
return payload;
}
function paramsToSearch(params) {
const sp = new URLSearchParams();
if (params.table) sp.set('table', params.table);
if (params.start) sp.set('start', params.start);
if (params.end) sp.set('end', params.end);
if (params.order_by) sp.set('order_by', params.order_by);
if (params.order_dir) sp.set('order_dir', params.order_dir);
if (params.limit !== null && params.limit !== undefined) sp.set('limit', param
s.limit);
if (params.columns && params.columns.length) sp.set('columns', params.columns.
join(','));
root@29cb09005028:/workspace/scubaduck# sed -n '998,1050p' scubaduck/static/inde
x.html
if (params.columns && params.columns.length) sp.set('columns', params.columns.
join(','));
if (params.filters && params.filters.length) sp.set('filters', JSON.stringify(
params.filters));
if (params.derived_columns && params.derived_columns.length) sp.set('derived_c
olumns', JSON.stringify(params.derived_columns));
if (params.graph_type) sp.set('graph_type', params.graph_type);
if (params.graph_type === 'table' || params.graph_type === 'timeseries') {
if (params.group_by && params.group_by.length) sp.set('group_by', params.gro
up_by.join(','));
if (params.aggregate) sp.set('aggregate', params.aggregate);
if (params.show_hits) sp.set('show_hits', '1');
}
if (params.graph_type === 'timeseries') {
if (params.x_axis) sp.set('x_axis', params.x_axis);
if (params.granularity) sp.set('granularity', params.granularity);
if (params.fill) sp.set('fill', params.fill);
}
const qs = sp.toString();
return qs ? '?' + qs : '';
}
function applyParams(params) {
if (params.table) document.getElementById('table').value = params.table;
document.getElementById('start').value = params.start || '';
document.getElementById('end').value = params.end || '';
if (params.order_by) {
document.getElementById('order_by').value = params.order_by;
}
orderDir = params.order_dir || 'ASC';
updateOrderDirButton();
if (params.limit !== undefined && params.limit !== null) {
document.getElementById('limit').value = params.limit;
}
graphTypeSel.value = params.graph_type || 'samples';
updateDisplayTypeUI();
if (params.x_axis) document.getElementById('x_axis').value = params.x_axis;
if (params.granularity) document.getElementById('granularity').value = params.
granularity;
if (params.fill) document.getElementById('fill').value = params.fill;
if (params.group_by) {
groupBy.chips.splice(0, groupBy.chips.length, ...params.group_by);
groupBy.renderChips();
}
if (params.aggregate) document.getElementById('aggregate').value = params.aggr
egate;
document.getElementById('show_hits').checked = params.show_hits ?? true;
document.querySelectorAll('#column_groups input').forEach(cb => {
cb.checked = !params.columns || params.columns.includes(cb.value);
});
updateSelectedColumns();
const dlist = document.getElementById('derived_list');
dlist.innerHTML = '';
derivedColumns.splice(0, derivedColumns.length);
if (params.derived_columns && params.derived_columns.length) {
params.derived_columns.forEach(dc => addDerived(dc));
}
refreshDerivedColumns();
const list = document.getElementById('filter_list');
root@29cb09005028:/workspace/scubaduck# sed -n '1050,1120p' scubaduck/static/ind
ex.html
const list = document.getElementById('filter_list');
list.innerHTML = '';
if (params.filters && params.filters.length) {
params.filters.forEach(f => {
addFilter();
const el = list.lastElementChild;
const colSel = el.querySelector('.f-col select') || el.querySelector('.f-c
ol');
colSel.value = f.column;
colSel.dispatchEvent(new Event('change'));
el.querySelector('.f-op').value = f.op;
el.querySelector('.f-op').dispatchEvent(new Event('change'));
if (f.value !== null && f.op !== 'empty' && f.op !== '!empty') {
const values = Array.isArray(f.value) ? f.value : [f.value];
values.forEach(v => el.addChip(v));
el.renderChips();
}
});
} else {
addFilter();
}
}
function parseSearch() {
const sp = new URLSearchParams(window.location.search);
const params = {};
if (sp.has('table')) params.table = sp.get('table');
if (sp.has('start')) params.start = sp.get('start');
if (sp.has('end')) params.end = sp.get('end');
if (sp.has('order_by')) params.order_by = sp.get('order_by');
if (sp.has('order_dir')) params.order_dir = sp.get('order_dir');
if (sp.has('limit')) params.limit = parseInt(sp.get('limit'), 10);
if (sp.has('columns')) params.columns = sp.get('columns').split(',').filter(c
=> c);
if (sp.has('filters')) {
try { params.filters = JSON.parse(sp.get('filters')); } catch(e) { params.fi
lters = []; }
}
if (sp.has('graph_type')) params.graph_type = sp.get('graph_type');
if (sp.has('group_by')) params.group_by = sp.get('group_by').split(',').filter
(c => c);
if (sp.has('aggregate')) params.aggregate = sp.get('aggregate');
if (sp.has('show_hits')) params.show_hits = sp.get('show_hits') === '1';
if (sp.has('x_axis')) params.x_axis = sp.get('x_axis');
if (sp.has('granularity')) params.granularity = sp.get('granularity');
if (sp.has('fill')) params.fill = sp.get('fill');
if (sp.has('derived_columns')) {
try { params.derived_columns = JSON.parse(sp.get('derived_columns')); } catc
h(e) { params.derived_columns = []; }
}
return params;
}
function initFromUrl() {
const params = parseSearch();
history.replaceState(params, '', paramsToSearch(params));
applyParams(params);
dive(false);
}
window.addEventListener('popstate', e => {
const params = e.state || parseSearch();
applyParams(params);
dive(false);
});
let originalRows = [];
let sortState = {index: null, dir: null};
function renderTable(rows) {
const table = document.getElementById('results');
table.innerHTML = '';
if (rows.length === 0) return;
let hitsIndex = selectedColumns.indexOf('Hits');
let totalHits = 0;
if (hitsIndex !== -1) {
I'll update collectParams to set time_column and adjust the related functions.
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/static/index.html
> @@
> table: document.getElementById('table').value,
> + time_column: document.getElementById('time_column').value,
> start: document.getElementById('start').value,
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/static/index.html
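For reference, the payload collectParams now produces looks roughly like this; values are illustrative, shown as Python for readability:

import json

payload = {
    "table": "events",
    "time_column": "created",   # the newly added field
    "start": "2024-01-01 00:00:00",
    "end": "2024-01-02 00:00:00",
    "order_by": "created",
    "order_dir": "ASC",
    "limit": 100,
    "columns": ["created", "event"],
    "graph_type": "samples",
    "filters": [],
    "derived_columns": [],
}
print(json.dumps(payload, indent=2))  # body of the POST to /api/query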
I'll add time_column to paramsToSearch as well.
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/static/index.html
> @@
> if (params.table) sp.set('table', params.table);
> + if (params.time_column) sp.set('time_column', params.time_column);
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/static/index.html
I'll adjust applyParams to set the field, falling back to the guessed default when the URL omits it.
root@29cb09005028:/workspace/scubaduck# grep -n "time_column" -n scubaduck/stati
c/index.html
188: <select id="time_column"></select>
511: const timeColumnSelect = document.getElementById('time_column');
742: const sel = document.getElementById('time_column').value;
951: time_column: document.getElementById('time_column').value,
994: if (params.time_column) sp.set('time_column', params.time_column);
I need to fix the parseSearch and applyParams functions.
root@29cb09005028:/workspace/scubaduck# grep -n "parseSearch" -n scubaduck/stati
c/index.html
657: const table = parseSearch().table || tables[0];
668: applyParams(parseSearch());
1074:function parseSearch() {
1101: const params = parseSearch();
1108: const params = e.state || parseSearch();
root@29cb09005028:/workspace/scubaduck# sed -n '1074,1098p' scubaduck/static/ind
ex.html
function parseSearch() {
const sp = new URLSearchParams(window.location.search);
const params = {};
if (sp.has('table')) params.table = sp.get('table');
if (sp.has('start')) params.start = sp.get('start');
if (sp.has('end')) params.end = sp.get('end');
if (sp.has('order_by')) params.order_by = sp.get('order_by');
if (sp.has('order_dir')) params.order_dir = sp.get('order_dir');
if (sp.has('limit')) params.limit = parseInt(sp.get('limit'), 10);
if (sp.has('columns')) params.columns = sp.get('columns').split(',').filter(c
=> c);
if (sp.has('filters')) {
try { params.filters = JSON.parse(sp.get('filters')); } catch(e) { params.fi
lters = []; }
}
if (sp.has('graph_type')) params.graph_type = sp.get('graph_type');
if (sp.has('group_by')) params.group_by = sp.get('group_by').split(',').filter
(c => c);
if (sp.has('aggregate')) params.aggregate = sp.get('aggregate');
if (sp.has('show_hits')) params.show_hits = sp.get('show_hits') === '1';
if (sp.has('x_axis')) params.x_axis = sp.get('x_axis');
if (sp.has('granularity')) params.granularity = sp.get('granularity');
if (sp.has('fill')) params.fill = sp.get('fill');
if (sp.has('derived_columns')) {
try { params.derived_columns = JSON.parse(sp.get('derived_columns')); } catc
h(e) { params.derived_columns = []; }
}
return params;
}
I'll include the time_column in both parseSearch and applyParams.
root@29cb09005028:/workspace/scubaduck# sed -n '1120,1160p' scubaduck/static/ind
ex.html
let hitsIndex = selectedColumns.indexOf('Hits');
let totalHits = 0;
if (hitsIndex !== -1) {
totalHits = rows.reduce((s, r) => s + Number(r[hitsIndex]), 0);
}
const header = document.createElement('tr');
selectedColumns.forEach((col, i) => {
const th = document.createElement('th');
let label = col;
if (
displayType === 'table' &&
col !== 'Hits' &&
!(groupBy.chips || []).includes(col)
) {
const agg = document.getElementById('aggregate').value.toLowerCase();
label += ` (${agg})`;
}
th.textContent = label;
th.dataset.index = i;
th.addEventListener('click', handleSort);
if (sortState.index === i) {
th.classList.add('sorted');
th.textContent =
label + (sortState.dir === 'desc' ? ' \u25BC' : ' \u25B2');
}
if (!isStringColumn(col)) th.style.textAlign = 'right';
header.appendChild(th);
});
table.appendChild(header);
rows.forEach(row => {
const tr = document.createElement('tr');
tr.addEventListener('click', () => {
const wasSelected = tr.classList.contains('selected');
document
.querySelectorAll('#results tr.selected')
.forEach(el => el.classList.remove('selected'));
if (!wasSelected) {
tr.classList.add('selected');
}
});
row.forEach((v, i) => {
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/static/index.html
> @@
> if (sp.has('table')) params.table = sp.get('table');
> + if (sp.has('time_column')) params.time_column = sp.get('time_column');
> @@
> if (params.table) document.getElementById('table').value = params.table;
> + document.getElementById('time_column').value = params.time_column || defaul
tTimeColumn;
> *** End Patch
> PATCH
Failed to find expected lines [" if (params.table) document.getElementById('tab
le').value = params.table;"] in scubaduck/static/index.html
The patch failed due to context, so I'll patch it step by step, starting with parseSearch.
root@29cb09005028:/workspace/scubaduck# sed -n '1088,1108p' scubaduck/static/ind
ex.html
if (sp.has('group_by')) params.group_by = sp.get('group_by').split(',').filter
(c => c);
if (sp.has('aggregate')) params.aggregate = sp.get('aggregate');
if (sp.has('show_hits')) params.show_hits = sp.get('show_hits') === '1';
if (sp.has('x_axis')) params.x_axis = sp.get('x_axis');
if (sp.has('granularity')) params.granularity = sp.get('granularity');
if (sp.has('fill')) params.fill = sp.get('fill');
if (sp.has('derived_columns')) {
try { params.derived_columns = JSON.parse(sp.get('derived_columns')); } catc
h(e) { params.derived_columns = []; }
}
return params;
}
function initFromUrl() {
const params = parseSearch();
history.replaceState(params, '', paramsToSearch(params));
applyParams(params);
dive(false);
}
window.addEventListener('popstate', e => {
const params = e.state || parseSearch();
The patch needs more stable context, so I'll look at the lines right after parseSearch's opening to anchor it.
root@29cb09005028:/workspace/scubaduck# sed -n '1078,1092p' scubaduck/static/ind
ex.html
if (sp.has('start')) params.start = sp.get('start');
if (sp.has('end')) params.end = sp.get('end');
if (sp.has('order_by')) params.order_by = sp.get('order_by');
if (sp.has('order_dir')) params.order_dir = sp.get('order_dir');
if (sp.has('limit')) params.limit = parseInt(sp.get('limit'), 10);
if (sp.has('columns')) params.columns = sp.get('columns').split(',').filter(c
=> c);
if (sp.has('filters')) {
try { params.filters = JSON.parse(sp.get('filters')); } catch(e) { params.fi
lters = []; }
}
if (sp.has('graph_type')) params.graph_type = sp.get('graph_type');
if (sp.has('group_by')) params.group_by = sp.get('group_by').split(',').filter
(c => c);
if (sp.has('aggregate')) params.aggregate = sp.get('aggregate');
if (sp.has('show_hits')) params.show_hits = sp.get('show_hits') === '1';
if (sp.has('x_axis')) params.x_axis = sp.get('x_axis');
if (sp.has('granularity')) params.granularity = sp.get('granularity');
I'll anchor the insertion right after the 'table' line.
root@29cb09005028:/workspace/scubaduck# sed -n '1074,1080p' scubaduck/static/ind
ex.html
function parseSearch() {
const sp = new URLSearchParams(window.location.search);
const params = {};
if (sp.has('table')) params.table = sp.get('table');
if (sp.has('start')) params.start = sp.get('start');
if (sp.has('end')) params.end = sp.get('end');
if (sp.has('order_by')) params.order_by = sp.get('order_by');
I'm starting the patch now.
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/static/index.html
> @@
> const params = {};
> if (sp.has('table')) params.table = sp.get('table');
> + if (sp.has('time_column')) params.time_column = sp.get('time_column');
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/static/index.html
time_column also needs to be set in applyParams. I'll make that change now.
root@29cb09005028:/workspace/scubaduck# grep -n "applyParams" -n scubaduck/stati
c/index.html
668: applyParams(parseSearch());
1018:function applyParams(params) {
1104: applyParams(params);
1110: applyParams(params);
root@29cb09005028:/workspace/scubaduck# sed -n '1018,1048p' scubaduck/static/ind
ex.html
function applyParams(params) {
if (params.table) document.getElementById('table').value = params.table;
document.getElementById('start').value = params.start || '';
document.getElementById('end').value = params.end || '';
if (params.order_by) {
document.getElementById('order_by').value = params.order_by;
}
orderDir = params.order_dir || 'ASC';
updateOrderDirButton();
if (params.limit !== undefined && params.limit !== null) {
document.getElementById('limit').value = params.limit;
}
graphTypeSel.value = params.graph_type || 'samples';
updateDisplayTypeUI();
if (params.x_axis) document.getElementById('x_axis').value = params.x_axis;
if (params.granularity) document.getElementById('granularity').value = params.
granularity;
if (params.fill) document.getElementById('fill').value = params.fill;
if (params.group_by) {
groupBy.chips.splice(0, groupBy.chips.length, ...params.group_by);
groupBy.renderChips();
}
if (params.aggregate) document.getElementById('aggregate').value = params.aggr
egate;
document.getElementById('show_hits').checked = params.show_hits ?? true;
document.querySelectorAll('#column_groups input').forEach(cb => {
cb.checked = !params.columns || params.columns.includes(cb.value);
});
updateSelectedColumns();
const dlist = document.getElementById('derived_list');
dlist.innerHTML = '';
derivedColumns.splice(0, derivedColumns.length);
if (params.derived_columns && params.derived_columns.length) {
I’ll proceed from line 1048 onwards.
root@29cb09005028:/workspace/scubaduck# sed -n '1048,1074p' scubaduck/static/ind
ex.html
if (params.derived_columns && params.derived_columns.length) {
params.derived_columns.forEach(dc => addDerived(dc));
}
refreshDerivedColumns();
const list = document.getElementById('filter_list');
list.innerHTML = '';
if (params.filters && params.filters.length) {
params.filters.forEach(f => {
addFilter();
const el = list.lastElementChild;
const colSel = el.querySelector('.f-col select') || el.querySelector('.f-c
ol');
colSel.value = f.column;
colSel.dispatchEvent(new Event('change'));
el.querySelector('.f-op').value = f.op;
el.querySelector('.f-op').dispatchEvent(new Event('change'));
if (f.value !== null && f.op !== 'empty' && f.op !== '!empty') {
const values = Array.isArray(f.value) ? f.value : [f.value];
values.forEach(v => el.addChip(v));
el.renderChips();
}
});
} else {
addFilter();
}
}
function parseSearch() {
I'll patch after the line 'if (params.table) ...' to set time_column.
root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/static/index.html
> @@
> function applyParams(params) {
> if (params.table) document.getElementById('table').value = params.table;
> + document.getElementById('time_column').value = params.time_column || defaul
tTimeColumn;
> document.getElementById('start').value = params.start || '';
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/static/index.html
That wraps up the params plumbing; time_column now flows through collect, serialize, parse, and apply. Time to run the formatters and checks.
root@29cb09005028:/workspace/scubaduck# source .venv/bin/activate
(scubaduck) root@29cb09005028:/workspace/scubaduck# ruff format scubaduck/server
.py scubaduck/static/index.html > /tmp/format.log && tail -n 20 /tmp/format.log
error: Failed to parse scubaduck/static/index.html:1:1: Expected a statement
(scubaduck) root@29cb09005028:/workspace/scubaduck#
Ruff format doesn't parse HTML, so I'll stick to Python files only.
(scubaduck) root@29cb09005028:/workspace/scubaduck# ruff format scubaduck/server
.py > /tmp/format.log && cat /tmp/format.log
1 file left unchanged
(scubaduck) root@29cb09005028:/workspace/scubaduck# ruff check scubaduck > /tmp/
ruff.log && tail -n 20 /tmp/ruff.log
All checks passed!
(scubaduck) root@29cb09005028:/workspace/scubaduck# pyright > /tmp/pyright.log &
& tail -n 20 /tmp/pyright.log
(scubaduck) root@29cb09005028:/workspace/scubaduck# cat /tmp/pyright.log
/workspace/scubaduck/scubaduck/server.py
/workspace/scubaduck/scubaduck/server.py:483:16 - error: Condition will always
evaluate to False since the types "str" and "None" have no overlap (reportUnnec
essaryComparison)
1 error, 0 warnings, 0 informations
(scubaduck) root@29cb09005028:/workspace/scubaduck#
Looks like there's a Pyright error at line 483 in server.py. I'll inspect it.
(scubaduck) root@29cb09005028:/workspace/scubaduck# sed -n '470,495p' scubaduck/
server.py
jsonify(
{
"error": "group_by, aggregate and show_hits are only val
id for table or timeseries view"
}
),
400,
)
valid_cols = set(column_types.keys())
valid_cols.update(params.derived_columns.keys())
if params.graph_type == "timeseries":
if params.x_axis is None:
params.x_axis = params.time_column
if params.x_axis is None or params.x_axis not in valid_cols:
return jsonify({"error": "Invalid x_axis"}), 400
ctype = column_types.get(params.x_axis, "").upper()
is_time = any(t in ctype for t in ["TIMESTAMP", "DATE", "TIME"])
is_numeric = any(
t in ctype
for t in [
"INT",
"DECIMAL",
"REAL",
"DOUBLE",
"FLOAT",
"NUMERIC",
(scubaduck) root@29cb09005028:/workspace/scubaduck#
I need to open the lines around line 483.
(scubaduck) root@29cb09005028:/workspace/scubaduck# sed -n '492,506p' scubaduck/
server.py
"REAL",
"DOUBLE",
"FLOAT",
"NUMERIC",
"HUGEINT",
]
)
if not (is_time or is_numeric):
return jsonify({"error": "x_axis must be a time column"}), 400
for col in params.columns:
if col not in valid_cols:
return jsonify({"error": f"Unknown column: {col}"}), 400
for col in params.group_by:
if col not in valid_cols:
return jsonify({"error": f"Unknown column: {col}"}), 400
(scubaduck) root@29cb09005028:/workspace/scubaduck#
The first condition is indeed unreachable: time_column is declared as plain str, so after the fallback assignment x_axis can no longer be None. I'll drop the redundant check.
(scubaduck) root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: scubaduck/server.py
> @@
> - if params.x_axis is None:
> - params.x_axis = params.time_column
> - if params.x_axis is None or params.x_axis not in valid_cols:
> + if params.x_axis is None:
> + params.x_axis = params.time_column
> + if params.x_axis not in valid_cols:
> return jsonify({"error": "Invalid x_axis"}), 400
> *** End Patch
> PATCH
Success. Updated the following files:
M scubaduck/server.py
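The diagnostic follows from type narrowing. A minimal repro of the same warning, assuming pyright runs with reportUnnecessaryComparison enabled:

from dataclasses import dataclass

@dataclass
class P:
    time_column: str = "timestamp"
    x_axis: str | None = None

p = P()
if p.x_axis is None:
    p.x_axis = p.time_column  # narrows p.x_axis to str on both paths
if p.x_axis is None:          # pyright: condition is always False
    raise ValueError("unreachable")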
(scubaduck) root@29cb09005028:/workspace/scubaduck#
I'll rerun pyright for another check.
(scubaduck) root@29cb09005028:/workspace/scubaduck# pyright > /tmp/pyright.log &
& tail -n 20 /tmp/pyright.log
0 errors, 0 warnings, 0 informations
(scubaduck) root@29cb09005028:/workspace/scubaduck# cat /tmp/pyright.log
0 errors, 0 warnings, 0 informations
(scubaduck) root@29cb09005028:/workspace/scubaduck#
Alright, time to run the tests and see how things are holding up.
(scubaduck) root@29cb09005028:/workspace/scubaduck# pytest -q > /tmp/pytest.log
&& tail -n 20 /tmp/pytest.log
bringing up nodes...
bringing up nodes...
........................................................................ [ 85%]
............ [100%]
84 passed in 13.63s
(scubaduck) root@29cb09005028:/workspace/scubaduck# cat /tmp/pytest.log
bringing up nodes...
bringing up nodes...
........................................................................ [ 85%]
............ [100%]
84 passed in 13.63s
(scubaduck) root@29cb09005028:/workspace/scubaduck#
I’ll check the git status now that all 84 tests have passed.
(scubaduck) root@29cb09005028:/workspace/scubaduck# git status --short
M scubaduck/server.py
M scubaduck/static/index.html
(scubaduck) root@29cb09005028:/workspace/scubaduck#
I still need a server-side test for the time column in test_server.py.
(scubaduck) root@29cb09005028:/workspace/scubaduck# grep -n "test_sqlite_bigint"
-n tests/test_server.py
265:def test_sqlite_bigint(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> N
one:
(scubaduck) root@29cb09005028:/workspace/scubaduck#
I'll look at the end of test_sqlite_bigint and the tests after it to pick an insertion point.
return getattr(self.con, name)
return Wrapper(real)
monkeypatch.setattr(server.duckdb, "connect", failing_connect)
app = server.create_app(sqlite_file)
client = app.test_client()
payload = {
"table": "events",
"order_by": "timestamp",
"columns": ["timestamp", "value"],
}
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
data = rv.get_json()
assert rv.status_code == 200
assert data["rows"][0][1] == big_value
def test_envvar_db(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
csv_file = tmp_path / "custom.csv"
csv_file.write_text("timestamp,event,value,user\n2024-01-01 00:00:00,login,5
,bob\n")
monkeypatch.setenv("SCUBADUCK_DB", str(csv_file))
app = server.create_app()
client = app.test_client()
payload = _make_payload()
rv = client.post(
"/api/query", data=json.dumps(payload), content_type="application/json"
)
rows = rv.get_json()["rows"]
assert len(rows) == 1
def test_envvar_db_missing(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> N
one:
(scubaduck) root@29cb09005028:/workspace/scubaduck#
I'll add a test right after the existing test_sqlite_bigint.
(scubaduck) root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: tests/test_server.py
> @@
> def test_sqlite_bigint(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> No
ne:
> @@
> assert data["rows"][0][1] == big_value
> +
> +
> +def test_integer_time_column(tmp_path: Path) -> None:
> + csv_file = tmp_path / "events.csv"
> + csv_file.write_text("created,event\n1704067200,login\n1704070800,logout\n
")
> + app = server.create_app(csv_file)
> + client = app.test_client()
> + payload = {
> + "table": "events",
> + "start": "2024-01-01 00:00:00",
> + "end": "2024-01-01 01:00:00",
> + "order_by": "created",
> + "columns": ["created", "event"],
> + "time_column": "created",
> + }
> + rv = client.post(
> + "/api/query", data=json.dumps(payload), content_type="application/jso
n"
> + )
> + data = rv.get_json()
> + assert rv.status_code == 200
> + assert len(data["rows"]) == 2
> *** End Patch
> PATCH
Success. Updated the following files:
M tests/test_server.py
(scubaduck) root@29cb09005028:/workspace/scubaduck#
Now a frontend test: the time_column dropdown should be populated and default to timestamp. First, check whether tests/test_web.py references it.
(scubaduck) root@29cb09005028:/workspace/scubaduck# grep -n "time_column" -n tes
ts/test_web.py
(scubaduck) root@29cb09005028:/workspace/scubaduck#
I'll add the test for the time column near line 20, after run_query.
(scubaduck) root@29cb09005028:/workspace/scubaduck# sed -n '30,70p' tests/test_w
eb.py
page.fill("#start", start)
if end is not None:
page.fill("#end", end)
if order_by is not None:
select_value(page, "#order_by", order_by)
if order_dir is not None and order_dir == "DESC":
page.click("#order_dir")
if limit is not None:
page.fill("#limit", str(limit))
if group_by is not None:
select_value(page, "#graph_type", "table")
page.evaluate(
"g => { groupBy.chips = g; groupBy.renderChips(); }",
group_by,
)
if aggregate is not None:
select_value(page, "#graph_type", "table")
select_value(page, "#aggregate", aggregate)
page.evaluate("window.lastResults = undefined")
page.click("text=Dive")
page.wait_for_function("window.lastResults !== undefined")
return page.evaluate("window.lastResults")
def test_range_filters(page: Any, server_url: str) -> None:
data = run_query(
page,
server_url,
start="2024-01-02 00:00:00",
end="2024-01-02 04:00:00",
order_by="timestamp",
limit=100,
)
assert len(data["rows"]) == 2
from dateutil import parser
timestamps = [parser.parse(row[0]).replace(tzinfo=None) for row in data["row
s"]]
assert timestamps == [
parser.parse("2024-01-02 00:00:00"),
parser.parse("2024-01-02 03:00:00"),
]
(scubaduck) root@29cb09005028:/workspace/scubaduck#
I'll add a new test after test_limit, so I'll search for 'test_limit'.
(scubaduck) root@29cb09005028:/workspace/scubaduck# grep -n "test_limit" -n test
s/test_web.py
87:def test_limit(page: Any, server_url: str) -> None:
(scubaduck) root@29cb09005028:/workspace/scubaduck#
I'll view the code around test_limit to pick the exact insertion point.
order_dir="DESC",
limit=100,
)
values = [row[2] for row in data["rows"]]
assert values == sorted(values, reverse=True)
def test_limit(page: Any, server_url: str) -> None:
data = run_query(
page,
server_url,
start="2024-01-01 00:00:00",
end="2024-01-03 00:00:00",
order_by="timestamp",
limit=2,
)
assert len(data["rows"]) == 2
def test_simple_filter(page: Any, server_url: str) -> None:
page.goto(server_url)
page.wait_for_selector("#order_by option", state="attached")
page.click("text=Add Filter")
filter_el = page.query_selector("#filters .filter:last-child")
assert filter_el
page.evaluate(
"arg => setSelectValue(arg.el.querySelector('.f-col'), arg.val)",
{"el": filter_el, "val": "user"},
)
val_input = filter_el.query_selector(".f-val")
val_input.click()
(scubaduck) root@29cb09005028:/workspace/scubaduck#
I'll place the new test right after test_limit.
(scubaduck) root@29cb09005028:/workspace/scubaduck# apply_patch<<'PATCH'
> *** Begin Patch
> *** Update File: tests/test_web.py
> @@
> def test_limit(page: Any, server_url: str) -> None:
> @@
> assert len(data["rows"]) == 2
> +
> +
> +def test_time_column_dropdown(page: Any, server_url: str) -> None:
> + page.goto(server_url)
> + page.wait_for_selector("#time_column option", state="attached")
> + options = page.locator("#time_column option").all_inner_texts()
> + assert "timestamp" in options
> + assert "value" in options
> + assert page.input_value("#time_column") == "timestamp"
> *** End Patch
> PATCH
Success. Updated the following files:
M tests/test_web.py
(scubaduck) root@29cb09005028:/workspace/scubaduck#
Time to run the ruff check and pyright again to verify everything.
(scubaduck) root@29cb09005028:/workspace/scubaduck# ruff check scubaduck tests >
/tmp/ruff.log && tail -n 20 /tmp/ruff.log
All checks passed!
(scubaduck) root@29cb09005028:/workspace/scubaduck# pyright > /tmp/pyright.log &
& tail -n 20 /tmp/pyright.log
0 errors, 0 warnings, 0 informations
(scubaduck) root@29cb09005028:/workspace/scubaduck# pytest -q > /tmp/pytest.log
&& tail -n 20 /tmp/pytest.log
bringing up nodes...
........................................................................ [ 83%]
.............. [100%]
86 passed in 12.91s
(scubaduck) root@29cb09005028:/workspace/scubaduck# cat /tmp/pytest.log
bringing up nodes...
bringing up nodes...
........................................................................ [ 83%]
.............. [100%]
86 passed in 12.91s
(scubaduck) root@29cb09005028:/workspace/scubaduck#
I'll verify the status to make sure the patch is ready for commit.
(scubaduck) root@29cb09005028:/workspace/scubaduck# git status --short
M scubaduck/server.py
M scubaduck/static/index.html
M tests/test_server.py
M tests/test_web.py
(scubaduck) root@29cb09005028:/workspace/scubaduck#
I’ll go ahead and add everything and commit the changes.
(scubaduck) root@29cb09005028:/workspace/scubaduck# git add scubaduck/server.py
scubaduck/static/index.html tests/test_server.py tests/test_web.py
(scubaduck) root@29cb09005028:/workspace/scubaduck# git commit -m "feat(server):
allow selectable time column" && git status --short
[work 4d97e0c] feat(server): allow selectable time column
4 files changed, 134 insertions(+), 17 deletions(-)
(scubaduck) root@29cb09005028:/workspace/scubaduck#
The commit landed and the working tree is clean.
(scubaduck) root@29cb09005028:/workspace/scubaduck#
For the PR description, I'll pull line-numbered snippets of the new _time_expr function.
(scubaduck) root@29cb09005028:/workspace/scubaduck# nl -ba scubaduck/server.py | sed -n '188,236p'
188 buckets = 100 if gran == "auto" else 500
189 return max(int(total // buckets), 1)
190 return 3600
191
192
193 def _time_expr(col: str, column_types: Dict[str, str] | None) -> str:
194 """Return SQL expression for column interpreted as timestamp."""
195 if column_types is None:
196 return col
197 ctype = column_types.get(col, "").upper()
198 if not any(t in ctype for t in ["TIMESTAMP", "DATE", "TIME"]):
199 if any(
200 t in ctype
201 for t in [
202 "INT",
203 "DECIMAL",
204 "REAL",
205 "DOUBLE",
206 "FLOAT",
207 "NUMERIC",
208 "HUGEINT",
209 ]
210 ):
211 return f"TIMESTAMP 'epoch' + INTERVAL '1 second' * {col}"
212 return col
213
214
215 def build_query(params: QueryParams, column_types: Dict[str, str] | None
= None) -> str:
216 select_parts: list[str] = []
217 group_cols = params.group_by[:]
218 if params.graph_type == "timeseries":
219 sec = _granularity_seconds(params.granularity, params.start, par
ams.end)
220 x_axis = params.x_axis or params.time_column
221 xexpr = _time_expr(x_axis, column_types)
222 if params.start:
223 bucket_expr = (
224 f"TIMESTAMP '{params.start}' + INTERVAL '{sec} second' *
"
225 f"CAST(floor((epoch({xexpr}) - epoch(TIMESTAMP '{params.
start}'))/{sec}) AS BIGINT)"
226 )
227 else:
228 bucket_expr = (
229 f"TIMESTAMP 'epoch' + INTERVAL '{sec} second' * "
230 f"CAST(floor(epoch({xexpr})/{sec}) AS BIGINT)"
231 )
232 select_parts.append(f"{bucket_expr} AS bucket")
233 group_cols = ["bucket"] + group_cols
234 has_agg = bool(group_cols) or params.aggregate is not None
235 if has_agg:
236 select_cols = (
(scubaduck) root@29cb09005028:/workspace/scubaduck#
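Two concrete takeaways from that excerpt. First, the granularity math: for a one-hour range on auto granularity, the bucket size works out to max(3600 // 100, 1) = 36 seconds. Second, a simplified restatement of _time_expr, illustrative rather than the committed code:

NUMERIC = ("INT", "DECIMAL", "REAL", "DOUBLE", "FLOAT", "NUMERIC", "HUGEINT")

def time_expr(col: str, column_types: dict[str, str] | None) -> str:
    """Wrap integer epoch columns in DuckDB timestamp arithmetic."""
    if column_types is None:
        return col
    ctype = column_types.get(col, "").upper()
    if any(t in ctype for t in ("TIMESTAMP", "DATE", "TIME")):
        return col
    if any(t in ctype for t in NUMERIC):
        return f"TIMESTAMP 'epoch' + INTERVAL '1 second' * {col}"
    return col

assert time_expr("created", {"created": "BIGINT"}) == \
    "TIMESTAMP 'epoch' + INTERVAL '1 second' * created"
assert time_expr("ts", {"ts": "TIMESTAMP"}) == "ts"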
I'll include part of build_query around line 218 and show the time_column field on the dataclass.
(scubaduck) root@29cb09005028:/workspace/scubaduck# nl -ba scubaduck/server.py |
sed -n '40,60p'
40 aggregate: str | None = None
41 show_hits: bool = False
42 x_axis: str | None = None
43 granularity: str = "Auto"
44 fill: str = "0"
45 table: str = "events"
46 time_column: str = "timestamp"
47
48
49 def _normalize_sqlite_type(sql: str) -> str:
50 """Map arbitrary SQLite column types to DuckDB-compatible types."""
51 t = sql.strip().upper()
52 if "(" in t:
53 t = t.split("(", 1)[0]
54 if "INT" in t:
55 # SQLite only has a single INTEGER type which is always 64-bit.
56 # Use DuckDB's BIGINT to avoid overflow when values exceed INT32
.
57 return "BIGINT"
58 if any(key in t for key in ("CHAR", "CLOB", "TEXT")):
59 return "VARCHAR"
60 if "BLOB" in t:
(scubaduck) root@29cb09005028:/workspace/scubaduck#
I'll include the new time_column dropdown snippet from index.html.
(scubaduck) root@29cb09005028:/workspace/scubaduck# nl -ba scubaduck/static/inde
x.html | sed -n '184,204p'
184 </div>
185 <div id="settings" class="tab-content active">
186 <div class="field">
187 <label>Time Column</label>
188 <select id="time_column"></select>
189 </div>
190 <div class="field">
191 <label>Start<span class="help" title="Sets the start/end of th
e time range to query. Can be any kind of datetime string. For example: 'April 2
3, 2014' or 'yesterday'.">[?]</span></label>
192 <div class="rel-box">
193 <input id="start" type="text" />
194 <button type="button" class="rel-btn" data-target="start-sel
ect">▼</button>
195 <div id="start-select" class="rel-dropdown" data-input="star
t">
196 <div data-value="-1 hour">-1 hour</div>
197 <div data-value="-3 hours">-3 hours</div>
198 <div data-value="-12 hours">-12 hours</div>
199 <div data-value="-1 day">-1 day</div>
200 <div data-value="-3 days">-3 days</div>
201 <div data-value="-1 week">-1 week</div>
202 <div data-value="-1 fortnight">-1 fortnight</div>
203 <div data-value="-30 days">-30 days</div>
204 <div data-value="-90 days">-90 days</div>
(scubaduck) root@29cb09005028:/workspace/scubaduck# nl -ba scubaduck/static/inde
x.html | sed -n '944,960p'
944 });
945 }
946
947 function collectParams() {
948 updateSelectedColumns();
949 const payload = {
950 table: document.getElementById('table').value,
951 time_column: document.getElementById('time_column').value,
952 start: document.getElementById('start').value,
953 end: document.getElementById('end').value,
954 order_by: document.getElementById('order_by').value,
955 order_dir: orderDir,
956 limit: parseInt(document.getElementById('limit').value, 10),
957 columns: selectedColumns.filter(c =>
958 c !== 'Hits' && !derivedColumns.some(dc => dc.name === c)
959 ),
960 graph_type: graphTypeSel.value,
(scubaduck) root@29cb09005028:/workspace/scubaduck#
Everything is committed; one final status check.
(scubaduck) root@29cb09005028:/workspace/scubaduck# git status --short
(scubaduck) root@29cb09005028:/workspace/scubaduck#
Time to put together a concise PR message, summarizing the changes I made.