Compare commits


2 Commits

SHA1        Message                                             Date
2581de3dc7  feat: scripts to get coverage across panel added    2025-09-24 18:16:08 +02:00
d9cbe4f998  feat: further analysis added                        2025-09-24 18:15:18 +02:00
3 changed files with 232 additions and 0 deletions


@@ -840,6 +840,141 @@ def _(sc, variants_high_cov_ids, variants_lost_cov_ids, variants_low_cov_ids):
    return


@app.cell
def _(mo):
    mo.md(r"""# (3) Stats and Metadata""")
    return


@app.cell
def _(pd):
    meta = pd.read_excel(
        "/home/darren/Documents/4_data/3_internal/2_lb/Metadata_LB_20250924.xlsx",
        index_col="fastq_id",
    )
    meta
    return (meta,)


@app.cell
def _(clinicals, meta, pd, stats):
    # merge stats and metadata and remove seracares
    stats_meta = pd.concat([stats, meta], axis=1)
    stats_meta = stats_meta.loc[clinicals, :]
    stats_meta = stats_meta.reset_index()
    stats_meta.head()
    return (stats_meta,)


@app.cell
def _(mo, stats_meta):
    var1 = mo.ui.dropdown(options=stats_meta.columns, label="Variable 1")
    var2 = mo.ui.dropdown(options=stats_meta.columns, label="Variable 2")
    mo.vstack([var1, var2])
    return var1, var2


@app.cell
def _(px, stats_meta, var1, var2):
    # note that all SE1 clinical samples are older QG extracted cfDNAs
    fig5 = px.scatter(
        data_frame=stats_meta,
        x=var1.value,
        y=var2.value,
        color="Flow cell #",
        width=700,
        height=500,
        template="simple_white",
        hover_data=["index"],
        color_discrete_sequence=px.colors.qualitative.Dark2,
        category_orders={"Flow cell #": ["SE1", "SE2"]},
        trendline="ols",
    )
    fig5.update_traces(
        marker=dict(size=9, line=dict(width=1, color="DarkSlateGrey")),
        selector=dict(mode="markers"),
    )
    fig5.add_vline(
        x=30,
        line_width=3,
        line_dash="dot",
        annotation_text="30ng",
        annotation_position="top left",
    )
    fig5.show()
    return
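Since the scatter is built with trendline="ols", the regression that plotly express fits per flow cell can be pulled back out of the figure via px.get_trendline_results. A minimal sketch of that inspection step (an assumed addition, run inside the same cell once fig5 exists; not part of the diff):

    # statsmodels OLS results behind each trendline trace (one per flow cell)
    ols_fits = px.get_trendline_results(fig5)
    print(ols_fits["px_fit_results"].iloc[0].summary())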
@app.cell
def _(stats_meta):
    stats_meta["ng_group"] = [
        "< 30ng" if i < 30 else "> 30ng" for i in stats_meta["Input DNA used (ng)"]
    ]
    return
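A near-equivalent binning with pd.cut (a sketch, assuming pd is passed into the cell signature) keeps any missing input amounts as NaN instead of the list comprehension silently labelling them "> 30ng":

    # [0, 30) ng vs [30, inf) ng; right=False matches the `i < 30` comparison above
    stats_meta["ng_group"] = pd.cut(
        stats_meta["Input DNA used (ng)"],
        bins=[0, 30, float("inf")],
        right=False,
        labels=["< 30ng", "> 30ng"],
    )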
@app.cell
def _(pd, stats_meta):
    stats_meta_melt = pd.melt(
        stats_meta, id_vars=["index", "sample_type", "ng_group", "Flow cell #"]
    )
    stats_meta_melt.head()
    return (stats_meta_melt,)


@app.cell
def _(mo, stats_meta_melt):
    box_vars = mo.ui.dropdown(
        options=stats_meta_melt["variable"].unique(), label="Variables to plot"
    )
    mo.hstack([box_vars])
    return (box_vars,)


@app.cell
def _(box_vars, px, stats_meta_melt):
    fig6 = px.box(
        data_frame=stats_meta_melt.loc[
            (stats_meta_melt["variable"] == box_vars.value)
            & (stats_meta_melt["Flow cell #"] == "SE2")
        ],
        color="ng_group",
        y="value",
        x="variable",
        template="simple_white",
        points="all",
        labels={
            "ng_group": "cfDNA Input for Library",
            "value": box_vars.value,
            "variable": "",
        },
        color_discrete_sequence=px.colors.qualitative.Dark2,
        category_orders={"ng_group": ["< 30ng", "> 30ng"]},
        hover_data=["index"],
        width=800,
    )
    fig6.update_traces(
        marker=dict(size=10, line=dict(width=1, color="DarkSlateGrey")),
        selector=dict(type="points"),
    )
    fig6.update_xaxes(ticks="", showticklabels=False)
    fig6.update_yaxes(showgrid=True, tickfont_size=14)
    fig6.update_legends(font_size=14)
    # "points" is not a trace type, so the selector above matches nothing;
    # apply the marker styling to every trace directly instead
    for trace in fig6.select_traces():
        trace.marker.update(size=10, line=dict(width=1, color="DarkSlateGrey"))
    fig6.show()
    return


@app.cell
def _(stats_meta):
    stats_meta["ng_group"].value_counts()
    return


@app.cell
def _():
    return

86  src/get_panel_coverage.py  Normal file

@@ -0,0 +1,86 @@
import os
from subprocess import run
import logging
import sys

logging.basicConfig(
    format="%(asctime)s [%(levelname)s] %(message)s",
    level=logging.INFO,
    stream=sys.stderr,
)


def main(s3_paths: list[str], bed_path: str, ref_path: str, cli_bin: str) -> None:
    logging.info("Script started...")
    logging.info(f"Using reference: {ref_path}")
    logging.info(f"Using bed file: {bed_path}")
    logging.info(f"Number of BAM/CRAMs to process: {len(s3_paths)}")
    logging.info(f"BAM/CRAMs to process: {s3_paths}")
    for idx, file in enumerate(s3_paths):
        name = file.split("/")[-1]
        file_num = f"{idx+1}/{len(s3_paths)}"
        logging.info(f"({file_num}) Downloading file: {name}")
        # fetch the CRAM with the aws CLI (profile "se") or a generic S3 client
        down_cmd = (
            f"{cli_bin} s3 cp {file} . --profile=se"
            if cli_bin == "aws"
            else f"{cli_bin} get {file} ."
        )
        run(down_cmd, shell=True, capture_output=False)
        # coordinate-sort so bedtools coverage can be run with -sorted
        sort_name = f"{name.split('.')[0]}.sorted.cram"
        sort_cmd = f"samtools sort -@ 18 {name} --reference {ref_path} > {sort_name}"
        run(sort_cmd, shell=True, capture_output=False)
        logging.info(f"({file_num}) Getting coverage for '{name}'")
        # per-base coverage over the panel, collapsed to JSON by parse_cov.py
        cov_cmd = (
            f"bedtools coverage -d -sorted -a {bed_path} -b {sort_name} | "
            f"python parse_cov.py > {name.split('.')[0]}_cov.json"
        )
        run(cov_cmd, shell=True, capture_output=False)
        os.remove(name)
        os.remove(sort_name)
        logging.info(f"({file_num}) Processing of '{name}' completed!")
if __name__ == "__main__":
    s3_paths = [
        # "s3://serenomica-pipeline-archive/se1-prd-2.1.1/LB-25-0001-pa-21Mar2025_S33.collapsed.cram",
        "s3://serenomica-pipeline-archive/se1-prd-2.1.1/LB-25-0002-pa-21Mar2025_S34.collapsed.cram",
        "s3://serenomica-pipeline-archive/se1-prd-2.1.1/LB-25-0003-pa-21Mar2025_S35.collapsed.cram",
        "s3://serenomica-pipeline-archive/se1-prd-2.1.1/LB-25-0004-pa-21Mar2025_S36.collapsed.cram",
        "s3://serenomica-pipeline-archive/se1-prd-2.1.1/LB-25-0005-pa-21Mar2025_S37.collapsed.cram",
        "s3://serenomica-pipeline-archive/se1-prd-2.1.1/LB-25-0006-pa-21Mar2025_S38.collapsed.cram",
        "s3://serenomica-pipeline-archive/se1-prd-2.1.1/LB-SeraPlasma-0d125pc-1-28Mar2025_S41.collapsed.cram",
        "s3://serenomica-pipeline-archive/se1-prd-2.1.1/LB-SeraPlasma-0d125pc-2-28Mar2025_S42.collapsed.cram",
        "s3://serenomica-pipeline-archive/se1-prd-2.1.1/LB-SeraPlasma-0d25pc-1-28Mar2025_S43.collapsed.cram",
        "s3://serenomica-pipeline-archive/se1-prd-2.1.1/LB-SeraPlasma-0d25pc-2-28Mar2025_S44.collapsed.cram",
        "s3://serenomica-pipeline-archive/se1-prd-2.1.1/LB-SeraPlasma-0d5pc-1-28Mar2025_S45.collapsed.cram",
        "s3://serenomica-pipeline-archive/se1-prd-2.1.1/LB-SeraPlasma-0d5pc-2-28Mar2025_S46.collapsed.cram",
        "s3://serenomica-pipeline-archive/se1-prd-2.1.1/LB-SeraPlasma-0pc-1-28Mar2025_S39.collapsed.cram",
        "s3://serenomica-pipeline-archive/se1-prd-2.1.1/LB-SeraPlasma-0pc-2-28Mar2025_S40.collapsed.cram",
        "s3://serenomica-pipeline-archive/se1-prd-2.1.1/LB-SeraPlasma-1pc-1-28Mar2025_S48.collapsed.cram",
        "s3://serenomica-pipeline-archive/se1-prd-2.1.1/LB-SeraPlasma-1pc-2-28Mar2025_S47.collapsed.cram",
        "s3://serenomica-pipeline-archive/se2-lb-1/LB-25-0007-pa-1-23May2025_S20.collapsed.cram",
        "s3://serenomica-pipeline-archive/se2-lb-1/LB-25-0007-pa-2-23May2025_S21.collapsed.cram",
        "s3://serenomica-pipeline-archive/se2-lb-1/LB-25-0008-pa-1-23May2025_S22.collapsed.cram",
        "s3://serenomica-pipeline-archive/se2-lb-1/LB-25-0008-pa-2-23May2025_S23.collapsed.cram",
        "s3://serenomica-pipeline-archive/se2-lb-1/LB-25-0009-pa-1-23May2025_S24.collapsed.cram",
        "s3://serenomica-pipeline-archive/se2-lb-1/LB-25-0009-pa-2-23May2025_S25.collapsed.cram",
        "s3://serenomica-pipeline-archive/se2-lb-1/LB-25-0010-pa-1-23May2025_S26.collapsed.cram",
        "s3://serenomica-pipeline-archive/se2-lb-1/LB-25-0010-pa-2-23May2025_S27.collapsed.cram",
        "s3://serenomica-pipeline-archive/se2-lb-1/LB-25-0011-pa-1-23May2025_S32.collapsed.cram",
        "s3://serenomica-pipeline-archive/se2-lb-1/LB-25-0012-pa-1-23May2025_S28.collapsed.cram",
        "s3://serenomica-pipeline-archive/se2-lb-1/LB-25-0012-pa-2-23May2025_S29.collapsed.cram",
        "s3://serenomica-pipeline-archive/se2-lb-1/LB-25-0032-pa-1-23May2025_S30.collapsed.cram",
        "s3://serenomica-pipeline-archive/se2-lb-1/LB-25-0032-pa-2-23May2025_S31.collapsed.cram",
        "s3://serenomica-pipeline-archive/se2-lb-1/LB-SeraCare-0d25pc-1-21Mar2025_S17.collapsed.cram",
        "s3://serenomica-pipeline-archive/se2-lb-1/LB-SeraCare-0d25pc-2-21Mar2025_S18.collapsed.cram",
        "s3://serenomica-pipeline-archive/se2-lb-1/LB-SeraPlasma-0pc-3-28Mar2025_S19.collapsed.cram",
    ]
    # "/home/darren/Documents/4_data/3_internal/1_panels/TWIST/twist_LB_probes_plus_30bp_merged.sorted.bed"
    bed_path = "/home/darren/Documents/4_data/3_internal/1_panels/TWIST/twist_LB_probes_plus_30bp_merged.sorted.bed"
    ref_path = "/home/darren/Documents/4_data/1_genomes/human/hg38/Homo_sapiens_assembly38.fasta"
    main(s3_paths, bed_path, ref_path, cli_bin="aws")

11  src/parse_cov.py  Normal file

@@ -0,0 +1,11 @@
import sys
from collections import defaultdict
import json

# collect the per-base depths from `bedtools coverage -d` (stdin) into one list per region
data = defaultdict(list)
for line in sys.stdin:
    chrom, start, stop, _, depth = line.rstrip().split("\t")
    data[f"{chrom}_{start}_{stop}"].append(depth)

print(json.dumps(dict(data)))
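Each <sample>_cov.json written by get_panel_coverage.py therefore maps a region key ("chrom_start_stop") to its list of per-base depths, stored as strings. A minimal sketch for collapsing one of these files to mean depth per panel region (load_mean_coverage and the example file name are illustrative, not part of the scripts above):

import json


def load_mean_coverage(path: str) -> dict[str, float]:
    # mean per-base depth for every panel region in one <sample>_cov.json
    with open(path) as handle:
        per_base = json.load(handle)
    return {
        region: sum(int(d) for d in depths) / len(depths)
        for region, depths in per_base.items()
    }


# e.g. cov = load_mean_coverage("LB-25-0002-pa-21Mar2025_S34_cov.json")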