From 35e59a9cde7119c849169178675ec0e18467aa5f Mon Sep 17 00:00:00 2001
From: Theo <theo.serralta@chu-dijon.fr>
Date: Thu, 7 Nov 2024 13:51:59 +0100
Subject: [PATCH] Remove files

---
 test_gpu_time_SV.py            | 1966 --------------------------------
 test_gpu_time_normalisation.py | 1893 ------------------------------
 2 files changed, 3859 deletions(-)
 delete mode 100644 test_gpu_time_SV.py
 delete mode 100644 test_gpu_time_normalisation.py

diff --git a/test_gpu_time_SV.py b/test_gpu_time_SV.py
deleted file mode 100644
index d5beb24..0000000
--- a/test_gpu_time_SV.py
+++ /dev/null
@@ -1,1966 +0,0 @@
-import pysam
-import sys
-import getopt
-import logging
-import numpy as np
-import pycuda.driver as cuda
-import pycuda.autoinit
-from pycuda.compiler import SourceModule
-import pycuda.gpuarray as gpuarray
-from pycuda.autoinit import context
-import multiprocessing
-from multiprocessing import Process, Queue
-import time
-import subprocess
-import pandas as pd
-from collections import defaultdict
-from datetime import datetime
-
-# Options
-
-
-def parse_arguments():
-    """
-    Parse command-line arguments.
-
-    This function parses the command-line arguments provided to the script and extracts the values for various parameters.
-
-    Parameters
-    ----------
-    None
-
-    Returns
-    -------
-    tuple
-        A tuple containing the following elements:
-        bamfile_path : str or None
-            The path to the BAM file.
-        num_chr : str or None
-            The chromosome identifier to analyse.
-        window_size : int or None
-            The size of the window.
-        step_size : int or None
-            The step size for the analysis.
-        zscore_threshold : float or None
-            The threshold for the Z-score.
-        lengthFilter : int or None
-            The filter for length.
-        output_file : str or None
-            The path to the output VCF file.
-        output_file_pairs : str or None
-            The path to the output file for paired-read events.
-        output_file_splits : str or None
-            The path to the output file for split-read events.
-        logfile : str or None
-            The path to the log file.
-
-        Each element can be None if the corresponding argument was not provided.
-    """
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "b:c:w:s:z:l:t:o:p:m:e:")
-        (
-            bamfile_path,
-            num_chr,
-            window_size,
-            step_size,
-            zscore_threshold,
-            lengthFilter,
-            output_file,
-            output_file_pairs,
-            output_file_splits,
-            logfile,
-        ) = (None, None, None, None, None, None, None, None, None, None)
-        # Note: the optstring also accepts "-t", which is currently unused.
-        for opt, arg in opts:
-            if opt == "-b":
-                bamfile_path = arg
-            elif opt == "-c":
-                num_chr = arg
-            elif opt == "-w":
-                window_size = int(arg)
-            elif opt == "-s":
-                step_size = int(arg)
-            elif opt == "-z":
-                zscore_threshold = float(arg)
-            elif opt == "-l":
-                lengthFilter = int(arg)
-            elif opt == "-o":
-                output_file = arg
-            elif opt == "-p":
-                output_file_pairs = arg
-            elif opt == "-m":
-                output_file_splits = arg
-            elif opt == "-e":
-                logfile = arg
-        return (
-            bamfile_path,
-            num_chr,
-            window_size,
-            step_size,
-            zscore_threshold,
-            lengthFilter,
-            output_file,
-            output_file_pairs,
-            output_file_splits,
-            logfile,
-        )
-    except getopt.GetoptError:
-        print("Invalid argument")
-        sys.exit(1)
-
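-# Hedged usage sketch (editor's illustration; file names are hypothetical).
-# The flags map one-to-one onto parse_arguments() above:
-#
-#   python test_gpu_time_SV.py -b sample.bam -c chr1 -w 100 -s 10 \
-#       -z 1.5 -l 1000 -o calls.vcf -p pairs.tsv -m splits.tsv -e run.log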
-
-if __name__ == "__main__":
-    (
-        bamfile_path,
-        num_chr,
-        window_size,
-        step_size,
-        zscore_threshold,
-        lengthFilter,
-        output_file,
-        output_file_pairs,
-        output_file_splits,
-        logfile,
-    ) = parse_arguments()
-    logging.basicConfig(
-        filename="%s" % (logfile),
-        filemode="a",
-        level=logging.INFO,
-        format="%(asctime)s %(levelname)s - %(message)s",
-    )
-    logging.info("start")
-    global seq  # no-op at module scope; mirrors the declaration in main_calcul
-
-# Code CUDA
-mod = SourceModule(
-    """
-// Kernel to compute the raw mean depth
-
-__global__ void calcul_depth_kernel(int *depth_data, int seq_length, int window_size, int step_size, float *depth_results) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    if (idx < seq_length - window_size + 1) {
-        int pos_start = (idx * step_size) + 1;
-        int pos_end = pos_start + window_size;
-        int count_reads = 0;
-
-        for (int i = pos_start; i < pos_end; i++) {
-            count_reads += depth_data[i];
-        }
-
-        float avg_depth = (float)count_reads / window_size;
-        depth_results[idx] = avg_depth;
-    }
-}
-
-__global__ void calcul_depth_count_kernel(int *depth_data_count, int seq_length, int window_size, int step_size, float *depth_results_count) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    if (idx < seq_length - window_size + 1) {
-        int pos_start = (idx * step_size) + 1;
-        int pos_end = pos_start + window_size;
-        int count_reads = 0;
-
-        for (int i = pos_start; i < pos_end; i++) {
-            count_reads += depth_data_count[i];
-        }
-
-        float avg_depth_count = (float)count_reads / window_size;
-        depth_results_count[idx] = avg_depth_count;
-    }
-}
-
-// Kernel to compute the GC content
-
-__global__ void calcul_gc_kernel(char *gc_data, int seq_length, int window_size, int step_size, float *gc_results) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    if (idx < seq_length - window_size + 1) {
-        int pos_start = (idx * step_size) + 1;
-        int pos_end = pos_start + window_size;
-        int gc_count = 0;
-
-        // i < pos_end so that exactly window_size bases are counted
-        for (int i = pos_start; i < pos_end; ++i) {
-            if ((gc_data[i] == 'G') || (gc_data[i] == 'C') || (gc_data[i] == 'g') || (gc_data[i] == 'c')) {
-                gc_count++;
-            }
-        }
-
-        float avg_gc = ((float)gc_count / window_size) * 100;
-        gc_results[idx] = avg_gc;
-    }
-}
-
-// Kernel to compute the weighted mean mappability
-
-__global__ void calcul_map_kernel(float *map_data, int seq_length, int window_size, int step_size, float *map_results) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    if (idx < seq_length - window_size + 1) {
-        int pos_start = (idx * step_size) + 1;
-        int pos_end = pos_start + window_size;
-        float weighted_sum = 0.0;
-        float total_weight = 0.0;
-
-        for (int i = pos_start; i <= pos_end; ++i) {
-            float weight = map_data[i];
-            weighted_sum += weight;
-            total_weight += 1;
-        }
-
-        float avg_map = weighted_sum / total_weight;
-        map_results[idx] = avg_map;
-    }
-}
-
-
-// Kernel to compute the depth corrected by mappability
-
-__global__ void calcul_depth_correction_kernel(float *depth_results, float *map_results, int seq_length, int window_size, int step_size, float *depth_correction_results) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    if (idx < seq_length - window_size + 1) {
-        float avg_depth = depth_results[idx];
-        float avg_map = map_results[idx];
-
-        // Guard against division by zero when avg_map is 0
-        float depth_correction = (avg_map != 0.0f) ? (avg_depth / avg_map) : 0.0f;
-        depth_correction_results[idx] = depth_correction;
-    }
-}
-
-
-// Kernel to normalize the corrected depth
-
-__global__ void normalize_depth_kernel(float *depth_correction, float *gc_results, float m, float *gc_to_median, int seq_length, int window_size, int step_size, float *depth_normalize) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    if (idx < seq_length - window_size + 1) {
-        float mGC = gc_to_median[(int)gc_results[idx]];
-        
-        // Guard against division by zero when mGC is 0
-        float depth_normalize_val = (mGC != 0.0f) ? (depth_correction[idx] * m / mGC) : 0.0f;
-        depth_normalize[idx] = depth_normalize_val;
-    }
-}
-
-// Kernel to compute the per-window ratio
-
-__global__ void ratio_par_window_kernel(float *depth_normalize_val, float mean_chr, int seq_length, int window_size, int step_size, float *ratio_par_window_results) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    if (idx < seq_length - window_size + 1) {
-        float ratio = depth_normalize_val[idx] / mean_chr;
-        ratio_par_window_results[idx] = ratio;
-    }
-}
-
-// Kernel to compute the per-window z-score of the ratio
-
-__global__ void z_score_kernel(float *ratio, float mean_ratio, float std_ratio, int seq_length, int window_size, int step_size, float *z_score_results) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    if (idx < seq_length - window_size + 1) {
-        float z_score = (ratio[idx] - mean_ratio) / std_ratio;
-        z_score_results[idx] = z_score;
-    }
-}
-
-// Kernel to compute the ratio divided by the mean ratio
-
-__global__ void ratio_par_mean_ratio_kernel(float *ratio, float mean_ratio, int seq_length, int window_size, int step_size, float *ratio_par_mean_ratio_results) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    if (idx < seq_length - window_size + 1) {
-        float ratio_mean_ratio = ratio[idx] / mean_ratio;
-        ratio_par_mean_ratio_results[idx] = ratio_mean_ratio;
-    }
-}
-
-"""
-)
-
-# Retrieve the compiled kernel functions
-calcul_depth_kernel_cuda = mod.get_function("calcul_depth_kernel")
-calcul_depth_count_kernel_cuda = mod.get_function("calcul_depth_count_kernel")
-calcul_gc_kernel_cuda = mod.get_function("calcul_gc_kernel")
-calcul_map_kernel_cuda = mod.get_function("calcul_map_kernel")
-calcul_depth_correction_kernel_cuda = mod.get_function("calcul_depth_correction_kernel")
-normalize_depth_kernel_cuda = mod.get_function("normalize_depth_kernel")
-ratio_par_window_kernel_cuda = mod.get_function("ratio_par_window_kernel")
-z_score_kernel_cuda = mod.get_function("z_score_kernel")
-ratio_par_mean_ratio_kernel_cuda = mod.get_function("ratio_par_mean_ratio_kernel")
-
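-# Minimal launch sketch for one of the kernels above (editor's example; the
-# block size and variable names are placeholders, not the values used below):
-#
-#   n_windows = int((seq_length - window_size) / step_size) + 1
-#   calcul_depth_kernel_cuda(
-#       d_depth_data, np.int32(seq_length), np.int32(window_size),
-#       np.int32(step_size), d_depth_results,
-#       block=(256, 1, 1), grid=(n_windows // 256 + 1, 1),
-#   )
-#   cuda.memcpy_dtoh(depth_results, d_depth_results)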
-
-def merge_intervals(intervals):
-    """
-    Merge overlapping intervals with the same score.
-
-    This function takes a list of intervals and merges those that have the same score.
-
-    Parameters
-    ----------
-    intervals : list of tuples
-        A list where each element is a tuple (start, end, score).
-        The intervals should be sorted by start position.
-
-    Returns
-    -------
-    list of tuples
-        A list of merged intervals (start, end, score) where overlapping intervals with the same score are combined.
-    """
-    merged = []
-    start, end, score = intervals[0]
-    for interval in intervals[1:]:
-        if interval[2] == score:
-            end = interval[1]
-        else:
-            merged.append((start, end, score))
-            start, end, score = interval
-    merged.append((start, end, score))
-    
-    return merged
-
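-# A minimal sketch of merge_intervals behaviour (editor's example, values are
-# made up): consecutive intervals sharing a score collapse into one.
-#
-#   >>> merge_intervals([(0, 10, 0.5), (10, 20, 0.5), (20, 30, 1.0)])
-#   [(0, 20, 0.5), (20, 30, 1.0)]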
-
-def dico_mappabilite(mappability_file):
-    """
-    Create a dictionary of mappability scores from a file.
-
-    This function reads a mappability file and creates a dictionary with chromosomes as keys and mappability scores as values.
-
-    Parameters
-    ----------
-    mappability_file : str
-        The path to the mappability file. Each line in the file should have the format:
-        chromosome, start_pos, end_pos, score.
-
-    Returns
-    -------
-    dict
-        A dictionary where keys are chromosome names and values are another dictionary with start positions as keys
-        and mappability scores as values.
-    """
-    logging.info("Entering dico_mappability")
-    start_time = time.time()
-
-    mappability_dico = {}
-
-    logging.info(" In dico_mappability : Open file and create the dico")
-    start_time_1 = time.time()
-
-    with open(mappability_file, "r") as f:
-        for line in f:
-            fields = line.strip().split("\t")
-            if len(fields) < 4:
-                continue
-
-            chromosome = fields[0]
-            start_pos = int(fields[1])
-            end_pos = int(fields[2])
-            score = float(fields[3])
-
-            if chromosome not in mappability_dico:
-                mappability_dico[chromosome] = []
-
-            mappability_dico[chromosome].append((start_pos, end_pos, score))
-
-    end_time_1 = time.time()
-    elapsed_time_1 = end_time_1 - start_time_1
-    logging.info(f"In dico_mappability : Leaving file and create the dico (Time taken: {elapsed_time_1:.4f} seconds)")
-
-    # Add position 0 for each chromosome
-    logging.info(" In dico_mappability : Add position 0 for each chromosome")
-    start_time_2 = time.time()
-
-    for chromosome, intervals in mappability_dico.items():
-        if intervals[0][0] != 0:
-            mappability_dico[chromosome].insert(0, (0, intervals[0][0], 0))
-
-    end_time_2 = time.time()
-    elapsed_time_2 = end_time_2 - start_time_2
-    logging.info(f"In dico_mappability : Ending add position 0 for each chromosome (Time taken: {elapsed_time_2:.4f} seconds)")
-
-    # Merge intervals with the same score
-    logging.info(" In dico_mappability : Merge intervals with the same score")
-    start_time_3 = time.time()
-
-    for chromosome, intervals in mappability_dico.items():
-        merged_intervals = merge_intervals(intervals)
-        mappability_dico[chromosome] = {
-            start: score for start, _, score in merged_intervals
-        }
-    
-    end_time_3 = time.time()
-    elapsed_time_3 = end_time_3 - start_time_3
-    logging.info(f"In dico_mappability : Ending merge intervals with the same score (Time taken: {elapsed_time_2:.4f} seconds)")
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving dico_mappability (Time taken: {elapsed_time:.4f} seconds)")
-
-    return mappability_dico
-
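-# Expected mappability input, for reference (hedged example; a tab-separated
-# file with chromosome, start, end and score columns):
-#
-#   chr1    0       10000   0.0
-#   chr1    10000   10500   0.8
-#
-# which dico_mappabilite turns into {"chr1": {0: 0.0, 10000: 0.8, ...}}.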
-
-def calcul_mappability(seq_length, mappability, chr):
-    """
-    Calculate mappability array for a given sequence length and chromosome.
-
-    This function generates an array of mappability scores for a specific chromosome and sequence length.
-
-    Parameters
-    ----------
-    seq_length : int
-        The length of the sequence.
-    mappability : dict
-        A dictionary containing mappability information, with chromosomes as keys and dictionaries of start positions
-        and scores as values.
-    chr : str
-        The chromosome for which the mappability is calculated.
-
-    Returns
-    -------
-    numpy.ndarray
-        An array of mappability scores for the given sequence length.
-    """
-    logging.info(f"Entering calcul_mappability for {chr}")
-    start_time = time.time()
-    
-    map_data = np.zeros(seq_length, dtype=np.float32)
-    sorted_keys = sorted(mappability[chr].keys())
-
-    prev_bound = 0
-    prev_mappability = 0
-
-    logging.info(f"In calcul_mappability : Entering for bound in sorted_keys for {chr}")
-    start_time_1 = time.time()
-
-    for bound in sorted_keys:
-        for i in range(prev_bound, min(seq_length, bound)):
-            map_data[i] = prev_mappability
-        prev_bound = bound
-        prev_mappability = mappability[chr][bound]
- 
-    end_time_1 = time.time()
-    elapsed_time_1 = end_time_1 - start_time_1
-    logging.info(f"In calcul_mappability : Leaving for bound in sorted_keys for {chr} (Time taken: {elapsed_time_1:.4f} seconds)")
-
-    # Fill remaining positions if sequence length exceeds last bound
-    logging.info(f"In calcul_mappability : Entering for i in range(prev_bound, seq_length) for {chr}")
-    start_time_2 = time.time()
-
-    for i in range(prev_bound, seq_length):
-        map_data[i] = prev_mappability
-
-    end_time_2 = time.time()
-    elapsed_time_2 = end_time_2 - start_time_2
-    logging.info(f"In calcul_mappability : Leaving for i in range(prev_bound, seq_length) for {chr} (Time taken: {elapsed_time_2:.4f} seconds)")
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving dico_mappability for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-
-    return map_data
-
-
-def parse_fasta(gc_file):
-    """
-    Parse a FASTA file and extract sequences.
-
-    This function reads a FASTA file and extracts the sequences, storing them in a dictionary with headers as keys.
-
-    Parameters
-    ----------
-    gc_file : str
-        The path to the FASTA file. The file should be in standard FASTA format.
-
-    Returns
-    -------
-    dict
-        A dictionary where keys are sequence headers and values are the corresponding sequences.
-    """
-    logging.info("Entering parse_fasta")
-    start_time = time.time()
-    
-    sequences = {}
-    with open(gc_file, "r") as f:
-        data = f.read().split(">")
-        for entry in data[1:]:
-            lines = entry.split("\n")
-            header = lines[0]
-            sequence = "".join(lines[1:])
-            sequences[header] = sequence
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving parse_fasta (Time taken: {elapsed_time:.4f} seconds)")
-
-    return sequences
-
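-# For reference, parse_fasta expects plain FASTA (hedged example):
-#
-#   >chr1
-#   ACGTACGTGC
-#
-# and returns {"chr1": "ACGTACGTGC"}; the full header line is used as the key.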
-
-def calcul_gc_content(seq_length, chr, seq):
-    """
-    Calculate the GC content of a given sequence.
-
-    This function generates an array representing the GC content for a specific chromosome and sequence length.
-
-    Parameters
-    ----------
-    seq_length : int
-        The length of the sequence.
-    chr : str
-        The chromosome for which the GC content is calculated.
-    seq : dict
-        A dictionary containing sequences, with chromosome names as keys and sequences as values.
-
-    Returns
-    -------
-    numpy.ndarray
-        An array of bytes ('S' dtype) representing the GC content for the given sequence length.
-    """
-    logging.info(f"Entering calcul_gc_content for {chr}")
-    start_time = time.time()
-
-    gc_data = np.zeros(seq_length, dtype="S")
-    for i in range(len(seq[chr])):
-        gc_data[i] = seq[chr][i]
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calcul_gc_content for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-
-    return gc_data
-
-
-def calcul_depth_seq(seq_length, bamfile_path, chr):
-    """
-    Calculate the sequencing depth for a given chromosome.
-
-    This function computes the sequencing depth for a specific chromosome and sequence length using a BAM file.
-
-    Parameters
-    ----------
-    seq_length : int
-        The length of the sequence.
-    bamfile_path : pysam.AlignmentFile
-        An open pysam.AlignmentFile handle for the BAM file.
-    chr : str
-        The chromosome for which the depth is calculated.
-
-    Returns
-    -------
-    numpy.ndarray
-        An array of integers representing the sequencing depth for the given sequence length.
-    """
-    logging.info(f"Entering calcul_depth_seq for {chr}")
-    start_time = time.time()
-    
-    depth_data = np.zeros(seq_length, dtype=np.int32)
-    for pileupcolumn in bamfile_path.pileup(reference=chr):
-        pos = pileupcolumn.reference_pos
-        if pos >= seq_length:
-            break
-        depth_data[pos] = pileupcolumn.nsegments
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calcul_depth_seq for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-
-    return depth_data
-
-
-def calcul_depth_seq_copy(seq_length, bamfile_path, chr):
-    """
-    Calculate the sequencing depth for a given chromosome.
-
-    This function computes the sequencing depth for a specific chromosome and sequence length using a BAM file.
-
-    Parameters
-    ----------
-    seq_length : int
-        The length of the sequence.
-    bamfile_path : pysam.AlignmentFile
-        An open pysam.AlignmentFile handle for the BAM file.
-    chr : str
-        The chromosome for which the depth is calculated.
-
-    Returns
-    -------
-    numpy.ndarray
-        An array of integers representing the sequencing depth for the given sequence length.
-    """
-    logging.info(f"Entering calcul_depth_seq for {chr}")
-    start_time = time.time()
-    
-    depth_data = np.zeros(seq_length, dtype=np.int32)
-
-    it = bamfile_path.pileup(reference=chr)
-    for _ in range(seq_length - 1):
-        try:
-            t = next(it)
-        except StopIteration:
-            break
-        pos = t.reference_pos
-        if pos < seq_length:
-            depth_data[pos] = t.nsegments
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calcul_depth_seq_copy for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-
-    return depth_data
-
-
-def calcul_depth_seq_count(seq_length, bamfile_path, chr):
-    """
-    Calculate the sequencing depth for a given chromosome.
-
-    This function computes the sequencing depth for a specific chromosome and sequence length using a BAM file.
-
-    Parameters
-    ----------
-    seq_length : int
-        The length of the sequence.
-    bamfile_path : pysam.AlignmentFile
-        An open pysam.AlignmentFile handle for the BAM file.
-    chr : str
-        The chromosome for which the depth is calculated.
-
-    Returns
-    -------
-    tuple
-        Four per-base count arrays (A, C, G, T), as returned by pysam's count_coverage.
-    """
-    logging.info(f"Entering calcul_depth_seq_count for {chr}")
-    start_time = time.time()
-    
-    # count_coverage returns one per-base array for each of A, C, G and T
-    depth_data_count = bamfile_path.count_coverage(contig=chr)
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calcul_depth_seq_count for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-
-    return depth_data_count
-    
-
-def calcul_depth_seq_samtools(seq_length, bamfile_path, chr, num_threads=12):
-    """
-    Calculate the sequencing depth for a given chromosome using a parallelized bash script.
-
-    Parameters
-    ----------
-    seq_length : int
-        The length of the sequence.
-    bamfile_path : str
-        The path to the BAM file.
-    chr : str
-        The chromosome for which the depth is calculated.
-    num_threads : int
-        The number of threads to use for parallel processing.
-
-    Returns
-    -------
-    numpy.ndarray
-        An array of integers representing the sequencing depth for the given sequence length.
-    """
-    logging.info(f"Entering calcul_depth_seq for {chr}")
-    start_time = time.time()
-    
-    # Define the output file for depth results
-    timestamp = datetime.now().strftime("%Y%m%d%H%M%S%f")
-    output_file_1 = f"/work/gad/shared/analyse/test/cnvGPU/test_scalability/tmp_depth_{chr}_{timestamp}.out"
-    
-    # Run the parallelized bash script to calculate depth
-    p = subprocess.Popen(["/work/gad/shared/analyse/test/cnvGPU/test_scalability/samtools_test_by_chr_multithreading_log.sh %s %s %s %s" % (bamfile_path, chr, output_file_1, num_threads)], shell = True, executable = "/bin/bash")
-    
-    p.communicate()
-    
-    # Create the numpy array for depth data
-    depth_data = np.zeros(seq_length, dtype=np.int32)
-
-    # Read the output file and fill the numpy array
-    try:
-        df = pd.read_csv(output_file_1, sep=r"\s+", header=None, names=['chr', 'pos', 'depth'])
-        df['pos'] -= 1  # Convert to 0-based index
-        df = df[df['pos'] < seq_length]  # Ensure positions are within sequence length
-        depth_data[df['pos']] = df['depth']
-    except Exception as e:
-        logging.error(f"Error reading depth file: {e}")
-    
-    # Clean up the output file
-    subprocess.run(['rm', output_file_1])
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calcul_depth_seq for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-
-    return depth_data
-
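-# The external script is assumed to emit `samtools depth`-style output, i.e.
-# three whitespace-separated columns (hedged example):
-#
-#   chr1    1       32
-#   chr1    2       33
-#
-# which the pandas block above maps into depth_data[pos - 1] = depth.
-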
-def calcul_med_total(depth_correction_results, chr):
-    """
-    Calculate the median of non-zero depth correction results.
-
-    This function filters out zero values from the depth correction results and computes the median of the remaining values.
-
-    Parameters
-    ----------
-    depth_correction_results : list or numpy.ndarray
-        A list or array of depth correction values.
-    chr : str
-        The chromosome label, used only for logging.
-
-    Returns
-    -------
-    float
-        The median of the non-zero depth correction values, or 0 if no non-zero values are present.
-    """
-    logging.info(f"Entering calcul_med_total for {chr}")
-    start_time = time.time()
-    
-    depth_correction_results = np.array(depth_correction_results)
-    # Filter results to remove zero values
-    non_zero_results = depth_correction_results[depth_correction_results != 0]
-    # Calculate the median of non-zero results
-    m = np.median(non_zero_results) if non_zero_results.size > 0 else 0
-
-    sys.stderr.write("Chromosome : %s, med_chr : %s\n" % (chr, m))
-    
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calcul_med_total for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-    
-    return m
-
-
-def calcul_med_same_gc(gc_results, depth_correction_results, chr):
-    """
-    Calculate the median depth correction for each unique GC content value.
-
-    This function computes the median depth correction values for each unique GC content value, filtering out zero values.
-
-    Parameters
-    ----------
-    gc_results : list or numpy.ndarray
-        A list or array of GC content values.
-    depth_correction_results : list or numpy.ndarray
-        A list or array of depth correction values.
-    chr : str
-        The chromosome label, used only for logging.
-
-    Returns
-    -------
-    dict
-        A dictionary where keys are unique GC content values and values are the median depth correction for those GC values.
-    """
-    logging.info(f"Entering calcul_med_same_gc for {chr}")
-    start_time = time.time()
-    
-    mGC = []
-    depth_correction_results_array = np.array(depth_correction_results)
-    unique_gc_values = np.unique(gc_results)
-
-    for gc in unique_gc_values:
-        indices = np.where(
-            gc_results == gc
-        )  # Get positions of each unique GC value in gc_results
-        # Filter depth correction results to remove zero values
-        filtered_depths = depth_correction_results_array[indices][
-            depth_correction_results_array[indices] != 0
-        ]
-
-        if (
-            filtered_depths.size > 0
-        ):  # Calculate median only if filtered results are not empty
-            median_gc = np.median(filtered_depths)
-        else:
-            median_gc = 0  # Or another default value if all results are 0
-
-        mGC.append((gc, median_gc))
-
-    gc_to_median = dict(mGC)
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calcul_med_same_gc for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-    
-    return gc_to_median
-
-
-def calcul_moy_totale(normalize_depth_results, chr):
-    """
-    Calculate the mean of non-zero normalized depth results.
-
-    This function filters out zero values from the normalized depth results and computes the mean of the remaining values.
-
-    Parameters
-    ----------
-    normalize_depth_results : list or numpy.ndarray
-        A list or array of normalized depth values.
-    chr : str
-        The chromosome label, used only for logging.
-
-    Returns
-    -------
-    float
-        The mean of the non-zero normalized depth values, or 0 if no non-zero values are present.
-    """
-    logging.info(f"Entering calcul_moy_totale for {chr}")
-    start_time = time.time()   
-    
-    normalize_depth_results = np.array(normalize_depth_results)
-    # Filter results to remove zero values
-    non_zero_results = normalize_depth_results[normalize_depth_results != 0]
-    # Calculate the mean of non-zero results
-    mean_chr = np.mean(non_zero_results) if non_zero_results.size > 0 else 0
-
-    sys.stderr.write("Chromosome : %s, mean_chr : %s\n" % (chr, mean_chr))
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calcul_moy_totale for {chr} (Time taken: {elapsed_time:.4f} seconds)")    
-    
-    return mean_chr
-
-
-
-def calcul_std(normalize_depth_results, chr):
-    """
-    Calculate the standard deviation of non-zero normalized depth results.
-
-    This function filters out zero values from the normalized depth results and computes the standard deviation of the remaining values.
-
-    Parameters
-    ----------
-    normalize_depth_results : list or numpy.ndarray
-        A list or array of normalized depth values.
-    chr : str
-        The chromosome label, used only for logging.
-
-    Returns
-    -------
-    float
-        The standard deviation of the non-zero normalized depth values, or 0 if no non-zero values are present.
-    """
-    logging.info(f"Entering calcul_std for {chr}")
-    start_time = time.time()  
-    
-    normalize_depth_results = np.array(normalize_depth_results)
-    # Filter results to remove zero values
-    non_zero_results = normalize_depth_results[normalize_depth_results != 0]
-    # Calculate the standard deviation of non-zero results
-    std_chr = np.std(non_zero_results) if non_zero_results.size > 0 else 0
-
-
-    sys.stderr.write("Chromosome : %s, std_chr : %s\n" % (chr, std_chr))
-    
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calcul_std for {chr} (Time taken: {elapsed_time:.4f} seconds)")      
-    
-    return std_chr
-
-
-def compute_mean_std_med(ratio_par_window_results, chr):
-    """
-    Compute the mean, standard deviation, and median of non-zero ratio results per window.
-
-    This function filters out zero and -1 values from the ratio results per window and computes the mean, standard deviation, and median of the remaining values. Values greater than or equal to 5 are capped at 5.
-
-    Parameters
-    ----------
-    ratio_par_window_results : list or numpy.ndarray
-        A list or array of ratio values per window.
-    chr : str
-        The chromosome label, used only for logging.
-
-    Returns
-    -------
-    tuple
-        A tuple containing the mean, standard deviation, and median of the filtered ratio values.
-    """
-    logging.info(f"Entering compute_mean_std_med for {chr}")
-    start_time = time.time()
-
-    # Filter out zero values here; -1 values are dropped in the loop below
-    ratio_par_window_results = np.array(ratio_par_window_results)
-    non_zero_results = ratio_par_window_results[ratio_par_window_results != 0]
-
-    # Initialize list for stats computation
-    table = []
-
-    for value in non_zero_results:
-        if float(value) >= 5:
-            table.append(5)
-        elif float(value) != -1:
-            table.append(float(value))
-
-    # Calculate the mean, standard deviation, and median of the filtered values
-    mean_ratio = np.mean(table) if table else 0
-    std_ratio = np.std(table) if table else 0
-    med_ratio = np.median(table) if table else 0
-
-    sys.stderr.write("Chromosome : %s, mean_ratio : %s, std_ratio : %s, med_ratio : %s\n" % (chr, mean_ratio, std_ratio, med_ratio))
-    
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving compute_mean_std_med for {chr} (Time taken: {elapsed_time:.4f} seconds)") 
-
-    # Return results
-    return mean_ratio, std_ratio, med_ratio
-
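-# Hedged example of the filtering above: values >= 5 are capped at 5 and -1
-# sentinels are dropped before the statistics are computed, e.g.
-#
-#   [0.8, 7.2, -1, 1.1]  ->  table == [0.8, 5, 1.1]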
-
-def cn_level(x, chr):
-    """
-    Determine the copy number level based on the given value.
-
-    This function returns the copy number level based on the input value `x`.
-
-    Parameters
-    ----------
-    x : float
-        The input value used to determine the copy number level.
-    chr : str
-        Unused here; kept for signature consistency with the other helpers.
-
-    Returns
-    -------
-    int
-        The copy number level:
-        - 0 if x < 0.1
-        - 1 if 0.1 <= x <= 0.75
-        - 2 if 0.75 < x < 1 or round(x) == 1
-        - round(x) if round(x) > 1
-    """
-    if x < 0.1:
-        return 0
-    elif x <= 0.75:
-        return 1
-    elif x < 1:
-        return 2
-    else:
-        rounded_x = round(x)
-        return 2 if rounded_x == 1 else rounded_x
-
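-# Quick sanity examples for cn_level (editor's illustration):
-#
-#   cn_level(0.05, "chr1")  -> 0
-#   cn_level(0.50, "chr1")  -> 1
-#   cn_level(0.90, "chr1")  -> 2
-#   cn_level(1.60, "chr1")  -> 2   (round(1.6) == 2)
-#   cn_level(2.70, "chr1")  -> 3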
-
-def get_sample_name(bamfile_path):
-    """
-    Extract the sample name from a BAM file.
-
-    This function reads the header of a BAM file to extract the sample name from the read groups.
-
-    Parameters
-    ----------
-    bamfile_path : str
-        The path to the BAM file.
-
-    Returns
-    -------
-    str
-        The sample name extracted from the BAM file. If no sample name is found, returns "UnknownSample".
-    """
-
-    logging.info(f"Entering get_sample_name")
-    start_time = time.time()
-
-    with pysam.AlignmentFile(bamfile_path, "rb") as bamfile:
-        for read_group in bamfile.header.get("RG", []):
-            if "SM" in read_group:
-                return read_group["SM"]
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving get_sample_name (Time taken: {elapsed_time:.4f} seconds)")
-    return "UnknownSample"
-
-
-def create_signal(signal, chr, z_score_results, step_size):
-    """
-    Create a signal dictionary for a specific chromosome based on z-score results.
-
-    This function populates a signal dictionary with positions and corresponding z-score results for a given chromosome.
-
-    Parameters
-    ----------
-    signal : dict
-        A dictionary to store the signal data.
-    chr : str
-        The chromosome for which the signal is created.
-    z_score_results : list or numpy.ndarray
-        A list or array of z-score results.
-    step_size : int
-        The step size used to calculate the positions.
-
-    Returns
-    -------
-    None
-        The function modifies the signal dictionary in place.
-    """
-    logging.info(f"Entering create_signal for {chr} (include copy_number_level)")
-    start_time = time.time()
-
-    if chr not in signal:
-        signal[chr] = {}
-    for i in range(len(z_score_results)):
-        pos = (i * step_size) + 1
-        signal[chr][pos] = z_score_results[i]
-    
-    #sys.stderr.write("\t signal %s\n" % signal[chr])
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving create_signal for {chr} (include copy_number_level) (Time taken: {elapsed_time:.4f} seconds)") 
-
-def detect_events(
-    z_score_results,
-    zscore_threshold,
-    events,
-    med_ratio,
-    ratio_par_mean_ratio_results,
-    chr,
-):
-    """
-    Detect genomic events based on z-score results and a z-score threshold.
-
-    This function identifies significant genomic events where z-scores exceed the given threshold. Events are recorded in the `events` dictionary for the specified chromosome.
-
-    Parameters
-    ----------
-    z_score_results : list or numpy.ndarray
-        A list or array of z-score values.
-    zscore_threshold : float
-        The threshold for detecting significant z-score events.
-    events : dict
-        A dictionary to store detected events.
-    med_ratio : float
-        The median ratio used for copy number level calculations.
-    ratio_par_mean_ratio_results : list or numpy.ndarray
-        A list or array of ratio values compared to the mean ratio.
-    chr : str
-        The chromosome for which events are detected.
-
-    Returns
-    -------
-    None
-        The function modifies the events dictionary in place.
-    """
-    logging.info(f"Entering detect_events for {chr}")
-    start_time = time.time()
-    
-    c = 0
-    if chr not in events:
-        events[chr] = {}
-    for i, z_score in enumerate(z_score_results):
-        if abs(z_score) >= zscore_threshold:
-            if med_ratio != 0:
-                c = cn_level(float(ratio_par_mean_ratio_results[i]), chr)
-            if z_score >= 0:
-                c = 3
-            elif c == 2 and z_score < 0:
-                c = 1
-            pos_start = (i * step_size) + 1
-            pos_end = pos_start + window_size
-
-            events[chr][(pos_start, pos_end)] = c
-    #sys.stderr.write("\t events %s\n" % events)
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving detect_events for {chr} (Time taken: {elapsed_time:.4f} seconds)") 
-
-def segmentation(events, segment, chr):
-    """
-    Segment the detected events into contiguous regions with the same copy number level.
-
-    This function processes the detected events and groups contiguous regions with the same copy number level into segments.
-
-    Parameters
-    ----------
-    events : dict
-        A dictionary of detected events for each chromosome.
-    segment : dict
-        A dictionary to store the segmented regions.
-    chr : str
-        The chromosome label, used only for logging.
-
-    Returns
-    -------
-    None
-        The function modifies the segment dictionary in place.
-    """
-    logging.info(f"Entering segmentation for {chr}")
-    start_time = time.time()
-    
-    for k in events.keys():
-        sys.stderr.write("\tfor chromosome %s\n" % k)
-        starts, oldPos, oldLevel = 1, 1, -1
-        for p in sorted(events[k].keys()):
-            level = events[k][p]
-            #sys.stderr.write("\t p %s\n" % p)
-            # new coordinates
-            if p[0] > (oldPos + window_size):
-                if (starts != 1) and (starts != p[0]):
-                    if k not in segment:
-                        segment[k] = {}
-                    index = str(starts) + "-" + str(oldPos)
-                    segment[k][index] = {}
-                    segment[k][index]["start"] = starts
-                    segment[k][index]["end"] = oldPos + window_size
-                    segment[k][index]["cn"] = oldLevel
-                    oldPos, starts, oldLevel = p[0], p[0], level
-                    continue
-                else:
-                    starts = p[0]
-            # case where it's contiguous but different level
-            if level != oldLevel:
-                if oldLevel != -1:
-                    if k not in segment:
-                        segment[k] = {}
-                    index = str(starts) + "-" + str(oldPos)
-                    segment[k][index] = {}
-                    segment[k][index]["start"] = starts
-                    segment[k][index]["end"] = oldPos
-                    segment[k][index]["cn"] = oldLevel
-                    oldPos, starts, oldLevel = p[0], p[0], level
-                    continue
-                else:
-                    oldLevel = level
-            oldPos, oldLevel = p[0], level
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving segmentation for {chr} (Time taken: {elapsed_time:.4f} seconds)") 
-
-def display_results_vcf(sample, segment, signal, lengthFilter, output_file, chr):
-    """
-    Generate a VCF file containing structural variant calls based on segmented regions and signal data.
-
-    This function creates a VCF (Variant Call Format) file containing structural variant calls derived from segmented regions and signal data. The structural variant type (DEL for deletion or DUP for duplication) is determined based on copy number levels and signal values. The resulting VCF file includes information about the chromosome, position, type of structural variant, copy number, and other relevant information.
-
-    Parameters
-    ----------
-    sample : str
-        The sample name to be included in the VCF file header.
-    segment : dict
-        A dictionary containing segmented regions with copy number information.
-    signal : dict
-        A dictionary containing signal data for each chromosome.
-    lengthFilter : int
-        The minimum length threshold for including variants in the VCF file.
-    output_file : str
-        The path to the output VCF file.
-
-    Returns
-    -------
-    None
-        This function writes the structural variant calls to the specified output file in VCF format.
-    """
-    global header_written
-    logging.info(f"Entering display_results_vcf for {chr}")
-    start_time = time.time()
-
-    with open(output_file, "a") as f:
-        if not header_written:
-            f.write("##fileformat=VCFv4.2\n")
-            f.write("##source=cnvcaller\n")
-            f.write(
-                '##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">\n'
-            )
-            f.write(
-                '##INFO=<ID=SVLEN,Number=.,Type=Integer,Description="Difference in length between REF and ALT alleles">\n'
-            )
-            f.write(
-                '##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">\n'
-            )
-            f.write('##INFO=<ID=CN,Number=1,Type=Integer,Description="Copy number">\n')
-            f.write('##ALT=<ID=DEL,Description="Deletion">\n')
-            f.write('##ALT=<ID=DUP,Description="Duplication">\n')
-            f.write('##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n')
-            f.write(
-                "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\n" % (sample)
-            )
-            header_written = True
-
-        for k in segment.keys():
-            #sys.stderr.write("\tfor chromosome %s\n" % k)
-            for elt in sorted(segment[k].keys()):
-                #sys.stderr.write("\tfor elt %s\n" % elt)
-                if segment[k][elt]["start"] == segment[k][elt]["end"]:
-                    continue
-                if (segment[k][elt]["end"] - segment[k][elt]["start"]) < lengthFilter:
-                    continue
-                #sys.stderr.write("\t [segment[k][elt] %s\n" % [segment[k][elt]])
-                if int(signal[k][segment[k][elt]["start"]]) < 0:
-                    f.write(
-                        "%s\t%s\t.\tN\t<DEL>\t.\t.\tSVTYPE=DEL;END=%s;VALUE=%s\tGT:GQ\t./.:0\n"
-                        % (
-                            k,
-                            segment[k][elt]["start"],
-                            segment[k][elt]["end"],
-                            int(segment[k][elt]["cn"]),
-                        )
-                    )
-                else:
-                    f.write(
-                        "%s\t%s\t.\tN\t<DUP>\t.\t.\tSVTYPE=DUP;END=%s;VALUE=%s\tGT:GQ\t./.:0\n"
-                        % (
-                            k,
-                            segment[k][elt]["start"],
-                            segment[k][elt]["end"],
-                            int(segment[k][elt]["cn"]),
-                        )
-                    )
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving display_results_vcf for {chr} (Time taken: {elapsed_time:.4f} seconds)") 
-
-
-def calcul_distance(bamfile_handle, chr, seq_length):
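-    """
-    Collect the absolute template lengths of mapped, paired reads on `chr`.
-
-    Returns a numpy array of the non-zero insert sizes, consumed by stats_distances.
-    """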
-    logging.info(f"Entering calcul_distance for {chr}")
-    start_time = time.time()
-    
-    distances_list = []
-    
-    for read in bamfile_handle.fetch(chr):
-        if read.is_paired and not read.is_unmapped and not read.mate_is_unmapped:
-            # Calculate absolute template length
-            distance = abs(read.template_length)
-            if distance != 0:
-                distances_list.append(distance)
-            #sys.stderr.write("Chromosome : %s, distances_data : %s\n" % (chr, distances_data))
-    
-    distances_data = np.array(distances_list, dtype=np.int32)
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calcul_distance for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-    
-    return distances_data
-
-def stats_distances(distances_data, chr):
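-    """
-    Derive the accepted insert-size window as median +/- one standard deviation.
-
-    Returns (MIN_DISTANCE, MAX_DISTANCE), used to flag abnormal read pairs.
-    """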
-    logging.info(f"Entering stats_distances for {chr}")
-    start_time = time.time()
-    
-    std_distances = np.std(distances_data)
-    med_distances = np.median(distances_data)
-    
-    MIN_DISTANCE = med_distances - std_distances
-    MAX_DISTANCE = med_distances + std_distances
-
-    sys.stderr.write("Chromosome : %s, std_dist : %s, med_dist : %s\n" % (chr, std_distances, med_distances))
-    
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving stats_distances for {chr} (Time taken: {elapsed_time:.4f} seconds)") 
-    
-    # Return results
-    return MIN_DISTANCE, MAX_DISTANCE
-        
-def find_paired_split(chr, MIN_DISTANCE, MAX_DISTANCE, depth_data, output_file_pairs, output_file_splits, bamfile_handle, seq_length):
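-    """
-    Scan `chr` for abnormal read pairs and split reads, cluster nearby events of
-    the same type (within `tolerance` bp), and append sufficiently supported
-    clusters to `output_file_pairs` and `output_file_splits`.
-    """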
-    logging.info(f"Entering find_paired_split for {chr}")
-    start_time = time.time()
-    
-    coordinates_pairs, coordinates_splits = defaultdict(int), defaultdict(int)
-
-    unique_pairs, unique_splits = [], []
-
-
-    fragment_size = 1000
-    num_fragments = (seq_length // fragment_size) + 1
-    
-    fragment_abnormal_reads_pairs, fragment_abnormal_reads_splits, fragment_total_reads = np.zeros(num_fragments, dtype=int), np.zeros(num_fragments, dtype=int), np.zeros(num_fragments, dtype=int)
-
-    event_pairs, event_splits = [], []
-
-    for read in bamfile_handle.fetch(chr):
-        fragment_index = read.reference_start // fragment_size
-        fragment_total_reads[fragment_index] += 1
-        if read.is_paired and (read.mapping_quality > 0):
-            key = (read.reference_start, read.cigarstring)
-            if read.is_unmapped or read.mate_is_unmapped:
-                if coordinates_pairs[key] == 0:
-                    fragment_abnormal_reads_pairs[fragment_index] += 1
-                    unique_pairs.append((read, "Unmapped"))
-                coordinates_pairs[key] += 1
-            elif read.is_reverse == read.mate_is_reverse and read.reference_id == read.next_reference_id:
-                if coordinates_pairs[key] == 0:
-                    fragment_abnormal_reads_pairs[fragment_index] += 1
-                    unique_pairs.append((read, "INV"))
-                coordinates_pairs[key] += 1
-            elif abs(read.template_length) < MIN_DISTANCE:
-                if coordinates_pairs[key] == 0:
-                    fragment_abnormal_reads_pairs[fragment_index] += 1
-                    unique_pairs.append((read, "INS"))
-                coordinates_pairs[key] += 1
-            elif abs(read.template_length) > MAX_DISTANCE:
-                if coordinates_pairs[key] == 0:
-                    fragment_abnormal_reads_pairs[fragment_index] += 1
-                    unique_pairs.append((read, "DEL"))
-                coordinates_pairs[key] += 1    
-            elif read.reference_id != read.next_reference_id:
-                if coordinates_pairs[key] == 0:
-                    fragment_abnormal_reads_pairs[fragment_index] += 1
-                    unique_pairs.append((read, "TRN"))
-                coordinates_pairs[key] += 1
-            
-        # Split reads
-        elif read.cigartuples and any(cigar_op in [3, 4, 5] for cigar_op, _ in read.cigartuples):
-            if not read.has_tag("SA"):
-                continue
-            key = (read.reference_start, read.cigarstring)
-            if coordinates_splits[key] == 0:
-                fragment_abnormal_reads_splits[fragment_index] += 1
-                
-                # Classify the split events
-                for cigar_op, cigar_len in read.cigartuples:
-                    if cigar_op == 3:  # Skipped region
-                        unique_splits.append((read, "SKIP"))
-                    elif cigar_op == 4:  # Soft clipping
-                        unique_splits.append((read, "SOFT_CLIP"))
-                    elif cigar_op == 5:  # Hard clipping
-                        unique_splits.append((read, "HARD_CLIP"))                
-                coordinates_splits[key] += 1
-
-
-    logging.info("Filtering pairs")
-    for read, event_type in unique_pairs:
-        fragment_index = read.reference_start // fragment_size
-        threshold = fragment_abnormal_reads_pairs[fragment_index] / fragment_total_reads[fragment_index]
-        if threshold > 0.2:
-            event_pairs.append((read, event_type))
-
-    logging.info("Filtering splits")
-    for read, event_type in unique_splits:
-        fragment_index = read.reference_start // fragment_size
-        threshold = fragment_abnormal_reads_splits[fragment_index] / fragment_total_reads[fragment_index]
-        if threshold > 0.2:
-            event_splits.append((read, event_type))
-
-    chromosome_coordinate_pairs_count = defaultdict(int)
-    reads_by_coordinate_pair = defaultdict(list)
-    tolerance = 400
-    
-    chromosome_coordinate_splits_count = defaultdict(int)
-    reads_by_coordinate_splits = defaultdict(list)
-    
-    
-    logging.info("Merging pairs")   
-    # Paired reads
-    for read, event_type in event_pairs:
-        start_chr = read.reference_name
-        start_coord = read.reference_start
-        end_chr = read.next_reference_name
-        end_coord = read.next_reference_start
-        
-        found_close_pair = False
-        
-        for (chr_pair, coord_pair, existing_event_type), count in chromosome_coordinate_pairs_count.items():
-            (stored_start_chr, stored_end_chr) = chr_pair
-            (stored_start_coord, stored_end_coord) = coord_pair
-            
-            if (start_chr == stored_start_chr and end_chr == stored_end_chr and
-                are_coordinates_close(start_coord, stored_start_coord, tolerance) and
-                are_coordinates_close(end_coord, stored_end_coord, tolerance) and
-                event_type == existing_event_type):
-                
-                chromosome_coordinate_pairs_count[(chr_pair, coord_pair, event_type)] += 1
-                reads_by_coordinate_pair[(chr_pair, coord_pair, event_type)].append(read)
-                found_close_pair = True
-                break
-        
-        if not found_close_pair:
-            chr_pair = (start_chr, end_chr)
-            coord_pair = (start_coord, end_coord)
-            chromosome_coordinate_pairs_count[(chr_pair, coord_pair, event_type)] += 1
-            reads_by_coordinate_pair[(chr_pair, coord_pair, event_type)].append(read)
-    
-    logging.info("Merging splits")
-    # Split reads
-    for read, event_type in event_splits:
-        start_chr = read.reference_name
-        start_coord = read.reference_start
-        end_chr = read.next_reference_name
-        end_coord = read.next_reference_start
-        
-        found_close_splits = False
-        
-        for (chr_splits, coord_splits, existing_event_type), count in chromosome_coordinate_splits_count.items():
-            (stored_start_chr, stored_end_chr) = chr_splits
-            (stored_start_coord, stored_end_coord) = coord_splits
-            
-            if (start_chr == stored_start_chr and end_chr == stored_end_chr and
-                are_coordinates_close(start_coord, stored_start_coord, tolerance) and
-                are_coordinates_close(end_coord, stored_end_coord, tolerance) and
-                event_type == existing_event_type):
-                
-                existing_reads = reads_by_coordinate_splits[(chr_splits, coord_splits, event_type)]
-                if any(are_secondary_alignments_same(read, existing_read) for existing_read in existing_reads):
-                    chromosome_coordinate_splits_count[(chr_splits, coord_splits, event_type)] += 1
-                    reads_by_coordinate_splits[(chr_splits, coord_splits, event_type)].append(read)
-                    found_close_splits = True
-                    break
-        
-        if not found_close_splits:
-            chr_splits = (start_chr, end_chr)
-            coord_splits = (start_coord, end_coord)
-            chromosome_coordinate_splits_count[(chr_splits, coord_splits, event_type)] += 1
-            reads_by_coordinate_splits[(chr_splits, coord_splits, event_type)].append(read)
-
-    logging.info("Writting results for pairs and split")
-
-    with open(output_file_pairs, 'a') as p:
-        for (chr_pair, coord_pair, event_type), count in chromosome_coordinate_pairs_count.items():
-            reads_to_merge = reads_by_coordinate_pair[(chr_pair, coord_pair, event_type)]
-            start_chr, end_chr = chr_pair
-            min_start = min(read.reference_start for read in reads_to_merge)
-            max_start = max(read.reference_start for read in reads_to_merge)
-            min_end = min(read.next_reference_start for read in reads_to_merge)
-            max_end = max(read.next_reference_start for read in reads_to_merge)
-            depth_count = np.mean(depth_data[min_start:max_start+1])
-            read_count = count
-            
-            if read_count > (depth_count * 0.2):
-                p.write(f"{start_chr}\t{min_start}\t{max_start}\t{end_chr}\t{min_end}\t{max_end}\t{event_type}\t{read_count}\t{depth_count}\n")
-                
-    with open(output_file_splits, 'a') as s:
-        for (chr_splits, coord_splits, event_type), count in chromosome_coordinate_splits_count.items():
-            reads_to_merge = reads_by_coordinate_splits[(chr_splits, coord_splits, event_type)]
-            start_chr, end_chr = chr_splits
-            min_start = min(read.reference_start for read in reads_to_merge)
-            max_start = max(read.reference_start for read in reads_to_merge)
-            min_end = min(read.next_reference_start for read in reads_to_merge)
-            max_end = max(read.next_reference_start for read in reads_to_merge)
-            depth_count = np.mean(depth_data[min_start:max_start+1])
-            read_count = count
-            
-            if read_count > (depth_count * 0.2):
-                s.write(f"{start_chr}\t{min_start}\t{max_start}\t{end_chr}\t{min_end}\t{max_end}\t{event_type}\t{read_count}\t{depth_count}\n")
-
-            
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving find_paired_split for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-
-def are_coordinates_close(coord1, coord2, tolerance):
-    return abs(coord1 - coord2) <= tolerance
-
-def are_secondary_alignments_same(read1, read2):
-    return read1.get_tag("SA") == read2.get_tag("SA")
-             
-def main_calcul(
-    bamfile_path,
-    bamfile_handle,
-    chr,
-    seq_length,
-    window_size,
-    step_size,
-    zscore_threshold,
-    lengthFilter,
-    output_file,
-    output_file_pairs,
-    output_file_splits,
-    sample,
-):
-    """
-    Perform structural variant detection and VCF file generation.
-
-    This function orchestrates a series of computations and data manipulations,
-    leveraging GPU acceleration for performance improvements in genomic data analysis.
-
-    Parameters
-    ----------
-        bamfile_path : str
-            Path to the BAM file containing aligned reads.
-        chr : str
-            Chromosome identifier for which analysis is performed.
-        seq_length : int
-            Length of the chromosome sequence.
-        window_size : int
-            Size of the sliding window used for analysis.
-        step_size : int
-            Size of the step when moving the window along the chromosome.
-        zscore_threshold : float
-            Threshold value for detecting significant events based on Z-scores.
-        lengthFilter : int
-            Minimum length threshold for including variants in the VCF file.
-        output_file : str
-            Path to the output VCF file.
-        sample : str
-            Name of the sample being analyzed.
-
-    Returns
-    -------
-        None
-    """
-
-    sys.stderr.write("\t entering main_calcul\n")
-    global seq
-    events = {}
-    segment = {}
-    signal = {}
-
-    # Call the different processing steps
-    map_data = calcul_mappability(seq_length, mappability, chr)
-    gc_data = calcul_gc_content(seq_length, chr, seq)
-    #depth_data = calcul_depth_seq(seq_length, bamfile_path, chr)
-    #depth_data_count = calcul_depth_seq_count(seq_length, bamfile_path, chr)
-    #depth_data = calcul_depth_seq_copy(seq_length, bamfile_path, chr)
-    depth_data = calcul_depth_seq_samtools(seq_length, bamfile_path, chr)
-    distances_data = calcul_distance(bamfile_handle, chr, seq_length)
-    MIN_DISTANCE, MAX_DISTANCE = stats_distances(distances_data, chr)
-    find_paired_split(chr, MIN_DISTANCE, MAX_DISTANCE, depth_data, output_file_pairs, output_file_splits, bamfile_handle, seq_length)
-    
-    # Transfer the NumPy array to CUDA
-    d_depth_data = cuda.mem_alloc(depth_data.nbytes)
-    cuda.memcpy_htod(d_depth_data, depth_data)
-    sys.stderr.write(
-        "\t d_depth_data : %s, %s\n"
-        % (d_depth_data, d_depth_data.as_buffer(depth_data.nbytes))
-    )
-    
-    # d_depth_data_count = cuda.mem_alloc(depth_data_count.nbytes)
-    # cuda.memcpy_htod(d_depth_data_count, depth_data_count)
-    # sys.stderr.write(
-    #     "\t d_depth_data_count : %s, %s\n"
-    #     % (d_depth_data_count, d_depth_data_count.as_buffer(depth_data_count.nbytes))
-    # )
-
-    d_gc_data = cuda.mem_alloc(gc_data.nbytes)
-    cuda.memcpy_htod(d_gc_data, gc_data)
-    sys.stderr.write(
-        "\t d_gc_data : %s, %s\n"
-        % (d_gc_data, d_gc_data.as_buffer(gc_data.nbytes))
-    )
-
-    d_map_data = cuda.mem_alloc(map_data.nbytes)
-    cuda.memcpy_htod(d_map_data, map_data)
-    sys.stderr.write(
-        "\t d_map_data : %s, %s\n"
-        % (d_map_data, d_map_data.as_buffer(map_data.nbytes))
-    )
-
-    # Compute the CUDA block and grid sizes
-    block_size = num_cores
-    sys.stderr.write("\t blocksize (number of threads) = %s\n" % (num_cores))
-    num_windows = int((seq_length - window_size) / step_size) + 1
-    grid_size = int(num_windows / block_size) + 1
-    sys.stderr.write("\t grid_size = %s\n" % grid_size)
-
-    # Initialize the host arrays that will store the results
-    depth_results = np.zeros(num_windows, dtype=np.float32)
-    sys.stderr.write("\t Defined depth_results\n")
-
-    depth_results_count = np.zeros(num_windows, dtype=np.float32)
-    sys.stderr.write("\t Defined depth_results_count\n")
-
-    gc_results = np.zeros(num_windows, dtype=np.float32)
-    sys.stderr.write("\t Defined gc_results\n")
-
-    map_results = np.zeros(num_windows, dtype=np.float32)
-    sys.stderr.write("\t Defined map_results\n")
-
-    depth_correction_results = np.zeros(num_windows, dtype=np.float32)
-    sys.stderr.write("\t Defined depth_correction_results\n")
-
-    normalize_depth_results = np.zeros(num_windows, dtype=np.float32)
-    sys.stderr.write("\t Defined normalize_depth_results\n")
-
-    ratio_par_window_results = np.zeros(num_windows, dtype=np.float32)
-    sys.stderr.write("\t Defined ratio_par_window_results\n")
-
-    z_score_results = np.zeros(num_windows, dtype=np.float32)
-    sys.stderr.write("\t Defined z_score_results\n")
-
-    ratio_par_mean_ratio_results = np.zeros(num_windows, dtype=np.float32)
-    sys.stderr.write("\t Defined ratio_par_mean_ratio_results\n")
-
-    # Allocate memory for the results on the CUDA device
-    d_depth_results = cuda.mem_alloc(depth_results.nbytes)
-    sys.stderr.write(
-        "\t d_depth_results = %s\n"
-        % d_depth_results.as_buffer(depth_results.nbytes)
-    )
-    sys.stderr.write("\t depth_results.nbytes = %s\n" % depth_results.nbytes)
-
-    d_depth_results_count = cuda.mem_alloc(depth_results_count.nbytes)
-    sys.stderr.write(
-        "\t d_depth_results_count = %s\n"
-        % d_depth_results_count.as_buffer(depth_results_count.nbytes)
-    )
-    sys.stderr.write("\t depth_results_count.nbytes = %s\n" % depth_results_count.nbytes)
-
-    d_gc_results = cuda.mem_alloc(gc_results.nbytes)
-    sys.stderr.write(
-        "\t d_gc_results = %s\n" % d_gc_results.as_buffer(gc_results.nbytes)
-    )
-    sys.stderr.write("\t gc_results.nbytes = %s\n" % gc_results.nbytes)
-
-    d_map_results = cuda.mem_alloc(map_results.nbytes)
-    sys.stderr.write(
-        "\t d_map_results = %s\n"
-        % d_map_results.as_buffer(map_results.nbytes)
-    )
-    sys.stderr.write("\t map_results.nbytes = %s\n" % map_results.nbytes)
-
-    d_depth_correction_results = cuda.mem_alloc(depth_correction_results.nbytes)
-    sys.stderr.write(
-        "\t d_depth_correction_results = %s\n"
-        % d_depth_correction_results.as_buffer(depth_correction_results.nbytes)
-    )
-    sys.stderr.write(
-        "\t depth_correction_results.nbytes = %s\n" % depth_correction_results.nbytes
-    )
-
-    d_normalize_depth_results = cuda.mem_alloc(normalize_depth_results.nbytes)
-    sys.stderr.write(
-        "\t d_normalize_depth_results = %s\n"
-        % d_normalize_depth_results.as_buffer(normalize_depth_results.nbytes)
-    )
-    sys.stderr.write(
-        "\t normalize_depth_results.nbytes = %s\n" % normalize_depth_results.nbytes
-    )
-
-    d_ratio_par_window_results = cuda.mem_alloc(ratio_par_window_results.nbytes)
-    sys.stderr.write(
-        "\t d_ratio_par_window_results = %s\n"
-        % d_ratio_par_window_results.as_buffer(ratio_par_window_results.nbytes)
-    )
-    sys.stderr.write(
-        "\t ratio_par_window_results.nbytes = %s\n" % ratio_par_window_results.nbytes
-    )
-
-    d_z_score_results = cuda.mem_alloc(z_score_results.nbytes)
-    sys.stderr.write(
-        "\t d_z_score_results = %s\n"
-        % d_z_score_results.as_buffer(z_score_results.nbytes)
-    )
-    sys.stderr.write("\t z_score_results.nbytes = %s\n" % z_score_results.nbytes)
-
-    d_ratio_par_mean_ratio_results = cuda.mem_alloc(ratio_par_mean_ratio_results.nbytes)
-    sys.stderr.write(
-        "\t d_ratio_par_mean_ratio_results = %s\n"
-        % d_ratio_par_mean_ratio_results.as_buffer(ratio_par_mean_ratio_results.nbytes)
-    )
-    sys.stderr.write(
-        "\t ratio_par_mean_ratio_results.nbytes = %s\n"
-        % ratio_par_mean_ratio_results.nbytes
-    )
-
-    # Launch the depth kernel on the GPU
-    calcul_depth_kernel_cuda(
-        d_depth_data,
-        np.int32(seq_length),
-        np.int32(window_size),
-        np.int32(step_size),
-        d_depth_results,
-        block=(block_size, 1, 1),
-        grid=(grid_size, 1),
-    )
-    sys.stderr.write("\t called calcul_depth_kernel_cuda\n")
-
-    # calcul_depth_count_kernel_cuda(
-    #     d_depth_data_count,
-    #     np.int32(seq_length),
-    #     np.int32(window_size),
-    #     np.int32(step_size),
-    #     d_depth_results,
-    #     block=(block_size, 1, 1),
-    #     grid=(grid_size, 1),
-    # )
-    # sys.stderr.write("\t called calcul_depth_count_kernel_cuda\n")
-
-    calcul_gc_kernel_cuda(
-        d_gc_data,
-        np.int32(seq_length),
-        np.int32(window_size),
-        np.int32(step_size),
-        d_gc_results,
-        block=(block_size, 1, 1),
-        grid=(grid_size, 1),
-    )
-    sys.stderr.write("\t appel fonction calcul_gc_kernel_cuda\n")
-
-    calcul_map_kernel_cuda(
-        d_map_data,
-        np.int32(seq_length),
-        np.int32(window_size),
-        np.int32(step_size),
-        d_map_results,
-        block=(block_size, 1, 1),
-        grid=(grid_size, 1),
-    )
-    sys.stderr.write("\t appel fonction calcul_map_kernel_cuda\n")
-
-    calcul_depth_correction_kernel_cuda(
-        d_depth_results,
-        d_map_results,
-        np.int32(seq_length),
-        np.int32(window_size),
-        np.int32(step_size),
-        d_depth_correction_results,
-        block=(block_size, 1, 1),
-        grid=(grid_size, 1),
-    )
-    sys.stderr.write("\t appel fonction calcul_depth_correction_kernel_cuda\n")
-
-    context.synchronize()
-
-    # Copy the results from the CUDA device back to the host
-
-    cuda.memcpy_dtoh(depth_results, d_depth_results)  # cuda.memcpy_dtoh(dest, src)
-    sys.stderr.write(
-        "\t Copied GPU results (d_depth_results) back to the CPU (depth_results)\n"
-    )
-    
-    # cuda.memcpy_dtoh(depth_results_count, d_depth_results_count)
-    # sys.stderr.write(
-    #     "\t Copied GPU results (d_depth_results_count) back to the CPU (depth_results_count)\n"
-    # )
-
-    cuda.memcpy_dtoh(gc_results, d_gc_results)  # cuda.memcpy_dtoh(dest, src)
-    sys.stderr.write(
-        "\t Copied GPU results (d_gc_results) back to the CPU (gc_results)\n"
-    )
-
-    cuda.memcpy_dtoh(map_results, d_map_results)  # cuda.memcpy_dtoh(dest, src)
-    sys.stderr.write(
-        "\t Copied GPU results (d_map_results) back to the CPU (map_results)\n"
-    )
-
-    cuda.memcpy_dtoh(
-        depth_correction_results, d_depth_correction_results
-    )  # cuda.memcpy_dtoh(dest, src)
-    sys.stderr.write(
-        "\t Copied GPU results (d_depth_correction_results) back to the CPU (depth_correction_results)\n"
-    )
-
-    ### NORMALIZATION ###
-
-    # Call the median computation functions
-    sys.stderr.write("\t called the median computation functions\n")
-    m = calcul_med_total(depth_correction_results, chr)
-    gc_to_median = calcul_med_same_gc(gc_results, depth_correction_results, chr)
-
-    # Convert gc_to_median to a NumPy array for the transfer to CUDA
-    sys.stderr.write("\t Converted the medians to a numpy array\n")
-    gc_to_median_array = np.zeros(int(max(gc_results)) + 1, dtype=np.float32)
-    for gc, median in gc_to_median.items():
-        gc_to_median_array[int(gc)] = median
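-    # Illustrative example (made-up values): if gc_to_median == {35.0: 28.4, 36.0: 29.1},
-    # then gc_to_median_array[35] == 28.4 and gc_to_median_array[36] == 29.1, so the
-    # kernel can look up the median directly by the rounded GC percentage.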
-
-    # Allocate memory for gc_to_median on the CUDA device
-    sys.stderr.write("\t Allocated GPU memory for the medians\n")
-    d_gc_to_median = cuda.mem_alloc(gc_to_median_array.nbytes)
-    cuda.memcpy_htod(d_gc_to_median, gc_to_median_array)
-
-    # Launch the normalization kernel
-    normalize_depth_kernel_cuda(
-        d_depth_correction_results,
-        d_gc_results,
-        np.float32(m),
-        d_gc_to_median,
-        np.int32(seq_length),
-        np.int32(window_size),
-        np.int32(step_size),
-        d_normalize_depth_results,
-        block=(block_size, 1, 1),
-        grid=(grid_size, 1),
-    )
-    sys.stderr.write("\t appel fonction normalize_depth_kernel_cuda\n")
-
-    context.synchronize()
-
-    # Copy the normalized results from the CUDA device back to the host
-    cuda.memcpy_dtoh(normalize_depth_results, d_normalize_depth_results)
-
-    ### Ratio per window ###
-
-    # Call the mean computation function
-    sys.stderr.write("\t called the mean computation function\n")
-    mean_chr = calcul_moy_totale(normalize_depth_results, chr)
-
-    # Launch the ratio kernel
-    ratio_par_window_kernel_cuda(
-        d_normalize_depth_results,
-        np.float32(mean_chr),
-        np.int32(seq_length),
-        np.int32(window_size),
-        np.int32(step_size),
-        d_ratio_par_window_results,
-        block=(block_size, 1, 1),
-        grid=(grid_size, 1),
-    )
-    sys.stderr.write("\t appel fonction ratio_par_window_kernel_cuda\n")
-
-    context.synchronize()
-
-    # Copy the ratio results from the CUDA device back to the host
-    cuda.memcpy_dtoh(ratio_par_window_results, d_ratio_par_window_results)
-
-    # Compute summary statistics from the per-window ratios
-    mean_ratio, std_ratio, med_ratio = compute_mean_std_med(ratio_par_window_results, chr)
-
-    # Launch the kernel computing the ratio divided by the mean ratio
-    ratio_par_mean_ratio_kernel_cuda(
-        d_ratio_par_window_results,
-        np.float32(mean_ratio),
-        np.int32(seq_length),
-        np.int32(window_size),
-        np.int32(step_size),
-        d_ratio_par_mean_ratio_results,
-        block=(block_size, 1, 1),
-        grid=(grid_size, 1),
-    )
-    sys.stderr.write("\t appel fonction ratio_par_mean_ratio_kernel_cuda\n")
-
-    # Launch the Z-score kernel
-    z_score_kernel_cuda(
-        d_ratio_par_window_results,
-        np.float32(mean_ratio),
-        np.float32(std_ratio),
-        np.int32(seq_length),
-        np.int32(window_size),
-        np.int32(step_size),
-        d_z_score_results,
-        block=(block_size, 1, 1),
-        grid=(grid_size, 1),
-    )
-    sys.stderr.write("\t appel fonction z_score_kernel_cuda\n")
-
-    context.synchronize()
-
-    # Copy the ratio and Z-score results from the CUDA device back to the host
-    cuda.memcpy_dtoh(ratio_par_mean_ratio_results, d_ratio_par_mean_ratio_results)
-    cuda.memcpy_dtoh(z_score_results, d_z_score_results)
-
-    # Call create_signal
-    create_signal(signal, chr, z_score_results, step_size)
-
-    # Call detect_events
-    detect_events(
-        z_score_results,
-        zscore_threshold,
-        events,
-        med_ratio,
-        ratio_par_mean_ratio_results,
-        chr,
-    )
-
-    # Call segmentation
-    segmentation(events, segment, chr)
-
-    # Call display_results_vcf
-    display_results_vcf(sample, segment, signal, lengthFilter, output_file, chr)
-
-    # Write the per-window results to the output file
-    # with open(output_file, 'a') as f:
-    #     sys.stderr.write("\t writing the output files\n")
-    #     for i, (depth_data, depth_correction, depth_normalize_val, gc_data, map_data, ratio, ratio_mean_ratio, z_score) in enumerate(zip(depth_results, depth_correction_results, normalize_depth_results, gc_results, map_results, ratio_par_window_results, ratio_par_mean_ratio_results, z_score_results)):
-    #         pos_start = (i * step_size) + 1
-    #         pos_end = pos_start + window_size
-    #         f.write(f"{chr}\t{pos_start}\t{pos_end}\t{depth_data}\t{depth_correction}\t{depth_normalize_val}\t{gc_data}\t{map_data}\t{ratio}\t{ratio_mean_ratio}\t{z_score}\n")
-
-# Main program
-# Query the GPU for its maximum block size
-header_written = False
-
-sample = get_sample_name(bamfile_path)
-device = cuda.Device(0)
-attributes = device.get_attributes()
-# Attribute 1 is MAX_THREADS_PER_BLOCK; name it explicitly for clarity
-num_cores = attributes[cuda.device_attribute.MAX_THREADS_PER_BLOCK]
-print("Number of CPUs: ", multiprocessing.cpu_count())
-print(f"Max GPU threads per block: {num_cores}")
-
-gc_file = "/work/gad/shared/pipeline/grch38/index/grch38_essential.fa"
-mappability_file = "/work/gad/shared/pipeline/grch38/cnv/k100.Umap.MultiTrackMappability.bedgraph"
-seq = parse_fasta(gc_file)
-mappability = dico_mappabilite(mappability_file)
-#bamfile_path_2 = "/work/gad/shared/analyse/test/cnvGPU/test_scalability/dijen1000.bam"
-
-
-if num_chr == "ALL" :
-    with pysam.AlignmentFile(bamfile_path, "rb") as bamfile_handle:
-        for i, seq_length in enumerate(bamfile_handle.lengths):
-            chr = bamfile_handle.references[i]
-            
-            # Exclude chrM
-            if chr == "chrM":
-                continue
-                
-            sys.stderr.write("Chromosome : %s, seq length : %s\n" % (chr, seq_length))
-
-            # Call the main computation for this chromosome
-            main_calcul(
-                bamfile_path,
-                bamfile_handle,
-                chr,
-                seq_length,
-                window_size,
-                step_size,
-                zscore_threshold,
-                lengthFilter,
-                output_file,
-                output_file_pairs,
-                output_file_splits,
-                sample,
-            )
-
-        logging.basicConfig(
-            filename="%s" % (logfile),
-            filemode="a",
-            level=logging.INFO,
-            format="%(asctime)s %(levelname)s - %(message)s",
-        )
-        logging.info("end")
-
-else:
-    with pysam.AlignmentFile(bamfile_path, "rb") as bamfile_handle:
-        seq_length = bamfile_handle.lengths[int(num_chr) - 1]
-        chr = bamfile_handle.references[int(num_chr) - 1]
-        sys.stderr.write("Chromosome : %s, seq length : %s\n" % (chr, seq_length))
-
-        # Call the main computation for this chromosome
-        main_calcul(
-            bamfile_path,
-            bamfile_handle,
-            chr,
-            seq_length,
-            window_size,
-            step_size,
-            zscore_threshold,
-            lengthFilter,
-            output_file,
-            output_file_pairs,
-            output_file_splits,
-            sample,
-        )
-
-    logging.basicConfig(
-        filename="%s" % (logfile),
-        filemode="a",
-        level=logging.INFO,
-        format="%(asctime)s %(levelname)s - %(message)s",
-    )
-    logging.info("end")
diff --git a/test_gpu_time_normalisation.py b/test_gpu_time_normalisation.py
deleted file mode 100644
index 8f0c681..0000000
--- a/test_gpu_time_normalisation.py
+++ /dev/null
@@ -1,1893 +0,0 @@
-import pysam
-import sys
-import getopt
-import logging
-import numpy as np
-from numpy.polynomial import Polynomial
-import pycuda.driver as cuda
-import pycuda.autoinit
-from pycuda.compiler import SourceModule
-import pycuda.gpuarray as gpuarray
-from pycuda.autoinit import context
-import multiprocessing
-from multiprocessing import Process, Queue
-import time
-import subprocess
-import pandas as pd
-import scipy
-
-# Options
-
-
-def parse_arguments():
-    """
-    Parse command-line arguments.
-
-    This function parses the command-line arguments provided to the script and extracts the values for various parameters.
-
-    Parameters
-    ----------
-    None
-
-    Returns
-    -------
-    tuple
-        A tuple containing the following elements:
-        bamfile_path : str or None
-            The path to the BAM file.
-        num_chr : str or None
-            The chromosome selector parsed from the -c option.
-        window_size : int or None
-            The size of the window.
-        step_size : int or None
-            The step size for the analysis.
-        zscore_threshold : float or None
-            The threshold for the Z-score.
-        lengthFilter : int or None
-            The filter for length.
-        output_file : str or None
-            The path to the output file.
-        logfile : str or None
-            The path to the log file.
-
-        Each element can be None if the corresponding argument was not provided.
-    """
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "b:c:w:s:z:l:t:o:e:")
-        (
-            bamfile_path,
-            num_chr,
-            window_size,
-            step_size,
-            zscore_threshold,
-            lengthFilter,
-            output_file,
-            logfile,
-        ) = (None, None, None, None, None, None, None, None)
-        for opt, arg in opts:
-            if opt == "-b":
-                bamfile_path = arg
-            if opt == "-c":
-                num_chr = arg
-            if opt == "-w":
-                window_size = int(arg)
-            if opt == "-s":
-                step_size = int(arg)
-            if opt == "-z":
-                zscore_threshold = float(arg)
-            if opt == "-l":
-                lengthFilter = int(arg)
-            if opt == "-o":
-                output_file = arg
-            if opt == "-e":
-                logfile = arg
-        return (
-            bamfile_path,
-            num_chr,
-            window_size,
-            step_size,
-            zscore_threshold,
-            lengthFilter,
-            output_file,
-            logfile,
-        )
-    except getopt.GetoptError:
-        print("Invalid argument")
-        sys.exit(1)
-
-
-if __name__ == "__main__":
-    (
-        bamfile_path,
-        num_chr,
-        window_size,
-        step_size,
-        zscore_threshold,
-        lengthFilter,
-        output_file,
-        logfile,
-    ) = parse_arguments()
-    logging.basicConfig(
-        filename="%s" % (logfile),
-        filemode="a",
-        level=logging.INFO,
-        format="%(asctime)s %(levelname)s - %(message)s",
-    )
-    logging.info("start")
-    global seq
-
-# CUDA code
-mod = SourceModule(
-    """
-// Kernel computing the raw mean depth
-
-__global__ void calcul_depth_kernel(int *depth_data, int seq_length, int window_size, int step_size, float *depth_results) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    // Guard with the number of windows, (seq_length - window_size) / step_size + 1,
-    // which is the size of the host-side result arrays
-    if (idx < (seq_length - window_size) / step_size + 1) {
-        int pos_start = (idx * step_size) + 1;
-        int pos_end = pos_start + window_size;
-        int count_reads = 0;
-
-        for (int i = pos_start; i < pos_end && i < seq_length; i++) {
-            count_reads += depth_data[i];
-        }
-
-        float avg_depth = (float)count_reads / window_size;
-        depth_results[idx] = avg_depth;
-    }
-}
-
-// Kernel computing the GC content
-
-__global__ void calcul_gc_kernel(char *gc_data, int seq_length, int window_size, int step_size, float *gc_results) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    if (idx < (seq_length - window_size) / step_size + 1) {
-        int pos_start = (idx * step_size) + 1;
-        int pos_end = pos_start + window_size;
-        int gc_count = 0;
-
-        // Count G/C bases over exactly window_size positions, staying in bounds
-        for (int i = pos_start; i < pos_end && i < seq_length; ++i) {
-            if ((gc_data[i] == 'G') || (gc_data[i] == 'C') || (gc_data[i] == 'g') || (gc_data[i] == 'c')) {
-                gc_count++;
-            }
-        }
-
-        float avg_gc = ((float)gc_count / window_size) * 100;
-        gc_results[idx] = avg_gc;
-    }
-}
-
-// Kernel computing the weighted mean mappability
-
-__global__ void calcul_map_kernel(float *map_data, int seq_length, int window_size, int step_size, float *map_results) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    if (idx < (seq_length - window_size) / step_size + 1) {
-        int pos_start = (idx * step_size) + 1;
-        int pos_end = pos_start + window_size;
-        float weighted_sum = 0.0;
-        float total_weight = 0.0;
-
-        for (int i = pos_start; i < pos_end && i < seq_length; ++i) {
-            float weight = map_data[i];
-            weighted_sum += weight;
-            total_weight += 1;
-        }
-
-        float avg_map = weighted_sum / total_weight;
-        map_results[idx] = avg_map;
-    }
-}
-
-// Kernel bucketing depth values by rounded GC content
-
-__global__ void filter_read_gc_kernel(float *gc_results, float *depth_results, int n, float *gc_35, float *gc_40, float *gc_45, float *gc_50, float *gc_55) {
-    int idx = blockIdx.x * blockDim.x + threadIdx.x;
-
-    if (idx < n) {
-        float gc_value = gc_results[idx];
-        float depth_value = depth_results[idx];
-        int gc_value_rounded = round(gc_value);
-
-        if (gc_value_rounded == 35) {
-            int pos = atomicAdd((int*)&gc_35[0], 1);
-            gc_35[pos + 1] = depth_value;
-        } else if (gc_value_rounded == 40) {
-            int pos = atomicAdd((int*)&gc_40[0], 1);
-            gc_40[pos + 1] = depth_value;
-        } else if (gc_value_rounded == 45) {
-            int pos = atomicAdd((int*)&gc_45[0], 1);
-            gc_45[pos + 1] = depth_value;
-        } else if (gc_value_rounded == 50) {
-            int pos = atomicAdd((int*)&gc_50[0], 1);
-            gc_50[pos + 1] = depth_value;
-        } else if (gc_value_rounded == 55) {
-            int pos = atomicAdd((int*)&gc_55[0], 1);
-            gc_55[pos + 1] = depth_value;
-        }
-    }
-}
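-
-// Layout note for the gc_* buffers above: element 0 doubles as a running counter.
-// atomicAdd((int*)&gc_xx[0], 1) reinterprets that float slot as an int, so it holds
-// the count as raw integer bits and must not be read back as a float; the bucketed
-// depth values start at index 1.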
-
-
-// Kernel computing the depth corrected by mappability
-
-__global__ void calcul_depth_correction_kernel(float *depth_results, float *map_results, int seq_length, int window_size, int step_size, float *depth_correction_results) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    if (idx < (seq_length - window_size) / step_size + 1) {
-        float avg_depth = depth_results[idx];
-        float avg_map = map_results[idx];
-
-        // Check that avg_map is non-zero to avoid division by zero
-        float depth_correction = (avg_map != 0.0f) ? (avg_depth / avg_map) : 0.0f;
-        depth_correction_results[idx] = depth_correction;
-    }
-}
-
-
-// Kernel normalizing the corrected depth
-
-__global__ void normalize_depth_kernel(float *depth_correction, float *gc_results, float m, float *gc_to_median, int seq_length, int window_size, int step_size, float *depth_normalize) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    if (idx < (seq_length - window_size) / step_size + 1) {
-        float mGC = gc_to_median[(int)gc_results[idx]];
-
-        // Check that mGC is non-zero to avoid division by zero
-        float depth_normalize_val = (mGC != 0.0f) ? (depth_correction[idx] * m / mGC) : 0.0f;
-        depth_normalize[idx] = depth_normalize_val;
-    }
-}
-
-// Kernel computing the per-window ratio
-
-__global__ void ratio_par_window_kernel(float *depth_results, float mean_chr, int seq_length, int window_size, int step_size, float *ratio_par_window_results) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    if (idx < (seq_length - window_size) / step_size + 1) {
-        float ratio = depth_results[idx] / mean_chr;
-        ratio_par_window_results[idx] = ratio;
-    }
-}
-
-// Kernel computing the normalized per-window ratio
-
-__global__ void ratio_par_window_norm_kernel(float *normalize_depth_results, float mean_chr_norm, int seq_length, int window_size, int step_size, float *ratio_par_window_norm_results) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    if (idx < (seq_length - window_size) / step_size + 1) {
-        float ratio_norm = normalize_depth_results[idx] / mean_chr_norm;
-        ratio_par_window_norm_results[idx] = ratio_norm;
-    }
-}
-
-// Kernel computing the Z-score
-
-__global__ void z_score_kernel(float *ratio_norm, float mean_ratio, float std_ratio, int seq_length, int window_size, int step_size, float *z_score_results) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    if (idx < (seq_length - window_size) / step_size + 1) {
-        float value = ratio_norm[idx];
-
-        // Replace inf or NaN values by 0
-        if (isinf(value) || isnan(value)) {
-            value = 0.0f;
-        }
-
-        // Compute the Z-score
-        float z_score = (value - mean_ratio) / std_ratio;
-        z_score_results[idx] = z_score;
-    }
-}
-
-// Kernel computing the ratio divided by the mean ratio
-
-__global__ void ratio_par_mean_ratio_kernel(float *ratio_norm, float mean_ratio, int seq_length, int window_size, int step_size, float *ratio_par_mean_ratio_results) {
-    int idx = threadIdx.x + blockIdx.x * blockDim.x;
-
-    if (idx < (seq_length - window_size) / step_size + 1) {
-        float value = ratio_norm[idx];
-
-        // Replace inf or NaN values by 0
-        if (isinf(value) || isnan(value)) {
-            value = 0.0f;
-        }
-
-        // Compute the ratio relative to the mean ratio
-        float ratio_mean_ratio = value / mean_ratio;
-        ratio_par_mean_ratio_results[idx] = ratio_mean_ratio;
-    }
-}
-
-"""
-)
-
-# Retrieve the compiled kernel functions
-calcul_depth_kernel_cuda = mod.get_function("calcul_depth_kernel")
-calcul_gc_kernel_cuda = mod.get_function("calcul_gc_kernel")
-calcul_map_kernel_cuda = mod.get_function("calcul_map_kernel")
-filter_read_gc_kernel_cuda = mod.get_function("filter_read_gc_kernel")
-calcul_depth_correction_kernel_cuda = mod.get_function("calcul_depth_correction_kernel")
-normalize_depth_kernel_cuda = mod.get_function("normalize_depth_kernel")
-ratio_par_window_kernel_cuda = mod.get_function("ratio_par_window_kernel")
-ratio_par_window_norm_kernel_cuda = mod.get_function("ratio_par_window_norm_kernel")
-z_score_kernel_cuda = mod.get_function("z_score_kernel")
-ratio_par_mean_ratio_kernel_cuda = mod.get_function("ratio_par_mean_ratio_kernel")
-
-
-def merge_intervals(intervals):
-    """
-    Merge overlapping intervals with the same score.
-
-    This function takes a list of intervals and merges those that have the same score.
-
-    Parameters
-    ----------
-    intervals : list of tuples
-        A list where each element is a tuple (start, end, score).
-        The intervals should be sorted by start position.
-
-    Returns
-    -------
-    list of tuples
-        A list of merged intervals (start, end, score) where overlapping intervals with the same score are combined.
-    """
-    merged = []
-    start, end, score = intervals[0]
-    for interval in intervals[1:]:
-        if interval[2] == score:
-            end = interval[1]
-        else:
-            merged.append((start, end, score))
-            start, end, score = interval
-    merged.append((start, end, score))
-    
-    return merged
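-
-# Example (illustrative values): contiguous intervals sharing a score are merged:
-#   merge_intervals([(0, 10, 1.0), (10, 25, 1.0), (25, 30, 0.5)])
-#   -> [(0, 25, 1.0), (25, 30, 0.5)]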
-
-
-def dico_mappabilite(mappability_file):
-    """
-    Create a dictionary of mappability scores from a file.
-
-    This function reads a mappability file and creates a dictionary with chromosomes as keys and mappability scores as values.
-
-    Parameters
-    ----------
-    mappability_file : str
-        The path to the mappability file. Each line in the file should have the format:
-        chromosome, start_pos, end_pos, score.
-
-    Returns
-    -------
-    dict
-        A dictionary where keys are chromosome names and values are another dictionary with start positions as keys
-        and mappability scores as values.
-    """
-    logging.info("Entering dico_mappability")
-    start_time = time.time()
-
-    mappability_dico = {}
-
-    logging.info(" In dico_mappability : Open file and create the dico")
-    start_time_1 = time.time()
-
-    with open(mappability_file, "r") as f:
-        for line in f:
-            fields = line.strip().split("\t")
-            if len(fields) < 4:
-                continue
-
-            chromosome = fields[0]
-            start_pos = int(fields[1])
-            end_pos = int(fields[2])
-            score = float(fields[3])
-
-            if chromosome not in mappability_dico:
-                mappability_dico[chromosome] = []
-
-            mappability_dico[chromosome].append((start_pos, end_pos, score))
-
-    end_time_1 = time.time()
-    elapsed_time_1 = end_time_1 - start_time_1
-    logging.info(f"In dico_mappability : Leaving file and create the dico (Time taken: {elapsed_time_1:.4f} seconds)")
-
-    # Add position 0 for each chromosome
-    logging.info(" In dico_mappability : Add position 0 for each chromosome")
-    start_time_2 = time.time()
-
-    for chromosome, intervals in mappability_dico.items():
-        if intervals[0][0] != 0:
-            mappability_dico[chromosome].insert(0, (0, intervals[0][0], 0))
-
-    end_time_2 = time.time()
-    elapsed_time_2 = end_time_2 - start_time_2
-    logging.info(f"In dico_mappability : Ending add position 0 for each chromosome (Time taken: {elapsed_time_2:.4f} seconds)")
-
-    # Merge intervals with the same score
-    logging.info(" In dico_mappability : Merge intervals with the same score")
-    start_time_3 = time.time()
-
-    for chromosome, intervals in mappability_dico.items():
-        merged_intervals = merge_intervals(intervals)
-        mappability_dico[chromosome] = {
-            start: score for start, _, score in merged_intervals
-        }
-    
-    end_time_3 = time.time()
-    elapsed_time_3 = end_time_3 - start_time_3
-    logging.info(f"In dico_mappability : Ending merge intervals with the same score (Time taken: {elapsed_time_3:.4f} seconds)")
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving dico_mappability (Time taken: {elapsed_time:.4f} seconds)")
-
-    return mappability_dico
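-
-# Example (illustrative): a bedgraph line "chr1\t0\t10000\t1.0" first becomes the
-# interval (0, 10000, 1.0); after merging, mappability_dico["chr1"] maps each
-# start position to the score that applies until the next start position,
-# e.g. {0: 1.0, 10000: 0.5, ...}.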
-
-
-def calcul_mappability(seq_length, mappability, chr):
-    """
-    Calculate mappability array for a given sequence length and chromosome.
-
-    This function generates an array of mappability scores for a specific chromosome and sequence length.
-
-    Parameters
-    ----------
-    seq_length : int
-        The length of the sequence.
-    mappability : dict
-        A dictionary containing mappability information, with chromosomes as keys and dictionaries of start positions
-        and scores as values.
-    chr : str
-        The chromosome for which the mappability is calculated.
-
-    Returns
-    -------
-    numpy.ndarray
-        An array of mappability scores for the given sequence length.
-    """
-    logging.info(f"Entering calcul_mappability for {chr}")
-    start_time = time.time()
-    
-    map_data = np.zeros(seq_length, dtype=np.float32)
-    sorted_keys = sorted(mappability[chr].keys())
-
-    prev_bound = 0
-    prev_mappability = 0
-
-    logging.info(f"In calcul_mappability : Entering for bound in sorted_keys for {chr}")
-    start_time_1 = time.time()
-
-    for bound in sorted_keys:
-        for i in range(prev_bound, min(seq_length, bound)):
-            map_data[i] = prev_mappability
-        prev_bound = bound
-        prev_mappability = mappability[chr][bound]
- 
-    end_time_1 = time.time()
-    elapsed_time_1 = end_time_1 - start_time_1
-    logging.info(f"In calcul_mappability : Leaving for bound in sorted_keys for {chr} (Time taken: {elapsed_time_1:.4f} seconds)")
-
-    # Fill remaining positions if sequence length exceeds last bound
-    logging.info(f"In calcul_mappability : Entering for i in range(prev_bound, seq_length) for {chr}")
-    start_time_2 = time.time()
-
-    for i in range(prev_bound, seq_length):
-        map_data[i] = prev_mappability
-
-    end_time_2 = time.time()
-    elapsed_time_2 = end_time_2 - start_time_2
-    logging.info(f"In calcul_mappability : Leaving for i in range(prev_bound, seq_length) for {chr} (Time taken: {elapsed_time_2:.4f} seconds)")
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving dico_mappability for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-
-    return map_data
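-
-# Example (illustrative): with mappability["chr1"] == {0: 0.0, 100: 1.0} and
-# seq_length == 150, positions 0-99 of map_data are filled with 0.0 and
-# positions 100-149 with 1.0 (each score applies from its key to the next one).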
-
-
-def parse_fasta(gc_file):
-    """
-    Parse a FASTA file and extract sequences.
-
-    This function reads a FASTA file and extracts the sequences, storing them in a dictionary with headers as keys.
-
-    Parameters
-    ----------
-    gc_file : str
-        The path to the FASTA file. The file should be in standard FASTA format.
-
-    Returns
-    -------
-    dict
-        A dictionary where keys are sequence headers and values are the corresponding sequences.
-    """
-    logging.info("Entering parse_fasta")
-    start_time = time.time()
-    
-    sequences = {}
-    with open(gc_file, "r") as f:
-        data = f.read().split(">")
-        for entry in data[1:]:
-            lines = entry.split("\n")
-            header = lines[0]
-            sequence = "".join(lines[1:])
-            sequences[header] = sequence
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving parse_fasta (Time taken: {elapsed_time:.4f} seconds)")
-
-    return sequences
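-
-# Example (illustrative): a FASTA file containing ">chr1\nACGT\nACGT\n" is parsed
-# into {"chr1": "ACGTACGT"}: the header line becomes the key and the sequence
-# lines are joined into the value.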
-
-
-def calcul_gc_content(seq_length, chr, seq):
-    """
-    Calculate the GC content of a given sequence.
-
-    This function generates an array representing the GC content for a specific chromosome and sequence length.
-
-    Parameters
-    ----------
-    seq_length : int
-        The length of the sequence.
-    chr : str
-        The chromosome for which the GC content is calculated.
-    seq : dict
-        A dictionary containing sequences, with chromosome names as keys and sequences as values.
-
-    Returns
-    -------
-    numpy.ndarray
-        An array of bytes ('S' dtype) representing the GC content for the given sequence length.
-    """
-    logging.info(f"Entering calcul_gc_content for {chr}")
-    start_time = time.time()
-
-    gc_data = np.zeros(seq_length, dtype="S")
-    for i in range(len(seq[chr])):
-        gc_data[i] = seq[chr][i]
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calcul_gc_content for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-
-    return gc_data
-
-
-def calcul_depth_seq(seq_length, bamfile_path, chr):
-    """
-    Calculate the sequencing depth for a given chromosome.
-
-    This function computes the sequencing depth for a specific chromosome and sequence length using a BAM file.
-
-    Parameters
-    ----------
-    seq_length : int
-        The length of the sequence.
-    bamfile_path : pysam.AlignmentFile
-        The path to the BAM file opened with pysam.AlignmentFile.
-    chr : str
-        The chromosome for which the depth is calculated.
-
-    Returns
-    -------
-    numpy.ndarray
-        An array of integers representing the sequencing depth for the given sequence length.
-    """
-    logging.info(f"Entering calcul_depth_seq for {chr}")
-    start_time = time.time()
-    
-    depth_data = np.zeros(seq_length, dtype=np.int32)
-    for pileupcolumn in bamfile_path.pileup(reference = chr):
-        pos = pileupcolumn.reference_pos
-        if pos >= seq_length:
-            break
-        depth_data[pos] = pileupcolumn.nsegments
-
-    #depth_data = bamfile_path.pileup().nsegments
-
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calcul_depth_seq for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-
-    return depth_data
-    
-def calcul_depth_seq_samtools(seq_length, bamfile_path, chr, num_threads=12):
-    """
-    Calculate the sequencing depth for a given chromosome using a parallelized bash script.
-
-    Parameters
-    ----------
-    seq_length : int
-        The length of the sequence.
-    bamfile_path : str
-        The path to the BAM file.
-    chr : str
-        The chromosome for which the depth is calculated.
-    num_threads : int
-        The number of threads to use for parallel processing.
-
-    Returns
-    -------
-    numpy.ndarray
-        An array of integers representing the sequencing depth for the given sequence length.
-    """
-    logging.info(f"Entering calcul_depth_seq for {chr}")
-    start_time = time.time()
-    
-    # Define the output file for depth results
-    output_file_1 = f"/work/gad/shared/analyse/test/cnvGPU/test_scalability/tmp_depth_{chr}.out"
-
-    # Run the parallelized bash script to calculate depth
-    p = subprocess.Popen(
-        "/work/gad/shared/analyse/test/cnvGPU/test_scalability/samtools_test_by_chr_multithreading_log.sh %s %s %s %s"
-        % (bamfile_path, chr, output_file_1, num_threads),
-        shell=True,
-        executable="/bin/bash",
-    )
-    p.communicate()
-
-    # Create the numpy array for depth data
-    depth_data = np.zeros(seq_length, dtype=np.int32)
-
-    # Read the output file and fill the numpy array
-    try:
-        df = pd.read_csv(output_file_1, delim_whitespace=True, header=None, names=['chr', 'pos', 'depth'])
-        df['pos'] -= 1  # Convert to 0-based index
-        df = df[df['pos'] < seq_length]  # Ensure positions are within sequence length
-        depth_data[df['pos']] = df['depth']
-    except Exception as e:
-        logging.error(f"Error reading depth file: {e}")
-
-    # Clean up the output file
-    subprocess.run(['rm', output_file_1])
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calcul_depth_seq for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-
-    return depth_data
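-
-# The helper script above is expected to emit `samtools depth`-style output,
-# i.e. one whitespace-separated "chr pos depth" line per position (illustrative):
-#   chr1    10001   31
-#   chr1    10002   32
-# which the pandas reader maps into depth_data[pos - 1] = depth.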
-
-def calcul_med_total(depth_correction_results, chr):
-    """
-    Calculate the median of non-zero depth correction results.
-
-    This function filters out zero values from the depth correction results and computes the median of the remaining values.
-
-    Parameters
-    ----------
-    depth_correction_results : list or numpy.ndarray
-        A list or array of depth correction values.
-    chr : str
-        The chromosome name, used for logging.
-
-    Returns
-    -------
-    float
-        The median of the non-zero depth correction values, or 0 if no non-zero values are present.
-    """
-    logging.info(f"Entering calcul_med_total for {chr}")
-    start_time = time.time()
-    
-    depth_correction_results = np.array(depth_correction_results)
-    # Filter results to remove zero values
-    non_zero_results = depth_correction_results[depth_correction_results != 0]
-    # Calculate the median of non-zero results
-    m = np.median(non_zero_results) if non_zero_results.size > 0 else 0
-
-    sys.stderr.write("Chromosome : %s, med_chr : %s\n" % (chr, m))
-    
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calcul_med_total for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-    
-    return m
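-
-# Example (illustrative): calcul_med_total(np.array([0.0, 2.0, 4.0, 0.0, 6.0]), "chr1")
-# ignores the zeros and returns np.median([2.0, 4.0, 6.0]) == 4.0.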
-
-
-def calcul_med_same_gc(gc_results, depth_correction_results, chr):
-    """
-    Calculate the median depth correction for each unique GC content value.
-
-    This function computes the median depth correction values for each unique GC content value, filtering out zero values.
-
-    Parameters
-    ----------
-    gc_results : list or numpy.ndarray
-        A list or array of GC content values.
-    depth_correction_results : list or numpy.ndarray
-        A list or array of depth correction values.
-    chr : str
-        The chromosome name, used for logging.
-
-    Returns
-    -------
-    dict
-        A dictionary where keys are unique GC content values and values are the median depth correction for those GC values.
-    """
-    logging.info(f"Entering calcul_med_same_gc for {chr}")
-    start_time = time.time()
-    
-    mGC = []
-    depth_correction_results_array = np.array(depth_correction_results)
-    unique_gc_values = np.unique(gc_results)
-
-    for gc in unique_gc_values:
-        indices = np.where(
-            gc_results == gc
-        )  # Get positions of each unique GC value in gc_results
-        # Filter depth correction results to remove zero values
-        filtered_depths = depth_correction_results_array[indices][
-            depth_correction_results_array[indices] != 0
-        ]
-
-        if (
-            filtered_depths.size > 0
-        ):  # Calculate median only if filtered results are not empty
-            median_gc = np.median(filtered_depths)
-        else:
-            median_gc = 0  # Or another default value if all results are 0
-
-        mGC.append((gc, median_gc))
-
-    gc_to_median = dict(mGC)
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calcul_med_same_gc for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-    
-    return gc_to_median
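-
-# Example (illustrative): with gc_results == np.array([35, 35, 40]) and
-# depth_correction_results == [10.0, 20.0, 0.0], the function returns
-# {35: 15.0, 40: 0}, since zero depths are filtered out before taking each median.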
-
-
-def calcul_moy_totale(normalize_depth_results, chr):
-    """
-    Calculate the mean of non-zero normalized depth results.
-
-    This function filters out zero values from the normalized depth results and computes the mean of the remaining values.
-
-    Parameters
-    ----------
-    normalize_depth_results : list or numpy.ndarray
-        A list or array of normalized depth values.
-
-    Returns
-    -------
-    float
-        The mean of the non-zero normalized depth values, or 0 if no non-zero values are present.
-    """
-    logging.info(f"Entering calcul_moy_totale for {chr}")
-    start_time = time.time()   
-    
-    normalize_depth_results = np.array(normalize_depth_results)
-    # Filter results to remove zero values
-    non_zero_results = normalize_depth_results[normalize_depth_results != 0]
-    # Calculate the mean of non-zero results
-    mean_chr_norm = np.mean(non_zero_results) if non_zero_results.size > 0 else 0
-
-    logging.info(f"Mean chr_norm_no_zero = {mean_chr_norm}")
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calcul_moy_totale for {chr} (Time taken: {elapsed_time:.4f} seconds)")    
-    
-    return mean_chr_norm
-
-
-def calcul_std(normalize_depth_results, chr):
-    """
-    Calculate the standard deviation of non-zero normalized depth results.
-
-    This function filters out zero values from the normalized depth results and computes the standard deviation of the remaining values.
-
-    Parameters
-    ----------
-    normalize_depth_results : list or numpy.ndarray
-        A list or array of normalized depth values.
-
-    Returns
-    -------
-    float
-        The standard deviation of the non-zero normalized depth values, or 0 if no non-zero values are present.
-    """
-    logging.info(f"Entering calcul_std for {chr}")
-    start_time = time.time()  
-    
-    normalize_depth_results = np.array(normalize_depth_results)
-    # Filter results to remove zero values
-    non_zero_results = normalize_depth_results[normalize_depth_results != 0]
-    # Calculate the standard deviation of non-zero results
-    std_chr = np.std(non_zero_results) if non_zero_results.size > 0 else 0
-
-
-    sys.stderr.write("Chromosome : %s, std_chr : %s\n" % (chr, std_chr))
-    
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calcul_std for {chr} (Time taken: {elapsed_time:.4f} seconds)")      
-    
-    return std_chr
-
-
-def compute_mean_std_med(ratio_par_window_norm_results, chr, normalize_depth_results):
-    """
-    Compute the mean, standard deviation, and median of non-zero ratio results per window.
-
-    This function filters out zero and -1 values from the ratio results per window and computes the mean, standard deviation, and median of the remaining values. Values greater than or equal to 5 are capped at 5.
-
-    Parameters
-    ----------
-    ratio_par_window_norm_results : list or numpy.ndarray
-        A list or array of ratio values per window.
-    chr : str
-        The chromosome name, used for logging.
-    normalize_depth_results : list or numpy.ndarray
-        Normalized depth values (currently unused by this function).
-
-    Returns
-    -------
-    tuple
-        A tuple containing the mean, standard deviation, and median of the filtered ratio values.
-    """
-    logging.info(f"Entering compute_mean_std_med for {chr}")
-    start_time = time.time()
-
-    # Filter results to remove zero and -1 values
-    ratio_par_window_norm_results = np.array(ratio_par_window_norm_results)
-    non_zero_results = ratio_par_window_norm_results[ratio_par_window_norm_results != 0]
-    non_zero_results = non_zero_results[np.isfinite(non_zero_results)]
-
-    # Initialize list for stats computation
-    table = []
-
-    for value in non_zero_results:
-        if float(value) >= 5:
-            table.append(5)
-        elif float(value) != -1:
-            table.append(float(value))
-
-    # Calculate the mean, standard deviation, and median of the filtered values
-    mean_ratio = np.mean(table) if table else 0
-    std_ratio = np.std(table) if table else 0
-    med_ratio = np.median(table) if table else 0
-
-    sys.stderr.write("Chromosome : %s, mean_ratio : %s, std_ratio : %s, med_ratio : %s\n" % (chr, mean_ratio, std_ratio, med_ratio))
-    
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving compute_mean_std_med for {chr} (Time taken: {elapsed_time:.4f} seconds)") 
-
-    # Return results
-    return mean_ratio, std_ratio, med_ratio
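-
-# Example (illustrative): an input of [0.9, 1.1, 7.0, -1.0, 0.0] keeps
-# [0.9, 1.1, 5] (zeros dropped, -1 skipped, 7.0 capped at 5), giving
-# mean ~2.33, std ~1.89 and median 1.1.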
-
-
-def cn_level(x, chr):
-    """
-    Determine the copy number level based on the given value.
-
-    This function returns the copy number level based on the input value `x`.
-
-    Parameters
-    ----------
-    x : float
-        The input value used to determine the copy number level.
-
-    Returns
-    -------
-    int
-        The copy number level:
-        - 0 if x < 0.1
-        - 1 if 0.1 <= x <= 0.75
-        - 2 if 0.75 < x < 1 or round(x) == 1
-        - round(x) if round(x) > 1
-    """
-    if x < 0.1:
-        return 0
-    elif x <= 0.75:
-        return 1
-    elif x < 1:
-        return 2
-    else:
-        rounded_x = round(x)
-        return 2 if rounded_x == 1 else rounded_x
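-
-# Example: cn_level(0.05, "chr1") -> 0, cn_level(0.5, "chr1") -> 1,
-# cn_level(0.9, "chr1") -> 2, cn_level(1.3, "chr1") -> 2 (round(1.3) == 1),
-# cn_level(2.8, "chr1") -> 3.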
-
-
-def get_sample_name(bamfile_path):
-    """
-    Extract the sample name from a BAM file.
-
-    This function reads the header of a BAM file to extract the sample name from the read groups.
-
-    Parameters
-    ----------
-    bamfile_path : str
-        The path to the BAM file.
-
-    Returns
-    -------
-    str
-        The sample name extracted from the BAM file. If no sample name is found, returns "UnknownSample".
-    """
-
-    logging.info(f"Entering get_sample_name")
-    start_time = time.time()
-
-    with pysam.AlignmentFile(bamfile_path, "rb") as bamfile:
-        for read_group in bamfile.header.get("RG", []):
-            if "SM" in read_group:
-                return read_group["SM"]
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving get_sample_name (Time taken: {elapsed_time:.4f} seconds)")
-    return "UnknownSample"
-
-
-def create_signal(signal, chr, z_score_results, step_size):
-    """
-    Create a signal dictionary for a specific chromosome based on z-score results.
-
-    This function populates a signal dictionary with positions and corresponding z-score results for a given chromosome.
-
-    Parameters
-    ----------
-    signal : dict
-        A dictionary to store the signal data.
-    chr : str
-        The chromosome for which the signal is created.
-    z_score_results : list or numpy.ndarray
-        A list or array of z-score results.
-    step_size : int
-        The step size used to calculate the positions.
-
-    Returns
-    -------
-    None
-        The function modifies the signal dictionary in place.
-    """
-    logging.info(f"Entering create_signal for {chr} (include copy_number_level)")
-    start_time = time.time()
-
-    if chr not in signal:
-        signal[chr] = {}
-    for i in range(len(z_score_results)):
-        pos = (i * step_size) + 1
-        signal[chr][pos] = z_score_results[i]
-    
-    #sys.stderr.write("\t signal %s\n" % signal[chr])
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving create_signal for {chr} (include copy_number_level) (Time taken: {elapsed_time:.4f} seconds)") 
-
-def detect_events(
-    z_score_results,
-    zscore_threshold,
-    events,
-    med_ratio,
-    ratio_par_mean_ratio_results,
-    chr,
-):
-    """
-    Detect genomic events based on z-score results and a z-score threshold.
-
-    This function identifies significant genomic events where z-scores exceed the given threshold. Events are recorded in the `events` dictionary for the specified chromosome.
-
-    Parameters
-    ----------
-    z_score_results : list or numpy.ndarray
-        A list or array of z-score values.
-    zscore_threshold : float
-        The threshold for detecting significant z-score events.
-    events : dict
-        A dictionary to store detected events.
-    med_ratio : float
-        The median ratio used for copy number level calculations.
-    ratio_par_mean_ratio_results : list or numpy.ndarray
-        A list or array of ratio values compared to the mean ratio.
-    chr : str
-        The chromosome for which events are detected.
-
-    Returns
-    -------
-    None
-        The function modifies the events dictionary in place.
-    """
-    logging.info(f"Entering detect_events for {chr}")
-    start_time = time.time()
-    
-    c = 0
-    if chr not in events:
-        events[chr] = {}
-    for i, z_score in enumerate(z_score_results):
-        if abs(z_score) >= zscore_threshold:
-            if med_ratio != 0:
-                c = cn_level(float(ratio_par_mean_ratio_results[i]), chr)
-            if z_score >= 0:
-                c = 3
-            elif c == 2 and z_score < 0:
-                c = 1
-            pos_start = (i * step_size) + 1
-            pos_end = pos_start + window_size
-
-            events[chr][(pos_start, pos_end)] = c
-    #sys.stderr.write("\t events %s\n" % events)
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving detect_events for {chr} (Time taken: {elapsed_time:.4f} seconds)") 
-
-def segmentation(events, segment, chr):
-    """
-    Segment the detected events into contiguous regions with the same copy number level.
-
-    This function processes the detected events and groups contiguous regions with the same copy number level into segments.
-
-    Parameters
-    ----------
-    events : dict
-        A dictionary of detected events for each chromosome.
-    segment : dict
-        A dictionary to store the segmented regions.
-
-    Returns
-    -------
-    None
-        The function modifies the segment dictionary in place.
-    """
-    logging.info(f"Entering segmentation for {chr}")
-    start_time = time.time()
-    
-    for k in events.keys():
-        sys.stderr.write("\tfor chromosome %s\n" % k)
-        starts, oldPos, oldLevel = 1, 1, -1
-        for p in sorted(events[k].keys()):
-            level = events[k][p]
-            #sys.stderr.write("\t p %s\n" % p)
-            # new coordinates
-            if p[0] > (oldPos + window_size):
-                if (starts != 1) and (starts != p[0]):
-                    if k not in segment:
-                        segment[k] = {}
-                    index = str(starts) + "-" + str(oldPos)
-                    segment[k][index] = {}
-                    segment[k][index]["start"] = starts
-                    segment[k][index]["end"] = oldPos + window_size
-                    segment[k][index]["cn"] = oldLevel
-                    oldPos, starts, oldLevel = p[0], p[0], level
-                    continue
-                else:
-                    starts = p[0]
-            # case where it's contiguous but different level
-            if level != oldLevel:
-                if oldLevel != -1:
-                    if k not in segment:
-                        segment[k] = {}
-                    index = str(starts) + "-" + str(oldPos)
-                    segment[k][index] = {}
-                    segment[k][index]["start"] = starts
-                    segment[k][index]["end"] = oldPos
-                    segment[k][index]["cn"] = oldLevel
-                    oldPos, starts, oldLevel = p[0], p[0], level
-                    continue
-                else:
-                    oldLevel = level
-            oldPos, oldLevel = p[0], level
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving segmentation for {chr} (Time taken: {elapsed_time:.4f} seconds)") 
-
-def display_results_vcf(sample, segment, signal, lengthFilter, output_file, chr):
-    """
-    Generate a VCF file containing structural variant calls based on segmented regions and signal data.
-
-    This function creates a VCF (Variant Call Format) file containing structural variant calls derived from segmented regions and signal data. The structural variant type (DEL for deletion or DUP for duplication) is determined based on copy number levels and signal values. The resulting VCF file includes information about the chromosome, position, type of structural variant, copy number, and other relevant information.
-
-    Parameters
-    ----------
-    sample : str
-        The sample name to be included in the VCF file header.
-    segment : dict
-        A dictionary containing segmented regions with copy number information.
-    signal : dict
-        A dictionary containing signal data for each chromosome.
-    lengthFilter : int
-        The minimum length threshold for including variants in the VCF file.
-    output_file : str
-        The path to the output VCF file.
-
-    Returns
-    -------
-    None
-        This function writes the structural variant calls to the specified output file in VCF format.
-    """
-    global header_written
-    logging.info(f"Entering display_results_vcf for {chr}")
-    start_time = time.time()
-
-    with open(output_file, "a") as f:
-        if not header_written:
-            f.write("##fileformat=VCFv4.2\n")
-            f.write("##source=cnvcaller\n")
-            f.write(
-                '##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">\n'
-            )
-            f.write(
-                '##INFO=<ID=SVLEN,Number=.,Type=Integer,Description="Difference in length between REF and ALT alleles">\n'
-            )
-            f.write(
-                '##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">\n'
-            )
-            f.write('##INFO=<ID=CN,Number=1,Type=Integer,Description="Copy number">\n')
-            f.write('##ALT=<ID=DEL,Description="Deletion">\n')
-            f.write('##ALT=<ID=DUP,Description="Duplication">\n')
-            f.write('##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n')
-            f.write('##FORMAT=<ID=GQ,Number=1,Type=Integer,Description="Genotype quality">\n')
-            f.write(
-                "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\n" % (sample)
-            )
-            header_written = True
-
-        for k in segment.keys():
-            #sys.stderr.write("\tfor chromosome %s\n" % k)
-            for elt in sorted(segment[k].keys()):
-                #sys.stderr.write("\tfor elt %s\n" % elt)
-                if segment[k][elt]["start"] == segment[k][elt]["end"]:
-                    continue
-                if (segment[k][elt]["end"] - segment[k][elt]["start"]) < lengthFilter:
-                    continue
-                #sys.stderr.write("\t [segment[k][elt] %s\n" % [segment[k][elt]])
-                if int(signal[k][segment[k][elt]["start"]]) < 0:
-                    f.write(
-                        "%s\t%s\t.\tN\t<DEL>\t.\t.\tSVTYPE=DEL;END=%s;VALUE=%s\tGT:GQ\t./.:0\n"
-                        % (
-                            k,
-                            segment[k][elt]["start"],
-                            segment[k][elt]["end"],
-                            int(segment[k][elt]["cn"]),
-                        )
-                    )
-                else:
-                    f.write(
-                        "%s\t%s\t.\tN\t<DUP>\t.\t.\tSVTYPE=DUP;END=%s;VALUE=%s\tGT:GQ\t./.:0\n"
-                        % (
-                            k,
-                            segment[k][elt]["start"],
-                            segment[k][elt]["end"],
-                            int(segment[k][elt]["cn"]),
-                        )
-                    )
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving display_results_vcf for {chr} (Time taken: {elapsed_time:.4f} seconds)") 
-
-
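-# A record emitted by display_results_vcf() looks like (illustrative values):
-#     chr1  10001  .  N  <DEL>  .  .  SVTYPE=DEL;END=15000;CN=1  GT:GQ  ./.:0
-# The sign of the signal at the segment start selects <DEL> versus <DUP>, and
-# the segment's copy-number level is reported in the CN INFO field.
-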
-def filter_read_gc(gc_results, depth_results, chr):
-    logging.info(f"Entering filter_read_gc for {chr}")
-    start_time = time.time()
-
-    gc_35, gc_40, gc_45, gc_50, gc_55 = [], [], [], [], []
-
-    for depth_value, gc_value in zip(depth_results, gc_results):
-        gc_value_rounded = round(gc_value)
-        if gc_value_rounded == 35:
-            gc_35.append(depth_value)
-        elif gc_value_rounded == 40:
-            gc_40.append(depth_value)
-        elif gc_value_rounded == 45:
-            gc_45.append(depth_value)
-        elif gc_value_rounded == 50:
-            gc_50.append(depth_value)
-        elif gc_value_rounded == 55:
-            gc_55.append(depth_value)
-    
-    gc_35, gc_40, gc_45, gc_50, gc_55 = map(np.array, [gc_35, gc_40, gc_45, gc_50, gc_55])
-    
-    gc_35_non_zero = gc_35[gc_35 != 0]
-    np_mean_gc_35 = np.mean(gc_35_non_zero)
-    len_gc_35 = len(gc_35_non_zero)
-    logging.info(f"np_mean_gc_35 = {np_mean_gc_35}, len_gc_35_non_zero = {len_gc_35}, len_gc_35 = {len(gc_35)}")
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving filter_read_gc for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-
-    return gc_35, gc_40, gc_45, gc_50, gc_55
-    
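-# The per-window loop above is pure Python; the same binning can be expressed
-# with NumPy boolean masks. A minimal vectorized sketch under the same rounding
-# rule and the same 35/40/45/50/55 buckets; _binned_depth_by_gc is illustrative
-# only and is not called anywhere in this file.
-def _binned_depth_by_gc(gc_results, depth_results):
-    gc_rounded = np.round(np.asarray(gc_results))
-    depth = np.asarray(depth_results)
-    return tuple(depth[gc_rounded == level] for level in (35, 40, 45, 50, 55))
-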
-def filter_read_gc_gpu(gc_results, d_gc_results, d_depth_results, grid_size, block_size, chr):
-    logging.info(f"Entering filter_read_gc_gpu for {chr}")
-    start_time = time.time()
-    
-    n = len(gc_results)
-    max_size = n + 1
-    
-    gc_35_data = np.zeros(max_size, dtype=np.float32)
-    gc_40_data = np.zeros(max_size, dtype=np.float32)
-    gc_45_data = np.zeros(max_size, dtype=np.float32)
-    gc_50_data = np.zeros(max_size, dtype=np.float32)
-    gc_55_data = np.zeros(max_size, dtype=np.float32)
-    
-    gc_35_gpu = cuda.mem_alloc(gc_35_data.nbytes)
-    gc_40_gpu = cuda.mem_alloc(gc_40_data.nbytes)
-    gc_45_gpu = cuda.mem_alloc(gc_45_data.nbytes)
-    gc_50_gpu = cuda.mem_alloc(gc_50_data.nbytes)
-    gc_55_gpu = cuda.mem_alloc(gc_55_data.nbytes)
-    
-    cuda.memcpy_htod(gc_35_gpu, gc_35_data)
-    cuda.memcpy_htod(gc_40_gpu, gc_40_data)
-    cuda.memcpy_htod(gc_45_gpu, gc_45_data)
-    cuda.memcpy_htod(gc_50_gpu, gc_50_data)
-    cuda.memcpy_htod(gc_55_gpu, gc_55_data)
-    
-    
-    filter_read_gc_kernel_cuda(d_gc_results, d_depth_results, np.int32(n), gc_35_gpu, gc_40_gpu, gc_45_gpu, gc_50_gpu, gc_55_gpu, block=(block_size, 1, 1), grid=(grid_size, 1))
-
-    # Copy back results
-    gc_35 = np.zeros(max_size, dtype=np.float32)
-    gc_40 = np.zeros(max_size, dtype=np.float32)
-    gc_45 = np.zeros(max_size, dtype=np.float32)
-    gc_50 = np.zeros(max_size, dtype=np.float32)
-    gc_55 = np.zeros(max_size, dtype=np.float32)
-    
-    cuda.memcpy_dtoh(gc_35, gc_35_gpu)
-    cuda.memcpy_dtoh(gc_40, gc_40_gpu)
-    cuda.memcpy_dtoh(gc_45, gc_45_gpu)
-    cuda.memcpy_dtoh(gc_50, gc_50_gpu)
-    cuda.memcpy_dtoh(gc_55, gc_55_gpu)
-    
-    gc_35_non_zero = gc_35[gc_35 != 0]  # Filter out zero-valued entries
-    
-    np_mean_gc_35 = np.mean(gc_35_non_zero)
-    len_gc_35 = len(gc_35_non_zero)
-    
-    logging.info(f"np_mean_gc_35 = {np_mean_gc_35}, len_gc_35_non_zero = {len_gc_35}, len_gc_35 = {len(gc_35)}")
-    
-    gc_35 = gc_35[gc_35 != 0]
-    gc_40 = gc_40[gc_40 != 0]
-    gc_45 = gc_45[gc_45 != 0]
-    gc_50 = gc_50[gc_50 != 0]
-    gc_55 = gc_55[gc_55 != 0]
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving filter_read_gc_gpu for {chr} (Time taken: {elapsed_time:.4f} seconds)")
-
-    return gc_35, gc_40, gc_45, gc_50, gc_55
-
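-# filter_read_gc_kernel_cuda is built earlier in this file via SourceModule. For
-# reference, a minimal sketch of the shape such a kernel could take, assuming
-# one thread per window and a slot-per-window layout (which is why the host
-# buffers above are zero-initialized and filtered for zeros afterwards). The
-# actual kernel in this file may differ; _demo_gc_filter_module is illustrative
-# only and is never called.
-def _demo_gc_filter_module():
-    return SourceModule(
-        """
-        __global__ void filter_read_gc_demo(const float *gc, const float *depth,
-                                            int n, float *gc_35, float *gc_40,
-                                            float *gc_45, float *gc_50, float *gc_55) {
-            int idx = blockIdx.x * blockDim.x + threadIdx.x;
-            if (idx >= n) return;
-            int g = (int)roundf(gc[idx]);
-            if      (g == 35) gc_35[idx] = depth[idx];
-            else if (g == 40) gc_40[idx] = depth[idx];
-            else if (g == 45) gc_45[idx] = depth[idx];
-            else if (g == 50) gc_50[idx] = depth[idx];
-            else if (g == 55) gc_55[idx] = depth[idx];
-        }
-        """
-    )
-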
-def calc_polynom(gc_35, gc_40, gc_45, gc_50, gc_55, chr):
-    logging.info(f"Entering calc_polynom for {chr}")
-    start_time = time.time()
-    
-    polynomials = []
-    depth_arrays = [gc_35, gc_40, gc_45, gc_50, gc_55]
-
-    for depths in depth_arrays:
-        if len(depths) > 0:
-            x = np.arange(len(depths))  # Index array for the abscissa
-            coeffs = np.polyfit(x, depths, 3)  # Fit a degree-3 polynomial
-            p = np.poly1d(coeffs)  # Build a callable polynomial from the coefficients
-            polynomials.append(p)
-        else:
-            polynomials.append(None)
-            
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calc_polynom for {chr} (Time taken: {elapsed_time:.4f} seconds)") 
-    return polynomials
-
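-# Note the design choice in calc_polynom: each polynomial is fitted against the
-# window *index* within its GC bucket, not against the GC value itself, so p(i)
-# models the expected depth of the i-th window in that bucket. The
-# np.polyfit / np.poly1d pair used above behaves like:
-#     >>> p = np.poly1d(np.polyfit([0, 1, 2, 3], [1.0, 2.0, 4.0, 8.0], 3))
-#     >>> round(float(p(2)), 1)
-#     4.0
-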
-def calc_mean_chr(depth_results, chr):
-    logging.info(f"Entering calc_mean_chr for {chr}")
-    start_time = time.time()   
-    
-    depth_results = np.array(depth_results)
-    # Filter results to remove zero values
-    non_zero_results = depth_results[depth_results != 0]
-    # Calculate the mean of non-zero results
-    mean_chr = np.mean(non_zero_results) if non_zero_results.size > 0 else 0
-
-    sys.stderr.write("Chromosome : %s, mean_chr : %s\n" % (chr, mean_chr))
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving calc_mean_chr for {chr} (Time taken: {elapsed_time:.4f} seconds)")    
-    
-    return mean_chr
-
-def find_polynom(ratio_par_window_results, chr, polynomials, depth_results):
-    logging.info(f"Entering find_polynom for {chr}")
-    start_time = time.time()
-    
-    # Convert ratio_par_window_results to numpy array
-    ratio_par_window_results = np.array(ratio_par_window_results)
-    cn = ratio_par_window_results * 2
-    cn_to_test = cn[(cn >= 1.5) & (cn < 2.5)]
-    cn_unique = np.unique(cn_to_test)
-
-    best_polynom = None
-    best_knorm_diff = float('inf')
-    
-    depth_results_non_zero = depth_results[depth_results != 0]
-    depth_results_filter = np.unique(depth_results_non_zero)
-    
-    logging.info(f"depth_results_len = {len(depth_results)}, depth_results_non_zero_len = {len(depth_results_non_zero)}, depth_results_filter_len = {len(depth_results_filter)}")
-    
-    # Precompute k / 2 for each unique cn value
-    cn_unique_div2 = cn_unique / 2
-
-    logging.info(f"polynomials = {polynomials}\tcn_unique = {cn_unique}")
-
-    # Iterate over each polynomial
-    for polynom in polynomials:
-        if polynom is not None:
-            # Calculate knorm = p(cn) * cn / 2 for all cn_unique values at once
-            knorms = polynom(cn_unique) * cn_unique_div2
-            
-            # Calculate the differences between knorms and each RCi, then find the minimum difference
-            for RCi in depth_results_filter:
-                knorm_diffs = np.abs(knorms - RCi)
-                min_knorm_diff = np.min(knorm_diffs)
-
-                if min_knorm_diff < best_knorm_diff:
-                    best_knorm_diff = min_knorm_diff
-                    best_polynom = polynom
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving find_polynom for {chr} (Time taken: {elapsed_time:.4f} seconds)") 
-    return best_polynom
-
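-# Toy restatement of the selection rule in find_polynom (illustrative only,
-# never called): for a candidate copy number cn, the score of a polynomial p
-# against the observed depths is min over RCi of |p(cn) * cn / 2 - RCi|, and
-# the polynomial with the smallest score over all candidates wins.
-def _demo_find_polynom_score(p, cn, depths):
-    knorm = p(cn) * cn / 2.0
-    return np.min(np.abs(knorm - np.asarray(depths)))
-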
-def normalize(depth_results, best_polynom, map_results, chr):
-    logging.info(f"Entering normalization for {chr}")
-    start_time = time.time()
-    
-    normalize_depth_results = []
-    for i in range(len(depth_results)):
-        if depth_results[i] != 0 and map_results[i] != 0:
-            normalized_value = depth_results[i] / (best_polynom(i) * map_results[i]) if best_polynom is not None else depth_results[i]
-            #logging.info(f"normalized_value = {normalized_value}\ti = {i}\tdepth_results[i] = {depth_results[i]}\tbest_polynom(i) = {best_polynom(i)}\tmap_results[i] = {map_results[i]}")
-            normalize_depth_results.append(normalized_value)
-        else:
-            normalize_depth_results.append(0)
-    
-    #normalize_depth_results_unique = np.unique(normalize_depth_results)
-    #logging.info(f"normalize_depth_results_unique = {normalize_depth_results_unique}") 
-    
-    
-    has_nan = np.isnan(normalize_depth_results).any()
-    has_inf = np.isinf(normalize_depth_results).any()
-
-    if has_nan:
-        logging.warning(f"{chr} contains NaN values.")
-    if has_inf:
-        logging.warning(f"{chr} contains Inf values.")
-    
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    logging.info(f"Leaving normalization (Time taken: {elapsed_time:.4f} seconds)") 
-    
-    return np.array(normalize_depth_results)
-
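-# The element-wise loop in normalize() can also be written with NumPy masks; a
-# minimal vectorized sketch under the same conventions (zero depth or zero
-# mappability yields zero, a missing polynomial leaves the depth unchanged).
-# _normalize_vectorized is illustrative only and is not called in this file.
-def _normalize_vectorized(depth, polynom, mappability):
-    depth = np.asarray(depth, dtype=np.float64)
-    mappability = np.asarray(mappability, dtype=np.float64)
-    mask = (depth != 0) & (mappability != 0)
-    out = np.zeros_like(depth)
-    if polynom is None:
-        out[mask] = depth[mask]
-    else:
-        idx = np.arange(len(depth))
-        out[mask] = depth[mask] / (polynom(idx[mask]) * mappability[mask])
-    return out
-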
-def main_calcul(
-    bamfile_path,
-    chr,
-    seq_length,
-    window_size,
-    step_size,
-    zscore_threshold,
-    lengthFilter,
-    output_file,
-    sample,
-):
-    """
-    Perform structural variant detection and VCF file generation.
-
-    This function orchestrates a series of computations and data manipulations,
-    leveraging GPU acceleration for performance improvements in genomic data analysis.
-
-    Parameters
-    ----------
-        bamfile_path : str
-            Path to the BAM file containing aligned reads.
-        chr : str
-            Chromosome identifier for which analysis is performed.
-        seq_length : int
-            Length of the chromosome sequence.
-        window_size : int
-            Size of the sliding window used for analysis.
-        step_size : int
-            Size of the step when moving the window along the chromosome.
-        zscore_threshold : float
-            Threshold value for detecting significant events based on Z-scores.
-        lengthFilter : int
-            Minimum length threshold for including variants in the VCF file.
-        output_file : str
-            Path to the output VCF file.
-        sample : str
-            Name of the sample being analyzed.
-
-    Returns
-    -------
-        None
-    """
-
-    sys.stderr.write("\t entering main_calcul\n")
-    global seq
-    events = {}
-    segment = {}
-    signal = {}
-
-    # Call the preprocessing functions
-    map_data = calcul_mappability(seq_length, mappability, chr)
-    gc_data = calcul_gc_content(seq_length, chr, seq)
-    #depth_data = calcul_depth_seq(seq_length, bamfile_path, chr)
-    depth_data = calcul_depth_seq_samtools(seq_length, bamfile_path, chr)
-    
-    # Transfer the NumPy arrays to the GPU; sizes are reported from .nbytes
-    # rather than read back through as_buffer(sys.getsizeof(...)), which could
-    # over-read the allocation
-    d_depth_data = cuda.mem_alloc(depth_data.nbytes)
-    cuda.memcpy_htod(d_depth_data, depth_data)
-    sys.stderr.write("\t d_depth_data : %s (%s bytes)\n" % (d_depth_data, depth_data.nbytes))
-
-    d_gc_data = cuda.mem_alloc(gc_data.nbytes)
-    cuda.memcpy_htod(d_gc_data, gc_data)
-    sys.stderr.write("\t d_gc_data : %s (%s bytes)\n" % (d_gc_data, gc_data.nbytes))
-
-    d_map_data = cuda.mem_alloc(map_data.nbytes)
-    cuda.memcpy_htod(d_map_data, map_data)
-    sys.stderr.write("\t d_map_data : %s (%s bytes)\n" % (d_map_data, map_data.nbytes))
-
-    # Compute the CUDA block and grid sizes
-    block_size = num_cores
-    sys.stderr.write("\t block_size (threads per block) = %s\n" % block_size)
-    num_windows = int((seq_length - window_size) / step_size) + 1
-    grid_size = int(num_windows / block_size) + 1
-    sys.stderr.write("\t grid_size = %s\n" % grid_size)
-
-    # Initialize the host arrays that will hold the per-window results
-    depth_results = np.zeros(num_windows, dtype=np.float32)
-    gc_results = np.zeros(num_windows, dtype=np.float32)
-    map_results = np.zeros(num_windows, dtype=np.float32)
-    depth_correction_results = np.zeros(num_windows, dtype=np.float32)
-    normalize_depth_results = np.zeros(num_windows, dtype=np.float32)
-    ratio_par_window_results = np.zeros(num_windows, dtype=np.float32)
-    ratio_par_window_norm_results = np.zeros(num_windows, dtype=np.float32)
-    z_score_results = np.zeros(num_windows, dtype=np.float32)
-    ratio_par_mean_ratio_results = np.zeros(num_windows, dtype=np.float32)
-    sys.stderr.write("\t host result arrays initialized (%s windows)\n" % num_windows)
-
-    # Allocate device memory for the results
-    d_depth_results = cuda.mem_alloc(depth_results.nbytes)
-    d_gc_results = cuda.mem_alloc(gc_results.nbytes)
-    d_map_results = cuda.mem_alloc(map_results.nbytes)
-    d_depth_correction_results = cuda.mem_alloc(depth_correction_results.nbytes)
-    d_ratio_par_window_results = cuda.mem_alloc(ratio_par_window_results.nbytes)
-    d_ratio_par_window_norm_results = cuda.mem_alloc(ratio_par_window_norm_results.nbytes)
-    d_z_score_results = cuda.mem_alloc(z_score_results.nbytes)
-    d_ratio_par_mean_ratio_results = cuda.mem_alloc(ratio_par_mean_ratio_results.nbytes)
-    sys.stderr.write("\t device result buffers allocated (%s bytes each)\n" % depth_results.nbytes)
-
-    # d_normalize_depth_results is allocated later, once the CPU-side
-    # normalization has produced its values
-
-    # Launch the depth kernel
-    calcul_depth_kernel_cuda(
-        d_depth_data,
-        np.int32(seq_length),
-        np.int32(window_size),
-        np.int32(step_size),
-        d_depth_results,
-        block=(block_size, 1, 1),
-        grid=(grid_size, 1),
-    )
-    sys.stderr.write("\t launched calcul_depth_kernel_cuda\n")
-
-    # Launch the GC kernel
-    calcul_gc_kernel_cuda(
-        d_gc_data,
-        np.int32(seq_length),
-        np.int32(window_size),
-        np.int32(step_size),
-        d_gc_results,
-        block=(block_size, 1, 1),
-        grid=(grid_size, 1),
-    )
-    sys.stderr.write("\t launched calcul_gc_kernel_cuda\n")
-
-    # Launch the mappability kernel
-    calcul_map_kernel_cuda(
-        d_map_data,
-        np.int32(seq_length),
-        np.int32(window_size),
-        np.int32(step_size),
-        d_map_results,
-        block=(block_size, 1, 1),
-        grid=(grid_size, 1),
-    )
-    sys.stderr.write("\t launched calcul_map_kernel_cuda\n")
-
-    # The depth-correction kernel from an earlier version is no longer launched;
-    # depth correction is handled by the GC polynomials below
-
-    context.synchronize()
-
-    # Copy the results from the GPU back to the host
-    cuda.memcpy_dtoh(depth_results, d_depth_results)  # cuda.memcpy_dtoh(dest, src)
-    sys.stderr.write("\t copied d_depth_results (GPU) into depth_results (CPU)\n")
-
-    cuda.memcpy_dtoh(gc_results, d_gc_results)
-    sys.stderr.write("\t copied d_gc_results (GPU) into gc_results (CPU)\n")
-
-    cuda.memcpy_dtoh(map_results, d_map_results)
-    sys.stderr.write("\t copied d_map_results (GPU) into map_results (CPU)\n")
-
-    ### NORMALIZATION ###
-
-    # Bin the read depth by GC content (GPU version; the CPU version
-    # filter_read_gc is kept above for reference)
-    sys.stderr.write("\t calling filter_read_gc_gpu\n")
-    #gc_35, gc_40, gc_45, gc_50, gc_55 = filter_read_gc(gc_results, depth_results, chr)
-    gc_35, gc_40, gc_45, gc_50, gc_55 = filter_read_gc_gpu(gc_results, d_gc_results, d_depth_results, grid_size, block_size, chr)
-
-    # Fit one polynomial per GC bucket
-    sys.stderr.write("\t calling calc_polynom\n")
-    polynomials = calc_polynom(gc_35, gc_40, gc_45, gc_50, gc_55, chr)
-
-    # Chromosome-wide mean depth
-    sys.stderr.write("\t calling calc_mean_chr\n")
-    mean_chr = calc_mean_chr(depth_results, chr)
-    
-    # Launch the per-window ratio kernel
-    sys.stderr.write("\t calling ratio_par_window_kernel_cuda\n")
-    ratio_par_window_kernel_cuda(
-        d_depth_results,
-        np.float32(mean_chr),
-        np.int32(seq_length),
-        np.int32(window_size),
-        np.int32(step_size),
-        d_ratio_par_window_results,
-        block=(block_size, 1, 1),
-        grid=(grid_size, 1),
-    )
-
-    context.synchronize()
-
-    # Copy the ratio results from the GPU back to the host
-    sys.stderr.write("\t copying ratio results from the GPU to the host\n")
-    cuda.memcpy_dtoh(ratio_par_window_results, d_ratio_par_window_results)
-
-    # Pick the best-fitting polynomial
-    sys.stderr.write("\t calling find_polynom\n")
-    best_polynom = find_polynom(ratio_par_window_results, chr, polynomials, depth_results)
-
-    # Normalize the depth signal
-    sys.stderr.write("\t calling normalize\n")
-    normalize_depth_results = normalize(depth_results, best_polynom, map_results, chr)
-    
-    # The former median-based normalization (calcul_med_total, calcul_med_same_gc
-    # and normalize_depth_kernel_cuda) has been superseded by the polynomial
-    # normalization above
-
-    # Copy the normalized results from the host to the GPU
-    sys.stderr.write("\t copying normalized results from the host to the GPU\n")
-    d_normalize_depth_results = cuda.mem_alloc(normalize_depth_results.nbytes)
-    cuda.memcpy_htod(d_normalize_depth_results, normalize_depth_results)
-    
-    ### Per-window ratio ###
-
-    # Mean of the normalized depth
-    sys.stderr.write("\t calling calcul_moy_totale\n")
-    mean_chr_norm = calcul_moy_totale(normalize_depth_results, chr)
-
-    # Launch the normalized-ratio kernel
-    ratio_par_window_norm_kernel_cuda(
-        d_normalize_depth_results,
-        np.float32(mean_chr_norm),
-        np.int32(seq_length),
-        np.int32(window_size),
-        np.int32(step_size),
-        d_ratio_par_window_norm_results,
-        block=(block_size, 1, 1),
-        grid=(grid_size, 1),
-    )
-    sys.stderr.write("\t launched ratio_par_window_norm_kernel_cuda\n")
-
-    context.synchronize()
-
-    # Copy the normalized-ratio results from the GPU back to the host
-    sys.stderr.write("\t copying normalized-ratio results from the GPU to the host\n")
-    cuda.memcpy_dtoh(ratio_par_window_norm_results, d_ratio_par_window_norm_results)
-
-    # Summary statistics of the normalized ratio
-    sys.stderr.write("\t calling compute_mean_std_med\n")
-    mean_ratio, std_ratio, med_ratio = compute_mean_std_med(ratio_par_window_norm_results, chr, normalize_depth_results)
-
-    # Launch the kernel dividing each window's ratio by the mean ratio
-    ratio_par_mean_ratio_kernel_cuda(
-        d_ratio_par_window_norm_results,
-        np.float32(mean_ratio),
-        np.int32(seq_length),
-        np.int32(window_size),
-        np.int32(step_size),
-        d_ratio_par_mean_ratio_results,
-        block=(block_size, 1, 1),
-        grid=(grid_size, 1),
-    )
-    sys.stderr.write("\t launched ratio_par_mean_ratio_kernel_cuda\n")
-
-    # Launch the Z-score kernel
-    z_score_kernel_cuda(
-        d_ratio_par_window_norm_results,
-        np.float32(mean_ratio),
-        np.float32(std_ratio),
-        np.int32(seq_length),
-        np.int32(window_size),
-        np.int32(step_size),
-        d_z_score_results,
-        block=(block_size, 1, 1),
-        grid=(grid_size, 1),
-    )
-    sys.stderr.write("\t launched z_score_kernel_cuda\n")
-
-    context.synchronize()
-
-    # Copy the remaining results from the GPU back to the host
-    cuda.memcpy_dtoh(ratio_par_mean_ratio_results, d_ratio_par_mean_ratio_results)
-    cuda.memcpy_dtoh(z_score_results, d_z_score_results)
-
-    # Build the Z-score signal
-    create_signal(signal, chr, z_score_results, step_size)
-
-    # Detect candidate events from the Z-scores
-    detect_events(
-        z_score_results,
-        zscore_threshold,
-        events,
-        med_ratio,
-        ratio_par_mean_ratio_results,
-        chr,
-    )
-
-    # Merge events into segments
-    segmentation(events, segment, chr)
-
-    # VCF output (disabled; per-window values are written below instead)
-    #display_results_vcf(sample, segment, signal, lengthFilter, output_file, chr)
-
-    # Write the per-window results to the output file; the loop variables are
-    # renamed so they no longer shadow the per-base arrays depth_data, gc_data
-    # and map_data defined above
-    with open(output_file, 'a') as f:
-        sys.stderr.write("\t writing output file\n")
-        for i, (depth_val, depth_norm_val, gc_val, map_val, ratio_norm, ratio_mean_ratio, z_score) in enumerate(zip(depth_results, normalize_depth_results, gc_results, map_results, ratio_par_window_norm_results, ratio_par_mean_ratio_results, z_score_results)):
-            pos_start = (i * step_size) + 1
-            pos_end = pos_start + window_size
-            f.write(f"{chr}\t{pos_start}\t{pos_end}\t{depth_val}\t{depth_norm_val}\t{gc_val}\t{map_val}\t{ratio_norm}\t{ratio_mean_ratio}\t{z_score}\n")
-
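-# Each row written by main_calcul has ten tab-separated columns:
-#     chr  pos_start  pos_end  depth  normalized_depth  gc  mappability
-#     ratio_norm  ratio_mean_ratio  z_score
-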
-# Main program
-header_written = False
-
-sample = get_sample_name(bamfile_path)
-device = cuda.Device(0)
-attributes = device.get_attributes()
-# The CUDA block size used above is the device's maximum threads per block
-num_cores = attributes[cuda.device_attribute.MAX_THREADS_PER_BLOCK]
-print("Number of CPUs:", multiprocessing.cpu_count())
-print(f"Max GPU threads per block: {num_cores}")
-
-gc_file = "/work/gad/shared/pipeline/grch38/index/grch38_essential.fa"
-mappability_file = "/work/gad/shared/pipeline/grch38/cnv/k100.Umap.MultiTrackMappability.bedgraph"
-seq = parse_fasta(gc_file)
-mappability = dico_mappabilite(mappability_file)
-#bamfile_path_2 = "/work/gad/shared/analyse/test/cnvGPU/test_scalability/dijen1000.bam"
-
-
-# Configure logging before any per-chromosome work so that the logging calls
-# inside main_calcul end up in the log file
-logging.basicConfig(
-    filename=logfile,
-    filemode="a",
-    level=logging.INFO,
-    format="%(asctime)s %(levelname)s - %(message)s",
-)
-
-if num_chr == "ALL":
-    with pysam.AlignmentFile(bamfile_path, "rb") as bamfile_handle:
-        for i, seq_length in enumerate(bamfile_handle.lengths):
-            chr = bamfile_handle.references[i]
-            
-            # Exclude the mitochondrial chromosome
-            if chr == "chrM":
-                continue
-                
-            sys.stderr.write("Chromosome : %s, seq length : %s\n" % (chr, seq_length))
-
-            # Call the depth computation for this chromosome
-            main_calcul(
-                bamfile_path,
-                chr,
-                seq_length,
-                window_size,
-                step_size,
-                zscore_threshold,
-                lengthFilter,
-                output_file,
-                sample,
-            )
-
-        logging.info("end")
-
-else:
-    with pysam.AlignmentFile(bamfile_path, "rb") as bamfile_handle:
-        seq_length = bamfile_handle.lengths[int(num_chr) - 1]
-        chr = bamfile_handle.references[int(num_chr) - 1]
-        sys.stderr.write("Chromosome : %s, seq length : %s\n" % (chr, seq_length))
-
-        # Call the depth computation for this chromosome
-        main_calcul(
-            bamfile_path,
-            chr,
-            seq_length,
-            window_size,
-            step_size,
-            zscore_threshold,
-            lengthFilter,
-            output_file,
-            sample,
-        )
-
-    logging.info("end")
-- 
2.17.1