#!/usr/bin/env python3

### GAD PIPELINE ###
## find_SV_gpu.py
## Version : 2.10.0
## Description : This script finds CNV and SV events
## Usage : python3 find_SV_gpu.py -b <input/bamfile> -c <num_chr (int) | "All"> -w 100 -s 10 -z 1.5 -l 200 -o <output/vcf/cnv> -p <output/tsv/pairs> -m <output/tsv/splits> -e <log/file>
## Output : A VCF of CNV events plus paired-read and split-read TSV files
## Requirements : python 3.6+ (f-strings), pysam, pandas, numpy, pycuda; stdlib: getopt, multiprocessing, subprocess, collections

## Author : Theo.Serralta@u-bourgogne.fr | theo.serralta@gmail.com
## Creation Date : 20230928
## Last revision date : 20240719
## Known bugs : Does not always find correct events

import pysam
import sys
import getopt
import logging
import numpy as np
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
import pycuda.gpuarray as gpuarray
from pycuda.autoinit import context
import multiprocessing
from multiprocessing import Process, Queue
import time
import subprocess
import pandas as pd
from collections import defaultdict
from datetime import datetime

# Options
def parse_arguments():
    """
    Parse command-line arguments.

    This function parses the command-line arguments provided to the script and extracts values for various parameters
    related to CNV and SV detection in genomic data.

    Parameters
    ----------
    None

    Returns
    -------
    tuple
        A tuple containing the following elements:
        bamfile_path : Optional[str]
            The path to the BAM file containing genomic data.
        num_chr : Optional[str]
            The specific chromosome number (if any) to be processed.
        window_size : Optional[int]
            The size of the window used for analysis.
        step_size : Optional[int]
            The step size between windows for scanning the genome.
        zscore_threshold : Optional[float]
            The Z-score threshold to identify significant variants.
        lengthFilter : Optional[int]
            The minimum length filter for variants.
        output_file : Optional[str]
            The path where the main output file will be saved.
        output_file_pairs : Optional[str]
            The path for the output file that stores paired reads data.
        output_file_splits : Optional[str]
            The path for the output file that stores split reads data.
        logfile : Optional[str]
            The path where the log file will be stored.

        Each element can be None if the corresponding argument was not provided.
    """
    try:
        # "t:" is accepted but currently unused (likely intended for a thread count)
        opts, args = getopt.getopt(sys.argv[1:], "b:c:w:s:z:l:t:o:p:m:e:")
        (
            bamfile_path,
            num_chr,
            window_size,
            step_size,
            zscore_threshold,
            lengthFilter,
            output_file,
            output_file_pairs,
            output_file_splits,
            logfile,
        ) = (None,) * 10
        for opt, arg in opts:
            if opt == "-b":
                bamfile_path = arg
            elif opt == "-c":
                num_chr = arg
            elif opt == "-w":
                window_size = int(arg)
            elif opt == "-s":
                step_size = int(arg)
            elif opt == "-z":
                zscore_threshold = float(arg)
            elif opt == "-l":
                lengthFilter = int(arg)
            elif opt == "-o":
                output_file = arg
            elif opt == "-p":
                output_file_pairs = arg
            elif opt == "-m":
                output_file_splits = arg
            elif opt == "-e":
                logfile = arg
        return (
            bamfile_path,
            num_chr,
            window_size,
            step_size,
            zscore_threshold,
            lengthFilter,
            output_file,
            output_file_pairs,
            output_file_splits,
            logfile,
        )
    except getopt.GetoptError as err:
        print(f"Invalid argument: {err}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    (
        bamfile_path,
        num_chr,
        window_size,
        step_size,
        zscore_threshold,
        lengthFilter,
        output_file,
        output_file_pairs,
        output_file_splits,
        logfile,
    ) = parse_arguments()
    logging.basicConfig(
        filename="%s" % (logfile),
        filemode="a",
        level=logging.INFO,
        format="%(asctime)s %(levelname)s - %(message)s",
    )
    logging.info("start")

# Code CUDA
mod = SourceModule(
    """
// Kernel to compute the raw average depth

__global__ void calcul_depth_kernel(int *depth_data, int seq_length, int window_size, int step_size, float *depth_results) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;

    if (idx < seq_length - window_size + 1) {
        int pos_start = (idx * step_size) + 1;
        int pos_end = pos_start + window_size;
        int count_reads = 0;

        for (int i = pos_start; i < pos_end; i++) {
            count_reads += depth_data[i];
        }

        float avg_depth = (float)count_reads / window_size;
        depth_results[idx] = avg_depth;
    }
}

// Kernel to compute GC content

__global__ void calcul_gc_kernel(char *gc_data, int seq_length, int window_size, int step_size, float *gc_results) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;

    if (idx < seq_length - window_size + 1) {
        int pos_start = (idx * step_size) + 1;
        int pos_end = pos_start + window_size;
        int gc_count = 0;

        for (int i = pos_start; i < pos_end; ++i) {
            if ((gc_data[i] == 'G') || (gc_data[i] == 'C') || (gc_data[i] == 'g') || (gc_data[i] == 'c')) {
                gc_count++;
            }
        }

        float avg_gc = ((float)gc_count / window_size) * 100;
        gc_results[idx] = avg_gc;
    }
}

// Kernel to compute weighted average mappability

__global__ void calcul_map_kernel(float *map_data, int seq_length, int window_size, int step_size, float *map_results) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;

    if (idx < seq_length - window_size + 1) {
        int pos_start = (idx * step_size) + 1;
        int pos_end = pos_start + window_size;
        float weighted_sum = 0.0;
        float total_weight = 0.0;

        for (int i = pos_start; i < pos_end; ++i) {
            float weight = map_data[i];
            weighted_sum += weight;
            total_weight += 1;
        }

        float avg_map = weighted_sum / total_weight;
        map_results[idx] = avg_map;
    }
}

// Kernel to filter based on GC content

__global__ void filter_read_gc_kernel(float *gc_results, float *depth_results, int n, float *gc_35, float *gc_40, float *gc_45, float *gc_50, float *gc_55) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    if (idx < n) {
        float gc_value = gc_results[idx];
        float depth_value = depth_results[idx];
        int gc_value_rounded = round(gc_value);
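
        // Element 0 of each gc_* buffer doubles as an append counter: it is
        // reinterpreted as an int for atomicAdd, and depth values are written
        // starting at index 1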

        if (gc_value_rounded == 35) {
            int pos = atomicAdd((int*)&gc_35[0], 1);
            gc_35[pos + 1] = depth_value;
        } else if (gc_value_rounded == 40) {
            int pos = atomicAdd((int*)&gc_40[0], 1);
            gc_40[pos + 1] = depth_value;
        } else if (gc_value_rounded == 45) {
            int pos = atomicAdd((int*)&gc_45[0], 1);
            gc_45[pos + 1] = depth_value;
        } else if (gc_value_rounded == 50) {
            int pos = atomicAdd((int*)&gc_50[0], 1);
            gc_50[pos + 1] = depth_value;
        } else if (gc_value_rounded == 55) {
            int pos = atomicAdd((int*)&gc_55[0], 1);
            gc_55[pos + 1] = depth_value;
        }
    }
}


// Kernel to compute depth corrected by mappability

__global__ void calcul_depth_correction_kernel(float *depth_results, float *map_results, int seq_length, int window_size, int step_size, float *depth_correction_results) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;

    if (idx < seq_length - window_size + 1) {
        float avg_depth = depth_results[idx];
        float avg_map = map_results[idx];

        // Check if avg_map is equal to 0 to avoid division by 0
        float depth_correction = (avg_map != 0.0f) ? (avg_depth / avg_map) : 0.0f;
        depth_correction_results[idx] = depth_correction;
    }
}


// Kernel to normalize corrected depth

__global__ void normalize_depth_kernel(float *depth_correction, float *gc_results, float m, float *gc_to_median, int seq_length, int window_size, int step_size, float *depth_normalize) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;

    if (idx < seq_length - window_size + 1) {
        float mGC = gc_to_median[(int)gc_results[idx]];
        
        // Check if mGC is equal to 0 to avoid division by 0
        float depth_normalize_val = (mGC != 0.0f) ? (depth_correction[idx] * m / mGC) : 0.0f;
        depth_normalize[idx] = depth_normalize_val;
    }
}

// Kernel to compute ratio per window

__global__ void ratio_par_window_kernel(float *depth_results, float mean_chr, int seq_length, int window_size, int step_size, float *ratio_par_window_results) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;

    if (idx < seq_length - window_size + 1) {
        float ratio = depth_results[idx] / mean_chr;
        ratio_par_window_results[idx] = ratio;
    }
}

// Kernel to compute normalized ratio per window

__global__ void ratio_par_window_norm_kernel(float *normalize_depth_results, float mean_chr_norm, int seq_length, int window_size, int step_size, float *ratio_par_window_norm_results) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;

    if (idx < seq_length - window_size + 1) {
        float ratio_norm = normalize_depth_results[idx] / mean_chr_norm;
        ratio_par_window_norm_results[idx] = ratio_norm;
    }
}

// Kernel to compute Z-score

__global__ void z_score_kernel(float *ratio_norm, float mean_ratio, float std_ratio, int seq_length, int window_size, int step_size, float *z_score_results) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;

    if (idx < seq_length - window_size + 1) {
        float value = ratio_norm[idx];

        // Check if the value is infinite or NaN, and replace it with 0 if so
        if (isinf(value) || isnan(value)) {
            value = 0.0f;
        }

        // Compute the Z-score
        float z_score = (value - mean_ratio) / std_ratio;
        z_score_results[idx] = z_score;
    }
}

// Kernel to compute the ratio divided by the mean ratio

__global__ void ratio_par_mean_ratio_kernel(float *ratio_norm, float mean_ratio, int seq_length, int window_size, int step_size, float *ratio_par_mean_ratio_results) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;

    if (idx < seq_length - window_size + 1) {
        float value = ratio_norm[idx];

        // Check if the value is infinite or NaN and replace it with 0 if so
        if (isinf(value) || isnan(value)) {
            value = 0.0f;
        }

        // Compute the ratio relative to the mean ratio
        float ratio_mean_ratio = value / mean_ratio;
        ratio_par_mean_ratio_results[idx] = ratio_mean_ratio;
    }
}

"""
)

# Retrieving the compiled kernel function
calcul_depth_kernel_cuda = mod.get_function("calcul_depth_kernel")
calcul_gc_kernel_cuda = mod.get_function("calcul_gc_kernel")
calcul_map_kernel_cuda = mod.get_function("calcul_map_kernel")
filter_read_gc_kernel_cuda = mod.get_function("filter_read_gc_kernel")
calcul_depth_correction_kernel_cuda = mod.get_function("calcul_depth_correction_kernel")
normalize_depth_kernel_cuda = mod.get_function("normalize_depth_kernel")
ratio_par_window_kernel_cuda = mod.get_function("ratio_par_window_kernel")
ratio_par_window_norm_kernel_cuda = mod.get_function("ratio_par_window_norm_kernel")
z_score_kernel_cuda = mod.get_function("z_score_kernel")
ratio_par_mean_ratio_kernel_cuda = mod.get_function("ratio_par_mean_ratio_kernel")
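
# A minimal launch sketch (hypothetical device buffers d_depth_data / d_depth_results):
# each kernel maps one thread per window, so callers size the grid from the window
# count implied by the kernel guard, e.g.
#
#     n_windows = seq_length - window_size + 1
#     block_size = 256
#     grid_size = (n_windows + block_size - 1) // block_size
#     calcul_depth_kernel_cuda(
#         d_depth_data, np.int32(seq_length), np.int32(window_size),
#         np.int32(step_size), d_depth_results,
#         block=(block_size, 1, 1), grid=(grid_size, 1),
#     )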


def merge_intervals(intervals):
    """
    Merge consecutive intervals with the same score.

    This function takes a sorted list of intervals and merges consecutive runs that share the same score.

    Parameters
    ----------
    intervals : list of tuples
        A list where each element is a tuple (start, end, score).
        The intervals should be sorted by start position.

    Returns
    -------
    list of tuples
        A list of merged intervals (start, end, score) where consecutive intervals with the same score are combined.
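
    Examples
    --------
    A minimal sketch with hypothetical intervals:

    >>> merge_intervals([(0, 10, 0.5), (10, 20, 0.5), (20, 30, 1.0)])
    [(0, 20, 0.5), (20, 30, 1.0)]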
    """
    merged = []
    start, end, score = intervals[0]
    for interval in intervals[1:]:
        if interval[2] == score:
            end = interval[1]
        else:
            merged.append((start, end, score))
            start, end, score = interval
    merged.append((start, end, score))
    
    return merged


def dico_mappabilite(mappability_file):
    """
    Create a dictionary of mappability scores from a file.

    This function reads a mappability file and creates a dictionary with chromosomes as keys and mappability scores as values.

    Parameters
    ----------
    mappability_file : str
        The path to the mappability file. Each line in the file should have the format:
        chromosome, start_pos, end_pos, score.

    Returns
    -------
    dict
        A dictionary where keys are chromosome names and values are another dictionary with start positions as keys
        and mappability scores as values.
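
    Examples
    --------
    A hypothetical single-interval file line ``chr1<TAB>0<TAB>10000<TAB>1.0``
    produces ``{"chr1": {0: 1.0}}`` (start position -> mappability score).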
    """
    logging.info("Entering dico_mappability")
    start_time = time.time()

    mappability_dico = {}

    logging.info(" In dico_mappability : Open file and create the dico")
    start_time_1 = time.time()

    with open(mappability_file, "r") as f:
        for line in f:
            fields = line.strip().split("\t")
            if len(fields) < 4:
                continue

            chromosome = fields[0]
            start_pos = int(fields[1])
            end_pos = int(fields[2])
            score = float(fields[3])

            if chromosome not in mappability_dico:
                mappability_dico[chromosome] = []

            mappability_dico[chromosome].append((start_pos, end_pos, score))

    end_time_1 = time.time()
    elapsed_time_1 = end_time_1 - start_time_1
    logging.info(f"In dico_mappability : Leaving file and create the dico (Time taken: {elapsed_time_1:.4f} seconds)")

    # Add position 0 for each chromosome
    logging.info(" In dico_mappability : Add position 0 for each chromosome")
    start_time_2 = time.time()

    for chromosome, intervals in mappability_dico.items():
        if intervals[0][0] != 0:
            mappability_dico[chromosome].insert(0, (0, intervals[0][0], 0))

    end_time_2 = time.time()
    elapsed_time_2 = end_time_2 - start_time_2
    logging.info(f"In dico_mappability : Ending add position 0 for each chromosome (Time taken: {elapsed_time_2:.4f} seconds)")

    # Merge intervals with the same score
    logging.info(" In dico_mappability : Merge intervals with the same score")
    start_time_3 = time.time()

    for chromosome, intervals in mappability_dico.items():
        merged_intervals = merge_intervals(intervals)
        mappability_dico[chromosome] = {
            start: score for start, _, score in merged_intervals
        }
    
    end_time_3 = time.time()
    elapsed_time_3 = end_time_3 - start_time_3
    logging.info(f"In dico_mappability : Ending merge intervals with the same score (Time taken: {elapsed_time_3:.4f} seconds)")

    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving dico_mappability (Time taken: {elapsed_time:.4f} seconds)")

    return mappability_dico


def calcul_mappability(seq_length, mappability, chr):
    """
    Calculate mappability array for a given sequence length and chromosome.

    This function generates an array of mappability scores for a specific chromosome and sequence length.

    Parameters
    ----------
    seq_length : int
        The length of the sequence.
    mappability : dict
        A dictionary containing mappability information, with chromosomes as keys and dictionaries of start positions
        and scores as values.
    chr : str
        The chromosome for which the mappability is calculated.

    Returns
    -------
    numpy.ndarray
        An array of mappability scores for the given sequence length.
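
    Examples
    --------
    A minimal sketch with a hypothetical two-interval chromosome:

    >>> calcul_mappability(5, {"chr1": {0: 0.0, 2: 1.0}}, "chr1")
    array([0., 0., 1., 1., 1.], dtype=float32)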
    """
    logging.info(f"Entering calcul_mappability for {chr}")
    start_time = time.time()
    
    map_data = np.zeros(seq_length, dtype=np.float32)
    sorted_keys = sorted(mappability[chr].keys())

    prev_bound = 0
    prev_mappability = 0

    logging.info(f"In calcul_mappability : Entering for bound in sorted_keys for {chr}")
    start_time_1 = time.time()

    for bound in sorted_keys:
        for i in range(prev_bound, min(seq_length, bound)):
            map_data[i] = prev_mappability
        prev_bound = bound
        prev_mappability = mappability[chr][bound]
 
    end_time_1 = time.time()
    elapsed_time_1 = end_time_1 - start_time_1
    logging.info(f"In calcul_mappability : Leaving for bound in sorted_keys for {chr} (Time taken: {elapsed_time_1:.4f} seconds)")

    # Fill remaining positions if sequence length exceeds last bound
    logging.info(f"In calcul_mappability : Entering for i in range(prev_bound, seq_length) for {chr}")
    start_time_2 = time.time()

    for i in range(prev_bound, seq_length):
        map_data[i] = prev_mappability

    end_time_2 = time.time()
    elapsed_time_2 = end_time_2 - start_time_2
    logging.info(f"In calcul_mappability : Leaving for i in range(prev_bound, seq_length) for {chr} (Time taken: {elapsed_time_2:.4f} seconds)")

    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving dico_mappability for {chr} (Time taken: {elapsed_time:.4f} seconds)")

    return map_data


def parse_fasta(gc_file):
    """
    Parse a FASTA file and extract sequences.

    This function reads a FASTA file and extracts the sequences, storing them in a dictionary with headers as keys.

    Parameters
    ----------
    gc_file : str
        The path to the FASTA file. The file should be in standard FASTA format.

    Returns
    -------
    dict
        A dictionary where keys are sequence headers and values are the corresponding sequences.
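
    Examples
    --------
    A file containing ``>chr1`` followed by ``ACGT`` on the next line yields
    ``{"chr1": "ACGT"}``.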
    """
    logging.info("Entering parse_fasta")
    start_time = time.time()
    
    sequences = {}
    with open(gc_file, "r") as f:
        data = f.read().split(">")
        for entry in data[1:]:
            lines = entry.split("\n")
            header = lines[0]
            sequence = "".join(lines[1:])
            sequences[header] = sequence

    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving parse_fasta (Time taken: {elapsed_time:.4f} seconds)")

    return sequences


def calcul_gc_content(seq_length, chr, seq):
    """
    Calculate the GC content of a given sequence.

    This function generates an array representing the GC content for a specific chromosome and sequence length.

    Parameters
    ----------
    seq_length : int
        The length of the sequence.
    chr : str
        The chromosome for which the GC content is calculated.
    seq : dict
        A dictionary containing sequences, with chromosome names as keys and sequences as values.

    Returns
    -------
    numpy.ndarray
        An array of bytes ('S' dtype) representing the GC content for the given sequence length.
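
    Examples
    --------
    For ``seq = {"chr1": "ACGT"}`` and ``seq_length = 4``, the returned array
    holds the single-byte elements ``b'A'``, ``b'C'``, ``b'G'``, ``b'T'``.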
    """
    logging.info(f"Entering calcul_gc_content for {chr}")
    start_time = time.time()

    gc_data = np.zeros(seq_length, dtype="S")
    for i in range(len(seq[chr])):
        gc_data[i] = seq[chr][i]

    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving calcul_gc_content for {chr} (Time taken: {elapsed_time:.4f} seconds)")

    return gc_data

def calcul_depth_seq_samtools(seq_length, bamfile_path, chr, num_threads=12):
    """
    Calculate the sequencing depth for a given chromosome using a parallelized bash script.

    This function uses a bash script with multithreading to compute the depth for a specific chromosome 
    from a BAM file, and returns the depth values as a numpy array.

    Parameters
    ----------
    seq_length : int
        The length of the sequence.
    bamfile_path : str
        The path to the BAM file containing sequencing data.
    chr : str
        The chromosome for which the depth is calculated.
    num_threads : int, optional
        The number of threads to use for parallel processing (default is 12).

    Returns
    -------
    numpy.ndarray
        An array of integers representing the sequencing depth for the specified sequence length.
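
    Notes
    -----
    The helper script is expected to emit ``samtools depth``-style output
    (three whitespace-separated columns: chromosome, 1-based position, depth);
    positions are shifted to 0-based before filling the array.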
    """
    logging.info(f"Entering calcul_depth_seq for {chr}")
    start_time = time.time()
    
    #Define the output file for depth results
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S%f")
    output_file_1 = f"/work/gad/shared/analyse/test/cnvGPU/test_scalability/tmp_depth_{chr}_{timestamp}.out"
    
    # Run the parallelized bash script to calculate depth
    p = subprocess.Popen(
        "/work/gad/shared/analyse/test/cnvGPU/test_scalability/samtools_test_by_chr_multithreading_log.sh %s %s %s %s"
        % (bamfile_path, chr, output_file_1, num_threads),
        shell=True,
        executable="/bin/bash",
    )
    
    p.communicate()
    
    # Create the numpy array for depth data
    depth_data = np.zeros(seq_length, dtype=np.int32)

    # Read the output file and fill the numpy array
    try:
        df = pd.read_csv(output_file_1, sep=r"\s+", header=None, names=["chr", "pos", "depth"])
        df['pos'] -= 1  # Convert to 0-based index
        df = df[df['pos'] < seq_length]  # Ensure positions are within sequence length
        depth_data[df['pos']] = df['depth']
    except Exception as e:
        logging.error(f"Error reading depth file: {e}")
    
    # Clean up the output file
    subprocess.run(['rm', output_file_1])

    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving calcul_depth_seq for {chr} (Time taken: {elapsed_time:.4f} seconds)")

    return depth_data

def calcul_med_total(depth_correction_results, chr):
    """
    Calculate the median of non-zero depth correction results.

    This function filters out zero values from the depth correction results and computes the median 
    of the remaining non-zero values.

    Parameters
    ----------
    depth_correction_results : list or numpy.ndarray
        A list or array of depth correction values.
    chr : str
        The chromosome identifier for which the median is calculated.

    Returns
    -------
    float
        The median of the non-zero depth correction values, or 0 if no non-zero values are present.
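
    Examples
    --------
    A minimal sketch with hypothetical depth corrections (zeros are ignored):

    >>> float(calcul_med_total([0, 2.0, 4.0, 0], "chr1"))
    3.0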
    """
    logging.info(f"Entering calcul_med_total for {chr}")
    start_time = time.time()
    
    depth_correction_results = np.array(depth_correction_results)
    # Filter results to remove zero values
    non_zero_results = depth_correction_results[depth_correction_results != 0]
    # Calculate the median of non-zero results
    m = np.median(non_zero_results) if non_zero_results.size > 0 else 0

    sys.stderr.write("Chromosome : %s, med_chr : %s\n" % (chr, m))
    
    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving calcul_med_total for {chr} (Time taken: {elapsed_time:.4f} seconds)")
    
    return m


def calcul_med_same_gc(gc_results, depth_correction_results, chr):
    """
    Calculate the median depth correction for each unique GC content value.

    This function computes the median depth correction values for each unique GC content value, filtering out zero values.

    Parameters
    ----------
    gc_results : list or numpy.ndarray
        A list or array of GC content values.
    depth_correction_results : list or numpy.ndarray
        A list or array of depth correction values.
    chr : str
        The chromosome identifier used for logging.

    Returns
    -------
    dict
        A dictionary where keys are unique GC content values and values are the median depth correction for those GC values.
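
    Examples
    --------
    With GC values ``[35, 35, 40]`` and depth corrections ``[2.0, 4.0, 0.0]``,
    the result maps ``35 -> 3.0`` (median of the non-zero depths) and
    ``40 -> 0`` (all depths for that GC value were zero).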
    """
    logging.info(f"Entering calcul_med_same_gc for {chr}")
    start_time = time.time()
    
    mGC = []
    depth_correction_results_array = np.array(depth_correction_results)
    unique_gc_values = np.unique(gc_results)

    for gc in unique_gc_values:
        indices = np.where(
            gc_results == gc
        )  # Get positions of each unique GC value in gc_results
        # Filter depth correction results to remove zero values
        filtered_depths = depth_correction_results_array[indices][
            depth_correction_results_array[indices] != 0
        ]

        if (
            filtered_depths.size > 0
        ):  # Calculate median only if filtered results are not empty
            median_gc = np.median(filtered_depths)
        else:
            median_gc = 0  # Or another default value if all results are 0

        mGC.append((gc, median_gc))

    gc_to_median = dict(mGC)

    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving calcul_med_same_gc for {chr} (Time taken: {elapsed_time:.4f} seconds)")
    
    return gc_to_median


def calcul_moy_totale(normalize_depth_results, chr):
    """
    Calculate the mean of non-zero normalized depth results.

    This function filters out zero values from the normalized depth results and computes the mean of the remaining values.

    Parameters
    ----------
    normalize_depth_results : list or numpy.ndarray
        A list or array of normalized depth values.
    chr : str
        The chromosome identifier used for logging.

    Returns
    -------
    float
        The mean of the non-zero normalized depth values, or 0 if no non-zero values are present.
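
    Examples
    --------
    A minimal sketch with hypothetical values (zeros are ignored):

    >>> float(calcul_moy_totale([0.0, 2.0, 4.0], "chr1"))
    3.0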
    """
    logging.info(f"Entering calcul_moy_totale for {chr}")
    start_time = time.time()   
    
    normalize_depth_results = np.array(normalize_depth_results)
    # Filter results to remove zero values
    non_zero_results = normalize_depth_results[normalize_depth_results != 0]
    # Calculate the mean of non-zero results
    mean_chr_norm = np.mean(non_zero_results) if non_zero_results.size > 0 else 0

    logging.info(f"Mean chr_norm_no_zero = {mean_chr_norm}")

    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving calcul_moy_totale for {chr} (Time taken: {elapsed_time:.4f} seconds)")    
    
    return mean_chr_norm

def calcul_std(normalize_depth_results, chr):
    """
    Calculate the standard deviation of non-zero normalized depth results.

    This function filters out zero values from the normalized depth results and computes the standard deviation 
    of the remaining non-zero values.

    Parameters
    ----------
    normalize_depth_results : list or numpy.ndarray
        A list or array of normalized depth values.
    chr : str
        The chromosome identifier for which the standard deviation is calculated.

    Returns
    -------
    float
        The standard deviation of the non-zero normalized depth values, or 0 if no non-zero values are present.
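
    Examples
    --------
    A minimal sketch with hypothetical values (zeros are ignored):

    >>> float(calcul_std([0.0, 2.0, 4.0], "chr1"))
    1.0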
    """
    logging.info(f"Entering calcul_std for {chr}")
    start_time = time.time()  
    
    normalize_depth_results = np.array(normalize_depth_results)
    # Filter results to remove zero values
    non_zero_results = normalize_depth_results[normalize_depth_results != 0]
    # Calculate the standard deviation of non-zero results
    std_chr = np.std(non_zero_results) if non_zero_results.size > 0 else 0


    sys.stderr.write("Chromosome : %s, std_chr : %s\n" % (chr, std_chr))
    
    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving calcul_std for {chr} (Time taken: {elapsed_time:.4f} seconds)")      
    
    return std_chr


def compute_mean_std_med(ratio_par_window_norm_results, chr, normalize_depth_results):
    """
    Compute the mean, standard deviation, and median of non-zero ratio results per window.

    This function filters out zero and -1 values from the ratio results per window and computes the mean, 
    standard deviation, and median of the remaining values. Values greater than or equal to 5 are capped at 5.

    Parameters
    ----------
    ratio_par_window_norm_results : list or numpy.ndarray
        A list or array of ratio values per window.
    chr : str
        The chromosome identifier for which the statistics are calculated.
    normalize_depth_results : list or numpy.ndarray
        Normalized depth values (currently unused by this function).

    Returns
    -------
    tuple
        A tuple containing the mean, standard deviation, and median of the filtered ratio values:
        (mean_ratio, std_ratio, med_ratio).
    """
    logging.info(f"Entering compute_mean_std_med for {chr}")
    start_time = time.time()

    # Filter results to remove zero and -1 values
    ratio_par_window_norm_results = np.array(ratio_par_window_norm_results)
    non_zero_results = ratio_par_window_norm_results[ratio_par_window_norm_results != 0]
    non_zero_results = non_zero_results[np.isfinite(non_zero_results)]

    # Initialize list for stats computation
    table = []

    for value in non_zero_results:
        if float(value) >= 5:
            table.append(5)
        elif float(value) != -1:
            table.append(float(value))

    # Calculate the mean, standard deviation, and median of the filtered values
    mean_ratio = np.mean(table) if table else 0
    std_ratio = np.std(table) if table else 0
    med_ratio = np.median(table) if table else 0

    sys.stderr.write("Chromosome : %s, mean_ratio : %s, std_ratio : %s, med_ratio : %s\n" % (chr, mean_ratio, std_ratio, med_ratio))
    
    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving compute_mean_std_med for {chr} (Time taken: {elapsed_time:.4f} seconds)") 

    # Return results
    return mean_ratio, std_ratio, med_ratio


def cn_level(x, chr):
    """
    Determine the copy number level based on the given value.

    This function returns the copy number level based on the input value `x`.

    Parameters
    ----------
    x : float
        The input value used to determine the copy number level.
    chr : str
        The chromosome identifier for which the copy number level is calculated.

    Returns
    -------
    int
        The copy number level:
        - 0 if x < 0.1
        - 1 if 0.1 <= x <= 0.75
        - 2 if 0.75 < x < 1 or round(x) == 1
        - round(x) if round(x) > 1
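
    Examples
    --------
    A quick sanity check over the documented ranges:

    >>> [cn_level(x, "chr1") for x in (0.05, 0.5, 0.9, 1.2, 2.6)]
    [0, 1, 2, 2, 3]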
    """
    if x < 0.1:
        return 0
    elif x <= 0.75:
        return 1
    elif x < 1:
        return 2
    else:
        rounded_x = round(x)
        return 2 if rounded_x == 1 else rounded_x


def get_sample_name(bamfile_path):
    """
    Extract the sample name from a BAM file.

    This function reads the header of a BAM file to extract the sample name from the read groups.

    Parameters
    ----------
    bamfile_path : str
        The path to the BAM file.

    Returns
    -------
    str
        The sample name extracted from the BAM file. If no sample name is found, returns "UnknownSample".
    """
    logging.info(f"Entering get_sample_name")
    start_time = time.time()

    sample_name = "UnknownSample"
    with pysam.AlignmentFile(bamfile_path, "rb") as bamfile:
        for read_group in bamfile.header.get("RG", []):
            if "SM" in read_group:
                sample_name = read_group["SM"]
                break

    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving get_sample_name (Time taken: {elapsed_time:.4f} seconds)")

    return sample_name

def filter_read_gc(gc_results, depth_results, chr):
    """
    Filter depth results by GC content categories.

    This function groups depth values based on rounded GC content categories (35, 40, 45, 50, 55) and returns 
    the depth values for each category as separate arrays.

    Parameters
    ----------
    gc_results : list or numpy.ndarray
        A list or array of GC content values.
    depth_results : list or numpy.ndarray
        A list or array of depth values corresponding to the GC content values.
    chr : str
        The chromosome identifier for which the filtering is performed.

    Returns
    -------
    tuple
        A tuple containing five numpy arrays, each corresponding to depth values for rounded GC content categories:
        (gc_35, gc_40, gc_45, gc_50, gc_55).
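
    Examples
    --------
    A window with GC content 35.2% and depth 2.0 lands in the ``gc_35`` bin
    (its rounded GC equals 35); windows whose rounded GC is outside
    {35, 40, 45, 50, 55} are discarded.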
    """
    logging.info(f"Entering filter_read_gc for {chr}")
    start_time = time.time()

    gc_35, gc_40, gc_45, gc_50, gc_55 = [], [], [], [], []

    for depth_value, gc_value in zip(depth_results, gc_results):
        gc_value_rounded = round(gc_value)
        if gc_value_rounded == 35:
            gc_35.append(depth_value)
        elif gc_value_rounded == 40:
            gc_40.append(depth_value)
        elif gc_value_rounded == 45:
            gc_45.append(depth_value)
        elif gc_value_rounded == 50:
            gc_50.append(depth_value)
        elif gc_value_rounded == 55:
            gc_55.append(depth_value)
    
    gc_35, gc_40, gc_45, gc_50, gc_55 = map(np.array, [gc_35, gc_40, gc_45, gc_50, gc_55])

    # Log summary statistics for the GC==35 bin (guarding against an empty bin)
    gc_35_non_zero = gc_35[gc_35 != 0]
    np_mean_gc_35 = np.mean(gc_35_non_zero) if gc_35_non_zero.size > 0 else 0
    len_gc_35 = len(gc_35_non_zero)
    logging.info(f"np_mean_gc_35 = {np_mean_gc_35}, len_gc_35_non_zero = {len_gc_35}, len_gc_35 = {len(gc_35)}")

    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving filter_read_gc for {chr} (Time taken: {elapsed_time:.4f} seconds)")

    return gc_35, gc_40, gc_45, gc_50, gc_55
    
def filter_read_gc_gpu(gc_results, d_gc_results, d_depth_results, grid_size, block_size, chr):
    """
    Filter depth results by GC content categories using GPU computation.

    This function utilizes a GPU kernel to group depth values based on rounded GC content categories (35, 40, 45, 50, 55) 
    and returns the depth values for each category as separate arrays. The function allocates memory on the GPU, 
    copies the data, executes the kernel, and retrieves the results.

    Parameters
    ----------
    gc_results : list or numpy.ndarray
        A list or array of GC content values.
    d_gc_results : pycuda.gpuarray.GPUArray
        A GPU array containing GC content values.
    d_depth_results : pycuda.gpuarray.GPUArray
        A GPU array containing depth values.
    grid_size : int
        The grid size for the GPU kernel execution.
    block_size : int
        The block size for the GPU kernel execution.
    chr : str
        The chromosome identifier for which the filtering is performed.

    Returns
    -------
    tuple
        A tuple containing five numpy arrays, each corresponding to depth values for rounded GC content categories:
        (gc_35, gc_40, gc_45, gc_50, gc_55).
    """
    logging.info(f"Entering filter_read_gc_gpu for {chr}")
    start_time = time.time()
    
    n = len(gc_results)
    max_size = n + 1
    
    gc_35_data = np.zeros(max_size, dtype=np.float32)
    gc_40_data = np.zeros(max_size, dtype=np.float32)
    gc_45_data = np.zeros(max_size, dtype=np.float32)
    gc_50_data = np.zeros(max_size, dtype=np.float32)
    gc_55_data = np.zeros(max_size, dtype=np.float32)
    
    gc_35_gpu = cuda.mem_alloc(gc_35_data.nbytes)
    gc_40_gpu = cuda.mem_alloc(gc_40_data.nbytes)
    gc_45_gpu = cuda.mem_alloc(gc_45_data.nbytes)
    gc_50_gpu = cuda.mem_alloc(gc_50_data.nbytes)
    gc_55_gpu = cuda.mem_alloc(gc_55_data.nbytes)
    
    cuda.memcpy_htod(gc_35_gpu, gc_35_data)
    cuda.memcpy_htod(gc_40_gpu, gc_40_data)
    cuda.memcpy_htod(gc_45_gpu, gc_45_data)
    cuda.memcpy_htod(gc_50_gpu, gc_50_data)
    cuda.memcpy_htod(gc_55_gpu, gc_55_data)
    
    
    filter_read_gc_kernel_cuda(d_gc_results, d_depth_results, np.int32(n), gc_35_gpu, gc_40_gpu, gc_45_gpu, gc_50_gpu, gc_55_gpu, block=(block_size, 1, 1), grid=(grid_size, 1))

    # Copy back results
    gc_35 = np.zeros(max_size, dtype=np.float32)
    gc_40 = np.zeros(max_size, dtype=np.float32)
    gc_45 = np.zeros(max_size, dtype=np.float32)
    gc_50 = np.zeros(max_size, dtype=np.float32)
    gc_55 = np.zeros(max_size, dtype=np.float32)
    
    cuda.memcpy_dtoh(gc_35, gc_35_gpu)
    cuda.memcpy_dtoh(gc_40, gc_40_gpu)
    cuda.memcpy_dtoh(gc_45, gc_45_gpu)
    cuda.memcpy_dtoh(gc_50, gc_50_gpu)
    cuda.memcpy_dtoh(gc_55, gc_55_gpu)
    
    gc_35_non_zero = gc_35[gc_35 != 0]  # Filter out zero values

    np_mean_gc_35 = np.mean(gc_35_non_zero) if gc_35_non_zero.size > 0 else 0
    len_gc_35 = len(gc_35_non_zero)

    logging.info(f"np_mean_gc_35 = {np_mean_gc_35}, len_gc_35_non_zero = {len_gc_35}, len_gc_35 = {len(gc_35)}")

    # Keep only the non-zero entries in each bin
    gc_35 = gc_35[gc_35 != 0]
    gc_40 = gc_40[gc_40 != 0]
    gc_45 = gc_45[gc_45 != 0]
    gc_50 = gc_50[gc_50 != 0]
    gc_55 = gc_55[gc_55 != 0]

    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving filter_read_gc for {chr} (Time taken: {elapsed_time:.4f} seconds)") 

    return gc_35, gc_40, gc_45, gc_50, gc_55

def calc_polynom(gc_35, gc_40, gc_45, gc_50, gc_55, chr):
    """
    Calculate polynomial fits for depth arrays grouped by GC content.

    This function computes a 3rd-degree polynomial fit for each depth array (gc_35, gc_40, gc_45, gc_50, gc_55).
    Each fit is represented as a polynomial object. If a depth array is empty, `None` is returned for that GC category.

    Parameters
    ----------
    gc_35 : numpy.ndarray
        Array of depth values for GC content category 35.
    gc_40 : numpy.ndarray
        Array of depth values for GC content category 40.
    gc_45 : numpy.ndarray
        Array of depth values for GC content category 45.
    gc_50 : numpy.ndarray
        Array of depth values for GC content category 50.
    gc_55 : numpy.ndarray
        Array of depth values for GC content category 55.
    chr : str
        The chromosome identifier for which the polynomial fits are calculated.

    Returns
    -------
    list
        A list of polynomial objects (numpy.poly1d) for each GC content category, where `None` is returned
        for categories with empty depth arrays.
    """
    logging.info(f"Entering calc_polynom for {chr}")
    start_time = time.time()
    
    polynomials = []
    depth_arrays = [gc_35, gc_40, gc_45, gc_50, gc_55]

    for depths in depth_arrays:
        if len(depths) > 0:
            x = np.arange(len(depths))  # Index array used as the x-axis
            coeffs = np.polyfit(x, depths, 3)  # Fit a degree-3 polynomial
            p = np.poly1d(coeffs)  # Build a polynomial from the coefficients
            polynomials.append(p)
        else:
            polynomials.append(None)
            
    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving calc_polynom for {chr} (Time taken: {elapsed_time:.4f} seconds)") 
    return polynomials

def calc_mean_chr(depth_results, chr):
    """
    Calculate the mean of non-zero depth results for a chromosome.

    This function filters out zero values from the depth results and computes the mean of the remaining 
    non-zero values for the specified chromosome.

    Parameters
    ----------
    depth_results : list or numpy.ndarray
        A list or array of depth values.
    chr : str
        The chromosome identifier for which the mean depth is calculated.

    Returns
    -------
    float
        The mean of the non-zero depth values, or 0 if no non-zero values are present.
    """
    logging.info(f"Entering calc_mean_chr for {chr}")
    start_time = time.time()   
    
    depth_results = np.array(depth_results)
    # Filter results to remove zero values
    non_zero_results = depth_results[depth_results != 0]
    # Calculate the mean of non-zero results
    mean_chr = np.mean(non_zero_results) if non_zero_results.size > 0 else 0

    sys.stderr.write("Chromosome : %s, mean_chr : %s\n" % (chr, mean_chr))

    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving calc_mean_chr for {chr} (Time taken: {elapsed_time:.4f} seconds)")    
    
    return mean_chr

def find_polynom(ratio_par_window_results, chr, polynomials, depth_results):
    """
    Find the best polynomial fit for copy number ratio results.

    This function identifies the polynomial from a list of polynomials that best fits the copy number (CN) 
    values calculated from the ratio per window results. The best fit is determined by minimizing the 
    difference between normalized polynomial values and unique depth values.

    Parameters
    ----------
    ratio_par_window_results : list or numpy.ndarray
        A list or array of ratio values per window.
    chr : str
        The chromosome identifier for which the best polynomial is determined.
    polynomials : list of numpy.poly1d or None
        A list of polynomial objects, where each polynomial corresponds to a GC content category.
        Polynomials may be `None` if no fit is available for that category.
    depth_results : list or numpy.ndarray
        A list or array of depth values used for comparison with polynomial predictions.

    Returns
    -------
    numpy.poly1d or None
        The polynomial that best fits the data based on the minimum difference in normalized values, 
        or `None` if no suitable polynomial is found.
    """
    logging.info(f"Entering find_polynom for {chr}")
    start_time = time.time()
    
    # Convert inputs to numpy arrays (the boolean masking below requires ndarrays)
    ratio_par_window_results = np.asarray(ratio_par_window_results)
    depth_results = np.asarray(depth_results)
    cn = ratio_par_window_results * 2
    cn_to_test = cn[(cn >= 1.5) & (cn < 2.5)]
    cn_unique = np.unique(cn_to_test)

    best_polynom = None
    best_knorm_diff = float('inf')
    
    depth_results_non_zero = depth_results[depth_results != 0]
    depth_results_filter = np.unique(depth_results_non_zero)
    
    logging.info(f"depth_results_len = {len(depth_results)}, depth_results_non_zero_len = {len(depth_results_non_zero)}, depth_results_filter_len = {len(depth_results_filter)}")
    
    # Precompute k / 2 for each unique cn value
    cn_unique_div2 = cn_unique / 2

    # Iterate over each polynomial
    for polynom in polynomials:
        logging.info(f"polynom = {polynom}\tpolynomials = {polynomials}\tcn_unique = {cn_unique}")
        if polynom is not None:
            # polynom(cn_unique) broadcasts against cn_unique_div2[:, None], giving an
            # (n, n) matrix of candidates knorm[i, j] = polynom(cn_j) * cn_i / 2
            knorms = polynom(cn_unique) * cn_unique_div2[:, None]
            
            # Calculate the differences between knorms and each RCi, then find the minimum difference
            for RCi in depth_results_filter:
                knorm_diffs = np.abs(knorms - RCi)
                min_knorm_diff = np.min(knorm_diffs)

                if min_knorm_diff < best_knorm_diff:
                    best_knorm_diff = min_knorm_diff
                    best_polynom = polynom

    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving find_polynom for {chr} (Time taken: {elapsed_time:.4f} seconds)") 
    return best_polynom

def normalize(depth_results, best_polynom, map_results):
    """
    Normalize depth results using a polynomial model and mappability results.

    This function normalizes depth values by dividing each non-zero depth value by the product of the 
    corresponding polynomial value and mappability score. If no polynomial is provided, the depth value is 
    used as-is. Zero values in either depth or mappability results are set to zero in the output.

    Parameters
    ----------
    depth_results : list or numpy.ndarray
        A list or array of depth values to be normalized.
    best_polynom : numpy.poly1d or None
        A polynomial model for normalization. If `None`, no polynomial normalization is applied.
    map_results : list or numpy.ndarray
        A list or array of mappability values corresponding to the depth results.

    Returns
    -------
    numpy.ndarray
        An array of normalized depth values, where zero values are retained where either depth or mappability 
        was zero.
    """
    logging.info("Entering normalization")
    start_time = time.time()
    
    normalize_depth_results = []
    for i in range(len(depth_results)):
        if depth_results[i] != 0 and map_results[i] != 0:
            normalized_value = depth_results[i] / (best_polynom(i) * map_results[i]) if best_polynom is not None else depth_results[i]
            #logging.info(f"normalized_value = {normalized_value}\ti = {i}\tdepth_results[i] = {depth_results[i]}\tbest_polynom(i) = {best_polynom(i)}\tmap_results[i] = {map_results[i]}")
            normalize_depth_results.append(normalized_value)
        else:
            normalize_depth_results.append(0)
    
    #normalize_depth_results_unique = np.unique(normalize_depth_results)
    #logging.info(f"normalize_depth_results_unique = {normalize_depth_results_unique}") 
    
    normalize_depth_results = np.array(normalize_depth_results)

    has_nan = np.isnan(normalize_depth_results).any()
    has_inf = np.isinf(normalize_depth_results).any()

    # This function has no chr parameter, so the warnings do not name the chromosome
    if has_nan:
        logging.warning("Normalized depth contains NaN values.")
    if has_inf:
        logging.warning("Normalized depth contains Inf values.")

    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving normalization (Time taken: {elapsed_time:.4f} seconds)")

    return normalize_depth_results

def create_signal(signal, chr, z_score_results, step_size):
    """
    Create a signal dictionary for a specific chromosome based on z-score results.

    This function populates a signal dictionary with positions and corresponding z-score results for a given chromosome.
    Each position is calculated based on the step size and is mapped to the corresponding z-score value.

    Parameters
    ----------
    signal : dict
        A dictionary to store the signal data, where chromosome names are keys and position-to-z-score mappings are values.
    chr : str
        The chromosome for which the signal is created.
    z_score_results : list or numpy.ndarray
        A list or array of z-score results.
    step_size : int
        The step size used to calculate the positions.

    Returns
    -------
    None
        The function modifies the `signal` dictionary in place.
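
    Examples
    --------
    A minimal sketch with two hypothetical z-scores and a step of 10:

    >>> signal = {}
    >>> create_signal(signal, "chr1", [0.5, -1.2], 10)
    >>> signal
    {'chr1': {1: 0.5, 11: -1.2}}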
    """
    logging.info(f"Entering create_signal for {chr} (include copy_number_level)")
    start_time = time.time()

    if chr not in signal:
        signal[chr] = {}
    for i in range(len(z_score_results)):
        pos = (i * step_size) + 1
        signal[chr][pos] = z_score_results[i]
    
    #sys.stderr.write("\t signal %s\n" % signal[chr])
    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving create_signal for {chr} (include copy_number_level) (Time taken: {elapsed_time:.4f} seconds)") 

def detect_events(
    z_score_results,
    zscore_threshold,
    events,
    med_ratio,
    ratio_par_mean_ratio_results,
    chr,
):
    """
    Detect genomic events based on z-score results and a z-score threshold.

    This function identifies significant genomic events where z-scores exceed the given threshold. Events are recorded in the `events` dictionary for the specified chromosome.

    Parameters
    ----------
    z_score_results : list or numpy.ndarray
        A list or array of z-score values.
    zscore_threshold : float
        The threshold for detecting significant z-score events.
    events : dict
        A dictionary to store detected events.
    med_ratio : float
        The median ratio used for copy number level calculations.
    ratio_par_mean_ratio_results : list or numpy.ndarray
        A list or array of ratio values compared to the mean ratio.
    chr : str
        The chromosome for which events are detected.

    Returns
    -------
    None
        The function modifies the events dictionary in place.
    """
    logging.info(f"Entering detect_events for {chr}")
    start_time = time.time()
    
    c = 0
    if chr not in events:
        events[chr] = {}
    for i, z_score in enumerate(z_score_results):
        if abs(z_score) >= zscore_threshold:
            if med_ratio != 0:
                c = cn_level(float(ratio_par_mean_ratio_results[i]), chr)
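            # A positive z-score is always called as a gain (CN 3); a negative
            # z-score at an otherwise normal level (CN 2) is downgraded to CN 1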
            if z_score >= 0:
                c = 3
            elif c == 2 and z_score < 0:
                c = 1
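            # Window coordinates derive from the module-level window_size and
            # step_size parsed in __main__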
            pos_start = (i * step_size) + 1
            pos_end = pos_start + window_size

            events[chr][(pos_start, pos_end)] = c
    #sys.stderr.write("\t events %s\n" % events)

    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving detect_events for {chr} (Time taken: {elapsed_time:.4f} seconds)") 

def segmentation(events, segment, chr):
    """
    Segment the detected events into contiguous regions with the same copy number level.

    This function processes the detected events and groups contiguous regions with the same copy number level 
    into segments, modifying the `segment` dictionary in place to store the start, end, and copy number 
    information for each contiguous region.

    Parameters
    ----------
    events : dict
        A dictionary of detected events for each chromosome. The keys are chromosome names, and the values are dictionaries 
        with positions as keys and copy number levels as values.
    segment : dict
        A dictionary to store the segmented regions, with chromosome names as keys. Each chromosome's value is a dictionary 
        where keys represent segment ranges (e.g., "start-end") and values are dictionaries containing 'start', 'end', and 
        'cn' (copy number level) for the segment.
    chr : str
        The chromosome identifier being processed.

    Returns
    -------
    None
        The function modifies the `segment` dictionary in place, adding segmented regions.
    """
    logging.info(f"Entering segmentation for {chr}")
    start_time = time.time()
    
    for k in events.keys():
        sys.stderr.write("\tfor chromosome %s\n" % k)
        starts, oldPos, oldLevel = 1, 1, -1
        for p in sorted(events[k].keys()):
            level = events[k][p]
            #sys.stderr.write("\t p %s\n" % p)
            # new coordinates
            if p[0] > (oldPos + window_size):
                if (starts != 1) and (starts != p[0]):
                    if k not in segment:
                        segment[k] = {}
                    index = str(starts) + "-" + str(oldPos)
                    segment[k][index] = {}
                    segment[k][index]["start"] = starts
                    segment[k][index]["end"] = oldPos + window_size
                    segment[k][index]["cn"] = oldLevel
                    oldPos, starts, oldLevel = p[0], p[0], level
                    continue
                else:
                    starts = p[0]
            # case where it's contiguous but different level
            if level != oldLevel:
                if oldLevel != -1:
                    if k not in segment:
                        segment[k] = {}
                    index = str(starts) + "-" + str(oldPos)
                    segment[k][index] = {}
                    segment[k][index]["start"] = starts
                    segment[k][index]["end"] = oldPos
                    segment[k][index]["cn"] = oldLevel
                    oldPos, starts, oldLevel = p[0], p[0], level
                    continue
                else:
                    oldLevel = level
            oldPos, oldLevel = p[0], level

    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving segmentation for {chr} (Time taken: {elapsed_time:.4f} seconds)") 

def display_results_vcf(sample, segment, signal, lengthFilter, output_file, chr, normalize_depth_results):
    """
    Generate a VCF file containing structural variant calls based on segmented regions and signal data.

    This function creates a VCF (Variant Call Format) file containing structural variant calls derived from 
    segmented regions and signal data. The structural variant type (DEL for deletion or DUP for duplication) 
    is determined based on copy number levels and signal values. The resulting VCF file includes information 
    about the chromosome, position, type of structural variant, copy number, and other relevant information.

    Parameters
    ----------
    sample : str
        The sample name to be included in the VCF file header.
    segment : dict
        A dictionary containing segmented regions with copy number information.
    signal : dict
        A dictionary containing signal data for each chromosome.
    lengthFilter : int
        The minimum length threshold for including variants in the VCF file.
    output_file : str
        The path to the output VCF file.
    chr : str
        The chromosome identifier for which the VCF output is generated.
    normalize_depth_results : list or numpy.ndarray
        An array or list of normalized depth values used to filter structural variant calls.

    Returns
    -------
    None
        This function writes the structural variant calls to the specified output file in VCF format.
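
    Examples
    --------
    A deletion record written by this function looks like (tab-separated,
    hypothetical coordinates):
    ``chr1  12345  .  N  <DEL>  .  .  SVTYPE=DEL;END=15345;CN=1  GT:GQ  ./.:0``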
    """
    global header_written
    logging.info(f"Entering display_results_vcf for {chr}")
    start_time = time.time()

    with open(output_file, "a") as f:
        if not header_written:
            f.write("##fileformat=VCFv4.2\n")
            f.write("##source=cnvcaller\n")
            f.write(
                '##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">\n'
            )
            f.write(
                '##INFO=<ID=SVLEN,Number=.,Type=Integer,Description="Difference in length between REF and ALT alleles">\n'
            )
            f.write(
                '##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">\n'
            )
            f.write('##INFO=<ID=CN,Number=1,Type=Integer,Description="Copy number">\n')
            f.write('##ALT=<ID=DEL,Description="Deletion">\n')
            f.write('##ALT=<ID=DUP,Description="Duplication">\n')
            f.write('##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n')
            f.write('##FORMAT=<ID=GQ,Number=1,Type=Integer,Description="Genotype Quality">\n')
            f.write(
                "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\n" % (sample)
            )
            header_written = True

        for k in segment.keys():
            #sys.stderr.write("\tfor chromosome %s\n" % k)
            for elt in sorted(segment[k].keys()):
                #sys.stderr.write("\tfor elt %s\n" % elt)
                if segment[k][elt]["start"] == segment[k][elt]["end"]:
                    continue
                if (segment[k][elt]["end"] - segment[k][elt]["start"]) < lengthFilter:
                    continue
                #sys.stderr.write("\t [segment[k][elt] %s\n" % [segment[k][elt]])
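                # Emit a call only when mean normalized depth across the segment is at least 10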
                if np.mean(normalize_depth_results[segment[k][elt]["start"]:segment[k][elt]["end"]]) >= 10:
                    if int(signal[k][segment[k][elt]["start"]]) < 0:
                        f.write(
                            "%s\t%s\t.\tN\t<DEL>\t.\t.\tSVTYPE=DEL;END=%s;VALUE=%s\tGT:GQ\t./.:0\n"
                            % (
                                k,
                                segment[k][elt]["start"],
                                segment[k][elt]["end"],
                                int(segment[k][elt]["cn"]),
                            )
                        )
                    else:
                        f.write(
                            "%s\t%s\t.\tN\t<DUP>\t.\t.\tSVTYPE=DUP;END=%s;VALUE=%s\tGT:GQ\t./.:0\n"
                            % (
                                k,
                                segment[k][elt]["start"],
                                segment[k][elt]["end"],
                                int(segment[k][elt]["cn"]),
                            )
                        )

    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving display_results_vcf for {chr} (Time taken: {elapsed_time:.4f} seconds)") 


def calcul_distance(bamfile_handle, chr, seq_length):
    """
    Calculate template distances for paired-end reads in a specified chromosome.

    This function calculates the absolute template length (distance) for each paired-end read in the specified chromosome, 
    filtering out unmapped reads and reads with unmapped mates. The distances are stored in an array for further analysis.

    Parameters
    ----------
    bamfile_handle : pysam.AlignmentFile
        A handle to an open BAM file for reading.
    chr : str
        The chromosome identifier for which distances are calculated.
    seq_length : int
        The length of the sequence (not directly used in this function, but may be needed for context).

    Returns
    -------
    numpy.ndarray
        An array of integer distances representing the absolute template lengths for paired-end reads in the chromosome.
    """
    logging.info(f"Entering calcul_distance for {chr}")
    start_time = time.time()
    
    distances_list = []
    
    for read in bamfile_handle.fetch(chr):
        if read.is_paired and not read.is_unmapped and not read.mate_is_unmapped:
            # Calculate absolute template length
            distance = abs(read.template_length)
            if distance != 0:
                distances_list.append(distance)
            #sys.stderr.write("Chromosome : %s, distances_data : %s\n" % (chr, distances_data))
    
    distances_data = np.array(distances_list, dtype=np.int32)

    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving calcul_distance for {chr} (Time taken: {elapsed_time:.4f} seconds)")
    
    return distances_data

def stats_distances(distances_data, chr):
    """
    Calculate statistics on template distances for a specified chromosome.

    This function calculates the standard deviation and median of the template distances for a given chromosome.
    It then uses these values to determine a minimum and maximum distance range based on one standard deviation 
    from the median.

    Parameters
    ----------
    distances_data : numpy.ndarray
        An array of integer distances representing the template lengths for paired-end reads.
    chr : str
        The chromosome identifier for which the distance statistics are calculated.

    Returns
    -------
    tuple
        A tuple containing:
        - MIN_DISTANCE : float, the lower bound distance (median - standard deviation).
        - MAX_DISTANCE : float, the upper bound distance (median + standard deviation).
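
    Examples
    --------
    A minimal sketch with synthetic distances (std = 10, median = 305):

    >>> import numpy as np
    >>> dists = np.array([300, 310, 320, 290, 305], dtype=np.int32)
    >>> lo, hi = stats_distances(dists, "chr1")
    >>> (float(lo), float(hi))
    (295.0, 315.0)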
    """
    logging.info(f"Entering stats_distances for {chr}")
    start_time = time.time()
    
    std_distances = np.std(distances_data)
    med_distances = np.median(distances_data)
    
    MIN_DISTANCE = med_distances - std_distances
    MAX_DISTANCE = med_distances + std_distances

    sys.stderr.write("Chromosome : %s, std_dist : %s, med_dist : %s\n" % (chr, std_distances, med_distances))
    
    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving stats_distances for {chr} (Time taken: {elapsed_time:.4f} seconds)") 
    
    # Return results
    return MIN_DISTANCE, MAX_DISTANCE
        
def find_paired_split(chr, MIN_DISTANCE, MAX_DISTANCE, depth_data, output_file_pairs, output_file_splits, bamfile_handle, seq_length):
    """
    Detect paired-end and split-read structural variant signals in a chromosome.

    This function scans paired-end and split-read data from a BAM file to identify structural variant signals
    (e.g., inversions, deletions, insertions, translocations) based on distance thresholds and alignment patterns.
    Detected signals are filtered and grouped based on proximity and event type, and then written to specified output files.

    Parameters
    ----------
    chr : str
        The chromosome identifier for which structural variant signals are detected.
    MIN_DISTANCE : float
        Lower bound on the template length; pairs with a shorter insert are flagged as abnormal (insertion candidates).
    MAX_DISTANCE : float
        Upper bound on the template length; pairs with a longer insert are flagged as abnormal (deletion candidates).
    depth_data : numpy.ndarray
        An array of depth values for the chromosome.
    output_file_pairs : str
        The path to the output file for storing paired-end read signals.
    output_file_splits : str
        The path to the output file for storing split-read signals.
    bamfile_handle : pysam.AlignmentFile
        A handle to an open BAM file for reading.
    seq_length : int
        The length of the sequence being processed.

    Returns
    -------
    None
        The function writes paired-end and split-read structural variant signals to the specified output files.
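
    Notes
    -----
    Reads are binned into 1 kb fragments, and an event is kept only when the
    fraction of abnormal reads in its fragment exceeds 0.2. For example, a
    fragment covered by 50 reads of which 12 are abnormal has a fraction of
    12/50 = 0.24 and is retained.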
    """
    logging.info(f"Entering find_paired_split for {chr}")
    start_time = time.time()
    
    coordinates_pairs, coordinates_splits = defaultdict(int), defaultdict(int)

    unique_pairs, unique_splits = [], []


    # Bin the chromosome into 1 kb fragments and track, per fragment,
    # the number of abnormal pair/split reads and the total read count
    fragment_size = 1000
    num_fragments = (seq_length // fragment_size) + 1

    fragment_abnormal_reads_pairs = np.zeros(num_fragments, dtype=int)
    fragment_abnormal_reads_splits = np.zeros(num_fragments, dtype=int)
    fragment_total_reads = np.zeros(num_fragments, dtype=int)

    event_pairs, event_splits = [], []

    for read in bamfile_handle.fetch(chr):
        fragment_index = read.reference_start // fragment_size
        fragment_total_reads[fragment_index] += 1
        if read.is_paired and (read.mapping_quality > 0):
            key = (read.reference_start, read.cigarstring)
            if read.is_unmapped or read.mate_is_unmapped:
                if coordinates_pairs[key] == 0:
                    fragment_abnormal_reads_pairs[fragment_index] += 1
                    unique_pairs.append((read, "Unmapped"))
                coordinates_pairs[key] += 1
            elif read.reference_id != read.next_reference_id:
                # Mates on different chromosomes: translocation candidate.
                # Tested before the insert-size checks because inter-chromosomal
                # pairs have TLEN == 0 and would otherwise be misclassified as INS.
                if coordinates_pairs[key] == 0:
                    fragment_abnormal_reads_pairs[fragment_index] += 1
                    unique_pairs.append((read, "TRN"))
                coordinates_pairs[key] += 1
            elif read.is_reverse == read.mate_is_reverse:
                # Mates on the same strand: inversion candidate
                if coordinates_pairs[key] == 0:
                    fragment_abnormal_reads_pairs[fragment_index] += 1
                    unique_pairs.append((read, "INV"))
                coordinates_pairs[key] += 1
            elif abs(read.template_length) < MIN_DISTANCE:
                # Insert size too short: insertion candidate
                if coordinates_pairs[key] == 0:
                    fragment_abnormal_reads_pairs[fragment_index] += 1
                    unique_pairs.append((read, "INS"))
                coordinates_pairs[key] += 1
            elif abs(read.template_length) > MAX_DISTANCE:
                # Insert size too long: deletion candidate
                if coordinates_pairs[key] == 0:
                    fragment_abnormal_reads_pairs[fragment_index] += 1
                    unique_pairs.append((read, "DEL"))
                coordinates_pairs[key] += 1

        # Split reads: note the `elif` means only reads not retained by the pair
        # filter above (unpaired or MAPQ == 0) are examined for clipping
        elif read.cigartuples and any(cigar_op in [3, 4, 5] for cigar_op, _ in read.cigartuples):
            if not read.has_tag("SA"):
                continue
            key = (read.reference_start, read.cigarstring)
            if coordinates_splits[key] == 0:
                fragment_abnormal_reads_splits[fragment_index] += 1

                # Classify the split event from the CIGAR operations
                for cigar_op, _ in read.cigartuples:
                    if cigar_op == 3:  # Skipped region (N)
                        unique_splits.append((read, "SKIP"))
                    elif cigar_op == 4:  # Soft clipping (S)
                        unique_splits.append((read, "SOFT_CLIP"))
                    elif cigar_op == 5:  # Hard clipping (H)
                        unique_splits.append((read, "HARD_CLIP"))
                coordinates_splits[key] += 1


    logging.info("Filtering pairs")
    for read, event_type in unique_pairs:
        fragment_index = read.reference_start // fragment_size
        threshold = fragment_abnormal_reads_pairs[fragment_index] / fragment_total_reads[fragment_index]
        if threshold > 0.2:
            event_pairs.append((read, event_type))

    logging.info("Filtering splits")
    for read, event_type in unique_splits:
        fragment_index = read.reference_start // fragment_size
        threshold = fragment_abnormal_reads_splits[fragment_index] / fragment_total_reads[fragment_index]
        if threshold > 0.2:
            event_splits.append((read, event_type))

    chromosome_coordinate_pairs_count = defaultdict(int)
    reads_by_coordinate_pair = defaultdict(list)
    tolerance = 400
    
    chromosome_coordinate_splits_count = defaultdict(int)
    reads_by_coordinate_splits = defaultdict(list)
    
    
    logging.info("Merging pairs")   
    #Paired-reads
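    # Greedy clustering: each event is merged into the first stored cluster whose
    # start/end coordinates both lie within `tolerance` bp and whose event type
    # matches; otherwise it seeds a new cluster.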
    for read, event_type in event_pairs:
        start_chr = read.reference_name
        start_coord = read.reference_start
        end_chr = read.next_reference_name
        end_coord = read.next_reference_start
        
        found_close_pair = False
        
        for (chr_pair, coord_pair, existing_event_type), count in chromosome_coordinate_pairs_count.items():
            (stored_start_chr, stored_end_chr) = chr_pair
            (stored_start_coord, stored_end_coord) = coord_pair
            
            if (start_chr == stored_start_chr and end_chr == stored_end_chr and
                are_coordinates_close(start_coord, stored_start_coord, tolerance) and
                are_coordinates_close(end_coord, stored_end_coord, tolerance) and
                event_type == existing_event_type):
                
                chromosome_coordinate_pairs_count[(chr_pair, coord_pair, event_type)] += 1
                reads_by_coordinate_pair[(chr_pair, coord_pair, event_type)].append(read)
                found_close_pair = True
                break
        
        if not found_close_pair:
            chr_pair = (start_chr, end_chr)
            coord_pair = (start_coord, end_coord)
            chromosome_coordinate_pairs_count[(chr_pair, coord_pair, event_type)] += 1
            reads_by_coordinate_pair[(chr_pair, coord_pair, event_type)].append(read)
    
    logging.info("Merging splits")
    #Split-reads     
    for read, event_type in event_splits:
        start_chr = read.reference_name
        start_coord = read.reference_start
        end_chr = read.next_reference_name
        end_coord = read.next_reference_start
        
        found_close_splits = False
        
        for (chr_splits, coord_splits, existing_event_type), count in chromosome_coordinate_splits_count.items():
            (stored_start_chr, stored_end_chr) = chr_splits
            (stored_start_coord, stored_end_coord) = coord_splits
            
            if (start_chr == stored_start_chr and end_chr == stored_end_chr and
                are_coordinates_close(start_coord, stored_start_coord, tolerance) and
                are_coordinates_close(end_coord, stored_end_coord, tolerance) and
                event_type == existing_event_type):
                
                existing_reads = reads_by_coordinate_splits[(chr_splits, coord_splits, event_type)]
                if any(are_secondary_alignments_same(read, existing_read) for existing_read in existing_reads):
                    chromosome_coordinate_splits_count[(chr_splits, coord_splits, event_type)] += 1
                    reads_by_coordinate_splits[(chr_splits, coord_splits, event_type)].append(read)
                    found_close_splits = True
                    break
        
        if not found_close_splits:
            chr_splits = (start_chr, end_chr)
            coord_splits = (start_coord, end_coord)
            chromosome_coordinate_splits_count[(chr_splits, coord_splits, event_type)] += 1
            reads_by_coordinate_splits[(chr_splits, coord_splits, event_type)].append(read)

    logging.info("Writting results for pairs and split")

    with open(output_file_pairs, 'a') as p:
        for (chr_pair, coord_pair, event_type), count in chromosome_coordinate_pairs_count.items():
            reads_to_merge = reads_by_coordinate_pair[(chr_pair, coord_pair, event_type)]
            start_chr, end_chr = chr_pair
            min_start = min(read.reference_start for read in reads_to_merge)
            max_start = max(read.reference_start for read in reads_to_merge)
            min_end = min(read.next_reference_start for read in reads_to_merge)
            max_end = max(read.next_reference_start for read in reads_to_merge)
            depth_count = np.mean(depth_data[min_start:max_start+1])
            read_count = count
            
            if read_count > (depth_count * 0.2) and depth_count >= 10:
                p.write(f"{start_chr}\t{min_start}\t{max_start}\t{end_chr}\t{min_end}\t{max_end}\t{event_type}\t{read_count}\t{depth_count}\n")
                
    with open(output_file_splits, 'a') as s:
        for (chr_splits, coord_splits, event_type), count in chromosome_coordinate_splits_count.items():
            reads_to_merge = reads_by_coordinate_splits[(chr_splits, coord_splits, event_type)]
            start_chr, end_chr = chr_splits
            min_start = min(read.reference_start for read in reads_to_merge)
            max_start = max(read.reference_start for read in reads_to_merge)
            min_end = min(read.next_reference_start for read in reads_to_merge)
            max_end = max(read.next_reference_start for read in reads_to_merge)
            depth_count = np.mean(depth_data[min_start:max_start+1])
            read_count = count
            
            if read_count > (depth_count * 0.2) and depth_count >= 10:
                s.write(f"{start_chr}\t{min_start}\t{max_start}\t{end_chr}\t{min_end}\t{max_end}\t{event_type}\t{read_count}\t{depth_count}\n")
            
    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info(f"Leaving find_paired_split for {chr} (Time taken: {elapsed_time:.4f} seconds)")

def are_coordinates_close(coord1, coord2, tolerance):
    """
    Check if two genomic coordinates are within a specified tolerance.

    Parameters
    ----------
    coord1 : int
        The first genomic coordinate.
    coord2 : int
        The second genomic coordinate.
    tolerance : int
        The maximum distance within which the coordinates are considered close.

    Returns
    -------
    bool
        True if the absolute difference between the coordinates is less than or equal to the tolerance, False otherwise.
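
    Examples
    --------
    >>> are_coordinates_close(1000, 1350, 400)
    True
    >>> are_coordinates_close(1000, 1500, 400)
    False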
    """
    return abs(coord1 - coord2) <= tolerance

def are_secondary_alignments_same(read1, read2):
    """
    Check if two reads have the same secondary alignment tag.

    This function compares the secondary alignment tags (SA) of two reads to determine if they are the same.

    Parameters
    ----------
    read1 : pysam.AlignedSegment
        The first read to compare.
    read2 : pysam.AlignedSegment
        The second read to compare.

    Returns
    -------
    bool
        True if the secondary alignment tags of the reads are identical, False otherwise.
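
    Notes
    -----
    Both reads are assumed to carry an ``SA`` tag (callers filter on
    ``read.has_tag("SA")`` beforehand); ``get_tag`` raises ``KeyError`` otherwise.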
    """
    return read1.get_tag("SA") == read2.get_tag("SA")
             
def main_calcul(
    bamfile_path,
    bamfile_handle,
    chr,
    seq_length,
    window_size,
    step_size,
    zscore_threshold,
    lengthFilter,
    output_file,
    output_file_pairs,
    output_file_splits,
    sample,
):
    """
    Perform copy number variation (CNV) and structural variant (SV) detection with GPU-accelerated computations.

    This function orchestrates a complex series of computations for genomic data analysis, including CNV and SV detection,
    and uses GPU acceleration to enhance performance. The process includes mappability, GC content, and depth calculations,
    as well as distance metrics and normalization. Results are saved in various output files, including a VCF file for 
    structural variants.

    Parameters
    ----------
    bamfile_path : str
        Path to the BAM file containing aligned reads.
    bamfile_handle : pysam.AlignmentFile
        An open handle to the BAM file for read access.
    chr : str
        Chromosome identifier for which analysis is performed.
    seq_length : int
        Length of the chromosome sequence.
    window_size : int
        Size of the sliding window used for analysis.
    step_size : int
        Size of the step when moving the window along the chromosome.
    zscore_threshold : float
        Threshold for detecting significant events based on Z-scores.
    lengthFilter : int
        Minimum length threshold for including variants in the VCF file.
    output_file : str
        Path to the main output VCF file containing variant calls.
    output_file_pairs : str
        Path to the output file for paired-read SV events.
    output_file_splits : str
        Path to the output file for split-read SV events.
    sample : str
        Sample name used in the VCF header.

    Returns
    -------
    None
        The function performs CNV and SV detection, along with associated calculations, and writes the results to 
        specified output files.
    
    Notes
    -----
    - Uses CUDA-enabled GPU processing to perform mappability, GC content, depth, and normalization calculations 
      efficiently over large genomic datasets.
    - Detects CNVs by analyzing depth and copy number variations across windows and applies Z-score analysis to 
      identify statistically significant changes.
    - Identifies SVs (such as inversions, deletions, and translocations) based on read pairing and alignment patterns.
    - Writes detailed output for each stage, including paired-read and split-read SVs, in separate files.
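
    Examples
    --------
    Invoked once per chromosome by the driver at the bottom of this script:

    >>> with pysam.AlignmentFile(bamfile_path, "rb") as handle:  # doctest: +SKIP
    ...     main_calcul(bamfile_path, handle, "chr1", handle.lengths[0],
    ...                 window_size, step_size, zscore_threshold, lengthFilter,
    ...                 output_file, output_file_pairs, output_file_splits, sample)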
    """
    sys.stderr.write("\t entering main_calcul\n")
    global seq
    events = {}
    segment = {}
    signal = {}

    # Call the preprocessing functions (mappability, GC content, depth, insert-size stats, pair/split detection)
    map_data = calcul_mappability(seq_length, mappability, chr)
    gc_data = calcul_gc_content(seq_length, chr, seq)
    depth_data = calcul_depth_seq_samtools(seq_length, bamfile_path, chr)
    distances_data = calcul_distance(bamfile_handle, chr, seq_length)
    MIN_DISTANCE, MAX_DISTANCE = stats_distances(distances_data, chr)
    find_paired_split(chr, MIN_DISTANCE, MAX_DISTANCE, depth_data, output_file_pairs, output_file_splits, bamfile_handle, seq_length)
    
    # Transfer the NumPy arrays to the CUDA device
    d_depth_data = cuda.mem_alloc(depth_data.nbytes)
    cuda.memcpy_htod(d_depth_data, depth_data)

    d_gc_data = cuda.mem_alloc(gc_data.nbytes)
    cuda.memcpy_htod(d_gc_data, gc_data)

    d_map_data = cuda.mem_alloc(map_data.nbytes)
    cuda.memcpy_htod(d_map_data, map_data)

    # Compute the CUDA launch dimensions: one thread per sliding window
    block_size = num_cores
    sys.stderr.write("\t block_size (number of threads per block) = %s\n" % (block_size))
    num_windows = int((seq_length - window_size) / step_size) + 1
    grid_size = (num_windows // block_size) + 1
    sys.stderr.write("\t grid_size (number of blocks) = %s\n" % (grid_size))

    # Initialize the host-side arrays that store the per-window results
    depth_results = np.zeros(num_windows, dtype=np.float32)
    gc_results = np.zeros(num_windows, dtype=np.float32)
    map_results = np.zeros(num_windows, dtype=np.float32)
    depth_correction_results = np.zeros(num_windows, dtype=np.float32)
    normalize_depth_results = np.zeros(num_windows, dtype=np.float32)
    ratio_par_window_results = np.zeros(num_windows, dtype=np.float32)
    ratio_par_window_norm_results = np.zeros(num_windows, dtype=np.float32)
    z_score_results = np.zeros(num_windows, dtype=np.float32)
    ratio_par_mean_ratio_results = np.zeros(num_windows, dtype=np.float32)
    sys.stderr.write("\t Host result arrays allocated (%s windows)\n" % (num_windows))

    # Allocate device memory for the per-window results
    d_depth_results = cuda.mem_alloc(depth_results.nbytes)
    d_gc_results = cuda.mem_alloc(gc_results.nbytes)
    d_map_results = cuda.mem_alloc(map_results.nbytes)
    d_depth_correction_results = cuda.mem_alloc(depth_correction_results.nbytes)
    # d_normalize_depth_results is allocated later, once normalization has run on the host
    d_ratio_par_window_results = cuda.mem_alloc(ratio_par_window_results.nbytes)
    d_ratio_par_window_norm_results = cuda.mem_alloc(ratio_par_window_norm_results.nbytes)
    d_z_score_results = cuda.mem_alloc(z_score_results.nbytes)
    d_ratio_par_mean_ratio_results = cuda.mem_alloc(ratio_par_mean_ratio_results.nbytes)

    # Launch the depth kernel on the GPU
    calcul_depth_kernel_cuda(
        d_depth_data,
        np.int32(seq_length),
        np.int32(window_size),
        np.int32(step_size),
        d_depth_results,
        block=(block_size, 1, 1),
        grid=(grid_size, 1),
    )
    sys.stderr.write("\t called calcul_depth_kernel_cuda\n")

    calcul_gc_kernel_cuda(
        d_gc_data,
        np.int32(seq_length),
        np.int32(window_size),
        np.int32(step_size),
        d_gc_results,
        block=(block_size, 1, 1),
        grid=(grid_size, 1),
    )
    sys.stderr.write("\t called calcul_gc_kernel_cuda\n")

    calcul_map_kernel_cuda(
        d_map_data,
        np.int32(seq_length),
        np.int32(window_size),
        np.int32(step_size),
        d_map_results,
        block=(block_size, 1, 1),
        grid=(grid_size, 1),
    )
    sys.stderr.write("\t called calcul_map_kernel_cuda\n")

    # calcul_depth_correction_kernel_cuda(
    #     d_depth_results,
    #     d_map_results,
    #     np.int32(seq_length),
    #     np.int32(window_size),
    #     np.int32(step_size),
    #     d_depth_correction_results,
    #     block=(block_size, 1, 1),
    #     grid=(grid_size, 1),
    # )
    # sys.stderr.write("\t called calcul_depth_correction_kernel_cuda\n")

    context.synchronize()

    # Copy the results from the CUDA device back to the host
    cuda.memcpy_dtoh(depth_results, d_depth_results)  # cuda.memcpy_dtoh(dest, src)
    cuda.memcpy_dtoh(gc_results, d_gc_results)
    cuda.memcpy_dtoh(map_results, d_map_results)

    # cuda.memcpy_dtoh(depth_correction_results, d_depth_correction_results)

    ### NORMALIZATION ###

    # Bin the per-window read depth by GC content
    sys.stderr.write("\t calling filter_read_gc_gpu\n")
    # gc_35, gc_40, gc_45, gc_50, gc_55 = filter_read_gc(gc_results, depth_results, chr)
    gc_35, gc_40, gc_45, gc_50, gc_55 = filter_read_gc_gpu(gc_results, d_gc_results, d_depth_results, grid_size, block_size, chr)

    # Fit one polynomial per GC stratum
    sys.stderr.write("\t calling calc_polynom\n")
    polynomials = calc_polynom(gc_35, gc_40, gc_45, gc_50, gc_55, chr)

    # Chromosome-wide mean depth
    sys.stderr.write("\t calling calc_mean_chr\n")
    mean_chr = calc_mean_chr(depth_results, chr)

    # Launch the ratio kernel
    sys.stderr.write("\t calling ratio_par_window_kernel_cuda\n")
    ratio_par_window_kernel_cuda(
        d_depth_results,
        np.float32(mean_chr),
        np.int32(seq_length),
        np.int32(window_size),
        np.int32(step_size),
        d_ratio_par_window_results,
        block=(block_size, 1, 1),
        grid=(grid_size, 1),
    )

    context.synchronize()

    # Copy the ratio results from the CUDA device back to the host
    sys.stderr.write("\t copying ratio results from the CUDA device to the host\n")
    cuda.memcpy_dtoh(ratio_par_window_results, d_ratio_par_window_results)

    # Select the polynomial that best fits the observed ratios
    sys.stderr.write("\t calling find_polynom\n")
    best_polynom = find_polynom(ratio_par_window_results, chr, polynomials, depth_results)

    # Normalize depth using the selected polynomial and the mappability track
    sys.stderr.write("\t calling normalize\n")
    normalize_depth_results = normalize(depth_results, best_polynom, map_results)

    # Former median-based normalization, kept for reference:
    # m = calcul_med_total(depth_correction_results, chr)
    # gc_to_median = calcul_med_same_gc(gc_results, depth_correction_results, chr)
    #
    # # Convert gc_to_median to a NumPy array for transfer to CUDA
    # gc_to_median_array = np.zeros(int(max(gc_results)) + 1, dtype=np.float32)
    # for gc, median in gc_to_median.items():
    #     gc_to_median_array[int(gc)] = median
    #
    # # Allocate device memory for gc_to_median
    # d_gc_to_median = cuda.mem_alloc(gc_to_median_array.nbytes)
    # cuda.memcpy_htod(d_gc_to_median, gc_to_median_array)
    #
    # # Launch the normalization kernel
    # normalize_depth_kernel_cuda(
    #     d_depth_correction_results,
    #     d_gc_results,
    #     np.float32(m),
    #     d_gc_to_median,
    #     np.int32(seq_length),
    #     np.int32(window_size),
    #     np.int32(step_size),
    #     d_normalize_depth_results,
    #     block=(block_size, 1, 1),
    #     grid=(grid_size, 1),
    # )
    # context.synchronize()

    # Copy the normalized results from the host to the CUDA device
    sys.stderr.write("\t copying normalized results from the host to the CUDA device\n")
    d_normalize_depth_results = cuda.mem_alloc(normalize_depth_results.nbytes)
    cuda.memcpy_htod(d_normalize_depth_results, normalize_depth_results)

    ### Ratio per window ###

    # Mean of the normalized depth
    sys.stderr.write("\t calling calcul_moy_totale\n")
    mean_chr_norm = calcul_moy_totale(normalize_depth_results, chr)

    # Launch the normalized-ratio kernel
    ratio_par_window_norm_kernel_cuda(
        d_normalize_depth_results,
        np.float32(mean_chr_norm),
        np.int32(seq_length),
        np.int32(window_size),
        np.int32(step_size),
        d_ratio_par_window_norm_results,
        block=(block_size, 1, 1),
        grid=(grid_size, 1),
    )
    sys.stderr.write("\t called ratio_par_window_norm_kernel_cuda\n")

    context.synchronize()

    # Copy the ratio results from the CUDA device back to the host
    sys.stderr.write("\t copying ratio results from the CUDA device to the host\n")
    cuda.memcpy_dtoh(ratio_par_window_norm_results, d_ratio_par_window_norm_results)

    # Compute summary statistics from the normalized ratios
    sys.stderr.write("\t calling compute_mean_std_med\n")
    mean_ratio, std_ratio, med_ratio = compute_mean_std_med(ratio_par_window_norm_results, chr, normalize_depth_results)

    # Launch the kernel dividing each window's ratio by the mean ratio
    ratio_par_mean_ratio_kernel_cuda(
        d_ratio_par_window_norm_results,
        np.float32(mean_ratio),
        np.int32(seq_length),
        np.int32(window_size),
        np.int32(step_size),
        d_ratio_par_mean_ratio_results,
        block=(block_size, 1, 1),
        grid=(grid_size, 1),
    )
    sys.stderr.write("\t called ratio_par_mean_ratio_kernel_cuda\n")
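
    # Host-side equivalent of this kernel, for reference (a sketch; the kernel
    # source is defined elsewhere in this file and presumably computes):
    #   ratio_par_mean_ratio_results[i] = ratio_par_window_norm_results[i] / mean_ratio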

    # Launch the Z-score kernel
    z_score_kernel_cuda(
        d_ratio_par_window_norm_results,
        np.float32(mean_ratio),
        np.float32(std_ratio),
        np.int32(seq_length),
        np.int32(window_size),
        np.int32(step_size),
        d_z_score_results,
        block=(block_size, 1, 1),
        grid=(grid_size, 1),
    )
    sys.stderr.write("\t called z_score_kernel_cuda\n")
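
    # Host-side equivalent of the Z-score kernel, for reference (a sketch;
    # presumably the standard z-score per window):
    #   z_score_results[i] = (ratio_par_window_norm_results[i] - mean_ratio) / std_ratio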

    context.synchronize()

    # Copy the ratio and Z-score results from the CUDA device back to the host
    cuda.memcpy_dtoh(ratio_par_mean_ratio_results, d_ratio_par_mean_ratio_results)
    cuda.memcpy_dtoh(z_score_results, d_z_score_results)

    # Build the Z-score signal
    create_signal(signal, chr, z_score_results, step_size)

    # Detect events exceeding the Z-score threshold
    detect_events(
        z_score_results,
        zscore_threshold,
        events,
        med_ratio,
        ratio_par_mean_ratio_results,
        chr,
    )

    # Merge adjacent events into segments
    segmentation(events, segment, chr)

    # Write the segments to the VCF output
    display_results_vcf(sample, segment, signal, lengthFilter, output_file, chr)

    # # Write the per-window results to the output file
    # with open(output_file, 'a') as f:
    #     sys.stderr.write("\t writing output files\n")
    #     for i, (depth_data, depth_normalize_val, gc_data, map_data, ratio_norm, ratio_mean_ratio, z_score) in enumerate(zip(depth_results, normalize_depth_results, gc_results, map_results, ratio_par_window_norm_results, ratio_par_mean_ratio_results, z_score_results)):
    #         pos_start = (i * step_size) + 1
    #         pos_end = pos_start + window_size
    #         f.write(f"{chr}\t{pos_start}\t{pos_end}\t{depth_data}\t{depth_normalize_val}\t{gc_data}\t{map_data}\t{ratio_norm}\t{ratio_mean_ratio}\t{z_score}\n")

# Main program
# Query the GPU for its maximum number of threads per block
header_written = False

sample = get_sample_name(bamfile_path)
device = cuda.Device(0)
attributes = device.get_attributes()
num_cores = attributes[cuda.device_attribute.MAX_THREADS_PER_BLOCK]
print("Number of CPU cores:", multiprocessing.cpu_count())
print(f"Max GPU threads per block: {num_cores}")

gc_file = "/work/gad/shared/pipeline/grch38/index/grch38_essential.fa"
mappability_file = "/work/gad/shared/pipeline/grch38/cnv/k100.Umap.MultiTrackMappability.bedgraph"
seq = parse_fasta(gc_file)
mappability = dico_mappabilite(mappability_file)


# Configure logging before processing so that log records emitted by the
# worker functions are written throughout the run, not only at the end
logging.basicConfig(
    filename="%s" % (logfile),
    filemode="a",
    level=logging.INFO,
    format="%(asctime)s %(levelname)s - %(message)s",
)

if str(num_chr).upper() == "ALL":
    with pysam.AlignmentFile(bamfile_path, "rb") as bamfile_handle:
        for i, seq_length in enumerate(bamfile_handle.lengths):
            chr = bamfile_handle.references[i]

            # Exclude chrM
            if chr == "chrM":
                continue

            sys.stderr.write("Chromosome : %s, seq length : %s\n" % (chr, seq_length))

            # Run the full CNV/SV analysis for this chromosome
            main_calcul(
                bamfile_path,
                bamfile_handle,
                chr,
                seq_length,
                window_size,
                step_size,
                zscore_threshold,
                lengthFilter,
                output_file,
                output_file_pairs,
                output_file_splits,
                sample,
            )

else:
    with pysam.AlignmentFile(bamfile_path, "rb") as bamfile_handle:
        # num_chr is a 1-based chromosome index into the BAM header
        seq_length = bamfile_handle.lengths[int(num_chr) - 1]
        chr = bamfile_handle.references[int(num_chr) - 1]
        sys.stderr.write("Chromosome : %s, seq length : %s\n" % (chr, seq_length))

        # Run the full CNV/SV analysis for this chromosome
        main_calcul(
            bamfile_path,
            bamfile_handle,
            chr,
            seq_length,
            window_size,
            step_size,
            zscore_threshold,
            lengthFilter,
            output_file,
            output_file_pairs,
            output_file_splits,
            sample,
        )

logging.info("end")