Commit 91be97c9 authored by Theo Serralta

Formatting code

parent 59eeb957
......@@ -14,10 +14,49 @@ import time
# Options
def parse_arguments():
"""
Parse command-line arguments.
This function parses the command-line arguments provided to the script and extracts the values for various parameters.
Parameters
----------
None
Returns
-------
tuple
A tuple containing the following elements:
bamfile_path : str or None
The path to the BAM file.
window_size : int or None
The size of the window.
step_size : int or None
The step size for the analysis.
zscore_threshold : float or None
The threshold for the Z-score.
lengthFilter : int or None
The filter for length.
output_file : str or None
The path to the output file.
logfile : str or None
The path to the log file.
Each element can be None if the corresponding argument was not provided.
"""
try:
opts, args = getopt.getopt(sys.argv[1:], 'b:w:s:z:l:t:o:e:')
bamfile_path, window_size, step_size, zscore_threshold, lengthFilter, output_file, logfile = None, None, None, None, None, None, None
opts, args = getopt.getopt(sys.argv[1:], "b:w:s:z:l:t:o:e:")
(
bamfile_path,
window_size,
step_size,
zscore_threshold,
lengthFilter,
output_file,
logfile,
) = (None, None, None, None, None, None, None)
for opt, arg in opts:
if opt in ("-b"):
bamfile_path = arg
......@@ -33,19 +72,42 @@ def parse_arguments():
output_file = arg
if opt in ("-e"):
logfile = arg
return bamfile_path, window_size, step_size, zscore_threshold, lengthFilter, output_file, logfile
return (
bamfile_path,
window_size,
step_size,
zscore_threshold,
lengthFilter,
output_file,
logfile,
)
except getopt.GetoptError:
print('Invalid argument')
print("Invalid argument")
sys.exit(1)
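# Illustrative invocation (hypothetical file names and values, not from this repository):
#   python this_script.py -b sample.bam -w 1000 -s 100 -z 1.5 -l 5000 -o out.vcf -e run.log
# Each flag fills one slot of the returned tuple (bamfile_path, window_size, step_size,
# zscore_threshold, lengthFilter, output_file, logfile); the numeric flags are assumed
# to be cast to int/float in the option handling elided above.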
if __name__ == '__main__':
bamfile_path, window_size, step_size, zscore_threshold, lengthFilter, output_file, logfile = parse_arguments()
logging.basicConfig(filename='%s' % (logfile), filemode='a', level=logging.INFO, format='%(asctime)s %(levelname)s - %(message)s')
logging.info('start')
if __name__ == "__main__":
(
bamfile_path,
window_size,
step_size,
zscore_threshold,
lengthFilter,
output_file,
logfile,
) = parse_arguments()
logging.basicConfig(
filename="%s" % (logfile),
filemode="a",
level=logging.INFO,
format="%(asctime)s %(levelname)s - %(message)s",
)
logging.info("start")
global seq
# CUDA code
mod = SourceModule("""
mod = SourceModule(
"""
//Kernel to compute the raw mean depth
__global__ void calcul_depth_kernel(int *depth_data, int seq_length, int window_size, int step_size, float *depth_results) {
......@@ -178,7 +240,8 @@ __global__ void ratio_par_mean_ratio_kernel(float *ratio, float mean_ratio, int
}
}
""")
"""
)
# Retrieve the compiled kernel functions
calcul_depth_kernel_cuda = mod.get_function("calcul_depth_kernel")
......@@ -190,11 +253,24 @@ ratio_par_window_kernel_cuda = mod.get_function("ratio_par_window_kernel")
z_score_kernel_cuda = mod.get_function("z_score_kernel")
ratio_par_mean_ratio_kernel_cuda = mod.get_function("ratio_par_mean_ratio_kernel")
#############################################
######<---Fonctions mappability--->#########
#############################################
def merge_intervals(intervals):
#sys.stderr.write("\t Entering merge_intervals\n")
"""
Merge overlapping intervals with the same score.
This function takes a list of intervals and merges those that have the same score.
Parameters
----------
intervals : list of tuples
A list where each element is a tuple (start, end, score).
The intervals should be sorted by start position.
Returns
-------
list of tuples
A list of merged intervals (start, end, score) where overlapping intervals with the same score are combined.
"""
merged = []
start, end, score = intervals[0]
for interval in intervals[1:]:
......@@ -206,13 +282,31 @@ def merge_intervals(intervals):
merged.append((start, end, score))
return merged
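# Illustrative sketch (not called by the pipeline): with sorted input, adjacent
# intervals sharing a score are expected to collapse into one. Values are made up.
def _demo_merge_intervals():
    toy = [(0, 100, 0.5), (100, 200, 0.5), (200, 300, 1.0)]  # hypothetical intervals
    return merge_intervals(toy)  # expected: [(0, 200, 0.5), (200, 300, 1.0)]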
def dico_mappabilite(mappability_file):
"""
Create a dictionary of mappability scores from a file.
This function reads a mappability file and creates a dictionary with chromosomes as keys and mappability scores as values.
Parameters
----------
mappability_file : str
The path to the mappability file. Each line in the file should have the format:
chromosome, start_pos, end_pos, score.
Returns
-------
dict
A dictionary where keys are chromosome names and values are another dictionary with start positions as keys
and mappability scores as values.
"""
sys.stderr.write("\t Entering dico_mappabilite\n")
mappability_dico = {}
with open(mappability_file, 'r') as f:
with open(mappability_file, "r") as f:
for line in f:
fields = line.strip().split('\t')
fields = line.strip().split("\t")
if len(fields) < 4:
continue
......@@ -226,24 +320,46 @@ def dico_mappabilite(mappability_file):
mappability_dico[chromosome].append((start_pos, end_pos, score))
# Ajout de la position 0 pour chaque chromosome
# Add position 0 for each chromosome
for chromosome, intervals in mappability_dico.items():
if intervals[0][0] != 0:
mappability_dico[chromosome].insert(0, (0, intervals[0][0], 0))
# Fusion des intervalles ayant le même score
# Merge intervals with the same score
for chromosome, intervals in mappability_dico.items():
merged_intervals = merge_intervals(intervals)
mappability_dico[chromosome] = {start: score for start, _, score in merged_intervals}
mappability_dico[chromosome] = {
start: score for start, _, score in merged_intervals
}
sys.stderr.write("\t Leaving dico_mappabilite\n")
return mappability_dico #Dictionnaire avec les bornes de mappabilité en fonction des positions pour chaque chromosome.
return mappability_dico
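# Illustrative shape (hypothetical values): a bedgraph line "chr1\t0\t10000\t1.0"
# contributes mappability_dico["chr1"] == {0: 1.0, ...} after the padding and merge
# steps above, i.e. each start position maps to the score that applies from there on.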
def calcul_mappability(seq_length, mappability, chr):
"""
Calculate mappability array for a given sequence length and chromosome.
This function generates an array of mappability scores for a specific chromosome and sequence length.
Parameters
----------
seq_length : int
The length of the sequence.
mappability : dict
A dictionary containing mappability information, with chromosomes as keys and dictionaries of start positions
and scores as values.
chr : str
The chromosome for which the mappability is calculated.
Returns
-------
numpy.ndarray
An array of mappability scores for the given sequence length.
"""
sys.stderr.write("\t Entering calcul_mappability \n")
map_data = np.zeros(seq_length, dtype=np.float32)
sorted_keys = sorted(mappability[chr].keys())
#sys.stderr.write("\t sorted_keys =\n")
prev_bound = 0
prev_mappability = 0
......@@ -261,18 +377,31 @@ def calcul_mappability(seq_length, mappability, chr):
sys.stderr.write("\t Leaving calcul_mappability \n")
return map_data
#############################################
######<---Fonctions calcul gc--->############
#############################################
def parse_fasta(gc_file):
"""
Parse a FASTA file and extract sequences.
This function reads a FASTA file and extracts the sequences, storing them in a dictionary with headers as keys.
Parameters
----------
gc_file : str
The path to the FASTA file. The file should be in standard FASTA format.
Returns
-------
dict
A dictionary where keys are sequence headers and values are the corresponding sequences.
"""
sys.stderr.write("\t Entering parse_fasta\n")
sequences = {}
with open(gc_file, 'r') as f:
data = f.read().split('>')
with open(gc_file, "r") as f:
data = f.read().split(">")
for entry in data[1:]:
lines = entry.split('\n')
lines = entry.split("\n")
header = lines[0]
sequence = ''.join(lines[1:])
sequence = "".join(lines[1:])
sequences[header] = sequence
sys.stderr.write("\t Leaving parse_fasta\n")
......@@ -280,103 +409,213 @@ def parse_fasta(gc_file):
def calcul_gc_content(seq_length, chr, seq):
"""
Calculate the GC content of a given sequence.
This function generates an array representing the GC content for a specific chromosome and sequence length.
Parameters
----------
seq_length : int
The length of the sequence.
chr : str
The chromosome for which the GC content is calculated.
seq : dict
A dictionary containing sequences, with chromosome names as keys and sequences as values.
Returns
-------
numpy.ndarray
An array of bytes ('S' dtype) representing the GC content for the given sequence length.
"""
sys.stderr.write("\t Entering calcul_gc_content\n")
gc_data = np.zeros(seq_length, dtype="S")
for i in range(len(seq[chr])):
gc_data[i] = seq[chr][i]
#print(gc_data[9950:10200])
sys.stderr.write("\t Leaving calcul_gc_content\n")
return gc_data
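# Illustrative sketch (not called by the pipeline): the sequence string is copied
# base by base into a one-byte-per-position array.
def _demo_calcul_gc_content():
    toy_seq = {"chrT": "ACGTACGT"}  # hypothetical 8-base chromosome
    return calcul_gc_content(8, "chrT", toy_seq)  # expected: the 8 bases as single-byte elements (b'A', b'C', ...)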
##############################################
######<---Fonctions calcul Depth Seq--->######
##############################################
def calcul_depth_seq(seq_length, bamfile_path, chr):
"""
Calculate the sequencing depth for a given chromosome.
This function computes the sequencing depth for a specific chromosome and sequence length using a BAM file.
Parameters
----------
seq_length : int
The length of the sequence.
bamfile_path : pysam.AlignmentFile
An open BAM file handle created with pysam.AlignmentFile.
chr : str
The chromosome for which the depth is calculated.
Returns
-------
numpy.ndarray
An array of integers representing the sequencing depth for the given sequence length.
"""
sys.stderr.write("\t Entering calcul_depth_seq\n")
depth_data = np.zeros(seq_length, dtype=np.int32)
for pileupcolumn in bamfile_path.pileup():
#sys.stderr.write("%s : %s \n" % (pileupcolumn.reference_pos, pileupcolumn.nsegments))
if pileupcolumn.reference_pos > seq_length:
if pileupcolumn.reference_pos >= seq_length:
break
depth_data[pileupcolumn.reference_pos] = pileupcolumn.nsegments
sys.stderr.write("\t Leaving calcul_depth_seq\n")
return depth_data
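# Illustrative note: depth_data is indexed by 0-based reference position, so a pileup
# column at reference_pos 9999 covered by 42 reads sets depth_data[9999] = 42, and
# positions with no coverage keep their initial value of 0 (numbers are hypothetical).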
#############################################
######<---Fonctions calcul medianes--->######
#############################################
def calcul_med_total(depth_correction_results):
"""
Calculate the median of non-zero depth correction results.
This function filters out zero values from the depth correction results and computes the median of the remaining values.
Parameters
----------
depth_correction_results : list or numpy.ndarray
A list or array of depth correction values.
Returns
-------
float
The median of the non-zero depth correction values, or 0 if no non-zero values are present.
"""
sys.stderr.write("\t entering calcul_med_total\n")
depth_correction_results = np.array(depth_correction_results)
# Filtrer les résultats pour enlever les valeurs égales à 0
# Filter results to remove zero values
non_zero_results = depth_correction_results[depth_correction_results != 0]
# Calculer la médiane des résultats non nuls
# Calculate the median of non-zero results
m = np.median(non_zero_results) if non_zero_results.size > 0 else 0
sys.stderr.write("\t Leaving calcul_med_total\n")
return m
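# Illustrative sketch (not called by the pipeline): zeros are excluded before the
# median is taken.
def _demo_calcul_med_total():
    return calcul_med_total([0.0, 1.0, 2.0, 3.0, 0.0])  # expected: 2.0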
def calcul_med_same_gc(gc_results, depth_correction_results):
"""
Calculate the median depth correction for each unique GC content value.
This function computes the median depth correction values for each unique GC content value, filtering out zero values.
Parameters
----------
gc_results : list or numpy.ndarray
A list or array of GC content values.
depth_correction_results : list or numpy.ndarray
A list or array of depth correction values.
Returns
-------
dict
A dictionary where keys are unique GC content values and values are the median depth correction for those GC values.
"""
sys.stderr.write("\t entering calcul_med_same_gc\n")
mGC = []
depth_correction_results_array = np.array(depth_correction_results)
unique_gc_values = np.unique(gc_results)
for gc in unique_gc_values:
indices = np.where(gc_results == gc) # Donne les positions où se trouve chaque valeur de unique_gc_values dans le tableau gc_results
# Filtrer les résultats de depth_correction pour enlever les valeurs égales à 0
filtered_depths = depth_correction_results_array[indices][depth_correction_results_array[indices] != 0]
if filtered_depths.size > 0: # Calculer la médiane seulement si les résultats filtrés ne sont pas vides
indices = np.where(
gc_results == gc
) # Get positions of each unique GC value in gc_results
# Filter depth correction results to remove zero values
filtered_depths = depth_correction_results_array[indices][
depth_correction_results_array[indices] != 0
]
if (
filtered_depths.size > 0
): # Calculate median only if filtered results are not empty
median_gc = np.median(filtered_depths)
else:
median_gc = 0 # Ou une autre valeur par défaut si tous les résultats sont 0
median_gc = 0 # Or another default value if all results are 0
#print(f'valeur {gc} position {indices}, filtered_depths = {filtered_depths} et median_gc = {median_gc}')
mGC.append((gc, median_gc))
gc_to_median = dict(mGC)
sys.stderr.write("\t Leaving calcul_med_same_gc\n")
#print(gc_to_median)
return gc_to_median
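# Illustrative sketch (not called by the pipeline): one median per distinct GC value,
# ignoring windows whose corrected depth is 0. Values are made up.
def _demo_calcul_med_same_gc():
    gc = np.array([40.0, 40.0, 50.0, 50.0], dtype=np.float32)
    depth = np.array([1.0, 3.0, 0.0, 2.0], dtype=np.float32)
    return calcul_med_same_gc(gc, depth)  # expected: {40.0: 2.0, 50.0: 2.0}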
###########################################
######<---Fonction calcul moyenne--->######
###########################################
def calcul_moy_totale(normalize_depth_results):
"""
Calculate the mean of non-zero normalized depth results.
This function filters out zero values from the normalized depth results and computes the mean of the remaining values.
Parameters
----------
normalize_depth_results : list or numpy.ndarray
A list or array of normalized depth values.
Returns
-------
float
The mean of the non-zero normalized depth values, or 0 if no non-zero values are present.
"""
sys.stderr.write("\t entering calcul_moy_totale\n")
normalize_depth_results = np.array(normalize_depth_results)
# Filtrer les résultats pour enlever les valeurs égales à 0
# Filter results to remove zero values
non_zero_results = normalize_depth_results[normalize_depth_results != 0]
# Calculer la mpyenne des résultats non nuls
# Calculate the mean of non-zero results
mean_chr = np.mean(non_zero_results) if non_zero_results.size > 0 else 0
print(mean_chr)
sys.stderr.write("\t Leaving calcul_moy_totale\n")
return mean_chr
def calcul_moy_totale_ratio(ratio_par_window_results):
"""
Calculate the mean of non-zero ratio results per window.
This function filters out zero values from the ratio results per window and computes the mean of the remaining values.
Parameters
----------
ratio_par_window_results : list or numpy.ndarray
A list or array of ratio values per window.
Returns
-------
float
The mean of the non-zero ratio values per window, or 0 if no non-zero values are present.
"""
sys.stderr.write("\t entering calcul_moy_totale_ratio\n")
ratio_par_window_results = np.array(ratio_par_window_results)
# Filtrer les résultats pour enlever les valeurs égales à 0
# Filter results to remove zero values
non_zero_results = ratio_par_window_results[ratio_par_window_results != 0]
# Calculer la mpyenne des résultats non nuls
# Calculate the mean of non-zero results
mean_ratio = np.mean(non_zero_results) if non_zero_results.size > 0 else 0
print(mean_ratio)
sys.stderr.write("\t Leaving calcul_moy_totale_ratio\n")
return mean_ratio
#############################################
######<---Fonction calcul std--->############
#############################################
def calcul_std(normalize_depth_results):
"""
Calculate the standard deviation of non-zero normalized depth results.
This function filters out zero values from the normalized depth results and computes the standard deviation of the remaining values.
Parameters
----------
normalize_depth_results : list or numpy.ndarray
A list or array of normalized depth values.
Returns
-------
float
The standard deviation of the non-zero normalized depth values, or 0 if no non-zero values are present.
"""
sys.stderr.write("\t entering calcul_std\n")
normalize_depth_results = np.array(normalize_depth_results)
# Filtrer les résultats pour enlever les valeurs égales à 0
# Filter results to remove zero values
non_zero_results = normalize_depth_results[normalize_depth_results != 0]
# Calculer le std des résultats non nuls
# Calculate the standard deviation of non-zero results
std_chr = np.std(non_zero_results) if non_zero_results.size > 0 else 0
print(std_chr)
sys.stderr.write("\t Leaving calcul_std\n")
......@@ -384,27 +623,55 @@ def calcul_std(normalize_depth_results):
def calcul_std_ratio(ratio_par_window_results):
"""
Calculate the standard deviation of non-zero ratio results per window.
This function filters out zero values from the ratio results per window and computes the standard deviation of the remaining values.
Parameters
----------
ratio_par_window_results : list or numpy.ndarray
A list or array of ratio values per window.
Returns
-------
float
The standard deviation of the non-zero ratio values per window, or 0 if no non-zero values are present.
"""
sys.stderr.write("\t entering calcul_std_ratio\n")
ratio_par_window_results = np.array(ratio_par_window_results)
# Filtrer les résultats pour enlever les valeurs égales à 0
# Filter results to remove zero values
non_zero_results = ratio_par_window_results[ratio_par_window_results != 0]
# Calculer le std des résultats non nuls
# Calculate the standard deviation of non-zero results
std_ratio = np.std(non_zero_results) if non_zero_results.size > 0 else 0
print(std_ratio)
sys.stderr.write("\t Leaving calcul_std_ratio\n")
return std_ratio
##############################################
######<---Fonction statistiques ratio--->#####
##############################################
def compute_mean_and_std(ratio_par_window_results):
"""
Compute the mean, standard deviation, and median of non-zero ratio results per window.
This function filters out zero and -1 values from the ratio results per window and computes the mean, standard deviation, and median of the remaining values. Values greater than or equal to 5 are capped at 5.
Parameters
----------
ratio_par_window_results : list or numpy.ndarray
A list or array of ratio values per window.
Returns
-------
tuple
A tuple containing the mean, standard deviation, and median of the filtered ratio values.
"""
sys.stderr.write("Computing stats : \n")
# Filtrer les résultats pour enlever les valeurs égales à 0
# Filter results to remove zero and -1 values
ratio_par_window_results = np.array(ratio_par_window_results)
non_zero_results = ratio_par_window_results[ratio_par_window_results != 0]
# Initialiser les listes pour le calcul des stats
# Initialize list for stats computation
table = []
for value in non_zero_results:
......@@ -413,27 +680,40 @@ def compute_mean_and_std(ratio_par_window_results):
elif float(value) != -1:
table.append(float(value))
# Calculer la moyenne, l'écart-type et la médiane des valeurs filtrées
# Calculate the mean, standard deviation, and median of the filtered values
mean_ratio = np.mean(table) if table else 0
std_ratio = np.std(table) if table else 0
med_ratio = np.median(table) if table else 0
# Créer le dictionnaire de stats
#stats = {'mean': mean_ratio, 'std': std_ratio, 'med': med_ratio}
# Afficher les résultats
#print(stats)
# Display results
print(mean_ratio, std_ratio, med_ratio)
sys.stderr.write("Computation done\n")
# Retourner les résultats
# Return results
return mean_ratio, std_ratio, med_ratio
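# Illustrative sketch (not called by the pipeline): 0 and -1 entries are dropped and,
# per the docstring above, values >= 5 are capped at 5 before the statistics are taken.
def _demo_compute_mean_and_std():
    toy = np.array([0.0, -1.0, 1.0, 7.0], dtype=np.float32)  # hypothetical ratios
    return compute_mean_and_std(toy)  # expected: mean, std and median of [1.0, 5.0]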
##############################################
######<---Fonction copy number level--->######
##############################################
def cn_level(x):
if x < 1 :
"""
Determine the copy number level based on the given value.
This function returns the copy number level based on the input value `x`.
Parameters
----------
x : float
The input value used to determine the copy number level.
Returns
-------
int
The copy number level:
- 0 if x < 0.1
- 1 if 0.1 <= x <= 0.75
- 2 if 0.75 < x < 1 or round(x) == 1
- round(x) if round(x) > 1
"""
if x < 1:
if x <= 0.75:
if x >= 0.1:
return 1
......@@ -447,30 +727,92 @@ def cn_level(x):
if round(x) > 1:
return round(x)
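# Illustrative check of the thresholds documented above (not called by the pipeline):
def _demo_cn_level():
    return [cn_level(x) for x in (0.05, 0.5, 0.9, 2.7)]  # expected: [0, 1, 2, 3]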
##############################################
######<---Fonction get sample--->#############
##############################################
def get_sample_name(bamfile_path):
"""
Extract the sample name from a BAM file.
This function reads the header of a BAM file to extract the sample name from the read groups.
Parameters
----------
bamfile_path : str
The path to the BAM file.
Returns
-------
str
The sample name extracted from the BAM file. If no sample name is found, returns "UnknownSample".
"""
with pysam.AlignmentFile(bamfile_path, "rb") as bamfile:
for read_group in bamfile.header.get('RG', []):
if 'SM' in read_group:
return read_group['SM']
for read_group in bamfile.header.get("RG", []):
if "SM" in read_group:
return read_group["SM"]
return "UnknownSample"
##############################################
######<---Fonction create signal--->##########
##############################################
def create_signal(signal, chr, z_score_results, step_size):
"""
Create a signal dictionary for a specific chromosome based on z-score results.
This function populates a signal dictionary with positions and corresponding z-score results for a given chromosome.
Parameters
----------
signal : dict
A dictionary to store the signal data.
chr : str
The chromosome for which the signal is created.
z_score_results : list or numpy.ndarray
A list or array of z-score results.
step_size : int
The step size used to calculate the positions.
Returns
-------
None
The function modifies the signal dictionary in place.
"""
if chr not in signal:
signal[chr] = {}
for i in range(len(z_score_results)):
pos = (i * step_size) + 1
signal[chr][pos] = z_score_results[i]
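# Illustrative sketch (not called by the pipeline): window index i is stored at the
# 1-based position i * step_size + 1.
def _demo_create_signal():
    signal = {}
    create_signal(signal, "chr1", [0.1, 2.5, -1.3], 100)  # hypothetical z-scores
    return signal  # expected: {"chr1": {1: 0.1, 101: 2.5, 201: -1.3}}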
##############################################
######<---Fonction detect_events--->##########
##############################################
def detect_events(z_score_results, zscore_threshold, events, med_ratio, ratio_par_mean_ratio_results, chr):
def detect_events(
z_score_results,
zscore_threshold,
events,
med_ratio,
ratio_par_mean_ratio_results,
chr,
):
"""
Detect genomic events based on z-score results and a z-score threshold.
This function identifies significant genomic events where z-scores exceed the given threshold. Events are recorded in the `events` dictionary for the specified chromosome.
Parameters
----------
z_score_results : list or numpy.ndarray
A list or array of z-score values.
zscore_threshold : float
The threshold for detecting significant z-score events.
events : dict
A dictionary to store detected events.
med_ratio : float
The median ratio used for copy number level calculations.
ratio_par_mean_ratio_results : list or numpy.ndarray
A list or array of ratio values compared to the mean ratio.
chr : str
The chromosome for which events are detected.
Returns
-------
None
The function modifies the events dictionary in place.
"""
sys.stderr.write("\t starting detect_events\n")
c = 0
for i, z_score in enumerate(z_score_results):
......@@ -492,10 +834,25 @@ def detect_events(z_score_results, zscore_threshold, events, med_ratio, ratio_pa
events[chr][(pos_start, pos_end)] = c
sys.stderr.write("\t ending detect_events\n")
##############################################
######<---Fonction segmentation--->###########
##############################################
def segmentation(events, segment):
"""
Segment the detected events into contiguous regions with the same copy number level.
This function processes the detected events and groups contiguous regions with the same copy number level into segments.
Parameters
----------
events : dict
A dictionary of detected events for each chromosome.
segment : dict
A dictionary to store the segmented regions.
Returns
-------
None
The function modifies the segment dictionary in place.
"""
sys.stderr.write("starting segmentation : \n")
for k in events.keys():
sys.stderr.write("\tfor chromosome %s\n" % k)
......@@ -505,7 +862,7 @@ def segmentation(events, segment):
for p in sorted(events[k].keys()):
level = events[k][p]
# new coordinates
if (p[0] > (oldPos + window_size)):
if p[0] > (oldPos + window_size):
if (starts != 0) and (starts != p[0]):
if k not in segment:
segment[k] = {}
......@@ -521,7 +878,7 @@ def segmentation(events, segment):
else:
starts = p[0]
# case where it's contiguous but different level
if (level != oldLevel):
if level != oldLevel:
if oldLevel != -1:
if k not in segment:
segment[k] = {}
......@@ -540,25 +897,54 @@ def segmentation(events, segment):
oldLevel = level
sys.stderr.write("segmentation done\n")
##############################################
######<---Fonction display results--->########
##############################################
def display_results_vcf(sample, segment, signal, lengthFilter, output_file):
"""
Generate a VCF file containing structural variant calls based on segmented regions and signal data.
This function creates a VCF (Variant Call Format) file containing structural variant calls derived from segmented regions and signal data. The structural variant type (DEL for deletion or DUP for duplication) is determined based on copy number levels and signal values. The resulting VCF file includes information about the chromosome, position, type of structural variant, copy number, and other relevant information.
Parameters
----------
sample : str
The sample name to be included in the VCF file header.
segment : dict
A dictionary containing segmented regions with copy number information.
signal : dict
A dictionary containing signal data for each chromosome.
lengthFilter : int
The minimum length threshold for including variants in the VCF file.
output_file : str
The path to the output VCF file.
Returns
-------
None
This function writes the structural variant calls to the specified output file in VCF format.
"""
global header_written
sys.stderr.write("starting display results\n")
with open(output_file, 'a') as f:
with open(output_file, "a") as f:
if not header_written:
f.write("##fileformat=VCFv4.2\n")
f.write("##source=cnvcaller\n")
f.write("##INFO=<ID=SVTYPE,Number=1,Type=String,Description=\"Type of structural variant\">\n")
f.write("##INFO=<ID=SVLEN,Number=.,Type=Integer,Description=\"Difference in length between REF and ALT alleles\">\n")
f.write("##INFO=<ID=END,Number=1,Type=Integer,Description=\"End position of the variant described in this record\">\n")
f.write("##INFO=<ID=CN,Number=1,Type=Integer,Description=\"Copy number\">\n")
f.write("##ALT=<ID=DEL,Description=\"Deletion\">\n")
f.write("##ALT=<ID=DUP,Description=\"Duplication\">\n")
f.write("##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">\n")
f.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\n" % (sample))
f.write(
'##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">\n'
)
f.write(
'##INFO=<ID=SVLEN,Number=.,Type=Integer,Description="Difference in length between REF and ALT alleles">\n'
)
f.write(
'##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">\n'
)
f.write('##INFO=<ID=CN,Number=1,Type=Integer,Description="Copy number">\n')
f.write('##ALT=<ID=DEL,Description="Deletion">\n')
f.write('##ALT=<ID=DUP,Description="Duplication">\n')
f.write('##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n')
f.write(
"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\n" % (sample)
)
header_written = True
for k in segment.keys():
......@@ -568,15 +954,73 @@ def display_results_vcf(sample, segment, signal, lengthFilter, output_file):
if (segment[k][elt]["end"] - segment[k][elt]["start"]) < lengthFilter:
continue
if int(signal[k][segment[k][elt]["start"]]) < 0:
f.write("%s\t%s\t.\tN\t<DEL>\t.\t.\tSVTYPE=DEL;END=%s;VALUE=%s\tGT:GQ\t./.:0\n" % (k, segment[k][elt]["start"], segment[k][elt]["end"], int(segment[k][elt]["cn"])))
f.write(
"%s\t%s\t.\tN\t<DEL>\t.\t.\tSVTYPE=DEL;END=%s;VALUE=%s\tGT:GQ\t./.:0\n"
% (
k,
segment[k][elt]["start"],
segment[k][elt]["end"],
int(segment[k][elt]["cn"]),
)
)
else:
f.write("%s\t%s\t.\tN\t<DUP>\t.\t.\tSVTYPE=DUP;END=%s;VALUE=%s\tGT:GQ\t./.:0\n" % (k, segment[k][elt]["start"], segment[k][elt]["end"], int(segment[k][elt]["cn"])))
f.write(
"%s\t%s\t.\tN\t<DUP>\t.\t.\tSVTYPE=DUP;END=%s;VALUE=%s\tGT:GQ\t./.:0\n"
% (
k,
segment[k][elt]["start"],
segment[k][elt]["end"],
int(segment[k][elt]["cn"]),
)
)
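# Illustrative record (hypothetical coordinates): for a segment on chr1 spanning
# 150001-200000 with copy number 1 and a negative signal, the DEL branch above emits
#   chr1  150001  .  N  <DEL>  .  .  SVTYPE=DEL;END=200000;VALUE=1  GT:GQ  ./.:0
# (columns are tab-separated in the actual file).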
#################################
######<---Fonction main--->######
###### <---Fonction main--->######
#################################
def main_calcul(bamfile_path, chr, seq_length, window_size, step_size, zscore_threshold, lengthFilter, output_file, sample):
def main_calcul(
bamfile_path,
chr,
seq_length,
window_size,
step_size,
zscore_threshold,
lengthFilter,
output_file,
sample,
):
"""
Perform structural variant detection and VCF file generation.
This function orchestrates a series of computations and data manipulations,
leveraging GPU acceleration for performance improvements in genomic data analysis.
Parameters
----------
bamfile_path : str
Path to the BAM file containing aligned reads.
chr : str
Chromosome identifier for which analysis is performed.
seq_length : int
Length of the chromosome sequence.
window_size : int
Size of the sliding window used for analysis.
step_size : int
Size of the step when moving the window along the chromosome.
zscore_threshold : float
Threshold value for detecting significant events based on Z-scores.
lengthFilter : int
Minimum length threshold for including variants in the VCF file.
output_file : str
Path to the output VCF file.
sample : str
Name of the sample being analyzed.
Returns
-------
None
"""
sys.stderr.write("\t entering main_calcul\n")
global seq
events = {}
......@@ -591,112 +1035,218 @@ def main_calcul(bamfile_path, chr, seq_length, window_size, step_size, zscore_th
# Transfer the NumPy array to the CUDA device
d_depth_data = cuda.mem_alloc(depth_data.nbytes)
cuda.memcpy_htod(d_depth_data, depth_data)
sys.stderr.write("\t d_depth_data : %s, %s\n" % (d_depth_data, d_depth_data.as_buffer(sys.getsizeof(depth_data))))
sys.stderr.write(
"\t d_depth_data : %s, %s\n"
% (d_depth_data, d_depth_data.as_buffer(sys.getsizeof(depth_data)))
)
d_gc_data = cuda.mem_alloc(gc_data.nbytes)
cuda.memcpy_htod(d_gc_data, gc_data)
sys.stderr.write("\t d_gc_data : %s, %s\n" % (d_gc_data, d_gc_data.as_buffer(sys.getsizeof(gc_data))))
sys.stderr.write(
"\t d_gc_data : %s, %s\n"
% (d_gc_data, d_gc_data.as_buffer(sys.getsizeof(gc_data)))
)
d_map_data = cuda.mem_alloc(map_data.nbytes)
cuda.memcpy_htod(d_map_data, map_data)
sys.stderr.write("\t d_map_data : %s, %s\n" % (d_map_data, d_map_data.as_buffer(sys.getsizeof(map_data))))
sys.stderr.write(
"\t d_map_data : %s, %s\n"
% (d_map_data, d_map_data.as_buffer(sys.getsizeof(map_data)))
)
# Compute the CUDA grid and block sizes
block_size = num_cores
sys.stderr.write("\t blocksize (nb de threads) = %s\n" % (num_cores))
grid_size = int((int((seq_length - window_size) / step_size) + 1) / block_size)+1
grid_size = int((int((seq_length - window_size) / step_size) + 1) / block_size) + 1
sys.stderr.write("\t grid_size = \n")
# Initialize the arrays that store the results
depth_results = np.zeros(int((seq_length - window_size) / step_size) + 1, dtype=np.float32)
depth_results = np.zeros(
int((seq_length - window_size) / step_size) + 1, dtype=np.float32
)
sys.stderr.write("\t Definition de depth_results\n")
gc_results = np.zeros(int((seq_length - window_size) / step_size) + 1, dtype=np.float32)
gc_results = np.zeros(
int((seq_length - window_size) / step_size) + 1, dtype=np.float32
)
sys.stderr.write("\t Definition de gc_results\n")
map_results = np.zeros(int((seq_length - window_size) / step_size) + 1, dtype=np.float32)
map_results = np.zeros(
int((seq_length - window_size) / step_size) + 1, dtype=np.float32
)
sys.stderr.write("\t Definition de map_results\n")
depth_correction_results = np.zeros(int((seq_length - window_size) / step_size) + 1, dtype=np.float32)
depth_correction_results = np.zeros(
int((seq_length - window_size) / step_size) + 1, dtype=np.float32
)
sys.stderr.write("\t Definition de depth_correction_results\n")
normalize_depth_results = np.zeros(int((seq_length - window_size) / step_size) + 1, dtype=np.float32)
normalize_depth_results = np.zeros(
int((seq_length - window_size) / step_size) + 1, dtype=np.float32
)
sys.stderr.write("\t Definition de normalize_depth_results\n")
ratio_par_window_results = np.zeros(int((seq_length - window_size) / step_size) + 1, dtype=np.float32)
ratio_par_window_results = np.zeros(
int((seq_length - window_size) / step_size) + 1, dtype=np.float32
)
sys.stderr.write("\t Definition de ratio_par_window\n")
z_score_results = np.zeros(int((seq_length - window_size) / step_size) + 1, dtype=np.float32)
z_score_results = np.zeros(
int((seq_length - window_size) / step_size) + 1, dtype=np.float32
)
sys.stderr.write("\t Definition de z_score_results\n")
ratio_par_mean_ratio_results = np.zeros(int((seq_length - window_size) / step_size) + 1, dtype=np.float32)
ratio_par_mean_ratio_results = np.zeros(
int((seq_length - window_size) / step_size) + 1, dtype=np.float32
)
sys.stderr.write("\t Definition de ratio_par_mean_ratio_results\n")
# Allocate memory for the results on the CUDA device
d_depth_results = cuda.mem_alloc(depth_results.nbytes)
sys.stderr.write("\t d_depth_results = %s\n" % d_depth_results.as_buffer(sys.getsizeof(d_depth_results)))
sys.stderr.write(
"\t d_depth_results = %s\n"
% d_depth_results.as_buffer(sys.getsizeof(d_depth_results))
)
sys.stderr.write("\t depth_results.nbytes = %s\n" % depth_results.nbytes)
d_gc_results = cuda.mem_alloc(gc_results.nbytes)
sys.stderr.write("\t d_gc_results = %s\n" % d_gc_results.as_buffer(sys.getsizeof(d_gc_results)))
sys.stderr.write(
"\t d_gc_results = %s\n" % d_gc_results.as_buffer(sys.getsizeof(d_gc_results))
)
sys.stderr.write("\t gc_results.nbytes = %s\n" % gc_results.nbytes)
d_map_results = cuda.mem_alloc(map_results.nbytes)
sys.stderr.write("\t d_map_results = %s\n" % d_map_results.as_buffer(sys.getsizeof(d_map_results)))
sys.stderr.write(
"\t d_map_results = %s\n"
% d_map_results.as_buffer(sys.getsizeof(d_map_results))
)
sys.stderr.write("\t map_results.nbytes = %s\n" % map_results.nbytes)
d_depth_correction_results = cuda.mem_alloc(depth_correction_results.nbytes)
sys.stderr.write("\t d_depth_correction_results = %s\n" % d_depth_correction_results.as_buffer(sys.getsizeof(d_depth_correction_results)))
sys.stderr.write("\t depth_correction_results.nbytes = %s\n" % depth_correction_results.nbytes)
sys.stderr.write(
"\t d_depth_correction_results = %s\n"
% d_depth_correction_results.as_buffer(
sys.getsizeof(d_depth_correction_results)
)
)
sys.stderr.write(
"\t depth_correction_results.nbytes = %s\n" % depth_correction_results.nbytes
)
d_normalize_depth_results = cuda.mem_alloc(normalize_depth_results.nbytes)
sys.stderr.write("\t d_normalize_depth_results = %s\n" % d_normalize_depth_results.as_buffer(sys.getsizeof(d_normalize_depth_results)))
sys.stderr.write("\t normalize_depth_results.nbytes = %s\n" % normalize_depth_results.nbytes)
sys.stderr.write(
"\t d_normalize_depth_results = %s\n"
% d_normalize_depth_results.as_buffer(sys.getsizeof(d_normalize_depth_results))
)
sys.stderr.write(
"\t normalize_depth_results.nbytes = %s\n" % normalize_depth_results.nbytes
)
d_ratio_par_window_results = cuda.mem_alloc(ratio_par_window_results.nbytes)
sys.stderr.write("\t d_ratio_par_window_results = %s\n" % d_ratio_par_window_results.as_buffer(sys.getsizeof(d_ratio_par_window_results)))
sys.stderr.write("\t ratio_par_window_results.nbytes = %s\n" % ratio_par_window_results.nbytes)
sys.stderr.write(
"\t d_ratio_par_window_results = %s\n"
% d_ratio_par_window_results.as_buffer(
sys.getsizeof(d_ratio_par_window_results)
)
)
sys.stderr.write(
"\t ratio_par_window_results.nbytes = %s\n" % ratio_par_window_results.nbytes
)
d_z_score_results = cuda.mem_alloc(z_score_results.nbytes)
sys.stderr.write("\t d_z_score_results = %s\n" % d_z_score_results.as_buffer(sys.getsizeof(d_z_score_results)))
sys.stderr.write(
"\t d_z_score_results = %s\n"
% d_z_score_results.as_buffer(sys.getsizeof(d_z_score_results))
)
sys.stderr.write("\t z_score_results.nbytes = %s\n" % z_score_results.nbytes)
d_ratio_par_mean_ratio_results = cuda.mem_alloc(ratio_par_mean_ratio_results.nbytes)
sys.stderr.write("\t d_ratio_par_mean_ratio_results = %s\n" % d_ratio_par_mean_ratio_results.as_buffer(sys.getsizeof(d_ratio_par_mean_ratio_results)))
sys.stderr.write("\t ratio_par_mean_ratio_results.nbytes = %s\n" % ratio_par_mean_ratio_results.nbytes)
sys.stderr.write(
"\t d_ratio_par_mean_ratio_results = %s\n"
% d_ratio_par_mean_ratio_results.as_buffer(
sys.getsizeof(d_ratio_par_mean_ratio_results)
)
)
sys.stderr.write(
"\t ratio_par_mean_ratio_results.nbytes = %s\n"
% ratio_par_mean_ratio_results.nbytes
)
# Call the CUDA depth computation kernel
calcul_depth_kernel_cuda(d_depth_data, np.int32(seq_length), np.int32(window_size), np.int32(step_size), d_depth_results, block=(block_size, 1, 1), grid=(grid_size, 1))
calcul_depth_kernel_cuda(
d_depth_data,
np.int32(seq_length),
np.int32(window_size),
np.int32(step_size),
d_depth_results,
block=(block_size, 1, 1),
grid=(grid_size, 1),
)
sys.stderr.write("\t appel fonction calcul_depth_kernel_cuda\n")
calcul_gc_kernel_cuda(d_gc_data, np.int32(seq_length), np.int32(window_size), np.int32(step_size), d_gc_results, block=(block_size, 1, 1), grid=(grid_size, 1))
calcul_gc_kernel_cuda(
d_gc_data,
np.int32(seq_length),
np.int32(window_size),
np.int32(step_size),
d_gc_results,
block=(block_size, 1, 1),
grid=(grid_size, 1),
)
sys.stderr.write("\t appel fonction calcul_gc_kernel_cuda\n")
calcul_map_kernel_cuda(d_map_data, np.int32(seq_length), np.int32(window_size), np.int32(step_size), d_map_results, block=(block_size, 1, 1), grid=(grid_size, 1))
calcul_map_kernel_cuda(
d_map_data,
np.int32(seq_length),
np.int32(window_size),
np.int32(step_size),
d_map_results,
block=(block_size, 1, 1),
grid=(grid_size, 1),
)
sys.stderr.write("\t appel fonction calcul_map_kernel_cuda\n")
calcul_depth_correction_kernel_cuda(d_depth_results, d_map_results, np.int32(seq_length), np.int32(window_size), np.int32(step_size), d_depth_correction_results, block=(block_size, 1, 1), grid=(grid_size, 1))
calcul_depth_correction_kernel_cuda(
d_depth_results,
d_map_results,
np.int32(seq_length),
np.int32(window_size),
np.int32(step_size),
d_depth_correction_results,
block=(block_size, 1, 1),
grid=(grid_size, 1),
)
sys.stderr.write("\t appel fonction calcul_depth_correction_kernel_cuda\n")
context.synchronize()
# Copy the results from the CUDA device to the host
cuda.memcpy_dtoh(depth_results, d_depth_results) #cuda.memcpy_dtoh(dest, src)
sys.stderr.write("\t Copie les resultats du GPU (d_depth_results) vers le CPU (depth_results)\n")
cuda.memcpy_dtoh(gc_results, d_gc_results) #cuda.memcpy_dtoh(dest, src)
sys.stderr.write("\t Copie les resultats du GPU (d_gc_results) vers le CPU (gc_results)\n")
cuda.memcpy_dtoh(map_results, d_map_results) #cuda.memcpy_dtoh(dest, src)
sys.stderr.write("\t Copie les resultats du GPU (d_map_results) vers le CPU (map_results)\n")
cuda.memcpy_dtoh(depth_correction_results, d_depth_correction_results) #cuda.memcpy_dtoh(dest, src)
sys.stderr.write("\t Copie les resultats du GPU (d_depth_correction_results) vers le CPU (depth_correction_results)\n")
###NORMALISATION###
#Appel fonctions medianes
# cuda.memcpy_dtoh(dest, src)
cuda.memcpy_dtoh(depth_results, d_depth_results)
sys.stderr.write(
"\t Copie les resultats du GPU (d_depth_results) vers le CPU (depth_results)\n"
)
cuda.memcpy_dtoh(gc_results, d_gc_results) # cuda.memcpy_dtoh(dest, src)
sys.stderr.write(
"\t Copie les resultats du GPU (d_gc_results) vers le CPU (gc_results)\n"
)
cuda.memcpy_dtoh(map_results, d_map_results) # cuda.memcpy_dtoh(dest, src)
sys.stderr.write(
"\t Copie les resultats du GPU (d_map_results) vers le CPU (map_results)\n"
)
cuda.memcpy_dtoh(
depth_correction_results, d_depth_correction_results
) # cuda.memcpy_dtoh(dest, src)
sys.stderr.write(
"\t Copie les resultats du GPU (d_depth_correction_results) vers le CPU (depth_correction_results)\n"
)
### NORMALISATION###
# Appel fonctions medianes
sys.stderr.write("\t appel fonctions calcul medianes\n")
m = calcul_med_total(depth_correction_results)
gc_to_median = calcul_med_same_gc(gc_results, depth_correction_results)
......@@ -713,7 +1263,18 @@ def main_calcul(bamfile_path, chr, seq_length, window_size, step_size, zscore_th
cuda.memcpy_htod(d_gc_to_median, gc_to_median_array)
# Call the normalization kernel
normalize_depth_kernel_cuda(d_depth_correction_results, d_gc_results, np.float32(m), d_gc_to_median, np.int32(seq_length), np.int32(window_size), np.int32(step_size), d_normalize_depth_results, block=(block_size, 1, 1), grid=(grid_size, 1))
normalize_depth_kernel_cuda(
d_depth_correction_results,
d_gc_results,
np.float32(m),
d_gc_to_median,
np.int32(seq_length),
np.int32(window_size),
np.int32(step_size),
d_normalize_depth_results,
block=(block_size, 1, 1),
grid=(grid_size, 1),
)
sys.stderr.write("\t appel fonction normalize_depth_kernel_cuda\n")
context.synchronize()
......@@ -721,15 +1282,23 @@ def main_calcul(bamfile_path, chr, seq_length, window_size, step_size, zscore_th
# Copy the normalized results from the CUDA device to the host
cuda.memcpy_dtoh(normalize_depth_results, d_normalize_depth_results)
### Ratio par window###
###Ratio par window###
#Appel fonction moyenne
# Appel fonction moyenne
sys.stderr.write("\t appel fonction calcul moyenne\n")
mean_chr = calcul_moy_totale(normalize_depth_results)
# Call the ratio kernel
ratio_par_window_kernel_cuda(d_normalize_depth_results, np.float32(mean_chr), np.int32(seq_length), np.int32(window_size), np.int32(step_size), d_ratio_par_window_results, block=(block_size, 1, 1), grid=(grid_size, 1))
ratio_par_window_kernel_cuda(
d_normalize_depth_results,
np.float32(mean_chr),
np.int32(seq_length),
np.int32(window_size),
np.int32(step_size),
d_ratio_par_window_results,
block=(block_size, 1, 1),
grid=(grid_size, 1),
)
sys.stderr.write("\t appel fonction ratio_par_window_kernel_cuda\n")
context.synchronize()
......@@ -737,15 +1306,34 @@ def main_calcul(bamfile_path, chr, seq_length, window_size, step_size, zscore_th
# Copy the ratio results from the CUDA device to the host
cuda.memcpy_dtoh(ratio_par_window_results, d_ratio_par_window_results)
#Création table à partir du ratio
# Création table à partir du ratio
mean_ratio, std_ratio, med_ratio = compute_mean_and_std(ratio_par_window_results)
# Call the kernel that divides each ratio by the mean ratio
ratio_par_mean_ratio_kernel_cuda(d_ratio_par_window_results, np.float32(mean_ratio), np.int32(seq_length), np.int32(window_size), np.int32(step_size), d_ratio_par_mean_ratio_results, block=(block_size, 1, 1), grid=(grid_size, 1))
ratio_par_mean_ratio_kernel_cuda(
d_ratio_par_window_results,
np.float32(mean_ratio),
np.int32(seq_length),
np.int32(window_size),
np.int32(step_size),
d_ratio_par_mean_ratio_results,
block=(block_size, 1, 1),
grid=(grid_size, 1),
)
sys.stderr.write("\t appel fonction ratio_par_mean_ratio_kernel_cuda\n")
# Call the Z-score kernel
z_score_kernel_cuda(d_ratio_par_window_results, np.float32(mean_ratio), np.float32(std_ratio), np.int32(seq_length), np.int32(window_size), np.int32(step_size), d_z_score_results, block=(block_size, 1, 1), grid=(grid_size, 1))
z_score_kernel_cuda(
d_ratio_par_window_results,
np.float32(mean_ratio),
np.float32(std_ratio),
np.int32(seq_length),
np.int32(window_size),
np.int32(step_size),
d_z_score_results,
block=(block_size, 1, 1),
grid=(grid_size, 1),
)
sys.stderr.write("\t appel fonction z_score_kernel_cuda\n")
context.synchronize()
......@@ -754,21 +1342,28 @@ def main_calcul(bamfile_path, chr, seq_length, window_size, step_size, zscore_th
cuda.memcpy_dtoh(ratio_par_mean_ratio_results, d_ratio_par_mean_ratio_results)
cuda.memcpy_dtoh(z_score_results, d_z_score_results)
#Appel fonction create signal
# Appel fonction create signal
create_signal(signal, chr, z_score_results, step_size)
#Appel fonction detect events
detect_events(z_score_results, zscore_threshold, events, med_ratio, ratio_par_mean_ratio_results, chr)
#Appel fonction segmentation
# Appel fonction detect events
detect_events(
z_score_results,
zscore_threshold,
events,
med_ratio,
ratio_par_mean_ratio_results,
chr,
)
# Appel fonction segmentation
segmentation(events, segment)
#Appel fonction display_results_vcf
# Appel fonction display_results_vcf
display_results_vcf(sample, segment, signal, lengthFilter, output_file)
# Main program
#Calcul nombre de coeurs max pour le GPU
# Calcul nombre de coeurs max pour le GPU
header_written = False
sample = get_sample_name(bamfile_path)
......@@ -778,8 +1373,8 @@ num_cores = attributes[1]
print("Nombre de CPU: ", multiprocessing.cpu_count())
print(f"Nombre de coeurs max GPU: {num_cores}")
gc_file = '/work/gad/shared/pipeline/grch38/index/grch38_essential.fa'
mappability_file = '/work/gad/shared/analyse/test/cnvGPU/test_scalability/wgEncodeCrgMapabilityAlign100mer_no_uniq.grch38.bedgraph'
gc_file = "/work/gad/shared/pipeline/grch38/index/grch38_essential.fa"
mappability_file = "/work/gad/shared/analyse/test/cnvGPU/test_scalability/wgEncodeCrgMapabilityAlign100mer_no_uniq.grch38.bedgraph"
seq = parse_fasta(gc_file)
mappability = dico_mappabilite(mappability_file)
......@@ -789,10 +1384,23 @@ with pysam.AlignmentFile(bamfile_path, "rb") as bamfile_handle:
sys.stderr.write("Chromosome : %s, seq length : %s\n" % (chr, seq_length))
# Call the mean depth computation function for this chromosome
main_calcul(bamfile_handle, chr, seq_length, window_size, step_size, zscore_threshold, lengthFilter, output_file, sample)
logging.basicConfig(filename='%s' % (logfile), filemode='a', level=logging.INFO, format='%(asctime)s %(levelname)s - %(message)s')
logging.info('end')
main_calcul(
bamfile_handle,
chr,
seq_length,
window_size,
step_size,
zscore_threshold,
lengthFilter,
output_file,
sample,
)
logging.basicConfig(
filename="%s" % (logfile),
filemode="a",
level=logging.INFO,
format="%(asctime)s %(levelname)s - %(message)s",
)
logging.info("end")