Commit 324f6f25 authored by Theo Serralta

Remote files from git

parent 2d7ecfcf
import pysam
import concurrent.futures
import sys
def calcul_depth_seq(bamfile, seq, window_size=100, step_size=10):
    samfile = pysam.AlignmentFile(bamfile, "rb")
    seq_length = samfile.lengths[samfile.references.index(seq)]  # length of this reference sequence
    for pos_start in range(0, seq_length - window_size + 1, step_size):  # slide a window of window_size along the sequence
        pos_end = pos_start + window_size  # end position of the current window
        window_depths = []  # per-position depths inside the window
        for pileupcolumn in samfile.pileup(seq, start=pos_start, stop=pos_end):  # iterate over each pileup column
            window_depths.append(pileupcolumn.n)  # number of reads covering this position
        if window_depths:  # compute the mean depth of the window
            avg_depth = sum(window_depths) / len(window_depths)
            print(f"{seq}:{pos_start}-{pos_end}: {avg_depth}")
    samfile.close()
def calcul_depth_all(bamfile, window_size=100, step_size=10, num_threads=4):
    samfile = pysam.AlignmentFile(bamfile, "rb")
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
        futures = []  # list to store the futures
        for seq in samfile.references:
            future = executor.submit(calcul_depth_seq, bamfile, seq, window_size, step_size)
            futures.append(future)
        concurrent.futures.wait(futures)
    samfile.close()
# Main program
if len(sys.argv) < 2:
    print("Usage: provide a .bam alignment file as argument")
    sys.exit()
bamfile_path = sys.argv[1]
calcul_depth_all(bamfile_path, window_size=100, num_threads=4)
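# Example invocation (illustrative; the script and BAM file names are placeholders):
#   python calcul_depth.py sample.bam
# Each sliding window is reported to stdout as "reference:start-end: mean_depth".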
import pysam
import concurrent.futures
import sys
import getopt
import logging
#Options
output_file = ""
try:
opts, args = getopt.getopt(sys.argv[1:], 'b:w:s:t:o:e:')
for opt, arg in opts:
if opt in ("-b"):
bamfile = arg
if opt in ("-w"):
window_size = int(arg)
if opt in ("-s"):
step_size = int(arg)
if opt in ("-t"):
num_threads = int(arg)
if opt in ("-o"):
output_file = open(arg, "a")
if opt in ("-e"):
logfile = arg
except getopt.GetoptError:
print('Invalid argument')
sys.exit(1)
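# Example invocation (illustrative; the script and file names are placeholders):
#   python calcul_depth_options.py -b sample.bam -w 100 -s 10 -t 4 -o depths.txt -e run.log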
logging.basicConfig(filename=logfile, filemode='a', level=logging.INFO, format='%(asctime)s %(levelname)s - %(message)s')
logging.info('start')
def calcul_depth_seq(bamfile, seq, window_size, step_size, numero_de_thread):
sys.stderr.write("\t entering calcul_depth_seq : thread numero %s\n" % numero_de_thread)
samfile = pysam.AlignmentFile(bamfile, "rb")
seq_length = samfile.lengths[samfile.references.index(seq)] #Permet d'obtenir la longueur de la sequence
for pos_start in range(0, seq_length - window_size +1, step_size): #Itère sur la sequence par "fenêtre" en fonction de la taille
pos_end = pos_start + window_size #pos_start + window_size donne la position de début de la fenêtre, puis grâce à min() et seq_length je peux définir position de fin. le min me permet d'être certain que pos_end < seq_lenght. C-a-d que si la fenêtre dépasse la taille de la sequence, j'aurais quand même pos_end
window_depths = [] #liste pour stocker la profondeur de chaque fenêtre
for pileupcolumn in samfile.pileup(seq, start=pos_start, stop=pos_end): #itère sur chaque colone de pileup
window_depths.append(pileupcolumn.n)#ajoute le nb de lecture couvrant cette position à la liste window_depths
if window_depths:#calcul la profondeur moyenne pour chaque sequence
sys.stderr.write("\t\t entering wd thread numero %s\n" % numero_de_thread)
avg_depth = sum(window_depths) / len(window_depths)
sys.stdout.write(f"{seq}:{pos_start}-{pos_end}: {avg_depth}\n")
output_file.write(f"{seq}:{pos_start}-{pos_end}: {avg_depth}\n")
sys.stderr.write("\t leaving calcul_depth_seq thread numero %s\n" % numero_de_thread)
def calcul_depth_all(bamfile, window_size, step_size, num_threads):
sys.stderr.write("entering calcul_depth_all \n")
samfile=pysam.AlignmentFile(bamfile, "rb")
with concurrent.futures.ThreadPoolExecutor(max_workers = num_threads) as executor:
futures = [] #liste pour stocker les futures
n=0
for seq in samfile.references:
n+=1
future = executor.submit(calcul_depth_seq, bamfile, seq, window_size, step_size, n)
futures.append(future)
concurrent.futures.wait(futures)
samfile.close()
#if output_file != sys.stdout:
# output_file.close()
# Main program
if len(sys.argv) < 2 or bamfile is None:
    print("Usage: provide a .bam alignment file with -b (options: -w window size, -s step size, -t threads, -o output file, -e log file)")
    sys.exit(1)
bamfile_path = bamfile
calcul_depth_all(bamfile_path, window_size, step_size, num_threads)
logging.info('end')
if output_file is not sys.stdout:
    output_file.close()
import pysam
import sys
import getopt
import logging
import numpy as np
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
import pycuda.gpuarray as gpuarray
from pycuda.autoinit import context
import multiprocessing
from multiprocessing import Process, Queue
import time
import subprocess
import pandas as pd
# Options
def parse_arguments():
"""
Parse command-line arguments.
This function parses the command-line arguments provided to the script and extracts the values for various parameters.
Parameters
----------
None
Returns
-------
tuple
A tuple containing the following elements:
bamfile_path : str or None
        The path to the BAM file.
    num_chr : str or None
        The chromosome to analyse (a 1-based index into the BAM references), or "ALL" for every chromosome.
window_size : int or None
The size of the window.
step_size : int or None
The step size for the analysis.
zscore_threshold : float or None
The threshold for the Z-score.
lengthFilter : int or None
The filter for length.
output_file : str or None
The path to the output file.
logfile : str or None
The path to the log file.
Each element can be None if the corresponding argument was not provided.
"""
try:
opts, args = getopt.getopt(sys.argv[1:], "b:c:w:s:z:l:t:o:e:")
(
bamfile_path,
num_chr,
window_size,
step_size,
zscore_threshold,
lengthFilter,
output_file,
logfile,
) = (None, None, None, None, None, None, None, None)
for opt, arg in opts:
if opt in ("-b"):
bamfile_path = arg
if opt in ("-c"):
num_chr = arg
if opt in ("-w"):
window_size = int(arg)
if opt in ("-s"):
step_size = int(arg)
if opt in ("-z"):
zscore_threshold = float(arg)
if opt in ("-l"):
lengthFilter = int(arg)
if opt in ("-o"):
output_file = arg
if opt in ("-e"):
logfile = arg
return (
bamfile_path,
num_chr,
window_size,
step_size,
zscore_threshold,
lengthFilter,
output_file,
logfile,
)
except getopt.GetoptError:
print("Invalid argument")
sys.exit(1)
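# Example invocation (illustrative; the script name, paths and values are placeholders):
#   python cnv_caller_gpu.py -b sample.bam -c ALL -w 100 -s 10 -z 1.5 -l 1000 -o calls.vcf -e run.log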
if __name__ == "__main__":
(
bamfile_path,
num_chr,
window_size,
step_size,
zscore_threshold,
lengthFilter,
output_file,
logfile,
) = parse_arguments()
logging.basicConfig(
filename="%s" % (logfile),
filemode="a",
level=logging.INFO,
format="%(asctime)s %(levelname)s - %(message)s",
)
logging.info("start")
global seq
# CUDA code
mod = SourceModule(
"""
    // Kernel computing the raw mean depth per window
__global__ void calcul_depth_kernel(int *depth_data, int seq_length, int window_size, int step_size, float *depth_results) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < seq_length - window_size + 1) {
int pos_start = (idx * step_size) + 1;
int pos_end = pos_start + window_size;
int count_reads = 0;
for (int i = pos_start; i < pos_end; i++) {
count_reads += depth_data[i];
}
float avg_depth = (float)count_reads / window_size;
depth_results[idx] = avg_depth;
}
}
__global__ void calcul_depth_count_kernel(int *depth_data_count, int seq_length, int window_size, int step_size, float *depth_results_count) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < seq_length - window_size + 1) {
int pos_start = (idx * step_size) + 1;
int pos_end = pos_start + window_size;
int count_reads = 0;
for (int i = pos_start; i < pos_end; i++) {
count_reads += depth_data_count[i];
}
float avg_depth_count = (float)count_reads / window_size;
depth_results_count[idx] = avg_depth_count;
}
}
    // Kernel computing the GC content per window
__global__ void calcul_gc_kernel(char *gc_data, int seq_length, int window_size, int step_size, float *gc_results) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < seq_length - window_size + 1) {
int pos_start = (idx * step_size) + 1;
int pos_end = pos_start + window_size;
int gc_count = 0;
//printf(gc_data);
//printf(" ");
for (int i = pos_start; i <= pos_end; ++i) {
if ((gc_data[i] == 'G') or (gc_data[i] == 'C') or (gc_data[i] == 'g') or (gc_data[i] == 'c')) {
//printf(&gc_data[i]);
//printf(" ");
gc_count++;
}
}
float avg_gc = ((float)gc_count / window_size) * 100;
gc_results[idx] = avg_gc;
}
}
    // Kernel computing the weighted mean mappability per window
__global__ void calcul_map_kernel(float *map_data, int seq_length, int window_size, int step_size, float *map_results) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < seq_length - window_size + 1) {
int pos_start = (idx * step_size) + 1;
int pos_end = pos_start + window_size;
float weighted_sum = 0.0;
float total_weight = 0.0;
for (int i = pos_start; i <= pos_end; ++i) {
float weight = map_data[i];
weighted_sum += weight;
total_weight += 1;
}
float avg_map = weighted_sum / total_weight;
map_results[idx] = avg_map;
}
}
    // Kernel computing the read depth corrected by mappability
__global__ void calcul_depth_correction_kernel(float *depth_results, float *map_results, int seq_length, int window_size, int step_size, float *depth_correction_results) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < seq_length - window_size + 1) {
float avg_depth = depth_results[idx];
float avg_map = map_results[idx];
        // Guard against division by zero when avg_map is 0
float depth_correction = (avg_map != 0.0f) ? (avg_depth / avg_map) : 0.0f;
depth_correction_results[idx] = depth_correction;
}
}
    // Kernel normalizing the corrected depth
__global__ void normalize_depth_kernel(float *depth_correction, float *gc_results, float m, float *gc_to_median, int seq_length, int window_size, int step_size, float *depth_normalize) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < seq_length - window_size + 1) {
float mGC = gc_to_median[(int)gc_results[idx]];
        // Guard against division by zero when mGC is 0
float depth_normalize_val = (mGC != 0.0f) ? (depth_correction[idx] * m / mGC) : 0.0f;
depth_normalize[idx] = depth_normalize_val;
}
}
    // Kernel computing the per-window ratio
__global__ void ratio_par_window_kernel(float *depth_normalize_val, float mean_chr, int seq_length, int window_size, int step_size, float *ratio_par_window_results) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < seq_length - window_size + 1) {
float ratio = depth_normalize_val[idx] / mean_chr;
ratio_par_window_results[idx] = ratio;
}
}
    // Kernel computing the per-window z-score of the ratio
__global__ void z_score_kernel(float *ratio, float mean_ratio, float std_ratio, int seq_length, int window_size, int step_size, float *z_score_results) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < seq_length - window_size + 1) {
float z_score = (ratio[idx] - mean_ratio) / std_ratio;
z_score_results[idx] = z_score;
}
}
    // Kernel computing the ratio divided by the mean ratio
__global__ void ratio_par_mean_ratio_kernel(float *ratio, float mean_ratio, int seq_length, int window_size, int step_size, float *ratio_par_mean_ratio_results) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < seq_length - window_size + 1) {
float ratio_mean_ratio = ratio[idx] / mean_ratio;
ratio_par_mean_ratio_results[idx] = ratio_mean_ratio;
}
}
"""
)
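# For reference, a minimal CPU sketch (not part of the pipeline; depth_windows_cpu is a
# hypothetical helper) of what calcul_depth_kernel computes: one mean depth per sliding
# window, using the same 1-based offset into depth_data that the kernels use.
#
#   import numpy as np
#
#   def depth_windows_cpu(depth_data, seq_length, window_size, step_size):
#       n_windows = int((seq_length - window_size) / step_size) + 1
#       results = np.zeros(n_windows, dtype=np.float32)
#       for idx in range(n_windows):
#           pos_start = idx * step_size + 1
#           results[idx] = depth_data[pos_start:pos_start + window_size].sum() / window_size
#       return results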
# Get the compiled kernel functions
calcul_depth_kernel_cuda = mod.get_function("calcul_depth_kernel")
calcul_depth_count_kernel_cuda = mod.get_function("calcul_depth_count_kernel")
calcul_gc_kernel_cuda = mod.get_function("calcul_gc_kernel")
calcul_map_kernel_cuda = mod.get_function("calcul_map_kernel")
calcul_depth_correction_kernel_cuda = mod.get_function("calcul_depth_correction_kernel")
normalize_depth_kernel_cuda = mod.get_function("normalize_depth_kernel")
ratio_par_window_kernel_cuda = mod.get_function("ratio_par_window_kernel")
z_score_kernel_cuda = mod.get_function("z_score_kernel")
ratio_par_mean_ratio_kernel_cuda = mod.get_function("ratio_par_mean_ratio_kernel")
def merge_intervals(intervals):
"""
Merge overlapping intervals with the same score.
This function takes a list of intervals and merges those that have the same score.
Parameters
----------
intervals : list of tuples
A list where each element is a tuple (start, end, score).
The intervals should be sorted by start position.
Returns
-------
list of tuples
A list of merged intervals (start, end, score) where overlapping intervals with the same score are combined.
"""
merged = []
start, end, score = intervals[0]
for interval in intervals[1:]:
if interval[2] == score:
end = interval[1]
else:
merged.append((start, end, score))
start, end, score = interval
merged.append((start, end, score))
return merged
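# Illustrative example (not executed): consecutive intervals that share a score are
# collapsed into one, keeping the first start and the last end:
#   merge_intervals([(0, 10, 0.5), (10, 20, 0.5), (20, 30, 1.0)])
#   -> [(0, 20, 0.5), (20, 30, 1.0)]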
def dico_mappabilite(mappability_file):
"""
Create a dictionary of mappability scores from a file.
This function reads a mappability file and creates a dictionary with chromosomes as keys and mappability scores as values.
Parameters
----------
mappability_file : str
The path to the mappability file. Each line in the file should have the format:
chromosome, start_pos, end_pos, score.
Returns
-------
dict
A dictionary where keys are chromosome names and values are another dictionary with start positions as keys
and mappability scores as values.
"""
logging.info("Entering dico_mappability")
start_time = time.time()
mappability_dico = {}
logging.info(" In dico_mappability : Open file and create the dico")
start_time_1 = time.time()
with open(mappability_file, "r") as f:
for line in f:
fields = line.strip().split("\t")
if len(fields) < 4:
continue
chromosome = fields[0]
start_pos = int(fields[1])
end_pos = int(fields[2])
score = float(fields[3])
if chromosome not in mappability_dico:
mappability_dico[chromosome] = []
mappability_dico[chromosome].append((start_pos, end_pos, score))
end_time_1 = time.time()
elapsed_time_1 = end_time_1 - start_time_1
logging.info(f"In dico_mappability : Leaving file and create the dico (Time taken: {elapsed_time_1:.4f} seconds)")
# Add position 0 for each chromosome
logging.info(" In dico_mappability : Add position 0 for each chromosome")
start_time_2 = time.time()
for chromosome, intervals in mappability_dico.items():
if intervals[0][0] != 0:
mappability_dico[chromosome].insert(0, (0, intervals[0][0], 0))
end_time_2 = time.time()
elapsed_time_2 = end_time_2 - start_time_2
logging.info(f"In dico_mappability : Ending add position 0 for each chromosome (Time taken: {elapsed_time_2:.4f} seconds)")
# Merge intervals with the same score
logging.info(" In dico_mappability : Merge intervals with the same score")
start_time_3 = time.time()
for chromosome, intervals in mappability_dico.items():
merged_intervals = merge_intervals(intervals)
mappability_dico[chromosome] = {
start: score for start, _, score in merged_intervals
}
end_time_3 = time.time()
elapsed_time_3 = end_time_3 - start_time_3
logging.info(f"In dico_mappability : Ending merge intervals with the same score (Time taken: {elapsed_time_2:.4f} seconds)")
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving dico_mappability (Time taken: {elapsed_time:.4f} seconds)")
return mappability_dico
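# Illustrative example (not executed): for a bedgraph containing
#   chr1    0       10000   0.5
#   chr1    10000   20000   1.0
# dico_mappabilite returns {"chr1": {0: 0.5, 10000: 1.0}}, i.e. each start position
# mapped to the mappability score that applies from there on.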
def calcul_mappability(seq_length, mappability, chr):
"""
Calculate mappability array for a given sequence length and chromosome.
This function generates an array of mappability scores for a specific chromosome and sequence length.
Parameters
----------
seq_length : int
The length of the sequence.
mappability : dict
A dictionary containing mappability information, with chromosomes as keys and dictionaries of start positions
and scores as values.
chr : str
The chromosome for which the mappability is calculated.
Returns
-------
numpy.ndarray
An array of mappability scores for the given sequence length.
"""
logging.info(f"Entering calcul_mappability for {chr}")
start_time = time.time()
map_data = np.zeros(seq_length, dtype=np.float32)
sorted_keys = sorted(mappability[chr].keys())
prev_bound = 0
prev_mappability = 0
logging.info(f"In calcul_mappability : Entering for bound in sorted_keys for {chr}")
start_time_1 = time.time()
for bound in sorted_keys:
for i in range(prev_bound, min(seq_length, bound)):
map_data[i] = prev_mappability
prev_bound = bound
prev_mappability = mappability[chr][bound]
end_time_1 = time.time()
elapsed_time_1 = end_time_1 - start_time_1
logging.info(f"In calcul_mappability : Leaving for bound in sorted_keys for {chr} (Time taken: {elapsed_time_1:.4f} seconds)")
# Fill remaining positions if sequence length exceeds last bound
logging.info(f"In calcul_mappability : Entering for i in range(prev_bound, seq_length) for {chr}")
start_time_2 = time.time()
for i in range(prev_bound, seq_length):
map_data[i] = prev_mappability
end_time_2 = time.time()
elapsed_time_2 = end_time_2 - start_time_2
logging.info(f"In calcul_mappability : Leaving for i in range(prev_bound, seq_length) for {chr} (Time taken: {elapsed_time_2:.4f} seconds)")
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving dico_mappability for {chr} (Time taken: {elapsed_time:.4f} seconds)")
return map_data
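# Illustrative example (not executed): with mappability = {"chr1": {0: 0.0, 100: 0.5, 200: 1.0}}
# and seq_length = 250, calcul_mappability fills positions 0-99 with 0.0, 100-199 with 0.5
# and 200-249 with 1.0 (the last score is carried through to the end of the sequence).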
def parse_fasta(gc_file):
"""
Parse a FASTA file and extract sequences.
This function reads a FASTA file and extracts the sequences, storing them in a dictionary with headers as keys.
Parameters
----------
gc_file : str
The path to the FASTA file. The file should be in standard FASTA format.
Returns
-------
dict
A dictionary where keys are sequence headers and values are the corresponding sequences.
"""
logging.info("Entering parse_fasta")
start_time = time.time()
sequences = {}
with open(gc_file, "r") as f:
data = f.read().split(">")
for entry in data[1:]:
lines = entry.split("\n")
header = lines[0]
sequence = "".join(lines[1:])
sequences[header] = sequence
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving parse_fasta (Time taken: {elapsed_time:.4f} seconds)")
return sequences
def calcul_gc_content(seq_length, chr, seq):
"""
Calculate the GC content of a given sequence.
This function generates an array representing the GC content for a specific chromosome and sequence length.
Parameters
----------
seq_length : int
The length of the sequence.
chr : str
The chromosome for which the GC content is calculated.
seq : dict
A dictionary containing sequences, with chromosome names as keys and sequences as values.
Returns
-------
numpy.ndarray
An array of bytes ('S' dtype) representing the GC content for the given sequence length.
"""
logging.info(f"Entering calcul_gc_content for {chr}")
start_time = time.time()
gc_data = np.zeros(seq_length, dtype="S")
for i in range(len(seq[chr])):
gc_data[i] = seq[chr][i]
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving calcul_gc_content for {chr} (Time taken: {elapsed_time:.4f} seconds)")
return gc_data
def calcul_depth_seq(seq_length, bamfile_path, chr):
"""
Calculate the sequencing depth for a given chromosome.
This function computes the sequencing depth for a specific chromosome and sequence length using a BAM file.
Parameters
----------
seq_length : int
The length of the sequence.
    bamfile_path : pysam.AlignmentFile
        An open pysam.AlignmentFile handle on the BAM file.
chr : str
The chromosome for which the depth is calculated.
Returns
-------
numpy.ndarray
An array of integers representing the sequencing depth for the given sequence length.
"""
logging.info(f"Entering calcul_depth_seq for {chr}")
start_time = time.time()
depth_data = np.zeros(seq_length, dtype=np.int32)
for pileupcolumn in bamfile_path.pileup(reference = chr):
pos = pileupcolumn.reference_pos
if pos >= seq_length:
break
depth_data[pos] = pileupcolumn.nsegments
#depth_data = bamfile_path.pileup().nsegments
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving calcul_depth_seq for {chr} (Time taken: {elapsed_time:.4f} seconds)")
return depth_data
def calcul_depth_seq_copy(seq_length, bamfile_path, chr):
"""
Calculate the sequencing depth for a given chromosome.
This function computes the sequencing depth for a specific chromosome and sequence length using a BAM file.
Parameters
----------
seq_length : int
The length of the sequence.
    bamfile_path : pysam.AlignmentFile
        An open pysam.AlignmentFile handle on the BAM file.
chr : str
The chromosome for which the depth is calculated.
Returns
-------
numpy.ndarray
An array of integers representing the sequencing depth for the given sequence length.
"""
logging.info(f"Entering calcul_depth_seq for {chr}")
start_time = time.time()
depth_data = np.zeros(seq_length, dtype=np.int32)
i = 0
it = bamfile_path.pileup(reference = chr)
while i < (seq_length - 1):
t = next(it)
pos = t.reference_pos
depth_data[pos] = t.nsegments
print(i)
i += 1
#for pileupcolumn in bamfile_path.pileup():
# pos = pileupcolumn.reference_pos
# if pos >= seq_length:
# break
#depth_data[pos] = pileupcolumn.nsegments
#print(bamfile_path.pileup()) print("########################")
# depth_data = np.array(bamfile_path.pileup().nsegments)
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving calcul_depth_seq for {chr} (Time taken: {elapsed_time:.4f} seconds)")
return depth_data
def calcul_depth_seq_count(seq_length, bamfile_path, chr):
"""
Calculate the sequencing depth for a given chromosome.
This function computes the sequencing depth for a specific chromosome and sequence length using a BAM file.
Parameters
----------
seq_length : int
The length of the sequence.
    bamfile_path : pysam.AlignmentFile
        An open pysam.AlignmentFile handle on the BAM file.
chr : str
The chromosome for which the depth is calculated.
Returns
-------
numpy.ndarray
An array of integers representing the sequencing depth for the given sequence length.
"""
logging.info(f"Entering calcul_depth_seq_count for {chr}")
start_time = time.time()
# Use count_coverage to get coverage depth
depth_data_count = bamfile_path.count_coverage(contig = chr)
type(depth_data_count)
#print(depth_data_count[:10056])
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving calcul_depth_seq for {chr} (Time taken: {elapsed_time:.4f} seconds)")
return depth_data_count
def calcul_depth_seq_samtools(seq_length, bamfile_path, chr, num_threads=12):
"""
Calculate the sequencing depth for a given chromosome using a parallelized bash script.
Parameters
----------
seq_length : int
The length of the sequence.
bamfile_path : str
The path to the BAM file.
chr : str
The chromosome for which the depth is calculated.
num_threads : int
The number of threads to use for parallel processing.
Returns
-------
numpy.ndarray
An array of integers representing the sequencing depth for the given sequence length.
"""
logging.info(f"Entering calcul_depth_seq for {chr}")
start_time = time.time()
#Define the output file for depth results
output_file_1 = f"/work/gad/shared/analyse/test/cnvGPU/test_scalability/tmp_depth_{chr}.out"
# Run the parallelized bash script to calculate depth
p = subprocess.Popen(["/work/gad/shared/analyse/test/cnvGPU/test_scalability/samtools_test_by_chr_multithreading_log.sh %s %s %s %s" % (bamfile_path, chr, output_file_1, num_threads)], shell = True, executable = "/bin/bash")
p.communicate()
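    # The helper script is assumed to write samtools-depth style lines
    # ("chromosome<TAB>position<TAB>depth", 1-based positions) into output_file_1,
    # which is what the pandas parsing below expects.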
# Create the numpy array for depth data
depth_data = np.zeros(seq_length, dtype=np.int32)
# Read the output file and fill the numpy array using pandas for efficiency
try:
df = pd.read_csv(output_file_1, delim_whitespace=True, header=None, names=['chr', 'pos', 'depth'])
df['pos'] -= 1 # Convert to 0-based index
df = df[df['pos'] < seq_length] # Ensure positions are within sequence length
depth_data[df['pos']] = df['depth']
except Exception as e:
logging.error(f"Error reading depth file: {e}")
# Clean up the output file
subprocess.run(['rm', output_file_1])
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving calcul_depth_seq for {chr} (Time taken: {elapsed_time:.4f} seconds)")
return depth_data
def calcul_med_total(depth_correction_results, chr):
"""
Calculate the median of non-zero depth correction results.
This function filters out zero values from the depth correction results and computes the median of the remaining values.
Parameters
----------
depth_correction_results : list or numpy.ndarray
A list or array of depth correction values.
Returns
-------
float
The median of the non-zero depth correction values, or 0 if no non-zero values are present.
"""
logging.info(f"Entering calcul_med_total for {chr}")
start_time = time.time()
depth_correction_results = np.array(depth_correction_results)
# Filter results to remove zero values
non_zero_results = depth_correction_results[depth_correction_results != 0]
# Calculate the median of non-zero results
m = np.median(non_zero_results) if non_zero_results.size > 0 else 0
sys.stderr.write("Chromosome : %s, med_chr : %s\n" % (chr, m))
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving calcul_med_total for {chr} (Time taken: {elapsed_time:.4f} seconds)")
return m
def calcul_med_same_gc(gc_results, depth_correction_results, chr):
"""
Calculate the median depth correction for each unique GC content value.
This function computes the median depth correction values for each unique GC content value, filtering out zero values.
Parameters
----------
gc_results : list or numpy.ndarray
A list or array of GC content values.
depth_correction_results : list or numpy.ndarray
A list or array of depth correction values.
Returns
-------
dict
A dictionary where keys are unique GC content values and values are the median depth correction for those GC values.
"""
logging.info(f"Entering calcul_med_same_gc for {chr}")
start_time = time.time()
mGC = []
depth_correction_results_array = np.array(depth_correction_results)
unique_gc_values = np.unique(gc_results)
for gc in unique_gc_values:
indices = np.where(
gc_results == gc
) # Get positions of each unique GC value in gc_results
# Filter depth correction results to remove zero values
filtered_depths = depth_correction_results_array[indices][
depth_correction_results_array[indices] != 0
]
if (
filtered_depths.size > 0
): # Calculate median only if filtered results are not empty
median_gc = np.median(filtered_depths)
else:
median_gc = 0 # Or another default value if all results are 0
mGC.append((gc, median_gc))
gc_to_median = dict(mGC)
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving calcul_med_same_gc for {chr} (Time taken: {elapsed_time:.4f} seconds)")
return gc_to_median
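# Illustrative example (not executed): with gc_results = np.array([40., 40., 60.]) and
# depth_correction_results = np.array([10., 20., 0.]), the function returns
# {40.0: 15.0, 60.0: 0}: zero-depth windows are discarded before taking the median,
# so GC bins left with no values fall back to 0.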
def calcul_moy_totale(normalize_depth_results, chr):
"""
Calculate the mean of non-zero normalized depth results.
This function filters out zero values from the normalized depth results and computes the mean of the remaining values.
Parameters
----------
normalize_depth_results : list or numpy.ndarray
A list or array of normalized depth values.
Returns
-------
float
The mean of the non-zero normalized depth values, or 0 if no non-zero values are present.
"""
logging.info(f"Entering calcul_moy_totale for {chr}")
start_time = time.time()
normalize_depth_results = np.array(normalize_depth_results)
# Filter results to remove zero values
non_zero_results = normalize_depth_results[normalize_depth_results != 0]
# Calculate the mean of non-zero results
mean_chr = np.mean(non_zero_results) if non_zero_results.size > 0 else 0
sys.stderr.write("Chromosome : %s, mean_chr : %s\n" % (chr, mean_chr))
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving calcul_moy_totale for {chr} (Time taken: {elapsed_time:.4f} seconds)")
return mean_chr
def calcul_std(normalize_depth_results, chr):
"""
Calculate the standard deviation of non-zero normalized depth results.
This function filters out zero values from the normalized depth results and computes the standard deviation of the remaining values.
Parameters
----------
normalize_depth_results : list or numpy.ndarray
A list or array of normalized depth values.
Returns
-------
float
The standard deviation of the non-zero normalized depth values, or 0 if no non-zero values are present.
"""
logging.info(f"Entering calcul_std for {chr}")
start_time = time.time()
normalize_depth_results = np.array(normalize_depth_results)
# Filter results to remove zero values
non_zero_results = normalize_depth_results[normalize_depth_results != 0]
# Calculate the standard deviation of non-zero results
std_chr = np.std(non_zero_results) if non_zero_results.size > 0 else 0
sys.stderr.write("Chromosome : %s, std_chr : %s\n" % (chr, std_chr))
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving calcul_std for {chr} (Time taken: {elapsed_time:.4f} seconds)")
return std_chr
def compute_mean_std_med(ratio_par_window_results, chr):
"""
Compute the mean, standard deviation, and median of non-zero ratio results per window.
This function filters out zero and -1 values from the ratio results per window and computes the mean, standard deviation, and median of the remaining values. Values greater than or equal to 5 are capped at 5.
Parameters
----------
ratio_par_window_results : list or numpy.ndarray
A list or array of ratio values per window.
Returns
-------
tuple
A tuple containing the mean, standard deviation, and median of the filtered ratio values.
"""
logging.info(f"Entering compute_mean_std_med for {chr}")
start_time = time.time()
# Filter results to remove zero and -1 values
ratio_par_window_results = np.array(ratio_par_window_results)
non_zero_results = ratio_par_window_results[ratio_par_window_results != 0]
# Initialize list for stats computation
table = []
for value in non_zero_results:
if float(value) >= 5:
table.append(5)
elif float(value) != -1:
table.append(float(value))
# Calculate the mean, standard deviation, and median of the filtered values
mean_ratio = np.mean(table) if table else 0
std_ratio = np.std(table) if table else 0
med_ratio = np.median(table) if table else 0
sys.stderr.write("Chromosome : %s, mean_ratio : %s, std_ratio : %s, med_ratio : %s\n" % (chr, mean_ratio, std_ratio, med_ratio))
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving compute_mean_std_med for {chr} (Time taken: {elapsed_time:.4f} seconds)")
# Return results
return mean_ratio, std_ratio, med_ratio
def cn_level(x, chr):
"""
Determine the copy number level based on the given value.
This function returns the copy number level based on the input value `x`.
Parameters
----------
x : float
The input value used to determine the copy number level.
Returns
-------
int
The copy number level:
- 0 if x < 0.1
- 1 if 0.1 <= x <= 0.75
- 2 if 0.75 < x < 1 or round(x) == 1
- round(x) if round(x) > 1
"""
if x < 0.1:
return 0
elif x <= 0.75:
return 1
elif x < 1:
return 2
else:
rounded_x = round(x)
return 2 if rounded_x == 1 else rounded_x
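# Illustrative examples (not executed) of the resulting copy-number levels:
#   cn_level(0.05, "chr1") -> 0
#   cn_level(0.50, "chr1") -> 1
#   cn_level(0.95, "chr1") -> 2
#   cn_level(1.40, "chr1") -> 2   # round(1.4) == 1, remapped to 2
#   cn_level(3.10, "chr1") -> 3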
def get_sample_name(bamfile_path):
"""
Extract the sample name from a BAM file.
This function reads the header of a BAM file to extract the sample name from the read groups.
Parameters
----------
bamfile_path : str
The path to the BAM file.
Returns
-------
str
The sample name extracted from the BAM file. If no sample name is found, returns "UnknownSample".
"""
logging.info(f"Entering get_sample_name")
start_time = time.time()
with pysam.AlignmentFile(bamfile_path, "rb") as bamfile:
for read_group in bamfile.header.get("RG", []):
if "SM" in read_group:
return read_group["SM"]
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving get_sample_name (Time taken: {elapsed_time:.4f} seconds)")
return "UnknownSample"
def create_signal(signal, chr, z_score_results, step_size):
"""
Create a signal dictionary for a specific chromosome based on z-score results.
This function populates a signal dictionary with positions and corresponding z-score results for a given chromosome.
Parameters
----------
signal : dict
A dictionary to store the signal data.
chr : str
The chromosome for which the signal is created.
z_score_results : list or numpy.ndarray
A list or array of z-score results.
step_size : int
The step size used to calculate the positions.
Returns
-------
None
The function modifies the signal dictionary in place.
"""
logging.info(f"Entering create_signal for {chr} (include copy_number_level)")
start_time = time.time()
if chr not in signal:
signal[chr] = {}
for i in range(len(z_score_results)):
pos = (i * step_size) + 1
signal[chr][pos] = z_score_results[i]
#sys.stderr.write("\t signal %s\n" % signal[chr])
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving create_signal for {chr} (include copy_number_level) (Time taken: {elapsed_time:.4f} seconds)")
def detect_events(
z_score_results,
zscore_threshold,
events,
med_ratio,
ratio_par_mean_ratio_results,
chr,
):
"""
Detect genomic events based on z-score results and a z-score threshold.
This function identifies significant genomic events where z-scores exceed the given threshold. Events are recorded in the `events` dictionary for the specified chromosome.
Parameters
----------
z_score_results : list or numpy.ndarray
A list or array of z-score values.
zscore_threshold : float
The threshold for detecting significant z-score events.
events : dict
A dictionary to store detected events.
med_ratio : float
The median ratio used for copy number level calculations.
ratio_par_mean_ratio_results : list or numpy.ndarray
A list or array of ratio values compared to the mean ratio.
chr : str
The chromosome for which events are detected.
Returns
-------
None
The function modifies the events dictionary in place.
"""
logging.info(f"Entering detect_events for {chr}")
start_time = time.time()
c = 0
if chr not in events:
events[chr] = {}
for i, z_score in enumerate(z_score_results):
if abs(z_score) >= zscore_threshold:
if med_ratio != 0:
c = cn_level(float(ratio_par_mean_ratio_results[i]), chr)
if z_score >= 0:
c = 3
elif c == 2 and z_score < 0:
c = 1
pos_start = (i * step_size) + 1
pos_end = pos_start + window_size
events[chr][(pos_start, pos_end)] = c
#sys.stderr.write("\t events %s\n" % events)
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving detect_events for {chr} (Time taken: {elapsed_time:.4f} seconds)")
def segmentation(events, segment, chr):
"""
Segment the detected events into contiguous regions with the same copy number level.
This function processes the detected events and groups contiguous regions with the same copy number level into segments.
Parameters
----------
events : dict
A dictionary of detected events for each chromosome.
segment : dict
A dictionary to store the segmented regions.
Returns
-------
None
The function modifies the segment dictionary in place.
"""
logging.info(f"Entering segmentation for {chr}")
start_time = time.time()
for k in events.keys():
sys.stderr.write("\tfor chromosome %s\n" % k)
starts, oldPos, oldLevel = 1, 1, -1
for p in sorted(events[k].keys()):
level = events[k][p]
#sys.stderr.write("\t p %s\n" % p)
# new coordinates
if p[0] > (oldPos + window_size):
if (starts != 1) and (starts != p[0]):
if k not in segment:
segment[k] = {}
index = str(starts) + "-" + str(oldPos)
segment[k][index] = {}
segment[k][index]["start"] = starts
segment[k][index]["end"] = oldPos + window_size
segment[k][index]["cn"] = oldLevel
oldPos, starts, oldLevel = p[0], p[0], level
continue
else:
starts = p[0]
# case where it's contiguous but different level
if level != oldLevel:
if oldLevel != -1:
if k not in segment:
segment[k] = {}
index = str(starts) + "-" + str(oldPos)
segment[k][index] = {}
segment[k][index]["start"] = starts
segment[k][index]["end"] = oldPos
segment[k][index]["cn"] = oldLevel
oldPos, starts, oldLevel = p[0], p[0], level
continue
else:
oldLevel = level
oldPos, oldLevel = p[0], level
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving segmentation for {chr} (Time taken: {elapsed_time:.4f} seconds)")
def display_results_vcf(sample, segment, signal, lengthFilter, output_file, chr):
"""
Generate a VCF file containing structural variant calls based on segmented regions and signal data.
This function creates a VCF (Variant Call Format) file containing structural variant calls derived from segmented regions and signal data. The structural variant type (DEL for deletion or DUP for duplication) is determined based on copy number levels and signal values. The resulting VCF file includes information about the chromosome, position, type of structural variant, copy number, and other relevant information.
Parameters
----------
sample : str
The sample name to be included in the VCF file header.
segment : dict
A dictionary containing segmented regions with copy number information.
signal : dict
A dictionary containing signal data for each chromosome.
lengthFilter : int
The minimum length threshold for including variants in the VCF file.
output_file : str
The path to the output VCF file.
Returns
-------
None
This function writes the structural variant calls to the specified output file in VCF format.
"""
global header_written
logging.info(f"Entering display_results_vcf for {chr}")
start_time = time.time()
with open(output_file, "a") as f:
if not header_written:
f.write("##fileformat=VCFv4.2\n")
f.write("##source=cnvcaller\n")
f.write(
'##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">\n'
)
f.write(
'##INFO=<ID=SVLEN,Number=.,Type=Integer,Description="Difference in length between REF and ALT alleles">\n'
)
f.write(
'##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">\n'
)
f.write('##INFO=<ID=CN,Number=1,Type=Integer,Description="Copy number">\n')
f.write('##ALT=<ID=DEL,Description="Deletion">\n')
f.write('##ALT=<ID=DUP,Description="Duplication">\n')
            f.write('##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n')
            f.write('##FORMAT=<ID=GQ,Number=1,Type=Integer,Description="Genotype Quality">\n')
f.write(
"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\n" % (sample)
)
header_written = True
for k in segment.keys():
#sys.stderr.write("\tfor chromosome %s\n" % k)
for elt in sorted(segment[k].keys()):
#sys.stderr.write("\tfor elt %s\n" % elt)
if segment[k][elt]["start"] == segment[k][elt]["end"]:
continue
if (segment[k][elt]["end"] - segment[k][elt]["start"]) < lengthFilter:
continue
#sys.stderr.write("\t [segment[k][elt] %s\n" % [segment[k][elt]])
if int(signal[k][segment[k][elt]["start"]]) < 0:
f.write(
"%s\t%s\t.\tN\t<DEL>\t.\t.\tSVTYPE=DEL;END=%s;VALUE=%s\tGT:GQ\t./.:0\n"
% (
k,
segment[k][elt]["start"],
segment[k][elt]["end"],
int(segment[k][elt]["cn"]),
)
)
else:
f.write(
"%s\t%s\t.\tN\t<DUP>\t.\t.\tSVTYPE=DUP;END=%s;VALUE=%s\tGT:GQ\t./.:0\n"
% (
k,
segment[k][elt]["start"],
segment[k][elt]["end"],
int(segment[k][elt]["cn"]),
)
)
end_time = time.time()
elapsed_time = end_time - start_time
logging.info(f"Leaving display_results_vcf for {chr} (Time taken: {elapsed_time:.4f} seconds)")
def main_calcul(
bamfile_path,
chr,
seq_length,
window_size,
step_size,
zscore_threshold,
lengthFilter,
output_file,
sample,
):
"""
Perform structural variant detection and VCF file generation.
This function orchestrates a series of computations and data manipulations,
leveraging GPU acceleration for performance improvements in genomic data analysis.
Parameters
----------
bamfile_path : str
Path to the BAM file containing aligned reads.
chr : str
Chromosome identifier for which analysis is performed.
seq_length : int
Length of the chromosome sequence.
window_size : int
Size of the sliding window used for analysis.
step_size : int
Size of the step when moving the window along the chromosome.
zscore_threshold : float
Threshold value for detecting significant events based on Z-scores.
lengthFilter : int
Minimum length threshold for including variants in the VCF file.
output_file : str
Path to the output VCF file.
sample : str
Name of the sample being analyzed.
Returns
-------
None
"""
sys.stderr.write("\t entering main_calcul\n")
global seq
events = {}
segment = {}
signal = {}
    # Call the various computation functions
map_data = calcul_mappability(seq_length, mappability, chr)
gc_data = calcul_gc_content(seq_length, chr, seq)
#depth_data = calcul_depth_seq(seq_length, bamfile_path, chr)
#depth_data_count = calcul_depth_seq_count(seq_length, bamfile_path, chr)
#depth_data = calcul_depth_seq_copy(seq_length, bamfile_path, chr)
depth_data = calcul_depth_seq_samtools(seq_length, bamfile_path, chr)
    # Transfer the NumPy arrays to the GPU
d_depth_data = cuda.mem_alloc(depth_data.nbytes)
cuda.memcpy_htod(d_depth_data, depth_data)
sys.stderr.write(
"\t d_depth_data : %s, %s\n"
% (d_depth_data, d_depth_data.as_buffer(sys.getsizeof(depth_data)))
)
#d_depth_data_count = cuda.mem_alloc(depth_data_count.nbytes)
#cuda.memcpy_htod(d_depth_data_count, depth_data_count)
#sys.stderr.write(
# "\t d_depth_data_count : %s, %s\n"
# % (d_depth_data_count, d_depth_data_count.as_buffer(sys.getsizeof(depth_data_count)))
#)
d_gc_data = cuda.mem_alloc(gc_data.nbytes)
cuda.memcpy_htod(d_gc_data, gc_data)
sys.stderr.write(
"\t d_gc_data : %s, %s\n"
% (d_gc_data, d_gc_data.as_buffer(sys.getsizeof(gc_data)))
)
d_map_data = cuda.mem_alloc(map_data.nbytes)
cuda.memcpy_htod(d_map_data, map_data)
sys.stderr.write(
"\t d_map_data : %s, %s\n"
% (d_map_data, d_map_data.as_buffer(sys.getsizeof(map_data)))
)
    # Compute the CUDA block and grid sizes
    block_size = num_cores
    sys.stderr.write("\t block_size (number of threads) = %s\n" % (num_cores))
    grid_size = int((int((seq_length - window_size) / step_size) + 1) / block_size) + 1
    sys.stderr.write("\t grid_size = %s\n" % (grid_size))
    # Initialize host arrays for the results
depth_results = np.zeros(
int((seq_length - window_size) / step_size) + 1, dtype=np.float32
)
sys.stderr.write("\t Definition de depth_results\n")
depth_results_count = np.zeros(
int((seq_length - window_size) / step_size) + 1, dtype=np.float32
)
sys.stderr.write("\t Definition de depth_results_count\n")
gc_results = np.zeros(
int((seq_length - window_size) / step_size) + 1, dtype=np.float32
)
sys.stderr.write("\t Definition de gc_results\n")
map_results = np.zeros(
int((seq_length - window_size) / step_size) + 1, dtype=np.float32
)
sys.stderr.write("\t Definition de map_results\n")
depth_correction_results = np.zeros(
int((seq_length - window_size) / step_size) + 1, dtype=np.float32
)
sys.stderr.write("\t Definition de depth_correction_results\n")
normalize_depth_results = np.zeros(
int((seq_length - window_size) / step_size) + 1, dtype=np.float32
)
sys.stderr.write("\t Definition de normalize_depth_results\n")
ratio_par_window_results = np.zeros(
int((seq_length - window_size) / step_size) + 1, dtype=np.float32
)
sys.stderr.write("\t Definition de ratio_par_window\n")
z_score_results = np.zeros(
int((seq_length - window_size) / step_size) + 1, dtype=np.float32
)
sys.stderr.write("\t Definition de z_score_results\n")
ratio_par_mean_ratio_results = np.zeros(
int((seq_length - window_size) / step_size) + 1, dtype=np.float32
)
sys.stderr.write("\t Definition de ratio_par_mean_ratio_results\n")
    # Allocate device memory for the results
d_depth_results = cuda.mem_alloc(depth_results.nbytes)
sys.stderr.write(
"\t d_depth_results = %s\n"
% d_depth_results.as_buffer(sys.getsizeof(d_depth_results))
)
sys.stderr.write("\t depth_results.nbytes = %s\n" % depth_results.nbytes)
d_depth_results_count = cuda.mem_alloc(depth_results_count.nbytes)
sys.stderr.write(
"\t d_depth_results_count = %s\n"
% d_depth_results_count.as_buffer(sys.getsizeof(d_depth_results_count))
)
sys.stderr.write("\t depth_results_count.nbytes = %s\n" % depth_results_count.nbytes)
d_gc_results = cuda.mem_alloc(gc_results.nbytes)
sys.stderr.write(
"\t d_gc_results = %s\n" % d_gc_results.as_buffer(sys.getsizeof(d_gc_results))
)
sys.stderr.write("\t gc_results.nbytes = %s\n" % gc_results.nbytes)
d_map_results = cuda.mem_alloc(map_results.nbytes)
sys.stderr.write(
"\t d_map_results = %s\n"
% d_map_results.as_buffer(sys.getsizeof(d_map_results))
)
sys.stderr.write("\t map_results.nbytes = %s\n" % map_results.nbytes)
d_depth_correction_results = cuda.mem_alloc(depth_correction_results.nbytes)
sys.stderr.write(
"\t d_depth_correction_results = %s\n"
% d_depth_correction_results.as_buffer(
sys.getsizeof(d_depth_correction_results)
)
)
sys.stderr.write(
"\t depth_correction_results.nbytes = %s\n" % depth_correction_results.nbytes
)
d_normalize_depth_results = cuda.mem_alloc(normalize_depth_results.nbytes)
sys.stderr.write(
"\t d_normalize_depth_results = %s\n"
% d_normalize_depth_results.as_buffer(sys.getsizeof(d_normalize_depth_results))
)
sys.stderr.write(
"\t normalize_depth_results.nbytes = %s\n" % normalize_depth_results.nbytes
)
d_ratio_par_window_results = cuda.mem_alloc(ratio_par_window_results.nbytes)
sys.stderr.write(
"\t d_ratio_par_window_results = %s\n"
% d_ratio_par_window_results.as_buffer(
sys.getsizeof(d_ratio_par_window_results)
)
)
sys.stderr.write(
"\t ratio_par_window_results.nbytes = %s\n" % ratio_par_window_results.nbytes
)
d_z_score_results = cuda.mem_alloc(z_score_results.nbytes)
sys.stderr.write(
"\t d_z_score_results = %s\n"
% d_z_score_results.as_buffer(sys.getsizeof(d_z_score_results))
)
sys.stderr.write("\t z_score_results.nbytes = %s\n" % z_score_results.nbytes)
d_ratio_par_mean_ratio_results = cuda.mem_alloc(ratio_par_mean_ratio_results.nbytes)
sys.stderr.write(
"\t d_ratio_par_mean_ratio_results = %s\n"
% d_ratio_par_mean_ratio_results.as_buffer(
sys.getsizeof(d_ratio_par_mean_ratio_results)
)
)
sys.stderr.write(
"\t ratio_par_mean_ratio_results.nbytes = %s\n"
% ratio_par_mean_ratio_results.nbytes
)
    # Launch the depth kernel on the GPU
calcul_depth_kernel_cuda(
d_depth_data,
np.int32(seq_length),
np.int32(window_size),
np.int32(step_size),
d_depth_results,
block=(block_size, 1, 1),
grid=(grid_size, 1),
)
sys.stderr.write("\t appel fonction calcul_depth_kernel_cuda\n")
#calcul_depth_count_kernel_cuda(
# d_depth_data_count,
# np.int32(seq_length),
# np.int32(window_size),
#np.int32(step_size),
#d_depth_results,
#block=(block_size, 1, 1),
#grid=(grid_size, 1),
#)
#sys.stderr.write("\t appel fonction calcul_depth_count_kernel_cuda\n")
calcul_gc_kernel_cuda(
d_gc_data,
np.int32(seq_length),
np.int32(window_size),
np.int32(step_size),
d_gc_results,
block=(block_size, 1, 1),
grid=(grid_size, 1),
)
sys.stderr.write("\t appel fonction calcul_gc_kernel_cuda\n")
calcul_map_kernel_cuda(
d_map_data,
np.int32(seq_length),
np.int32(window_size),
np.int32(step_size),
d_map_results,
block=(block_size, 1, 1),
grid=(grid_size, 1),
)
sys.stderr.write("\t appel fonction calcul_map_kernel_cuda\n")
calcul_depth_correction_kernel_cuda(
d_depth_results,
d_map_results,
np.int32(seq_length),
np.int32(window_size),
np.int32(step_size),
d_depth_correction_results,
block=(block_size, 1, 1),
grid=(grid_size, 1),
)
sys.stderr.write("\t appel fonction calcul_depth_correction_kernel_cuda\n")
context.synchronize()
    # Copy the results from the CUDA device back to the host
cuda.memcpy_dtoh(depth_results, d_depth_results) # cuda.memcpy_dtoh(dest, src)
sys.stderr.write(
"\t Copie les resultats du GPU (d_depth_results) vers le CPU (depth_results)\n"
)
#cuda.memcpy_dtoh(depth_results_count, d_depth_results_count)
#sys.stderr.write(
# "\t Copie les resultats du GPU (d_depth_results_count) vers le CPU (depth_results_count)\n"
#)
cuda.memcpy_dtoh(gc_results, d_gc_results) # cuda.memcpy_dtoh(dest, src)
sys.stderr.write(
"\t Copie les resultats du GPU (d_gc_results) vers le CPU (gc_results)\n"
)
cuda.memcpy_dtoh(map_results, d_map_results) # cuda.memcpy_dtoh(dest, src)
sys.stderr.write(
"\t Copie les resultats du GPU (d_map_results) vers le CPU (map_results)\n"
)
cuda.memcpy_dtoh(
depth_correction_results, d_depth_correction_results
) # cuda.memcpy_dtoh(dest, src)
sys.stderr.write(
"\t Copie les resultats du GPU (d_depth_correction_results) vers le CPU (depth_correction_results)\n"
)
    ### NORMALIZATION ###
    # Call the median helper functions
    sys.stderr.write("\t calling the median helper functions\n")
m = calcul_med_total(depth_correction_results, chr)
gc_to_median = calcul_med_same_gc(gc_results, depth_correction_results, chr)
    # Convert gc_to_median to a NumPy array for transfer to the GPU
    sys.stderr.write("\t converting the medians to a NumPy array\n")
gc_to_median_array = np.zeros(int(max(gc_results)) + 1, dtype=np.float32)
for gc, median in gc_to_median.items():
gc_to_median_array[int(gc)] = median
    # Allocate device memory for gc_to_median
    sys.stderr.write("\t allocating GPU memory for the medians\n")
d_gc_to_median = cuda.mem_alloc(gc_to_median_array.nbytes)
cuda.memcpy_htod(d_gc_to_median, gc_to_median_array)
    # Launch the normalization kernel
normalize_depth_kernel_cuda(
d_depth_correction_results,
d_gc_results,
np.float32(m),
d_gc_to_median,
np.int32(seq_length),
np.int32(window_size),
np.int32(step_size),
d_normalize_depth_results,
block=(block_size, 1, 1),
grid=(grid_size, 1),
)
sys.stderr.write("\t appel fonction normalize_depth_kernel_cuda\n")
context.synchronize()
# Copier les resultats normalises depuis le peripherique CUDA vers l'hote
cuda.memcpy_dtoh(normalize_depth_results, d_normalize_depth_results)
    ### Per-window ratio ###
    # Call the mean helper function
    sys.stderr.write("\t calling the mean helper function\n")
    mean_chr = calcul_moy_totale(normalize_depth_results, chr)
    # Launch the ratio kernel
ratio_par_window_kernel_cuda(
d_normalize_depth_results,
np.float32(mean_chr),
np.int32(seq_length),
np.int32(window_size),
np.int32(step_size),
d_ratio_par_window_results,
block=(block_size, 1, 1),
grid=(grid_size, 1),
)
sys.stderr.write("\t appel fonction ratio_par_window_kernel_cuda\n")
context.synchronize()
# Copier les resultats ratio depuis le peripherique CUDA vers l'hote
cuda.memcpy_dtoh(ratio_par_window_results, d_ratio_par_window_results)
# Création table à partir du ratio
mean_ratio, std_ratio, med_ratio = compute_mean_std_med(ratio_par_window_results, chr)
# Appeler le kernel de calcule du ratio divisé par le ratio moyen
ratio_par_mean_ratio_kernel_cuda(
d_ratio_par_window_results,
np.float32(mean_ratio),
np.int32(seq_length),
np.int32(window_size),
np.int32(step_size),
d_ratio_par_mean_ratio_results,
block=(block_size, 1, 1),
grid=(grid_size, 1),
)
sys.stderr.write("\t appel fonction ratio_par_mean_ratio_kernel_cuda\n")
# Appeler le kernel de Z-score
z_score_kernel_cuda(
d_ratio_par_window_results,
np.float32(mean_ratio),
np.float32(std_ratio),
np.int32(seq_length),
np.int32(window_size),
np.int32(step_size),
d_z_score_results,
block=(block_size, 1, 1),
grid=(grid_size, 1),
)
sys.stderr.write("\t appel fonction z_score_kernel_cuda\n")
context.synchronize()
# Copier les resultats ratio depuis le peripherique CUDA vers l'hote
cuda.memcpy_dtoh(ratio_par_mean_ratio_results, d_ratio_par_mean_ratio_results)
cuda.memcpy_dtoh(z_score_results, d_z_score_results)
    # Build the signal dictionary
create_signal(signal, chr, z_score_results, step_size)
    # Detect candidate events
detect_events(
z_score_results,
zscore_threshold,
events,
med_ratio,
ratio_par_mean_ratio_results,
chr,
)
    # Segment contiguous events
segmentation(events, segment, chr)
    # Write the results to the VCF output
display_results_vcf(sample, segment, signal, lengthFilter, output_file, chr)
    # Write the per-window results to the output file (currently commented out)
#with open(output_file, 'a') as f:
# sys.stderr.write("\t ecriture des fichiers\n")
# for i, (depth_data, depth_correction, depth_normalize_val, gc_data, map_data, ratio, ratio_mean_ratio, z_score) in enumerate(zip(depth_results, depth_correction_results, normalize_depth_results, gc_results, map_results, ratio_par_window_results, ratio_par_mean_ratio_results, z_score_results)):
# pos_start = (i * step_size) + 1
# pos_end = pos_start + window_size
# f.write(f"{chr}\t{pos_start}\t{pos_end}\t{depth_data}\t{depth_correction}\t{depth_normalize_val}\t{gc_data}\t{map_data}\t{ratio}\t{ratio_mean_ratio}\t{z_score}\n")
# Main program
# Determine the maximum number of GPU cores
header_written = False
sample = get_sample_name(bamfile_path)
device = cuda.Device(0)
attributes = device.get_attributes()
num_cores = attributes[1]
print("Nombre de CPU: ", multiprocessing.cpu_count())
print(f"Nombre de coeurs max GPU: {num_cores}")
gc_file = "/work/gad/shared/pipeline/grch38/index/grch38_essential.fa"
mappability_file = "/work/gad/shared/pipeline/grch38/cnv/k100.Umap.MultiTrackMappability.bedgraph"
seq = parse_fasta(gc_file)
mappability = dico_mappabilite(mappability_file)
#bamfile_path_2 = "/work/gad/shared/analyse/test/cnvGPU/test_scalability/dijen1000.bam"
if num_chr == "ALL" :
with pysam.AlignmentFile(bamfile_path, "rb") as bamfile_handle:
for i, seq_length in enumerate(bamfile_handle.lengths):
chr = bamfile_handle.references[i]
#Exclude chrM
if chr == "chrM":
continue
sys.stderr.write("Chromosome : %s, seq length : %s\n" % (chr, seq_length))
            # Run the mean-depth analysis for this chromosome
main_calcul(
bamfile_path,
chr,
seq_length,
window_size,
step_size,
zscore_threshold,
lengthFilter,
output_file,
sample,
)
logging.basicConfig(
filename="%s" % (logfile),
filemode="a",
level=logging.INFO,
format="%(asctime)s %(levelname)s - %(message)s",
)
logging.info("end")
else :
with pysam.AlignmentFile(bamfile_path, "rb") as bamfile_handle:
seq_length = bamfile_handle.lengths[int(num_chr) - 1]
chr = bamfile_handle.references[int(num_chr) - 1]
sys.stderr.write("Chromosome : %s, seq length : %s\n" % (chr, seq_length))
        # Run the mean-depth analysis for this chromosome
main_calcul(
bamfile_path,
chr,
seq_length,
window_size,
step_size,
zscore_threshold,
lengthFilter,
output_file,
sample,
)
logging.basicConfig(
filename="%s" % (logfile),
filemode="a",
level=logging.INFO,
format="%(asctime)s %(levelname)s - %(message)s",
)
logging.info("end")