%pip install wandb onnx -Uq
%pip install nbformat
Calibrating hyperparameters with Weights & Biases
Overview
- Goal: Learn how to use Weights & Biases (W&B) to calibrate the hyperparameters of a deep learning model.
Weights & Biases (W&B) is a platform that AI developers use to track, visualize and manage their ML models and experiments. The coolest part is that W&B lets you log various performance metrics during training, such as training and validation loss, test-set correlations, etc. It also lets you compare different experiments or versions of your models, making it easier to identify the best-performing models and see which hyperparameter configuration is optimal.
In this notebook, we will focus on using W&B as a tool to help calibrate the hyperparameters of the TF binding prediction model and find an optimal configuration. However, we encourage you to explore the other applications that W&B offers.
1. Installing W&B
First, install W&B in your environment with the %pip install command at the top of this notebook; it should take only a couple of seconds.
Load all libraries
import torch
from torch import nn
import numpy as np
import matplotlib.pyplot as plt
import random
import os
import pandas as pd
import wandb
from scipy.stats import pearsonr
2. Log in to W&B
If you don’t already have a W&B account, sign up at https://wandb.ai. Then run the following command, which will prompt you for your API key.
wandb.login()
wandb: Using wandb-core as the SDK backend. Please refer to https://wandb.me/wandb-core for more information.
wandb: Currently logged in as: ssalazar_02 to https://api.wandb.ai. Use `wandb login --relogin` to force relogin
True
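As an optional alternative (not required by this notebook), you can authenticate non-interactively, for example on a remote machine, by setting the WANDB_API_KEY environment variable before calling wandb.login(). A minimal sketch, assuming your key is available from https://wandb.ai/authorize:

import os

# Optional: non-interactive login; replace the placeholder with your own key
os.environ["WANDB_API_KEY"] = "<your-api-key>"
wandb.login()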
3. Define your regular functions
Then define your regular functions; these remain unchanged.
def get_device():
    """
    Determines the device to use for PyTorch computations.
    Prioritizes Metal Performance Shaders (MPS), then CUDA, then CPU.
    Returns:
        torch.device: The selected device.
    """
    if torch.backends.mps.is_available():
        device = torch.device("mps")
        print("Using MPS device.")
    elif torch.cuda.is_available():
        device = torch.device("cuda")
        print("Using CUDA device.")
    else:
        device = torch.device("cpu")
        print("Using CPU device.")
    return device

# Example usage:
device = get_device()
Using MPS device.
def one_hot_encode(seq):
    """
    Given a DNA sequence, return its one-hot encoding
    """
    # Make sure seq has only allowed bases
    allowed = set("ACTGN")
    if not set(seq).issubset(allowed):
        invalid = set(seq) - allowed
        print(seq)
        raise ValueError(f"Sequence contains chars not in allowed DNA alphabet (ACGTN): {invalid}")

    # Dictionary returning one-hot encoding for each nucleotide
    nuc_d = {'A': [1.0, 0.0, 0.0, 0.0],
             'C': [0.0, 1.0, 0.0, 0.0],
             'G': [0.0, 0.0, 1.0, 0.0],
             'T': [0.0, 0.0, 0.0, 1.0],
             'N': [0.0, 0.0, 0.0, 0.0]}

    # Create array from nucleotide sequence
    vec = np.array([nuc_d[x] for x in seq], dtype='float32')

    return vec
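As a quick sanity check (illustrative only, not part of the pipeline), encoding a short sequence gives one row per base, with N mapping to all zeros:

example = one_hot_encode("ACGTN")
print(example.shape)  # (5, 4)
print(example)        # identity-like rows for A, C, G, T; a zero row for N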
def quick_split(df, split_frac=0.8, verbose=False):
    '''
    Given a df of samples, randomly split indices between
    train and test at the desired fraction
    '''
    cols = df.columns  # original columns, use to clean up reindexed cols
    df = df.reset_index()

    # shuffle indices
    idxs = list(range(df.shape[0]))
    random.shuffle(idxs)

    # split shuffled index list by split_frac
    split = int(len(idxs)*split_frac)
    train_idxs = idxs[:split]
    test_idxs = idxs[split:]

    # split dfs and return
    train_df = df[df.index.isin(train_idxs)]
    test_df = df[df.index.isin(test_idxs)]

    return train_df[cols], test_df[cols]
def split_sequences(sequences_df):
    full_train_sequences, test_sequences = quick_split(sequences_df)
    train_sequences, val_sequences = quick_split(full_train_sequences)
    print("Train:", train_sequences.shape)
    print("Val:", val_sequences.shape)
    print("Test:", test_sequences.shape)
    return train_sequences, val_sequences, test_sequences
def get_data_tensors(scores_df, sequences_df):
    # split sequences in train, validation and test sets
    train_sequences, val_sequences, test_sequences = split_sequences(sequences_df)

    # get scores for each set of sequences
    train_scores = scores_df[train_sequences['window_name'].to_list()].transpose().values.astype('float32')  # shape is (num_sequences, 300)
    val_scores = scores_df[val_sequences['window_name'].to_list()].transpose().values.astype('float32')
    test_scores = scores_df[test_sequences['window_name'].to_list()].transpose().values.astype('float32')

    train_scores = torch.tensor(train_scores, dtype=torch.float32).to(device)
    val_scores = torch.tensor(val_scores, dtype=torch.float32).to(device)
    test_scores = torch.tensor(test_scores, dtype=torch.float32).to(device)

    # get one hot encoded sequences for each set
    train_one_hot = [one_hot_encode(seq) for seq in train_sequences['sequence'].to_list()]
    train_sequences_tensor = torch.tensor(np.stack(train_one_hot))

    val_one_hot = [one_hot_encode(seq) for seq in val_sequences['sequence'].to_list()]
    val_sequences_tensor = torch.tensor(np.stack(val_one_hot))

    test_one_hot = [one_hot_encode(seq) for seq in test_sequences['sequence'].to_list()]
    test_sequences_tensor = torch.tensor(np.stack(test_one_hot))

    return train_scores, train_sequences_tensor, val_scores, val_sequences_tensor, test_scores, test_sequences_tensor
def create_dataloader(predictors, targets, batch_size, is_train=True):
    '''
    predictors: one hot encoded sequences
    targets: sequence scores
    batch_size
    is_train: if True, data is reshuffled at every epoch
    '''
    dataset = torch.utils.data.TensorDataset(predictors, targets)
    return torch.utils.data.DataLoader(dataset, batch_size, shuffle=is_train)
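For intuition, here is a small illustration with dummy tensors (not part of the pipeline): each batch yields one-hot sequences of shape (batch_size, 300, 4) and score vectors of shape (batch_size, 300).

dummy_seqs = torch.randn(8, 300, 4)    # stand-in for one-hot encoded sequences
dummy_scores = torch.randn(8, 300)     # stand-in for per-position scores
dl = create_dataloader(dummy_seqs, dummy_scores, batch_size=4)
xb, yb = next(iter(dl))
print(xb.shape, yb.shape)  # torch.Size([4, 300, 4]) torch.Size([4, 300])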
class DNA_CNN(nn.Module):
    def __init__(self,
                 seq_len,
                 num_filters=16,
                 kernel_size=10,
                 add_sigmoid=False):
        super().__init__()
        self.seq_len = seq_len
        self.add_sigmoid = add_sigmoid

        # Define layers individually
        self.conv = nn.Conv1d(in_channels=4, out_channels=num_filters, kernel_size=kernel_size)
        self.relu = nn.ReLU(inplace=True)
        self.linear = nn.Linear(num_filters*(seq_len-kernel_size+1), 300)
        self.sigmoid = nn.Sigmoid()

    def forward(self, xb):
        # reshape view to batch_size x 4 channels x seq_len:
        # permute to put channel in correct order, (batch_size, 300, 4) to (batch_size, 4, 300)
        xb = xb.permute(0, 2, 1)

        # Apply layers step by step
        x = self.conv(xb)
        x = self.relu(x)
        x = x.flatten(1)  # flatten all dimensions except batch
        out = self.linear(x)

        if self.add_sigmoid:
            out = self.sigmoid(out)
        return out
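A quick shape check with random inputs (illustrative only): a batch of one-hot sequences of shape (batch_size, 300, 4) comes out as (batch_size, 300) predictions.

m = DNA_CNN(seq_len=300, num_filters=16, kernel_size=10)
dummy_batch = torch.randn(2, 300, 4)  # random stand-in for one-hot sequences
print(m(dummy_batch).shape)           # torch.Size([2, 300])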
def process_batch(model, loss_func, x_batch, y_batch, opt=None):
    xb_out = model(x_batch.to(torch.float32))
    loss = loss_func(xb_out, y_batch)

    if opt is not None:  # backpropagate if train step (optimizer given)
        loss.backward()
        opt.step()
        opt.zero_grad()

    return loss.item(), len(x_batch)
def train_epoch(model, train_dl, loss_func, device, opt):
    model.train()
    tl = []  # train losses
    ns = []  # batch sizes, n

    # loop through batches
    for x_batch, y_batch in train_dl:
        x_batch, y_batch = x_batch.to(device), y_batch.to(device)

        t, n = process_batch(model, loss_func, x_batch, y_batch, opt=opt)

        # collect train loss and batch sizes
        tl.append(t)
        ns.append(n)

    # average the losses over all batches
    train_loss = np.sum(np.multiply(tl, ns)) / np.sum(ns)

    return train_loss
def val_epoch(model, val_dl, loss_func, device):
    # Set model to evaluation mode
    model.eval()
    with torch.no_grad():
        vl = []  # val losses
        ns = []  # batch sizes, n

        # loop through validation DataLoader
        for x_batch, y_batch in val_dl:
            x_batch, y_batch = x_batch.to(device), y_batch.to(device)

            v, n = process_batch(model, loss_func, x_batch, y_batch)

            # collect val loss and batch sizes
            vl.append(v)
            ns.append(n)

        # average the losses over all batches
        val_loss = np.sum(np.multiply(vl, ns)) / np.sum(ns)

    return val_loss
4. Modify the train_loop() function
The only function that we need to change is train_loop, because this is where we collect the metrics that we want to track with W&B. We call wandb.log with a dictionary of the metrics we want to track.
def train_loop(epochs, model, loss_func, opt, train_dl, val_dl, device):
    # keep track of losses
    train_losses = []
    val_losses = []

    # loop through epochs
    for epoch in range(epochs):
        # take a training step
        train_loss = train_epoch(model, train_dl, loss_func, device, opt)
        train_losses.append(train_loss)

        # take a validation step
        val_loss = val_epoch(model, val_dl, loss_func, device)
        val_losses.append(val_loss)

        print(f"Epoch {epoch + 1} | train loss: {train_loss:.3f} | val loss: {val_loss:.3f}")

        # log this epoch's metrics to W&B
        wandb.log({"epoch": epoch + 1,
                   "train_loss": train_loss,
                   "val_loss": val_loss})

    return train_losses, val_losses
Next, define the train_model function. We omit the plot_curves function since all performance metrics will be tracked on W&B.
def train_model(train_dl, val_dl, model, device, lr=0.01, epochs=50, lossf=None, opt=None):
    # define optimizer
    if opt:
        optimizer = opt(model.parameters(), lr=lr)
    else:  # if no opt provided, just use SGD
        optimizer = torch.optim.SGD(model.parameters(), lr=lr)

    # define loss function
    if lossf:
        loss_func = lossf
    else:  # if no loss function provided, just use MSE
        loss_func = torch.nn.MSELoss()

    # run the training loop
    train_losses, val_losses = train_loop(
        epochs,
        model,
        loss_func,
        optimizer,
        train_dl,
        val_dl,
        device)
5. Also track the correlation metrics of the test set
To evaluate each model, let’s modify the test_model() function so that W&B also keeps track of the test-set performance metrics. In this case the metrics are pearson_per_sample, test_pearsonr and best_test.
def test_model(model, test_features, test_targets):
    # Set model to evaluation mode
    model.eval()
    predictions = model(test_features.to(torch.float32).to(device)).detach().cpu().numpy()
    observations = test_targets.cpu().numpy()

    # Pearson correlation between predicted and observed scores for the first 300 test sequences
    pearson_per_sample = np.array([pearsonr(predictions[i], observations[i])[0] for i in range(300)])
    test_pearsonr = pearson_per_sample.mean()
    best_test = pearson_per_sample.max()

    # log the test metrics to W&B
    wandb.log({'test_avg_pearsonr': test_pearsonr,
               'beast_pearsonr': best_test})
6. Define the sweep configuration
Each run of a sweep trains and tests a single model with one configuration of hyperparameters. With wandb.sweep we define the set of hyperparameter values to explore; a different combination is then sampled for each run of the sweep.
sweep_config = {
    'method': 'random',
    'metric': {'name': 'test_avg_pearsonr', 'goal': 'maximize'},
    'parameters': {
        'num_filters': {'values': [4, 16]},
        'kernel_size': {'values': [5, 10]},
        'add_sigmoid': {'values': [True, False]},
        'learning_rate': {'values': [0.1, 0.05]},
        'batch_size': {'values': [16, 32, 64]},
        'optimizer': {'values': ['SGD', 'Adam']}
    }
}
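The 'random' method samples configurations at random. W&B sweeps also support a 'grid' method (enumerate every combination) and a 'bayes' method, as well as distribution-based parameter specs. The snippet below is just an illustrative variant, not the configuration used in this notebook:

alt_sweep_config = {
    'method': 'grid',  # try every combination instead of random sampling
    'metric': {'name': 'test_avg_pearsonr', 'goal': 'maximize'},
    'parameters': {
        'num_filters': {'values': [4, 16]},
        'kernel_size': {'values': [5, 10]},
        # a continuous range is possible with 'random' or 'bayes' (not 'grid'):
        # 'learning_rate': {'distribution': 'uniform', 'min': 0.01, 'max': 0.1},
        'learning_rate': {'values': [0.1, 0.05]},
        'batch_size': {'values': [16, 32, 64]},
        'optimizer': {'values': ['SGD', 'Adam']}
    }
}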
Create a project for your model; all your sweep runs will be saved in this project.
sweep_id = wandb.sweep(sweep_config, project="DNA_model")
Create sweep with ID: yfwcc62j
Sweep URL: https://wandb.ai/ssalazar_02/DNA_model/sweeps/yfwcc62j
7. Get the training, val and test sets ready for training
DIR = '/Users/sofiasalazar/Library/CloudStorage/Box-Box/imlab-data/Courses/AI-in-Genomics-2025/data/'
sequences = pd.read_csv(os.path.join(DIR, 'chr22_sequences.txt.gz'), sep="\t", compression='gzip')
scores = pd.read_csv(os.path.join(DIR, 'chr22_scores.txt.gz'), sep="\t", compression='gzip', dtype='float32')

train_scores, train_sequences_tensor, val_scores, val_sequences_tensor, test_scores, test_sequences_tensor = get_data_tensors(scores, sequences)
Train: (14808, 2)
Val: (3703, 2)
Test: (4628, 2)
8. Initialize the sweep
With wandb.init we initialize one run of the sweep and define what happens in each run. This consists of:
- Loading a configuration of hyperparameters with wandb.config
- Loading the model
- Telling wandb to track the training with wandb.watch
- Creating the dataloaders
- Training and testing the model
def train_sweep():
    with wandb.init(project="DNA_model"):
        config = wandb.config
        model = DNA_CNN(seq_len=300, num_filters=config.num_filters, kernel_size=config.kernel_size, add_sigmoid=config.add_sigmoid).to(device)
        # log="all" logs all gradients and parameters, every log_freq training steps (batches)
        wandb.watch(model, log="all", log_freq=10)
        train_loader = create_dataloader(train_sequences_tensor, train_scores, batch_size=config.batch_size)
        val_loader = create_dataloader(val_sequences_tensor, val_scores, batch_size=config.batch_size, is_train=False)
        if config.optimizer == 'SGD':
            opt = torch.optim.SGD
        else:
            opt = torch.optim.Adam
        train_model(train_loader, val_loader, model, device, epochs=30, lr=config.learning_rate, opt=opt)
        test_model(model, test_sequences_tensor, test_scores)
Finally, we launch the sweep with wandb.agent. The argument count is the number of hyperparameter combinations to try; the maximum in my case is 240 combinations.
# wandb.agent(sweep_id, train_sweep, count=240)
wandb.agent(sweep_id, train_sweep, count=6)
wandb: Agent Starting Run: 0un6by39 with config:
wandb: add_sigmoid: True
wandb: batch_size: 16
wandb: kernel_size: 10
wandb: learning_rate: 0.1
wandb: num_filters: 4
wandb: optimizer: SGD
/Users/sofiasalazar/Desktop/IM_lab/DNA_model/wandb/run-20250407_151021-0un6by39
Sweep page: https://wandb.ai/ssalazar_02/DNA_model/sweeps/yfwcc62j
Epoch 1 | train loss: 2.423 | val loss: 2.452
Epoch 2 | train loss: 2.417 | val loss: 2.447
Epoch 3 | train loss: 2.402 | val loss: 2.417
Epoch 4 | train loss: 2.352 | val loss: 2.363
Epoch 5 | train loss: 2.314 | val loss: 2.344
Epoch 6 | train loss: 2.297 | val loss: 2.334
Epoch 7 | train loss: 2.287 | val loss: 2.328
Epoch 8 | train loss: 2.279 | val loss: 2.324
Epoch 9 | train loss: 2.273 | val loss: 2.321
Epoch 10 | train loss: 2.267 | val loss: 2.318
Epoch 11 | train loss: 2.262 | val loss: 2.317
Epoch 12 | train loss: 2.257 | val loss: 2.313
Epoch 13 | train loss: 2.252 | val loss: 2.311
Epoch 14 | train loss: 2.246 | val loss: 2.311
Epoch 15 | train loss: 2.240 | val loss: 2.308
Epoch 16 | train loss: 2.235 | val loss: 2.305
Epoch 17 | train loss: 2.229 | val loss: 2.303
Epoch 18 | train loss: 2.223 | val loss: 2.301
Epoch 19 | train loss: 2.217 | val loss: 2.301
Epoch 20 | train loss: 2.211 | val loss: 2.299
Epoch 21 | train loss: 2.205 | val loss: 2.298
Epoch 22 | train loss: 2.199 | val loss: 2.296
Epoch 23 | train loss: 2.193 | val loss: 2.295
Epoch 24 | train loss: 2.187 | val loss: 2.295
Epoch 25 | train loss: 2.181 | val loss: 2.294
Epoch 26 | train loss: 2.176 | val loss: 2.294
Epoch 27 | train loss: 2.170 | val loss: 2.293
Epoch 28 | train loss: 2.165 | val loss: 2.293
Epoch 29 | train loss: 2.160 | val loss: 2.292
Epoch 30 | train loss: 2.155 | val loss: 2.292
Run history:
beast_pearsonr | ▁ |
epoch | ▁▁▁▂▂▂▂▃▃▃▃▄▄▄▄▅▅▅▅▆▆▆▆▇▇▇▇███ |
test_avg_pearsonr | ▁ |
train_loss | ██▇▆▅▅▄▄▄▄▄▄▄▃▃▃▃▃▃▂▂▂▂▂▂▂▁▁▁▁ |
val_loss | ██▆▄▃▃▃▂▂▂▂▂▂▂▂▂▁▁▁▁▁▁▁▁▁▁▁▁▁▁ |
Run summary:
beast_pearsonr | 0.68177 |
epoch | 30 |
test_avg_pearsonr | 0.23191 |
train_loss | 2.15465 |
val_loss | 2.29175 |
View project at: https://wandb.ai/ssalazar_02/DNA_model
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)
./wandb/run-20250407_151021-0un6by39/logs
wandb: Agent Starting Run: bt4sqsaj with config:
wandb: add_sigmoid: False
wandb: batch_size: 16
wandb: kernel_size: 10
wandb: learning_rate: 0.1
wandb: num_filters: 4
wandb: optimizer: SGD
/Users/sofiasalazar/Desktop/IM_lab/DNA_model/wandb/run-20250407_151218-bt4sqsaj
Sweep page: https://wandb.ai/ssalazar_02/DNA_model/sweeps/yfwcc62j
Epoch 1 | train loss: 2.387 | val loss: 2.354
Epoch 2 | train loss: 2.291 | val loss: 2.311
Epoch 3 | train loss: 2.259 | val loss: 2.314
Epoch 4 | train loss: 2.244 | val loss: 2.289
Epoch 5 | train loss: 2.229 | val loss: 2.259
Epoch 6 | train loss: 2.128 | val loss: 2.137
Epoch 7 | train loss: 2.045 | val loss: 2.091
Epoch 8 | train loss: 1.995 | val loss: 2.044
Epoch 9 | train loss: 1.934 | val loss: 1.987
Epoch 10 | train loss: 1.873 | val loss: 1.942
Epoch 11 | train loss: 1.815 | val loss: 1.886
Epoch 12 | train loss: 1.757 | val loss: 1.846
Epoch 13 | train loss: 1.708 | val loss: 1.807
Epoch 14 | train loss: 1.666 | val loss: 1.774
Epoch 15 | train loss: 1.632 | val loss: 1.746
Epoch 16 | train loss: 1.602 | val loss: 1.724
Epoch 17 | train loss: 1.576 | val loss: 1.702
Epoch 18 | train loss: 1.552 | val loss: 1.684
Epoch 19 | train loss: 1.530 | val loss: 1.667
Epoch 20 | train loss: 1.509 | val loss: 1.650
Epoch 21 | train loss: 1.489 | val loss: 1.637
Epoch 22 | train loss: 1.473 | val loss: 1.635
Epoch 23 | train loss: 1.458 | val loss: 1.622
Epoch 24 | train loss: 1.446 | val loss: 1.608
Epoch 25 | train loss: 1.435 | val loss: 1.603
Epoch 26 | train loss: 1.425 | val loss: 1.599
Epoch 27 | train loss: 1.416 | val loss: 1.593
Epoch 28 | train loss: 1.407 | val loss: 1.586
Epoch 29 | train loss: 1.397 | val loss: 1.584
Epoch 30 | train loss: 1.388 | val loss: 1.580
Run history:
beast_pearsonr | ▁ |
epoch | ▁▁▁▂▂▂▂▃▃▃▃▄▄▄▄▅▅▅▅▆▆▆▆▇▇▇▇███ |
test_avg_pearsonr | ▁ |
train_loss | █▇▇▇▇▆▆▅▅▄▄▄▃▃▃▃▂▂▂▂▂▂▁▁▁▁▁▁▁▁ |
val_loss | ███▇▇▆▆▅▅▄▄▃▃▃▃▂▂▂▂▂▂▁▁▁▁▁▁▁▁▁ |
Run summary:
beast_pearsonr | 0.92833 |
epoch | 30 |
test_avg_pearsonr | 0.57389 |
train_loss | 1.38804 |
val_loss | 1.57982 |
View project at: https://wandb.ai/ssalazar_02/DNA_model
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)
./wandb/run-20250407_151218-bt4sqsaj/logs
wandb: Agent Starting Run: 7ca57ikz with config:
wandb: add_sigmoid: True
wandb: batch_size: 16
wandb: kernel_size: 10
wandb: learning_rate: 0.05
wandb: num_filters: 32
wandb: optimizer: SGD
/Users/sofiasalazar/Desktop/IM_lab/DNA_model/wandb/run-20250407_151414-7ca57ikz
Sweep page: https://wandb.ai/ssalazar_02/DNA_model/sweeps/yfwcc62j
Epoch 1 | train loss: 2.423 | val loss: 2.451
Epoch 2 | train loss: 2.415 | val loss: 2.447
Epoch 3 | train loss: 2.408 | val loss: 2.436
Epoch 4 | train loss: 2.385 | val loss: 2.404
Epoch 5 | train loss: 2.345 | val loss: 2.370
Epoch 6 | train loss: 2.316 | val loss: 2.350
Epoch 7 | train loss: 2.300 | val loss: 2.340
Epoch 8 | train loss: 2.289 | val loss: 2.336
Epoch 9 | train loss: 2.281 | val loss: 2.328
Epoch 10 | train loss: 2.274 | val loss: 2.325
Epoch 11 | train loss: 2.268 | val loss: 2.323
Epoch 12 | train loss: 2.262 | val loss: 2.321
Epoch 13 | train loss: 2.257 | val loss: 2.318
Epoch 14 | train loss: 2.251 | val loss: 2.318
Epoch 15 | train loss: 2.247 | val loss: 2.316
Epoch 16 | train loss: 2.242 | val loss: 2.316
Epoch 17 | train loss: 2.237 | val loss: 2.315
Epoch 18 | train loss: 2.232 | val loss: 2.315
Epoch 19 | train loss: 2.227 | val loss: 2.313
Epoch 20 | train loss: 2.222 | val loss: 2.314
Epoch 21 | train loss: 2.217 | val loss: 2.314
Epoch 22 | train loss: 2.212 | val loss: 2.314
Epoch 23 | train loss: 2.206 | val loss: 2.313
Epoch 24 | train loss: 2.201 | val loss: 2.314
Epoch 25 | train loss: 2.196 | val loss: 2.315
Epoch 26 | train loss: 2.191 | val loss: 2.315
Epoch 27 | train loss: 2.186 | val loss: 2.315
Epoch 28 | train loss: 2.181 | val loss: 2.315
Epoch 29 | train loss: 2.176 | val loss: 2.315
Epoch 30 | train loss: 2.171 | val loss: 2.316
Run history:
beast_pearsonr | ▁ |
epoch | ▁▁▁▂▂▂▂▃▃▃▃▄▄▄▄▅▅▅▅▆▆▆▆▇▇▇▇███ |
test_avg_pearsonr | ▁ |
train_loss | ███▇▆▅▅▄▄▄▄▄▃▃▃▃▃▃▃▂▂▂▂▂▂▂▁▁▁▁ |
val_loss | ██▇▆▄▃▂▂▂▂▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁ |
Run summary:
beast_pearsonr | 0.6957 |
epoch | 30 |
test_avg_pearsonr | 0.21384 |
train_loss | 2.17106 |
val_loss | 2.31633 |
View project at: https://wandb.ai/ssalazar_02/DNA_model
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)
./wandb/run-20250407_151414-7ca57ikz/logs
wandb: Agent Starting Run: 2pk9jgsh with config:
wandb: add_sigmoid: False
wandb: batch_size: 32
wandb: kernel_size: 10
wandb: learning_rate: 0.1
wandb: num_filters: 32
wandb: optimizer: Adam
/Users/sofiasalazar/Desktop/IM_lab/DNA_model/wandb/run-20250407_151710-2pk9jgsh
Sweep page: https://wandb.ai/ssalazar_02/DNA_model/sweeps/yfwcc62j
Epoch 1 | train loss: 8.430 | val loss: 2.453
Epoch 2 | train loss: 2.422 | val loss: 2.457
Epoch 3 | train loss: 2.423 | val loss: 2.455
Epoch 4 | train loss: 2.424 | val loss: 2.457
Epoch 5 | train loss: 2.425 | val loss: 2.457
Epoch 6 | train loss: 2.425 | val loss: 2.459
Epoch 7 | train loss: 2.427 | val loss: 2.464
Epoch 8 | train loss: 2.428 | val loss: 2.463
Epoch 9 | train loss: 2.429 | val loss: 2.461
Epoch 10 | train loss: 2.430 | val loss: 2.463
Epoch 11 | train loss: 2.429 | val loss: 2.459
Epoch 12 | train loss: 2.430 | val loss: 2.465
Epoch 13 | train loss: 2.430 | val loss: 2.461
Epoch 14 | train loss: 2.431 | val loss: 2.458
Epoch 15 | train loss: 2.430 | val loss: 2.462
Epoch 16 | train loss: 2.431 | val loss: 2.464
Epoch 17 | train loss: 2.431 | val loss: 2.463
Epoch 18 | train loss: 2.430 | val loss: 2.461
Epoch 19 | train loss: 2.431 | val loss: 2.458
Epoch 20 | train loss: 2.430 | val loss: 2.466
Epoch 21 | train loss: 2.431 | val loss: 2.466
Epoch 22 | train loss: 2.431 | val loss: 2.463
Epoch 23 | train loss: 2.431 | val loss: 2.463
Epoch 24 | train loss: 2.430 | val loss: 2.470
Epoch 25 | train loss: 2.431 | val loss: 2.460
Epoch 26 | train loss: 2.431 | val loss: 2.459
Epoch 27 | train loss: 2.431 | val loss: 2.463
Epoch 28 | train loss: 2.431 | val loss: 2.462
Epoch 29 | train loss: 2.431 | val loss: 2.462
Epoch 30 | train loss: 2.431 | val loss: 2.461
Run history:
beast_pearsonr | ▁ |
epoch | ▁▁▁▂▂▂▂▃▃▃▃▄▄▄▄▅▅▅▅▆▆▆▆▇▇▇▇███ |
test_avg_pearsonr | ▁ |
train_loss | █▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁ |
val_loss | ▁▃▂▂▃▄▆▅▄▅▄▆▄▃▅▅▅▄▃▆▆▅▅█▄▃▅▅▅▄ |
Run summary:
beast_pearsonr | 0.35183 |
epoch | 30 |
test_avg_pearsonr | -0.00231 |
train_loss | 2.43134 |
val_loss | 2.46144 |
View project at: https://wandb.ai/ssalazar_02/DNA_model
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)
./wandb/run-20250407_151710-2pk9jgsh/logs
wandb: Agent Starting Run: ohia2jz9 with config:
wandb: add_sigmoid: True
wandb: batch_size: 32
wandb: kernel_size: 10
wandb: learning_rate: 0.05
wandb: num_filters: 32
wandb: optimizer: Adam
/Users/sofiasalazar/Desktop/IM_lab/DNA_model/wandb/run-20250407_151916-ohia2jz9
Sweep page: https://wandb.ai/ssalazar_02/DNA_model/sweeps/yfwcc62j
Epoch 1 | train loss: 2.520 | val loss: 2.554
Epoch 2 | train loss: 2.520 | val loss: 2.556
Epoch 3 | train loss: 2.520 | val loss: 2.556
Epoch 4 | train loss: 2.520 | val loss: 2.556
Epoch 5 | train loss: 2.520 | val loss: 2.556
Epoch 6 | train loss: 2.520 | val loss: 2.556
Epoch 7 | train loss: 2.520 | val loss: 2.556
Epoch 8 | train loss: 2.520 | val loss: 2.556
Epoch 9 | train loss: 2.520 | val loss: 2.556
Epoch 10 | train loss: 2.520 | val loss: 2.556
Epoch 11 | train loss: 2.520 | val loss: 2.556
Epoch 12 | train loss: 2.520 | val loss: 2.556
Epoch 13 | train loss: 2.520 | val loss: 2.556
Epoch 14 | train loss: 2.520 | val loss: 2.556
Epoch 15 | train loss: 2.520 | val loss: 2.556
Epoch 16 | train loss: 2.520 | val loss: 2.556
Epoch 17 | train loss: 2.520 | val loss: 2.556
Epoch 18 | train loss: 2.520 | val loss: 2.556
Epoch 19 | train loss: 2.520 | val loss: 2.556
Epoch 20 | train loss: 2.520 | val loss: 2.556
Epoch 21 | train loss: 2.520 | val loss: 2.556
Epoch 22 | train loss: 2.520 | val loss: 2.556
Epoch 23 | train loss: 2.520 | val loss: 2.556
Epoch 24 | train loss: 2.520 | val loss: 2.556
Epoch 25 | train loss: 2.520 | val loss: 2.556
Epoch 26 | train loss: 2.520 | val loss: 2.556
Epoch 27 | train loss: 2.520 | val loss: 2.556
Epoch 28 | train loss: 2.520 | val loss: 2.556
Epoch 29 | train loss: 2.520 | val loss: 2.556
Epoch 30 | train loss: 2.520 | val loss: 2.556
Run history:
beast_pearsonr | ▁ |
epoch | ▁▁▁▂▂▂▂▃▃▃▃▄▄▄▄▅▅▅▅▆▆▆▆▇▇▇▇███ |
test_avg_pearsonr | ▁ |
train_loss | ▁█████████████████████████████ |
val_loss | ▁█████████████████████████████ |
Run summary:
beast_pearsonr | 0.20144 |
epoch | 30 |
test_avg_pearsonr | -0.00629 |
train_loss | 2.52015 |
val_loss | 2.5559 |
View project at: https://wandb.ai/ssalazar_02/DNA_model
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)
./wandb/run-20250407_151916-ohia2jz9/logs
wandb: Agent Starting Run: gmg0gdbw with config:
wandb: add_sigmoid: False
wandb: batch_size: 64
wandb: kernel_size: 10
wandb: learning_rate: 0.1
wandb: num_filters: 16
wandb: optimizer: SGD
/Users/sofiasalazar/Desktop/IM_lab/DNA_model/wandb/run-20250407_152128-gmg0gdbw
Sweep page: https://wandb.ai/ssalazar_02/DNA_model/sweeps/yfwcc62j
Epoch 1 | train loss: 2.424 | val loss: 2.440
Epoch 2 | train loss: 2.370 | val loss: 2.371
Epoch 3 | train loss: 2.307 | val loss: 2.336
Epoch 4 | train loss: 2.278 | val loss: 2.320
Epoch 5 | train loss: 2.260 | val loss: 2.312
Epoch 6 | train loss: 2.245 | val loss: 2.304
Epoch 7 | train loss: 2.231 | val loss: 2.297
Epoch 8 | train loss: 2.216 | val loss: 2.294
Epoch 9 | train loss: 2.202 | val loss: 2.290
Epoch 10 | train loss: 2.185 | val loss: 2.285
Epoch 11 | train loss: 2.168 | val loss: 2.281
Epoch 12 | train loss: 2.148 | val loss: 2.271
Epoch 13 | train loss: 2.119 | val loss: 2.255
Epoch 14 | train loss: 2.080 | val loss: 2.228
Epoch 15 | train loss: 2.039 | val loss: 2.208
Epoch 16 | train loss: 2.001 | val loss: 2.185
Epoch 17 | train loss: 1.966 | val loss: 2.169
Epoch 18 | train loss: 1.933 | val loss: 2.157
Epoch 19 | train loss: 1.903 | val loss: 2.140
Epoch 20 | train loss: 1.875 | val loss: 2.126
Epoch 21 | train loss: 1.848 | val loss: 2.113
Epoch 22 | train loss: 1.822 | val loss: 2.107
Epoch 23 | train loss: 1.799 | val loss: 2.098
Epoch 24 | train loss: 1.776 | val loss: 2.092
Epoch 25 | train loss: 1.755 | val loss: 2.086
Epoch 26 | train loss: 1.736 | val loss: 2.081
Epoch 27 | train loss: 1.717 | val loss: 2.075
Epoch 28 | train loss: 1.699 | val loss: 2.075
Epoch 29 | train loss: 1.682 | val loss: 2.069
Epoch 30 | train loss: 1.666 | val loss: 2.068
Run history:
beast_pearsonr | ▁ |
epoch | ▁▁▁▂▂▂▂▃▃▃▃▄▄▄▄▅▅▅▅▆▆▆▆▇▇▇▇███ |
test_avg_pearsonr | ▁ |
train_loss | ██▇▇▆▆▆▆▆▆▆▅▅▅▄▄▄▃▃▃▃▂▂▂▂▂▁▁▁▁ |
val_loss | █▇▆▆▆▅▅▅▅▅▅▅▅▄▄▃▃▃▂▂▂▂▂▁▁▁▁▁▁▁ |
Run summary:
beast_pearsonr | 0.80236 |
epoch | 30 |
test_avg_pearsonr | 0.38552 |
train_loss | 1.66555 |
val_loss | 2.06831 |
View project at: https://wandb.ai/ssalazar_02/DNA_model
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)
./wandb/run-20250407_152128-gmg0gdbw/logs
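Once the agent has finished, the sweep page linked above is the easiest place to compare runs. If you prefer to do it programmatically, a minimal sketch using the W&B public API (with the entity, project and sweep_id from this notebook) ranks the runs by the test_avg_pearsonr metric logged in test_model():

api = wandb.Api()
sweep = api.sweep(f"ssalazar_02/DNA_model/{sweep_id}")

# sort runs by the test-set average Pearson correlation, best first
runs = sorted(sweep.runs,
              key=lambda run: run.summary.get("test_avg_pearsonr", float("-inf")),
              reverse=True)

best = runs[0]
print("Best run:", best.name)
print("Config:", dict(best.config))
print("test_avg_pearsonr:", best.summary.get("test_avg_pearsonr"))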