things
main.py (6 lines changed)
@@ -18,11 +18,11 @@ train_dataset, test_dataset = get_dataset()
 
 device = "cuda:1"
 
-batch_size = 8 * 4 * 2
+batch_size = 32
 accumulation_steps = 2
 total_epoch = 500
 
-steps_per_epoch = len(train_dataset) // batch_size
+steps_per_epoch = len(train_dataset) // (batch_size)
 total_steps = steps_per_epoch * total_epoch
 warmup_steps = int(0.05 * total_steps)
 
@@ -160,7 +160,7 @@ for epoch in range(start_epoch, total_epoch):
         )
         wandb.log({"viz/decoded": images})
 
-        psnr, ssim, lpips = benchmark(image.cpu(), original.cpu())
+        psnr, ssim, lpips, flawed_lpips = benchmark(image.cpu(), original.cpu())
         psnr_sum += psnr.sum().item()
         ssim_sum += ssim.sum().item()
         lpips_sum += lpips.sum().item()
 
pyproject.toml
@@ -22,7 +22,11 @@ dependencies = [
     "tqdm>=4.67.1",
     "transformers>=4.56.2",
     "wandb[media]>=0.22.0",
-    "flash-attn"
+    "flash-attn",
+    "accelerate>=1.11.0",
+    "hdit>=0.0.1b6",
+    "lovely-tensors>=0.1.19",
+    "natten==0.21.1+torch280cu128",
 ]
 
 [tool.uv.extra-build-dependencies]
 
src/benchmark.py
@@ -7,6 +7,7 @@ from torchmetrics.image import (
 
 psnr = PeakSignalNoiseRatio(1.0, reduction="none", dim=(1, 2, 3))
 lp = lpips.LPIPS(net="alex")
+flawed_lp = lpips.LPIPS(net="alex")
 
 
 def benchmark(image1, image2):
@@ -19,4 +20,5 @@ def benchmark(image1, image2):
             size_average=False,
         ),
         lp(image1 * 2 - 1, image2 * 2 - 1),
+        flawed_lp(image1 * 255, image2 * 255),
     )
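A note on the new second metric: lpips.LPIPS expects inputs scaled to [-1, 1], which is what the existing lp(image1 * 2 - 1, ...) call does; feeding [0, 255] tensors is far out of range for the network, hence the "flawed" name, presumably kept to stay comparable with a mis-scaled number reported elsewhere. A minimal sketch of the two scalings (shapes illustrative, not from the commit):

import torch
import lpips

lp = lpips.LPIPS(net="alex")
a = torch.rand(2, 3, 64, 64)  # images in [0, 1]
b = torch.rand(2, 3, 64, 64)

proper = lp(a * 2 - 1, b * 2 - 1)  # inputs rescaled to [-1, 1], as LPIPS expects
flawed = lp(a * 255, b * 255)      # inputs in [0, 255], outside the expected range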
src/dataset/cuhk_cr2.py
@@ -2,9 +2,11 @@ import os
 from pathlib import Path
 
 from datasets import Dataset, DatasetDict, Image
-from src.dataset.preprocess import make_transform
+from src.dataset.preprocess import make_nir_transform, make_transform
 
 transform = make_transform(256)
+nir_transform = make_nir_transform(256)
 
 
 def get_dataset() -> tuple[Dataset, Dataset]:
@@ -13,30 +15,43 @@ def get_dataset() -> tuple[Dataset, Dataset]:
         return dataset["train"], dataset["test"]
 
     data_dir = Path("/data2/C-CUHK/CUHK-CR2")
+    nir_dir = Path("/data2/C-CUHK/nir/CUHK-CR2")
 
     train_cloud = sorted((data_dir / "train/cloud").glob("*.png"))
+    train_cloud_nir = sorted((nir_dir / "train/cloud").glob("*.png"))
     train_no_cloud = sorted((data_dir / "train/label").glob("*.png"))
+    train_no_cloud_nir = sorted((nir_dir / "train/label").glob("*.png"))
     test_cloud = sorted((data_dir / "test/cloud").glob("*.png"))
+    test_cloud_nir = sorted((nir_dir / "test/cloud").glob("*.png"))
     test_no_cloud = sorted((data_dir / "test/label").glob("*.png"))
+    test_no_cloud_nir = sorted((nir_dir / "test/label").glob("*.png"))
 
     dataset = DatasetDict(
         {
             "train": Dataset.from_dict(
                 {
                     "cloud": [str(p) for p in train_cloud],
+                    "cloud_nir": [str(p) for p in train_cloud_nir],
                     "label": [str(p) for p in train_no_cloud],
+                    "label_nir": [str(p) for p in train_no_cloud_nir],
                 }
             )
             .cast_column("cloud", Image())
-            .cast_column("label", Image()),
+            .cast_column("label", Image())
+            .cast_column("cloud_nir", Image())
+            .cast_column("label_nir", Image()),
             "test": Dataset.from_dict(
                 {
                     "cloud": [str(p) for p in test_cloud],
+                    "cloud_nir": [str(p) for p in test_cloud_nir],
                     "label": [str(p) for p in test_no_cloud],
+                    "label_nir": [str(p) for p in test_no_cloud_nir],
                 }
            )
             .cast_column("cloud", Image())
-            .cast_column("label", Image()),
+            .cast_column("label", Image())
+            .cast_column("cloud_nir", Image())
+            .cast_column("label_nir", Image()),
         }
     )
     dataset = dataset.map(
@@ -45,7 +60,7 @@ def get_dataset() -> tuple[Dataset, Dataset]:
         batch_size=32,
         remove_columns=dataset["train"].column_names,
     )
-    dataset.set_format(type="torch", columns=["cloud", "gt"])
+    dataset.set_format(type="torch", columns=["cloud", "gt", "cloud_nir", "gt_nir"])
     dataset.save_to_disk("datasets/CUHK-CR2")
 
     return dataset["train"], dataset["test"]
@@ -54,9 +69,25 @@ def get_dataset() -> tuple[Dataset, Dataset]:
 def preprocess_function(examples):
     x0_list = []
     x1_list = []
-    for x0_img, x1_img in zip(examples["cloud"], examples["label"]):
+    x0_nir_list = []
+    x1_nir_list = []
+    for x0_img, x1_img, x0_nir, x1_nir in zip(
+        examples["cloud"],
+        examples["label"],
+        examples["cloud_nir"],
+        examples["label_nir"],
+    ):
         x0_transformed = transform(x0_img)
         x1_transformed = transform(x1_img)
+        x0_nir = nir_transform(x0_nir)
+        x1_nir = nir_transform(x1_nir)
         x0_list.append(x0_transformed)
         x1_list.append(x1_transformed)
-    return {"cloud": x0_list, "gt": x1_list}
+        x0_nir_list.append(x0_nir)
+        x1_nir_list.append(x1_nir)
+    return {
+        "cloud": x0_list,
+        "gt": x1_list,
+        "cloud_nir": x0_nir_list,
+        "gt_nir": x1_nir_list,
+    }
 
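A minimal sketch of consuming the rebuilt dataset (the batch keys follow from set_format above; the 1-channel NIR shape follows from make_nir_transform below; the batch size is illustrative):

from torch.utils.data import DataLoader

from src.dataset.cuhk_cr2 import get_dataset

train_dataset, _ = get_dataset()
batch = next(iter(DataLoader(train_dataset, batch_size=4)))
print(batch["cloud"].shape, batch["gt"].shape)          # (4, 3, 256, 256) normalized RGB
print(batch["cloud_nir"].shape, batch["gt_nir"].shape)  # (4, 1, 256, 256) NIR in [0, 1]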
src/dataset/preprocess.py
@@ -14,13 +14,20 @@ def make_transform(resize_size: int = 256):
     return v2.Compose([to_tensor, resize, to_float, normalize])
 
 
+def make_nir_transform(resize_size: int = 256):
+    to_tensor = v2.ToImage()
+    resize = v2.Resize((resize_size, resize_size), antialias=True)
+    to_float = v2.ToDtype(torch.float32, scale=True)
+    return v2.Compose([to_tensor, v2.Grayscale(), resize, to_float])
+
+
 def denormalize(tensor: torch.Tensor) -> torch.Tensor:
     mean = torch.tensor([0.430, 0.411, 0.296]).view(3, 1, 1).to(tensor.device)
     std = torch.tensor([0.213, 0.156, 0.143]).view(3, 1, 1).to(tensor.device)
-    return tensor * std + mean
+    return tensor[:, :3] * std + mean
 
 
 def normalize(tensor: torch.Tensor) -> torch.Tensor:
     mean = torch.tensor([0.430, 0.411, 0.296]).view(3, 1, 1).to(tensor.device)
     std = torch.tensor([0.213, 0.156, 0.143]).view(3, 1, 1).to(tensor.device)
-    return (tensor - mean) / std
+    return (tensor[:, :3] - mean) / std
 
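The new [:, :3] slice lets (de)normalization accept the 4-channel RGB+NIR tensors introduced elsewhere in this commit: the mean/std statistics are RGB-only, so any extra channels are simply dropped. A sketch (4-channel input assumed, matching the HDiT script below):

import torch

from src.dataset.preprocess import denormalize

x = torch.rand(2, 4, 256, 256)        # RGB + NIR
rgb = denormalize(x)                  # statistics applied to the first 3 channels only
assert rgb.shape == (2, 3, 256, 256)  # the NIR channel is discarded here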
@@ -165,7 +165,10 @@ class DINOv3ViTRopePositionEmbedding(nn.Module):
         )
 
         angles = (
-            2 * math.pi * patch_coords[:, :, None] * self.inv_freq[None, None, :]  # type: ignore
+            2
+            * math.pi
+            * patch_coords[:, :, None]
+            * self.inv_freq[None, None, :].to(patch_coords.device)  # type: ignore
         )
         angles = angles.flatten(1, 2)
         angles = angles.tile(2)
 
src/model/hdit.py (new file, empty)
src/model/utransformer.py
@@ -1,4 +1,5 @@
 import math
+from functools import lru_cache
 from typing import Optional
 
 import torch
@@ -260,22 +261,113 @@ class DinoV3ViTDecoder(nn.Module):
         # return x
 
 
+class NerfEmbedder(nn.Module):
+    def __init__(self, in_channels, hidden_size_input, max_freqs):
+        super().__init__()
+        self.max_freqs = max_freqs
+        self.hidden_size_input = hidden_size_input
+        self.embedder = nn.Sequential(
+            nn.Linear(in_channels + max_freqs**2, hidden_size_input, bias=True),
+        )
+
+    @lru_cache
+    def fetch_pos(self, patch_size, device, dtype):
+        pos_x = torch.linspace(0, 1, patch_size, device=device, dtype=dtype)
+        pos_y = torch.linspace(0, 1, patch_size, device=device, dtype=dtype)
+        pos_y, pos_x = torch.meshgrid(pos_y, pos_x, indexing="ij")
+        pos_x = pos_x.reshape(-1, 1, 1)
+        pos_y = pos_y.reshape(-1, 1, 1)
+
+        freqs = torch.linspace(
+            0, self.max_freqs, self.max_freqs, dtype=dtype, device=device
+        )
+        freqs_x = freqs[None, :, None]
+        freqs_y = freqs[None, None, :]
+        coeffs = (1 + freqs_x * freqs_y) ** -1
+        dct_x = torch.cos(pos_x * freqs_x * torch.pi)
+        dct_y = torch.cos(pos_y * freqs_y * torch.pi)
+        dct = (dct_x * dct_y * coeffs).view(1, -1, self.max_freqs**2)
+        return dct
+
+    def forward(self, inputs):
+        target_dtype = self.embedder[0].weight.dtype
+        inputs = inputs.to(dtype=target_dtype)
+        B, P2, C = inputs.shape
+        patch_size = int(P2**0.5)
+        device = inputs.device
+        dtype = inputs.dtype
+        dct = self.fetch_pos(patch_size, device, dtype)
+        dct = dct.repeat(B, 1, 1)
+        inputs = torch.cat([inputs, dct], dim=-1)
+        inputs = self.embedder(inputs)
+        return inputs
+
+
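NerfEmbedder concatenates a cached 2-D DCT-style positional code (max_freqs² cosine products, damped by 1 / (1 + f_x · f_y)) onto each pixel's channels before a single linear projection. A shape sketch, reusing the class above with the defaults the model passes in later (in_channels=3, hidden size 64, max_freqs=8):

import torch

emb = NerfEmbedder(in_channels=3, hidden_size_input=64, max_freqs=8)
pixels = torch.rand(5, 256, 3)    # 5 patches of 16*16 = 256 pixels, RGB each
out = emb(pixels)                 # a (256, 64) DCT code is appended per pixel
assert out.shape == (5, 256, 64)  # 3 + 8**2 = 67 features -> 64 hidden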
+class NerfBlock(nn.Module):
+    def __init__(self, hidden_size_s: int, hidden_size_x: int, mlp_ratio: int = 4):
+        super().__init__()
+        self.param_generator1 = nn.Sequential(
+            nn.Linear(hidden_size_s, 2 * hidden_size_x**2 * mlp_ratio, bias=True),
+        )
+        self.norm = nn.RMSNorm(hidden_size_x, eps=1e-6)
+        self.mlp_ratio = mlp_ratio
+
+    def forward(self, x, s):
+        batch_size, num_x, hidden_size_x = x.shape
+        mlp_params1 = self.param_generator1(s)
+        fc1_param1, fc2_param1 = mlp_params1.chunk(2, dim=-1)
+        fc1_param1 = fc1_param1.view(
+            batch_size, hidden_size_x, hidden_size_x * self.mlp_ratio
+        )
+        fc2_param1 = fc2_param1.view(
+            batch_size, hidden_size_x * self.mlp_ratio, hidden_size_x
+        )
+
+        # normalize fc1
+        normalized_fc1_param1 = torch.nn.functional.normalize(fc1_param1, dim=-2)
+        # normalize fc2
+        normalized_fc2_param1 = torch.nn.functional.normalize(fc2_param1, dim=-2)
+        # mlp 1
+        res_x = x
+        x = self.norm(x)
+        x = torch.bmm(x, normalized_fc1_param1)
+        x = torch.nn.functional.silu(x)
+        x = torch.bmm(x, normalized_fc2_param1)
+        x = x + res_x
+        return x
+
+
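NerfBlock is a per-sample hypernetwork: the conditioning vector s generates the weights of a two-layer MLP (column-normalized, applied with bmm), so every item in the batch runs its pixel features through its own MLP, wrapped in RMSNorm and a residual connection. A minimal shape sketch (sizes assumed to match the model below):

import torch

block = NerfBlock(hidden_size_s=1024, hidden_size_x=64, mlp_ratio=4)
x = torch.rand(10, 256, 64)  # 10 patches x 256 pixels x 64 features
s = torch.rand(10, 1024)     # one conditioning vector per patch
out = block(x, s)            # fc1 (64x256) and fc2 (256x64) are generated from s
assert out.shape == x.shape  # residual block: shape is preserved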
+class NerfFinalLayer(nn.Module):
+    def __init__(self, hidden_size, out_channels):
+        super().__init__()
+        self.norm = nn.RMSNorm(hidden_size, eps=1e-6)
+        self.linear = nn.Linear(hidden_size, out_channels, bias=True)
+
+    def forward(self, x):
+        x = self.norm(x)
+        x = self.linear(x)
+        return x
+
+
 class UTransformer(nn.Module):
     def __init__(
-        self, config: DINOv3ViTConfig, num_classes: int, scale_factor: int = 4
+        self,
+        config: DINOv3ViTConfig,
+        num_classes: int,
+        nerf_patch=16,
+        nerf_hidden=64,
+        scale_factor: int = 4,
     ):
         super().__init__()
         self.config = config
         self.scale_factor = scale_factor
+        self.nerf_patch_size = nerf_patch
 
         assert config.num_hidden_layers % scale_factor == 0
 
         self.embeddings = DINOv3ViTEmbeddings(config)
         self.rope_embeddings = DINOv3ViTRopePositionEmbedding(config)
         self.t_embedder = TimestepEmbedder(config.hidden_size)
         # self.y_embedder = LabelEmbedder(
         #     num_classes, config.hidden_size, config.drop_path_rate
         # ) # disable cond for now
 
         self.encoder_layers = nn.ModuleList(
             [
@@ -302,8 +394,13 @@ class UTransformer(nn.Module):
         self.rest_decoder = nn.ModuleList(
             [DinoConditionedLayer(config, False) for _ in range(4)]
         )
         self.decoder_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
         self.decoder = DinoV3ViTDecoder(config)
 
+        # nerf!
+        self.nerf_encoder = NerfEmbedder(3, nerf_hidden, 8)  # (rgb, hidden, freq)
+        self.nerf_decoder = nn.ModuleList(
+            [NerfBlock(self.config.hidden_size, nerf_hidden) for _ in range(12)]
+        )
+        self.final_layer = NerfFinalLayer(nerf_hidden, 3)
 
         # freeze pretrained
         self.embeddings.requires_grad_(False)
@@ -321,6 +418,13 @@ class UTransformer(nn.Module):
         if time.dim() == 0:
             time = time.repeat(pixel_values.shape[0])
 
+        # resolution config
+        B = pixel_values.shape[0]
+        dino_h = pixel_values.shape[-2] // self.config.patch_size
+        dino_w = pixel_values.shape[-1] // self.config.patch_size
+        nerf_h = pixel_values.shape[-2] // self.nerf_patch_size
+        nerf_w = pixel_values.shape[-1] // self.nerf_patch_size
+
         pixel_values = pixel_values.to(self.embeddings.patch_embeddings.weight.dtype)
         position_embeddings = self.rope_embeddings(pixel_values)
 
@@ -367,11 +471,52 @@ class UTransformer(nn.Module):
                 attention_mask=layer_head_mask,
                 position_embeddings=position_embeddings,
                 do_condition=False,
             )  # (batch, image // patch^2, 1024)
 
         x = x[:, 1 + self.config.num_register_tokens :, :]
 
+        nerf_cond = nn.functional.silu(t + x)  # (batch, image // patch^2, 1024)
+        nerf_cond = nerf_cond.reshape(
+            B, dino_h, dino_w, self.config.hidden_size
+        ).permute(0, 3, 1, 2)  # (batch, 1024, image // patch, image // patch)
+        # nerf_cond = nn.functional.interpolate(
+        #     nerf_cond, size=(nerf_h, nerf_w), mode="bilinear", align_corners=False
+        # )
+        nerf_cond = (
+            nerf_cond.permute(0, 2, 3, 1)
+            .reshape(-1, nerf_h * nerf_w, self.config.hidden_size)
+            .view(-1, self.config.hidden_size)
+        )
 
         x = self.decoder_norm(x)
+        # nerf
+        x_nerf = nn.functional.unfold(
+            pixel_values, self.nerf_patch_size, stride=self.nerf_patch_size
+        ).transpose(1, 2)
+        x_nerf = x_nerf.reshape(
+            B * x_nerf.shape[1], -1, self.nerf_patch_size**2
+        ).transpose(1, 2)
+        x_nerf = self.nerf_encoder(x_nerf)
 
-        return self.decoder(x, image_size=pixel_values.shape[-2:]), residual
+        for module in self.nerf_decoder:
+            x_nerf = module(x_nerf, nerf_cond)
+
+        x_nerf = self.final_layer(x_nerf)
+
+        num_patches = nerf_h * nerf_w
+        x_nerf = x_nerf.reshape(
+            B * num_patches, -1
+        )  # (B*num_patches, 48): flatten pixels+RGB per patch
+        x_nerf = (
+            x_nerf.view(B, num_patches, -1).transpose(1, 2).contiguous()
+        )  # (B, 48, num_patches)
+
+        res = nn.functional.fold(
+            x_nerf,
+            (pixel_values.shape[-2], pixel_values.shape[-1]),
+            kernel_size=self.nerf_patch_size,
+            stride=self.nerf_patch_size,
+        )
+        return res
 
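The decode path is a pixel-space round trip: unfold cuts the input into non-overlapping nerf_patch_size² patches, each pixel is refined by the conditioned NerfBlocks, and fold reassembles the full-resolution image. Because stride equals kernel size, fold is an exact inverse of unfold; a self-contained sketch (sizes assumed):

import torch
import torch.nn.functional as F

B, patch = 2, 16
img = torch.rand(B, 3, 256, 256)
cols = F.unfold(img, patch, stride=patch)  # (B, 3*16*16, 256) = (B, C*k*k, num_patches)
# ... per-pixel processing happens between unfold and fold in the model ...
recon = F.fold(cols, (256, 256), kernel_size=patch, stride=patch)
assert torch.equal(recon, img)  # non-overlapping patches reassemble exactly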
     def get_residual(
         self,
@@ -410,7 +555,7 @@ class UTransformer(nn.Module):
     @staticmethod
     def from_pretrained_backbone(name: str):
         config = DINOv3ViTConfig.from_pretrained(name)
-        instance = UTransformer(config, 0).to("cuda:1")
+        instance = UTransformer(config, 0)
 
         weight_dict = {}
         with safe_open(
 
src/rf.py (19 lines changed)
@@ -22,7 +22,7 @@ use_lecam = True
 
 
 class RF:
-    def __init__(self, model, fm="otcfm", loss="mse"):
+    def __init__(self, model, fm="otcfm", loss="mse", lp=None):
         self.model = model
         self.loss = loss
         self.iter = 0
@@ -40,7 +40,7 @@ class RF:
             raise NotImplementedError(
                 f"Unknown model {fm}, must be one of ['otcfm', 'icfm', 'fm', 'si']"
             )
 
+        if not lp:
             self.lpips = lpips.LPIPS(net="vgg").to("cuda:1")
             self.lpips2 = lpips.LPIPS(net="alex").to("cuda:1")
 
@@ -53,6 +53,8 @@ class RF:
                 weight_decay=1e-3,
                 betas=(0.9, 0.95),
             )
+        else:
+            self.lpips = lp
 
     def gan_loss(self, real, fake):
         global lecam_beta, lecam_anchor_real_logits, lecam_anchor_fake_logits, use_lecam
@@ -105,7 +107,7 @@ class RF:
         if condition:
             vt = self.model(xt, t, cloud)
         else:
-            vt, _ = self.model(xt, t)
+            vt = self.model(xt, t)
 
         if self.loss == "mse":
             loss = ((vt - ut) ** 2).mean(dim=list(range(1, len(gt.shape))))
@@ -116,11 +118,18 @@ class RF:
                 denormalize(gt) * 2 - 1,
                 denormalize(xt + (1 - t[:, None, None, None]) * vt) * 2 - 1,
             )
+            ssim = 1 - ms_ssim(
+                denormalize(gt),
+                denormalize(xt + (1 - t[:, None, None, None]) * vt),
+                data_range=1.0,
+                size_average=False,
+            )
             loss_list = {
                 "train/mse": mse.mean().item(),
                 "train/lpips": lpips.mean().item(),
+                "train/ssim": ssim.mean().item(),
             }
-            loss = mse + lpips * 2.0
+            loss = mse + lpips * 2.0 + ssim
         elif self.loss == "gan_lpips_mse":
             self.iter += 1
             # if self.iter % 4 == 0:
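Spelled out, the lpips_mse objective is now loss = MSE + 2·LPIPS + (1 − MS-SSIM) per sample, where both perceptual terms compare the denormalized one-step prediction x̂₁ = xt + (1 − t)·vt against the denormalized ground truth; size_average=False keeps ms_ssim per-sample so the new term lines up with the existing per-sample mse and lpips vectors before the batch reduction.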
@@ -179,7 +188,7 @@ class RF:
             )
         else:
             traj = odeint(
-                lambda t, x: self.model(x, t)[0],
+                lambda t, x: self.model(x, t),
                 cloud,
                 t_span,
                 rtol=tol,
 
test_dataset.py (new file, 3 lines)
@@ -0,0 +1,3 @@
+from src.dataset.cuhk_cr2 import get_dataset
+
+get_dataset()
train_accelerator.py (new file, 247 lines)
@@ -0,0 +1,247 @@
+import math
+import os
+from datetime import timedelta
+
+import lpips
+import torch
+import torch.optim as optim
+from accelerate import Accelerator, InitProcessGroupKwargs
+from torch.utils.data import DataLoader
+from torchvision.utils import make_grid
+from tqdm import tqdm
+
+import wandb
+from src.benchmark import benchmark
+from src.dataset.cuhk_cr2 import get_dataset
+from src.dataset.preprocess import denormalize
+from src.model.utransformer import UTransformer
+from src.rf import RF
+
+# --- Configuration ---
+batch_size = 16
+accumulation_steps = 2
+total_epoch = 500
+grad_norm = 1.0
+learning_rate = 3e-4
+
+# --- Accelerator Setup ---
+# Set a longer timeout for initialization, which can be useful when downloading
+# large models or datasets on multiple nodes.
+kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=1800))
+accelerator = Accelerator(
+    gradient_accumulation_steps=accumulation_steps,
+    mixed_precision="bf16",  # Use "bf16" for modern GPUs, or "fp16"
+    log_with="wandb",
+    kwargs_handlers=[kwargs],
+)
+
+# --- Dataset Loading ---
+# Load datasets on the main process. They will be accessible by all processes.
+train_dataset, test_dataset = get_dataset()
+
+train_dataset, test_dataset = (
+    DataLoader(train_dataset, batch_size=batch_size),
+    DataLoader(test_dataset, batch_size=batch_size),
+)
+# --- LR Scheduler Logic ---
+# Correctly calculate total steps based on optimizer updates, not micro-batches.
+# Use math.ceil to account for the last partial batch.
+num_batches_per_epoch = math.ceil(len(train_dataset) / batch_size)
+optimizer_steps_per_epoch = math.ceil(num_batches_per_epoch / accumulation_steps)
+total_steps = optimizer_steps_per_epoch * total_epoch
+warmup_steps = int(0.05 * total_steps)
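A worked example of the step math (numbers assumed): with 1,000 batches per epoch and accumulation_steps = 2, that is ceil(1000 / 2) = 500 optimizer updates per epoch, 500 × 500 = 250,000 total steps, and 12,500 warmup steps. One caveat worth flagging: train_dataset was just rebound to a DataLoader, and len() of a DataLoader already returns the number of batches, so the extra division by batch_size in num_batches_per_epoch above likely undercounts the real number of optimizer steps.

import math

num_batches = 1000                            # what len(train_dataset) yields after the DataLoader wrapping
steps_per_epoch = math.ceil(num_batches / 2)  # 500 optimizer updates per epoch
total = steps_per_epoch * 500                 # 250_000 optimizer steps over 500 epochs
warmup = int(0.05 * total)                    # 12_500 warmup steps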
+# --- Model, Optimizer, and RF Helper Initialization ---
+# Initialize on CPU. Accelerator will move them to the correct device.
+model = UTransformer.from_pretrained_backbone(
+    "facebook/dinov3-vitl16-pretrain-sat493m"
+).bfloat16()
+lp = lpips.LPIPS(net="vgg")
+rf = RF(model, "icfm", "lpips_mse", lp)  # RF holds a reference to the model
+optimizer = optim.AdamW(model.parameters(), lr=learning_rate)
+scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=total_epoch)
+
+# --- Prepare objects with Accelerator ---
+# We prepare everything except the train_dataloader, which is created per-epoch
+# to allow for webdataset's shuffling mechanism.
+model, lp, optimizer, scheduler, train_dataset, test_dataset = accelerator.prepare(
+    model, lp, optimizer, scheduler, train_dataset, test_dataset
+)
+
+# --- W&B and Checkpoint Setup ---
+# Initialize tracker (wandb) on the main process
+accelerator.init_trackers(
+    project_name="cloud-removal-kmu",
+    config={
+        "batch_size": batch_size,
+        "accumulation_steps": accumulation_steps,
+        "total_epoch": total_epoch,
+        "learning_rate": learning_rate,
+        "grad_norm": grad_norm,
+        "total_steps": total_steps,
+    },
+)
+
+# Use the run name from the tracker for a consistent artifact path
+# This check is needed in case there are no trackers configured.
+run_name = "nerf-3"
+if accelerator.trackers:
+    run_name = accelerator.trackers[0].run.name
+
+artifact_dir = f"artifact/{run_name}"
+checkpoint_dir = os.path.join(artifact_dir, "checkpoints")
+
+if accelerator.is_main_process:
+    os.makedirs(checkpoint_dir, exist_ok=True)
+accelerator.wait_for_everyone()  # Ensure directory is created before any process tries to access it
+
+# Register scheduler for checkpointing
+accelerator.register_for_checkpointing(scheduler)
+
+start_epoch = 0
+# Check if a checkpoint exists to resume training
+if os.path.exists(checkpoint_dir):
+    try:
+        accelerator.print(f"Resuming from checkpoint: {checkpoint_dir}")
+        accelerator.load_state(checkpoint_dir)
+        # Manually load the epoch from a tracker file
+        if os.path.exists(os.path.join(checkpoint_dir, "epoch_tracker.pt")):
+            start_epoch = (
+                torch.load(os.path.join(checkpoint_dir, "epoch_tracker.pt"))["epoch"]
+                + 1
+            )
+    except Exception as e:
+        accelerator.print(
+            f"Could not load checkpoint. Starting from scratch. Error: {e}"
+        )
+
+
+# --- Training Loop ---
+for epoch in range(start_epoch, total_epoch):
+    model.train()
+    lossbin = {i: 0 for i in range(10)}
+    losscnt = {i: 1e-6 for i in range(10)}
+
+    progress_bar = tqdm(
+        train_dataset,
+        disable=not accelerator.is_local_main_process,
+        desc=f"Epoch {epoch + 1}/{total_epoch}",
+    )
+
+    for step, batch in enumerate(progress_bar):
+        cloud, gt = batch["cloud"], batch["gt"]
+
+        with accelerator.accumulate(model):
+            # Forward pass is automatically handled with mixed precision
+            loss, blsct, loss_list = rf.forward(gt, cloud)
+
+            accelerator.backward(loss)
+
+            if accelerator.sync_gradients:
+                accelerator.clip_grad_norm_(model.parameters(), grad_norm)
+
+            optimizer.step()
+            optimizer.zero_grad()
+
+        # Log metrics
+        if accelerator.sync_gradients:
+            avg_loss = accelerator.gather(loss).mean().item()
+            current_step = epoch * optimizer_steps_per_epoch + (
+                step // accumulation_steps
+            )
+            accelerator.log(
+                {
+                    "train/loss": avg_loss,
+                    "train/lr": scheduler.get_last_lr()[0],
+                },
+            )
+            accelerator.log(loss_list)
+
+        # This per-process logging is an approximation. For perfect accuracy,
+        # `blsct` would need to be gathered from all processes.
+        for t, lss in blsct:
+            bin_idx = min(int(t * 10), 9)
+            lossbin[bin_idx] += lss
+            losscnt[bin_idx] += 1
+
+    # Log epoch-level metrics from the main process
+    if accelerator.is_main_process:
+        epoch_metrics = {
+            f"lossbin/lossbin_{i}": lossbin[i] / losscnt[i] for i in range(10)
+        }
+        epoch_metrics["epoch"] = epoch
+        accelerator.log(epoch_metrics)
+
+    # --- Evaluation and Checkpointing ---
+    if (epoch + 1) % 50 == 0:
+        model.eval()
+        psnr_sum, ssim_sum, lpips_sum, count = 0.0, 0.0, 0.0, 0
+
+        with torch.no_grad():
+            for i, batch in tqdm(
+                enumerate(test_dataset),
+                disable=not accelerator.is_local_main_process,
+                desc=f"Benchmark {epoch + 1}/{total_epoch}",
+            ):
+                images = rf.sample(batch["cloud"])
+                image = denormalize(images[-1]).clamp(0, 1)
+                original = denormalize(batch["gt"]).clamp(0, 1)
+
+                # Gather results from all processes for accurate metrics
+                image_gathered = accelerator.gather_for_metrics(image)
+                original_gathered = accelerator.gather_for_metrics(original)
+
+                if accelerator.is_main_process:
+                    # Log visualization images from the first batch on the main process
+                    if i == 0:
+                        demo_images = [images[0][:4], images[-1][:4]]
+                        for step_idx, demo in enumerate(demo_images):
+                            grid = make_grid(
+                                denormalize(demo).clamp(0, 1).float().cpu(), nrow=2
+                            )
+                            wandb_image = wandb.Image(grid, caption=f"step {step_idx}")
+                            accelerator.log({"viz/decoded": wandb_image})
+
+                    psnr, ssim, lpips, flawed_lpips = benchmark(
+                        image_gathered.cpu(), original_gathered.cpu()
+                    )
+                    psnr_sum += psnr.sum().item()
+                    ssim_sum += ssim.sum().item()
+                    lpips_sum += lpips.sum().item()
+                    count += image_gathered.shape[0]
+
+        accelerator.wait_for_everyone()
+
+        if accelerator.is_main_process:
+            avg_psnr = psnr_sum / count if count > 0 else 0
+            avg_ssim = ssim_sum / count if count > 0 else 0
+            avg_lpips = lpips_sum / count if count > 0 else 0
+            accelerator.log(
+                {
+                    "eval/psnr": avg_psnr,
+                    "eval/ssim": avg_ssim,
+                    "eval/lpips": avg_lpips,
+                    "epoch": epoch + 1,
+                }
+            )
+
+            # Save checkpoint on the main process
+            accelerator.save_state(os.path.join(checkpoint_dir, f"epoch_{epoch + 1}"))
+            accelerator.save_state(checkpoint_dir)  # Overwrite latest
+            torch.save(
+                {"epoch": epoch}, os.path.join(checkpoint_dir, "epoch_tracker.pt")
+            )
+
+    # scheduler.step()
+
+
+# --- Final Save and Cleanup ---
+accelerator.wait_for_everyone()
+if accelerator.is_main_process:
+    accelerator.print("Saving final model state.")
+    accelerator.save_state(checkpoint_dir)
+    torch.save(
+        {"epoch": total_epoch - 1}, os.path.join(checkpoint_dir, "epoch_tracker.pt")
+    )
+
+accelerator.end_training()
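Both training scripts are presumably meant to be launched through the Accelerate CLI (e.g. accelerate launch train_accelerator.py after accelerate config) so that the bf16 mixed precision and multi-process settings configured above take effect; running them with plain python would still work, just single-process.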
train_accelerator_hdit.py (new file, 265 lines)
@@ -0,0 +1,265 @@
+import math
+import os
+from datetime import timedelta
+
+import lpips
+import torch
+import torch.optim as optim
+from accelerate import Accelerator, InitProcessGroupKwargs
+from hdit import HDiT
+from torch.utils.data import DataLoader
+from torchvision.utils import make_grid
+from tqdm import tqdm
+
+import wandb
+from src.benchmark import benchmark
+from src.dataset.cuhk_cr2 import get_dataset
+from src.dataset.preprocess import denormalize
+from src.rf import RF
+
+# --- Configuration ---
+batch_size = 4
+accumulation_steps = 2
+total_epoch = 500
+grad_norm = 1.0
+learning_rate = 3e-4
+
+# --- Accelerator Setup ---
+# Set a longer timeout for initialization, which can be useful when downloading
+# large models or datasets on multiple nodes.
+kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=1800))
+accelerator = Accelerator(
+    gradient_accumulation_steps=accumulation_steps,
+    mixed_precision="bf16",  # Use "bf16" for modern GPUs, or "fp16"
+    log_with="wandb",
+    kwargs_handlers=[kwargs],
+)
+
+# --- Dataset Loading ---
+# Load datasets on the main process. They will be accessible by all processes.
+train_dataset, test_dataset = get_dataset()
+
+train_dataset, test_dataset = (
+    DataLoader(train_dataset, batch_size=batch_size),  # type: ignore
+    DataLoader(test_dataset, batch_size=batch_size),  # type: ignore
+)
+
+num_batches_per_epoch = math.ceil(len(train_dataset) / batch_size)
+optimizer_steps_per_epoch = math.ceil(num_batches_per_epoch / accumulation_steps)
+total_steps = optimizer_steps_per_epoch * total_epoch
+warmup_steps = int(0.05 * total_steps)
+
+# --- Model, Optimizer, and RF Helper Initialization ---
+# Initialize on CPU. Accelerator will move them to the correct device.
+model = HDiT(
+    in_channels=4,
+    out_channels=4,
+    patch_size=[1, 1],  # type: ignore
+    widths=[256, 512],
+    middle_width=1024,
+    depths=[4, 4],
+    middle_depth=8,
+    mapping_width=512,
+    mapping_depth=2,
+).bfloat16()
+
+print(sum(p.numel() for p in model.parameters() if p.requires_grad), "params")
+lp = lpips.LPIPS(net="vgg")
+rf = RF(model, "icfm", "lpips_mse", lp)  # RF holds a reference to the model
+optimizer = optim.AdamW(model.parameters(), lr=learning_rate)
+scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=total_epoch)
+
+# --- Prepare objects with Accelerator ---
+# We prepare everything except the train_dataloader, which is created per-epoch
+# to allow for webdataset's shuffling mechanism.
+model, lp, optimizer, scheduler, train_dataset, test_dataset = accelerator.prepare(
+    model, lp, optimizer, scheduler, train_dataset, test_dataset
+)
+
+# --- W&B and Checkpoint Setup ---
+# Initialize tracker (wandb) on the main process
+accelerator.init_trackers(
+    project_name="cloud-removal-kmu",
+    config={
+        "batch_size": batch_size,
+        "accumulation_steps": accumulation_steps,
+        "total_epoch": total_epoch,
+        "learning_rate": learning_rate,
+        "grad_norm": grad_norm,
+        "total_steps": total_steps,
+    },
+)
+
+# Use the run name from the tracker for a consistent artifact path
+# This check is needed in case there are no trackers configured.
+run_name = "testest"
+if accelerator.trackers:
+    run_name = accelerator.trackers[0].run.name
+
+artifact_dir = f"artifact/{run_name}"
+checkpoint_dir = os.path.join(artifact_dir, "checkpoints")
+
+if accelerator.is_main_process:
+    os.makedirs(checkpoint_dir, exist_ok=True)
+accelerator.wait_for_everyone()  # Ensure directory is created before any process tries to access it
+
+# Register scheduler for checkpointing
+accelerator.register_for_checkpointing(scheduler)
+
+start_epoch = 0
+# Check if a checkpoint exists to resume training
+if os.path.exists(checkpoint_dir):
+    try:
+        accelerator.print(f"Resuming from checkpoint: {checkpoint_dir}")
+        accelerator.load_state(checkpoint_dir)
+        # Manually load the epoch from a tracker file
+        if os.path.exists(os.path.join(checkpoint_dir, "epoch_tracker.pt")):
+            start_epoch = (
+                torch.load(os.path.join(checkpoint_dir, "epoch_tracker.pt"))["epoch"]
+                + 1
+            )
+    except Exception as e:
+        accelerator.print(
+            f"Could not load checkpoint. Starting from scratch. Error: {e}"
+        )
+
+
+# --- Training Loop ---
+for epoch in range(start_epoch, total_epoch):
+    model.train()
+    lossbin = {i: 0 for i in range(10)}
+    losscnt = {i: 1e-6 for i in range(10)}
+
+    progress_bar = tqdm(
+        train_dataset,
+        disable=not accelerator.is_local_main_process,
+        desc=f"Epoch {epoch + 1}/{total_epoch}",
+    )
+
+    for step, batch in enumerate(progress_bar):
+        cloud, gt = batch["cloud"], batch["gt"]
+
+        with accelerator.accumulate(model):
+            # Forward pass is automatically handled with mixed precision
+            loss, blsct, loss_list = rf.forward(
+                torch.cat((batch["gt"], batch["gt_nir"]), dim=1),
+                torch.cat((batch["cloud"], batch["cloud_nir"]), dim=1),
+            )
+
+            accelerator.backward(loss)
+
+            if accelerator.sync_gradients:
+                accelerator.clip_grad_norm_(model.parameters(), grad_norm)
+
+            optimizer.step()
+            optimizer.zero_grad()
+
+        # Log metrics
+        if accelerator.sync_gradients:
+            avg_loss = accelerator.gather(loss).mean().item()  # type: ignore
+            current_step = epoch * optimizer_steps_per_epoch + (
+                step // accumulation_steps
+            )
+            accelerator.log(
+                {
+                    "train/loss": avg_loss,
+                    "train/lr": scheduler.get_last_lr()[0],
+                },
+                # step=current_step,
+            )
+            accelerator.log(loss_list)
+
+        # This per-process logging is an approximation. For perfect accuracy,
+        # `blsct` would need to be gathered from all processes.
+        for t, lss in blsct:
+            bin_idx = min(int(t * 10), 9)
+            lossbin[bin_idx] += lss
+            losscnt[bin_idx] += 1
+
+    # Log epoch-level metrics from the main process
+    if accelerator.is_main_process:
+        epoch_metrics = {
+            f"lossbin/lossbin_{i}": lossbin[i] / losscnt[i] for i in range(10)
+        }
+        epoch_metrics["epoch"] = epoch
+        accelerator.log(epoch_metrics)
+
+    # --- Evaluation and Checkpointing ---
+    if (epoch + 1) % 50 == 0:
+        model.eval()
+        psnr_sum, ssim_sum, lpips_sum, flawed_lpips_sum, count = 0.0, 0.0, 0.0, 0.0, 0
+
+        with torch.no_grad():
+            for i, batch in tqdm(
+                enumerate(test_dataset),
+                disable=not accelerator.is_local_main_process,
+                desc=f"Benchmark {epoch + 1}/{total_epoch}",
+            ):
+                images = rf.sample(
+                    torch.cat((batch["cloud"], batch["cloud_nir"]), dim=1)
+                )
+                image = denormalize(images[-1]).clamp(0, 1)
+                original = denormalize(batch["gt"]).clamp(0, 1)
+
+                # Gather results from all processes for accurate metrics
+                image_gathered = accelerator.gather_for_metrics(image)
+                original_gathered = accelerator.gather_for_metrics(original)
+
+                if accelerator.is_main_process:
+                    # Log visualization images from the first batch on the main process
+                    if i == 0:
+                        demo_images = [images[0][:4], images[-1][:4]]
+                        for step_idx, demo in enumerate(demo_images):
+                            grid = make_grid(
+                                denormalize(demo).clamp(0, 1).float().cpu(), nrow=2
+                            )
+                            wandb_image = wandb.Image(grid, caption=f"step {step_idx}")
+                            accelerator.log({"viz/decoded": wandb_image})
+
+                    psnr, ssim, lpips, flawed_lpips = benchmark(
+                        image_gathered.cpu(),  # type: ignore
+                        original_gathered.cpu(),  # type: ignore
+                    )
+                    psnr_sum += psnr.sum().item()
+                    ssim_sum += ssim.sum().item()
+                    lpips_sum += lpips.sum().item()
+                    flawed_lpips_sum += flawed_lpips.sum().item()
+                    count += image_gathered.shape[0]  # type: ignore
+
+        accelerator.wait_for_everyone()
+
+        if accelerator.is_main_process:
+            avg_psnr = psnr_sum / count if count > 0 else 0
+            avg_ssim = ssim_sum / count if count > 0 else 0
+            avg_lpips = lpips_sum / count if count > 0 else 0
+            avg_flawed_lpips = flawed_lpips_sum / count if count > 0 else 0
+            accelerator.log(
+                {
+                    "eval/psnr": avg_psnr,
+                    "eval/ssim": avg_ssim,
+                    "eval/lpips": avg_lpips,
+                    "eval/flawed_lpips": avg_flawed_lpips,
+                    "epoch": epoch + 1,
+                }
+            )
+
+            # Save checkpoint on the main process
+            accelerator.save_state(os.path.join(checkpoint_dir, f"epoch_{epoch + 1}"))
+            accelerator.save_state(checkpoint_dir)  # Overwrite latest
+            torch.save(
+                {"epoch": epoch}, os.path.join(checkpoint_dir, "epoch_tracker.pt")
+            )
+
+    # scheduler.step()
+
+
+# --- Final Save and Cleanup ---
+accelerator.wait_for_everyone()
+if accelerator.is_main_process:
+    accelerator.print("Saving final model state.")
+    accelerator.save_state(checkpoint_dir)
+    torch.save(
+        {"epoch": total_epoch - 1}, os.path.join(checkpoint_dir, "epoch_tracker.pt")
+    )
+
+accelerator.end_training()
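This script is what motivates the 4-channel details elsewhere in the commit: HDiT is built with in_channels=4 / out_channels=4, fed torch.cat((rgb, nir), dim=1) pairs, and the new [:, :3] slice in denormalize drops the NIR channel again before metrics and visualization. A sketch of the channel plumbing (shapes assumed):

import torch

rgb = torch.rand(2, 3, 256, 256)   # normalized RGB
nir = torch.rand(2, 1, 256, 256)   # NIR in [0, 1]
x = torch.cat((rgb, nir), dim=1)   # (2, 4, 256, 256), the model's input/output layout
assert x.shape[1] == 4             # denormalize(x) later keeps only x[:, :3]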
uv.lock (generated, 190 lines changed)
@@ -15,6 +15,24 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/8f/aa/ba0014cc4659328dc818a28827be78e6d97312ab0cb98105a770924dc11e/absl_py-2.3.1-py3-none-any.whl", hash = "sha256:eeecf07f0c2a93ace0772c92e596ace6d3d3996c042b2128459aaae2a76de11d", size = 135811, upload-time = "2025-07-03T09:31:42.253Z" },
 ]
 
+[[package]]
+name = "accelerate"
+version = "1.11.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "huggingface-hub" },
+    { name = "numpy" },
+    { name = "packaging" },
+    { name = "psutil" },
+    { name = "pyyaml" },
+    { name = "safetensors" },
+    { name = "torch" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/23/60/2757c4f03a8705dbf80b1268b03881927878dca5ed07d74f733fb6c219e0/accelerate-1.11.0.tar.gz", hash = "sha256:bb1caf2597b4cd632b917b5000c591d10730bb024a79746f1ee205bba80bd229", size = 393715, upload-time = "2025-10-20T14:42:25.025Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/77/85/85951bc0f9843e2c10baaa1b6657227056095de08f4d1eea7d8b423a6832/accelerate-1.11.0-py3-none-any.whl", hash = "sha256:a628fa6beb069b8e549460fc449135d5bd8d73e7a11fd09f0bc9fc4ace7f06f1", size = 375777, upload-time = "2025-10-20T14:42:23.256Z" },
+]
+
 [[package]]
 name = "aiohappyeyeballs"
 version = "2.6.1"
@@ -295,10 +313,14 @@ name = "cloud-removal"
 version = "0.1.0"
 source = { virtual = "." }
 dependencies = [
+    { name = "accelerate" },
     { name = "datasets" },
     { name = "einops" },
     { name = "flash-attn" },
+    { name = "hdit" },
+    { name = "lovely-tensors" },
     { name = "lpips" },
+    { name = "natten" },
     { name = "pyright" },
     { name = "python-lsp-server" },
     { name = "pytorch-msssim" },
@@ -317,10 +339,14 @@ dependencies = [
 
 [package.metadata]
 requires-dist = [
+    { name = "accelerate", specifier = ">=1.11.0" },
     { name = "datasets", specifier = ">=4.1.1" },
     { name = "einops", specifier = ">=0.8.1" },
     { name = "flash-attn" },
+    { name = "hdit", specifier = ">=0.0.1b6" },
+    { name = "lovely-tensors", specifier = ">=0.1.19" },
     { name = "lpips", specifier = ">=0.1.4" },
+    { name = "natten", specifier = "==0.21.1+torch280cu128" },
     { name = "pyright", specifier = ">=1.1.405" },
     { name = "python-lsp-server", specifier = ">=1.13.1" },
     { name = "pytorch-msssim", specifier = ">=1.0.0" },
@@ -454,6 +480,20 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/f4/c8/09012ac195a0aab58755800d2efdc0e7d5905053509f12cb5d136c911cda/datasets-4.1.1-py3-none-any.whl", hash = "sha256:62e4f6899a36be9ec74a7e759a6951253cc85b3fcfa0a759b0efa8353b149dac", size = 503623, upload-time = "2025-09-18T13:14:25.111Z" },
 ]
 
+[[package]]
+name = "dctorch"
+version = "0.1.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "numpy" },
+    { name = "scipy" },
+    { name = "torch" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/76/db/75bef6df7b6de66a9291a4924f9b836a60ff53da0937c84aa44c23fbac7d/dctorch-0.1.2.tar.gz", hash = "sha256:4e67cb0248a87eb40d598061225638cda7db64f8057ed2120869683bfae5106e", size = 1767, upload-time = "2023-09-21T23:39:38.083Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/cd/47/61b627404c2d6f31dcbc491ff83da1f4336c7ae7893cfdc6c52db490ec59/dctorch-0.1.2-py3-none-any.whl", hash = "sha256:ce1dde9e3aabb941240e43522212d2e03dddae81279968c8ab182c59a5b41c8f", size = 2278, upload-time = "2023-09-21T23:39:36.584Z" },
+]
+
 [[package]]
 name = "debugpy"
 version = "1.8.17"
@@ -506,6 +546,18 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/56/7b/af3d0da15bed3a8665419bb3a630585756920f4ad67abfdfef26240ebcc0/docstring_to_markdown-0.17-py3-none-any.whl", hash = "sha256:fd7d5094aa83943bf5f9e1a13701866b7c452eac19765380dead666e36d3711c", size = 23479, upload-time = "2025-05-02T15:09:06.676Z" },
 ]
 
+[[package]]
+name = "einop"
+version = "0.0.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "einops" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/04/7b/99e9939fd089dff5d5f8d3030ca04b5b30a4286efd483da0f2e07a5db90e/einop-0.0.1.tar.gz", hash = "sha256:15add3732a54eb6c49618be0c59b97163e46ae87aeb5e387d9c418402ea33178", size = 3132, upload-time = "2022-03-07T21:28:32.025Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/c5/ea/45cf629f41dcbe1da97080dd254a9bf344f39baa53c72eb417666b2100bc/einop-0.0.1-py3-none-any.whl", hash = "sha256:edff8c3916cf4a22eccd31254d752ec7959388bd6877eb5285eed6ca5170e36b", size = 3040, upload-time = "2022-03-07T21:28:29.864Z" },
+]
+
 [[package]]
 name = "einops"
 version = "0.8.1"
@@ -524,6 +576,18 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/c1/ea/53f2148663b321f21b5a606bd5f191517cf40b7072c0497d3c92c4a13b1e/executing-2.2.1-py2.py3-none-any.whl", hash = "sha256:760643d3452b4d777d295bb167ccc74c64a81df23fb5e08eff250c425a4b2017", size = 28317, upload-time = "2025-09-01T09:48:08.5Z" },
 ]
 
+[[package]]
+name = "fastcore"
+version = "1.8.14"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "packaging" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/41/fc/4782041a7e96ae3de2b6bc7a287693d619688d938f43e6d9e70a23874d51/fastcore-1.8.14.tar.gz", hash = "sha256:869735ef493dbc7e5e8cbfc35fa3310772ce4c768d5b3a82d6a0d571148401be", size = 83648, upload-time = "2025-10-29T05:38:46.285Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/ed/c6/236247deaa155fad1b38b6deb95b8b76efd20f5107b4577eee42002cbf11/fastcore-1.8.14-py3-none-any.whl", hash = "sha256:a02a749c26243ffd54d6dd11165cf4a556c7cb08f4c7e47ff67b32c7b0183ce9", size = 86791, upload-time = "2025-10-29T05:38:44.343Z" },
+]
+
 [[package]]
 name = "filelock"
 version = "3.19.1"
@@ -682,6 +746,27 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/01/61/d4b89fec821f72385526e1b9d9a3a0385dda4a72b206d28049e2c7cd39b8/gitpython-3.1.45-py3-none-any.whl", hash = "sha256:8908cb2e02fb3b93b7eb0f2827125cb699869470432cc885f019b8fd0fccff77", size = 208168, upload-time = "2025-07-24T03:45:52.517Z" },
 ]
 
+[[package]]
+name = "hdit"
+version = "0.0.1b6"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "dctorch" },
+    { name = "einop" },
+    { name = "pillow" },
+    { name = "safetensors" },
+    { name = "scipy" },
+    { name = "torch" },
+    { name = "torchdiffeq" },
+    { name = "torchsde" },
+    { name = "torchvision" },
+    { name = "tqdm" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/cc/ca/6e00358e0af086661a1a816cb4a73f3202d87ffb64388e3c1ff2530168ec/hdit-0.0.1b6.tar.gz", hash = "sha256:35f723cf0955d5217fe16f8e2ad703c7dcd5ec6da7bd7b1a0f661849b58bd1b4", size = 33279, upload-time = "2025-09-10T13:28:10.316Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/d1/c6/98eb867319a46c310db5be37c842709669f596cc0a0fd0de3b18d9f66924/hdit-0.0.1b6-py3-none-any.whl", hash = "sha256:de8efd85286b5b50199dbb9306bd2de5098bf29e186e4290d164ab40b70be524", size = 36277, upload-time = "2025-09-10T13:28:09.121Z" },
+]
+
 [[package]]
 name = "hf-xet"
 version = "1.1.10"
@@ -995,6 +1080,34 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/de/73/3d757cb3fc16f0f9794dd289bcd0c4a031d9cf54d8137d6b984b2d02edf3/lightning_utilities-0.15.2-py3-none-any.whl", hash = "sha256:ad3ab1703775044bbf880dbf7ddaaac899396c96315f3aa1779cec9d618a9841", size = 29431, upload-time = "2025-08-06T13:57:38.046Z" },
 ]
 
+[[package]]
+name = "lovely-numpy"
+version = "0.2.17"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "fastcore" },
+    { name = "matplotlib" },
+    { name = "numpy" },
+    { name = "packaging" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/2c/5c/e422f913b5722bc0e697342cf16e7b39fe04ebf694e1334034ef913bbb59/lovely_numpy-0.2.17.tar.gz", hash = "sha256:c414db415701f060c0875cd1d8013a7167664f95e85d11644fdff646941abdb0", size = 24396, upload-time = "2025-10-23T08:16:36.334Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/ee/87/cd2c462355886debdd8ef2064e7de10fcb5eea2928c5c7a1d3566d36cc2d/lovely_numpy-0.2.17-py3-none-any.whl", hash = "sha256:17e1b817d10ce28ce7f4d943f87d4ae7c033250b03c24284af436c200b51f1ac", size = 24424, upload-time = "2025-10-23T08:16:35.024Z" },
+]
+
+[[package]]
+name = "lovely-tensors"
+version = "0.1.19"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "lovely-numpy" },
+    { name = "torch" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/f8/c2/8c21ea15038a2e98079285c52a180e53ea7f0aadcf5b1fa58c1cadfb68da/lovely_tensors-0.1.19.tar.gz", hash = "sha256:0a9cec41c6a13d7de3ca3688c10f61991071352116c5303c3e62c91febf32016", size = 22558, upload-time = "2025-10-04T15:56:33.085Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/a0/ae/93fd971b7a067ddca543302e7632509a36078b48ad0d08643bc17ac35cf3/lovely_tensors-0.1.19-py3-none-any.whl", hash = "sha256:b0fee4f5cfbb0494be6bbce0963bf8e39edc1e4ef89afe922c30edfb55d65237", size = 19394, upload-time = "2025-10-04T15:56:31.682Z" },
+]
+
 [[package]]
 name = "lpips"
 version = "0.1.4"
@@ -1239,6 +1352,16 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/50/3b/0e2c535c3e6970cfc5763b67f6cc31accaab35a7aa3e322fb6a12830450f/narwhals-2.6.0-py3-none-any.whl", hash = "sha256:3215ea42afb452c6c8527e79cefbe542b674aa08d7e2e99d46b2c9708870e0d4", size = 408435, upload-time = "2025-09-29T09:08:54.503Z" },
 ]
 
+[[package]]
+name = "natten"
+version = "0.21.1+torch280cu128"
+source = { registry = "https://whl.natten.org/" }
+wheels = [
+    { url = "https://github.com/SHI-Labs/NATTEN/releases/download/v0.21.1/natten-0.21.1%2Btorch280cu128-cp312-cp312-linux_x86_64.whl" },
+    { url = "https://github.com/SHI-Labs/NATTEN/releases/download/v0.21.1/natten-0.21.1%2Btorch280cu128-cp313-cp313-linux_x86_64.whl" },
+    { url = "https://github.com/SHI-Labs/NATTEN/releases/download/v0.21.1/natten-0.21.1%2Btorch280cu128-cp313-cp313t-linux_x86_64.whl" },
+]
+
 [[package]]
 name = "nest-asyncio"
 version = "1.6.0"
@@ -1268,65 +1391,18 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "numpy"
|
||||
version = "2.3.3"
|
||||
version = "1.26.4"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/d0/19/95b3d357407220ed24c139018d2518fab0a61a948e68286a25f1a4d049ff/numpy-2.3.3.tar.gz", hash = "sha256:ddc7c39727ba62b80dfdbedf400d1c10ddfa8eefbd7ec8dcb118be8b56d31029", size = 20576648, upload-time = "2025-09-09T16:54:12.543Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/65/6e/09db70a523a96d25e115e71cc56a6f9031e7b8cd166c1ac8438307c14058/numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010", size = 15786129, upload-time = "2024-02-06T00:26:44.495Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/51/5d/bb7fc075b762c96329147799e1bcc9176ab07ca6375ea976c475482ad5b3/numpy-2.3.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cfdd09f9c84a1a934cde1eec2267f0a43a7cd44b2cca4ff95b7c0d14d144b0bf", size = 20957014, upload-time = "2025-09-09T15:56:29.966Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6b/0e/c6211bb92af26517acd52125a237a92afe9c3124c6a68d3b9f81b62a0568/numpy-2.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb32e3cf0f762aee47ad1ddc6672988f7f27045b0783c887190545baba73aa25", size = 14185220, upload-time = "2025-09-09T15:56:32.175Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/22/f2/07bb754eb2ede9073f4054f7c0286b0d9d2e23982e090a80d478b26d35ca/numpy-2.3.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:396b254daeb0a57b1fe0ecb5e3cff6fa79a380fa97c8f7781a6d08cd429418fe", size = 5113918, upload-time = "2025-09-09T15:56:34.175Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/81/0a/afa51697e9fb74642f231ea36aca80fa17c8fb89f7a82abd5174023c3960/numpy-2.3.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:067e3d7159a5d8f8a0b46ee11148fc35ca9b21f61e3c49fbd0a027450e65a33b", size = 6647922, upload-time = "2025-09-09T15:56:36.149Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5d/f5/122d9cdb3f51c520d150fef6e87df9279e33d19a9611a87c0d2cf78a89f4/numpy-2.3.3-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1c02d0629d25d426585fb2e45a66154081b9fa677bc92a881ff1d216bc9919a8", size = 14281991, upload-time = "2025-09-09T15:56:40.548Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/51/64/7de3c91e821a2debf77c92962ea3fe6ac2bc45d0778c1cbe15d4fce2fd94/numpy-2.3.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d9192da52b9745f7f0766531dcfa978b7763916f158bb63bdb8a1eca0068ab20", size = 16641643, upload-time = "2025-09-09T15:56:43.343Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/30/e4/961a5fa681502cd0d68907818b69f67542695b74e3ceaa513918103b7e80/numpy-2.3.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:cd7de500a5b66319db419dc3c345244404a164beae0d0937283b907d8152e6ea", size = 16056787, upload-time = "2025-09-09T15:56:46.141Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/99/26/92c912b966e47fbbdf2ad556cb17e3a3088e2e1292b9833be1dfa5361a1a/numpy-2.3.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:93d4962d8f82af58f0b2eb85daaf1b3ca23fe0a85d0be8f1f2b7bb46034e56d7", size = 18579598, upload-time = "2025-09-09T15:56:49.844Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/17/b6/fc8f82cb3520768718834f310c37d96380d9dc61bfdaf05fe5c0b7653e01/numpy-2.3.3-cp312-cp312-win32.whl", hash = "sha256:5534ed6b92f9b7dca6c0a19d6df12d41c68b991cef051d108f6dbff3babc4ebf", size = 6320800, upload-time = "2025-09-09T15:56:52.499Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/32/ee/de999f2625b80d043d6d2d628c07d0d5555a677a3cf78fdf868d409b8766/numpy-2.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:497d7cad08e7092dba36e3d296fe4c97708c93daf26643a1ae4b03f6294d30eb", size = 12786615, upload-time = "2025-09-09T15:56:54.422Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/49/6e/b479032f8a43559c383acb20816644f5f91c88f633d9271ee84f3b3a996c/numpy-2.3.3-cp312-cp312-win_arm64.whl", hash = "sha256:ca0309a18d4dfea6fc6262a66d06c26cfe4640c3926ceec90e57791a82b6eee5", size = 10195936, upload-time = "2025-09-09T15:56:56.541Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7d/b9/984c2b1ee61a8b803bf63582b4ac4242cf76e2dbd663efeafcb620cc0ccb/numpy-2.3.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f5415fb78995644253370985342cd03572ef8620b934da27d77377a2285955bf", size = 20949588, upload-time = "2025-09-09T15:56:59.087Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a6/e4/07970e3bed0b1384d22af1e9912527ecbeb47d3b26e9b6a3bced068b3bea/numpy-2.3.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d00de139a3324e26ed5b95870ce63be7ec7352171bc69a4cf1f157a48e3eb6b7", size = 14177802, upload-time = "2025-09-09T15:57:01.73Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/35/c7/477a83887f9de61f1203bad89cf208b7c19cc9fef0cebef65d5a1a0619f2/numpy-2.3.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:9dc13c6a5829610cc07422bc74d3ac083bd8323f14e2827d992f9e52e22cd6a6", size = 5106537, upload-time = "2025-09-09T15:57:03.765Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/52/47/93b953bd5866a6f6986344d045a207d3f1cfbad99db29f534ea9cee5108c/numpy-2.3.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:d79715d95f1894771eb4e60fb23f065663b2298f7d22945d66877aadf33d00c7", size = 6640743, upload-time = "2025-09-09T15:57:07.921Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/23/83/377f84aaeb800b64c0ef4de58b08769e782edcefa4fea712910b6f0afd3c/numpy-2.3.3-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:952cfd0748514ea7c3afc729a0fc639e61655ce4c55ab9acfab14bda4f402b4c", size = 14278881, upload-time = "2025-09-09T15:57:11.349Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9a/a5/bf3db6e66c4b160d6ea10b534c381a1955dfab34cb1017ea93aa33c70ed3/numpy-2.3.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5b83648633d46f77039c29078751f80da65aa64d5622a3cd62aaef9d835b6c93", size = 16636301, upload-time = "2025-09-09T15:57:14.245Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a2/59/1287924242eb4fa3f9b3a2c30400f2e17eb2707020d1c5e3086fe7330717/numpy-2.3.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b001bae8cea1c7dfdb2ae2b017ed0a6f2102d7a70059df1e338e307a4c78a8ae", size = 16053645, upload-time = "2025-09-09T15:57:16.534Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e6/93/b3d47ed882027c35e94ac2320c37e452a549f582a5e801f2d34b56973c97/numpy-2.3.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8e9aced64054739037d42fb84c54dd38b81ee238816c948c8f3ed134665dcd86", size = 18578179, upload-time = "2025-09-09T15:57:18.883Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/20/d9/487a2bccbf7cc9d4bfc5f0f197761a5ef27ba870f1e3bbb9afc4bbe3fcc2/numpy-2.3.3-cp313-cp313-win32.whl", hash = "sha256:9591e1221db3f37751e6442850429b3aabf7026d3b05542d102944ca7f00c8a8", size = 6312250, upload-time = "2025-09-09T15:57:21.296Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1b/b5/263ebbbbcede85028f30047eab3d58028d7ebe389d6493fc95ae66c636ab/numpy-2.3.3-cp313-cp313-win_amd64.whl", hash = "sha256:f0dadeb302887f07431910f67a14d57209ed91130be0adea2f9793f1a4f817cf", size = 12783269, upload-time = "2025-09-09T15:57:23.034Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fa/75/67b8ca554bbeaaeb3fac2e8bce46967a5a06544c9108ec0cf5cece559b6c/numpy-2.3.3-cp313-cp313-win_arm64.whl", hash = "sha256:3c7cf302ac6e0b76a64c4aecf1a09e51abd9b01fc7feee80f6c43e3ab1b1dbc5", size = 10195314, upload-time = "2025-09-09T15:57:25.045Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/11/d0/0d1ddec56b162042ddfafeeb293bac672de9b0cfd688383590090963720a/numpy-2.3.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:eda59e44957d272846bb407aad19f89dc6f58fecf3504bd144f4c5cf81a7eacc", size = 21048025, upload-time = "2025-09-09T15:57:27.257Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/36/9e/1996ca6b6d00415b6acbdd3c42f7f03ea256e2c3f158f80bd7436a8a19f3/numpy-2.3.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:823d04112bc85ef5c4fda73ba24e6096c8f869931405a80aa8b0e604510a26bc", size = 14301053, upload-time = "2025-09-09T15:57:30.077Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/05/24/43da09aa764c68694b76e84b3d3f0c44cb7c18cdc1ba80e48b0ac1d2cd39/numpy-2.3.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:40051003e03db4041aa325da2a0971ba41cf65714e65d296397cc0e32de6018b", size = 5229444, upload-time = "2025-09-09T15:57:32.733Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bc/14/50ffb0f22f7218ef8af28dd089f79f68289a7a05a208db9a2c5dcbe123c1/numpy-2.3.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:6ee9086235dd6ab7ae75aba5662f582a81ced49f0f1c6de4260a78d8f2d91a19", size = 6738039, upload-time = "2025-09-09T15:57:34.328Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/55/52/af46ac0795e09657d45a7f4db961917314377edecf66db0e39fa7ab5c3d3/numpy-2.3.3-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94fcaa68757c3e2e668ddadeaa86ab05499a70725811e582b6a9858dd472fb30", size = 14352314, upload-time = "2025-09-09T15:57:36.255Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/b1/dc226b4c90eb9f07a3fff95c2f0db3268e2e54e5cce97c4ac91518aee71b/numpy-2.3.3-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da1a74b90e7483d6ce5244053399a614b1d6b7bc30a60d2f570e5071f8959d3e", size = 16701722, upload-time = "2025-09-09T15:57:38.622Z" },
{ url = "https://files.pythonhosted.org/packages/9d/9d/9d8d358f2eb5eced14dba99f110d83b5cd9a4460895230f3b396ad19a323/numpy-2.3.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2990adf06d1ecee3b3dcbb4977dfab6e9f09807598d647f04d385d29e7a3c3d3", size = 16132755, upload-time = "2025-09-09T15:57:41.16Z" },
{ url = "https://files.pythonhosted.org/packages/b6/27/b3922660c45513f9377b3fb42240bec63f203c71416093476ec9aa0719dc/numpy-2.3.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ed635ff692483b8e3f0fcaa8e7eb8a75ee71aa6d975388224f70821421800cea", size = 18651560, upload-time = "2025-09-09T15:57:43.459Z" },
{ url = "https://files.pythonhosted.org/packages/5b/8e/3ab61a730bdbbc201bb245a71102aa609f0008b9ed15255500a99cd7f780/numpy-2.3.3-cp313-cp313t-win32.whl", hash = "sha256:a333b4ed33d8dc2b373cc955ca57babc00cd6f9009991d9edc5ddbc1bac36bcd", size = 6442776, upload-time = "2025-09-09T15:57:45.793Z" },
{ url = "https://files.pythonhosted.org/packages/1c/3a/e22b766b11f6030dc2decdeff5c2fb1610768055603f9f3be88b6d192fb2/numpy-2.3.3-cp313-cp313t-win_amd64.whl", hash = "sha256:4384a169c4d8f97195980815d6fcad04933a7e1ab3b530921c3fef7a1c63426d", size = 12927281, upload-time = "2025-09-09T15:57:47.492Z" },
{ url = "https://files.pythonhosted.org/packages/7b/42/c2e2bc48c5e9b2a83423f99733950fbefd86f165b468a3d85d52b30bf782/numpy-2.3.3-cp313-cp313t-win_arm64.whl", hash = "sha256:75370986cc0bc66f4ce5110ad35aae6d182cc4ce6433c40ad151f53690130bf1", size = 10265275, upload-time = "2025-09-09T15:57:49.647Z" },
{ url = "https://files.pythonhosted.org/packages/6b/01/342ad585ad82419b99bcf7cebe99e61da6bedb89e213c5fd71acc467faee/numpy-2.3.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cd052f1fa6a78dee696b58a914b7229ecfa41f0a6d96dc663c1220a55e137593", size = 20951527, upload-time = "2025-09-09T15:57:52.006Z" },
{ url = "https://files.pythonhosted.org/packages/ef/d8/204e0d73fc1b7a9ee80ab1fe1983dd33a4d64a4e30a05364b0208e9a241a/numpy-2.3.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:414a97499480067d305fcac9716c29cf4d0d76db6ebf0bf3cbce666677f12652", size = 14186159, upload-time = "2025-09-09T15:57:54.407Z" },
{ url = "https://files.pythonhosted.org/packages/22/af/f11c916d08f3a18fb8ba81ab72b5b74a6e42ead4c2846d270eb19845bf74/numpy-2.3.3-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:50a5fe69f135f88a2be9b6ca0481a68a136f6febe1916e4920e12f1a34e708a7", size = 5114624, upload-time = "2025-09-09T15:57:56.5Z" },
{ url = "https://files.pythonhosted.org/packages/fb/11/0ed919c8381ac9d2ffacd63fd1f0c34d27e99cab650f0eb6f110e6ae4858/numpy-2.3.3-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:b912f2ed2b67a129e6a601e9d93d4fa37bef67e54cac442a2f588a54afe5c67a", size = 6642627, upload-time = "2025-09-09T15:57:58.206Z" },
{ url = "https://files.pythonhosted.org/packages/ee/83/deb5f77cb0f7ba6cb52b91ed388b47f8f3c2e9930d4665c600408d9b90b9/numpy-2.3.3-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9e318ee0596d76d4cb3d78535dc005fa60e5ea348cd131a51e99d0bdbe0b54fe", size = 14296926, upload-time = "2025-09-09T15:58:00.035Z" },
{ url = "https://files.pythonhosted.org/packages/77/cc/70e59dcb84f2b005d4f306310ff0a892518cc0c8000a33d0e6faf7ca8d80/numpy-2.3.3-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ce020080e4a52426202bdb6f7691c65bb55e49f261f31a8f506c9f6bc7450421", size = 16638958, upload-time = "2025-09-09T15:58:02.738Z" },
{ url = "https://files.pythonhosted.org/packages/b6/5a/b2ab6c18b4257e099587d5b7f903317bd7115333ad8d4ec4874278eafa61/numpy-2.3.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:e6687dc183aa55dae4a705b35f9c0f8cb178bcaa2f029b241ac5356221d5c021", size = 16071920, upload-time = "2025-09-09T15:58:05.029Z" },
{ url = "https://files.pythonhosted.org/packages/b8/f1/8b3fdc44324a259298520dd82147ff648979bed085feeacc1250ef1656c0/numpy-2.3.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d8f3b1080782469fdc1718c4ed1d22549b5fb12af0d57d35e992158a772a37cf", size = 18577076, upload-time = "2025-09-09T15:58:07.745Z" },
{ url = "https://files.pythonhosted.org/packages/f0/a1/b87a284fb15a42e9274e7fcea0dad259d12ddbf07c1595b26883151ca3b4/numpy-2.3.3-cp314-cp314-win32.whl", hash = "sha256:cb248499b0bc3be66ebd6578b83e5acacf1d6cb2a77f2248ce0e40fbec5a76d0", size = 6366952, upload-time = "2025-09-09T15:58:10.096Z" },
{ url = "https://files.pythonhosted.org/packages/70/5f/1816f4d08f3b8f66576d8433a66f8fa35a5acfb3bbd0bf6c31183b003f3d/numpy-2.3.3-cp314-cp314-win_amd64.whl", hash = "sha256:691808c2b26b0f002a032c73255d0bd89751425f379f7bcd22d140db593a96e8", size = 12919322, upload-time = "2025-09-09T15:58:12.138Z" },
{ url = "https://files.pythonhosted.org/packages/8c/de/072420342e46a8ea41c324a555fa90fcc11637583fb8df722936aed1736d/numpy-2.3.3-cp314-cp314-win_arm64.whl", hash = "sha256:9ad12e976ca7b10f1774b03615a2a4bab8addce37ecc77394d8e986927dc0dfe", size = 10478630, upload-time = "2025-09-09T15:58:14.64Z" },
{ url = "https://files.pythonhosted.org/packages/d5/df/ee2f1c0a9de7347f14da5dd3cd3c3b034d1b8607ccb6883d7dd5c035d631/numpy-2.3.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9cc48e09feb11e1db00b320e9d30a4151f7369afb96bd0e48d942d09da3a0d00", size = 21047987, upload-time = "2025-09-09T15:58:16.889Z" },
{ url = "https://files.pythonhosted.org/packages/d6/92/9453bdc5a4e9e69cf4358463f25e8260e2ffc126d52e10038b9077815989/numpy-2.3.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:901bf6123879b7f251d3631967fd574690734236075082078e0571977c6a8e6a", size = 14301076, upload-time = "2025-09-09T15:58:20.343Z" },
{ url = "https://files.pythonhosted.org/packages/13/77/1447b9eb500f028bb44253105bd67534af60499588a5149a94f18f2ca917/numpy-2.3.3-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:7f025652034199c301049296b59fa7d52c7e625017cae4c75d8662e377bf487d", size = 5229491, upload-time = "2025-09-09T15:58:22.481Z" },
{ url = "https://files.pythonhosted.org/packages/3d/f9/d72221b6ca205f9736cb4b2ce3b002f6e45cd67cd6a6d1c8af11a2f0b649/numpy-2.3.3-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:533ca5f6d325c80b6007d4d7fb1984c303553534191024ec6a524a4c92a5935a", size = 6737913, upload-time = "2025-09-09T15:58:24.569Z" },
{ url = "https://files.pythonhosted.org/packages/3c/5f/d12834711962ad9c46af72f79bb31e73e416ee49d17f4c797f72c96b6ca5/numpy-2.3.3-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0edd58682a399824633b66885d699d7de982800053acf20be1eaa46d92009c54", size = 14352811, upload-time = "2025-09-09T15:58:26.416Z" },
{ url = "https://files.pythonhosted.org/packages/a1/0d/fdbec6629d97fd1bebed56cd742884e4eead593611bbe1abc3eb40d304b2/numpy-2.3.3-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:367ad5d8fbec5d9296d18478804a530f1191e24ab4d75ab408346ae88045d25e", size = 16702689, upload-time = "2025-09-09T15:58:28.831Z" },
{ url = "https://files.pythonhosted.org/packages/9b/09/0a35196dc5575adde1eb97ddfbc3e1687a814f905377621d18ca9bc2b7dd/numpy-2.3.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8f6ac61a217437946a1fa48d24c47c91a0c4f725237871117dea264982128097", size = 16133855, upload-time = "2025-09-09T15:58:31.349Z" },
{ url = "https://files.pythonhosted.org/packages/7a/ca/c9de3ea397d576f1b6753eaa906d4cdef1bf97589a6d9825a349b4729cc2/numpy-2.3.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:179a42101b845a816d464b6fe9a845dfaf308fdfc7925387195570789bb2c970", size = 18652520, upload-time = "2025-09-09T15:58:33.762Z" },
{ url = "https://files.pythonhosted.org/packages/fd/c2/e5ed830e08cd0196351db55db82f65bc0ab05da6ef2b72a836dcf1936d2f/numpy-2.3.3-cp314-cp314t-win32.whl", hash = "sha256:1250c5d3d2562ec4174bce2e3a1523041595f9b651065e4a4473f5f48a6bc8a5", size = 6515371, upload-time = "2025-09-09T15:58:36.04Z" },
{ url = "https://files.pythonhosted.org/packages/47/c7/b0f6b5b67f6788a0725f744496badbb604d226bf233ba716683ebb47b570/numpy-2.3.3-cp314-cp314t-win_amd64.whl", hash = "sha256:b37a0b2e5935409daebe82c1e42274d30d9dd355852529eab91dab8dcca7419f", size = 13112576, upload-time = "2025-09-09T15:58:37.927Z" },
{ url = "https://files.pythonhosted.org/packages/06/b9/33bba5ff6fb679aa0b1f8a07e853f002a6b04b9394db3069a1270a7784ca/numpy-2.3.3-cp314-cp314t-win_arm64.whl", hash = "sha256:78c9f6560dc7e6b3990e32df7ea1a50bbd0e2a111e05209963f5ddcab7073b0b", size = 10545953, upload-time = "2025-09-09T15:58:40.576Z" },
{ url = "https://files.pythonhosted.org/packages/95/12/8f2020a8e8b8383ac0177dc9570aad031a3beb12e38847f7129bacd96228/numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218", size = 20335901, upload-time = "2024-02-05T23:55:32.801Z" },
{ url = "https://files.pythonhosted.org/packages/75/5b/ca6c8bd14007e5ca171c7c03102d17b4f4e0ceb53957e8c44343a9546dcc/numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b", size = 13685868, upload-time = "2024-02-05T23:55:56.28Z" },
{ url = "https://files.pythonhosted.org/packages/79/f8/97f10e6755e2a7d027ca783f63044d5b1bc1ae7acb12afe6a9b4286eac17/numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b", size = 13925109, upload-time = "2024-02-05T23:56:20.368Z" },
{ url = "https://files.pythonhosted.org/packages/0f/50/de23fde84e45f5c4fda2488c759b69990fd4512387a8632860f3ac9cd225/numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed", size = 17950613, upload-time = "2024-02-05T23:56:56.054Z" },
{ url = "https://files.pythonhosted.org/packages/4c/0c/9c603826b6465e82591e05ca230dfc13376da512b25ccd0894709b054ed0/numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a", size = 13572172, upload-time = "2024-02-05T23:57:21.56Z" },
{ url = "https://files.pythonhosted.org/packages/76/8c/2ba3902e1a0fc1c74962ea9bb33a534bb05984ad7ff9515bf8d07527cadd/numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0", size = 17786643, upload-time = "2024-02-05T23:57:56.585Z" },
{ url = "https://files.pythonhosted.org/packages/28/4a/46d9e65106879492374999e76eb85f87b15328e06bd1550668f79f7b18c6/numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110", size = 5677803, upload-time = "2024-02-05T23:58:08.963Z" },
{ url = "https://files.pythonhosted.org/packages/16/2e/86f24451c2d530c88daf997cb8d6ac622c1d40d19f5a031ed68a4b73a374/numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818", size = 15517754, upload-time = "2024-02-05T23:58:36.364Z" },
]
[[package]]