import logging

import torch
import torchvision.transforms as T
from torch.utils.data import DataLoader

# from datasets.sampler import RandomIdentitySampler
# from datasets.sampler_ddp import RandomIdentitySampler_DDP
from utils.comm import get_world_size

from .cuhkpedes import CUHKPEDES
from .bases import ImageDataset, TextDataset, ImageTextDataset, ImageTextMLMDataset

# __factory = {'CUHK-PEDES': CUHKPEDES, 'ICFG-PEDES': ICFGPEDES, 'RSTPReid': RSTPReid}
__factory = {'CUHK-PEDES': CUHKPEDES}


def build_transforms(img_size=(384, 128), aug=False, is_train=True):
    height, width = img_size
    # CLIP image normalization statistics
    mean = [0.48145466, 0.4578275, 0.40821073]
    std = [0.26862954, 0.26130258, 0.27577711]

    # transform for evaluation: resize and normalize only
    if not is_train:
        transform = T.Compose([
            T.Resize((height, width)),
            T.ToTensor(),
            T.Normalize(mean=mean, std=std),
        ])
        return transform

    # transform for training
    if aug:
        transform = T.Compose([
            T.Resize((height, width)),
            T.RandomHorizontalFlip(0.5),
            T.Pad(10),
            T.RandomCrop((height, width)),
            T.ToTensor(),
            T.Normalize(mean=mean, std=std),
            T.RandomErasing(scale=(0.02, 0.4), value=mean),
        ])
    else:
        transform = T.Compose([
            T.Resize((height, width)),
            T.RandomHorizontalFlip(0.5),
            T.ToTensor(),
            T.Normalize(mean=mean, std=std),
        ])
    return transform
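
# Usage sketch (illustrative, not part of the pipeline): the returned Compose is
# applied to a single PIL image; the file path below is hypothetical.
#
#   from PIL import Image
#   eval_tf = build_transforms(img_size=(384, 128), is_train=False)
#   img = eval_tf(Image.open("query.jpg").convert("RGB"))
#   # img is a normalized float tensor of shape (3, 384, 128)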


def collate(batch):
    keys = {key for b in batch for key in b.keys()}
    # turn list of dicts data structure to dict of lists data structure
    dict_batch = {k: [dic[k] if k in dic else None for dic in batch] for k in keys}

    batch_tensor_dict = {}
    for k, v in dict_batch.items():
        if isinstance(v[0], int):
            batch_tensor_dict.update({k: torch.tensor(v)})
        elif torch.is_tensor(v[0]):
            batch_tensor_dict.update({k: torch.stack(v)})
        else:
            raise TypeError(f"Unexpected data type: {type(v[0])} in a batch.")

    return batch_tensor_dict
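
# Example (illustrative key names): collate turns a list of per-sample dicts into
# a dict of batched tensors.
#
#   batch = [{"pids": 0, "images": torch.zeros(3, 384, 128)},
#            {"pids": 1, "images": torch.ones(3, 384, 128)}]
#   out = collate(batch)
#   out["pids"]    # tensor([0, 1])
#   out["images"]  # tensor of shape (2, 3, 384, 128)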


def build_dataloader(args, transforms=None):
    logger = logging.getLogger("IRRA.dataset")
    num_workers = args.data.num_workers

    dataset = __factory[args.data.dataset.meta.cuhkpedes_val.name](
        root=args.data.dataset.meta.cuhkpedes_val.raw_path)
    num_classes = len(dataset.train_id_container)

    val_transforms = build_transforms(
        img_size=(args.data.img_aug.img_size * 3, args.data.img_aug.img_size),
        is_train=False)

    # use test set as validate set
    ds = dataset.val
    val_img_set = ImageDataset(ds['image_pids'], ds['img_paths'],
                               val_transforms)
    val_txt_set = TextDataset(ds['caption_pids'],
                              ds['captions'],
                              text_length=args.data.text_aug.max_seq_len)

    val_img_loader = DataLoader(val_img_set,
                                batch_size=args.data.batch_size,
                                shuffle=False,
                                num_workers=num_workers)
    val_txt_loader = DataLoader(val_txt_set,
                                batch_size=args.data.batch_size,
                                shuffle=False,
                                num_workers=num_workers)

    return val_img_loader, val_txt_loader, num_classes
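
# Usage sketch: `args` is assumed to be the project's config object exposing the
# attributes accessed above (data.batch_size, data.num_workers,
# data.dataset.meta.cuhkpedes_val, data.img_aug.img_size, data.text_aug.max_seq_len);
# how it is constructed is outside this file.
#
#   val_img_loader, val_txt_loader, num_classes = build_dataloader(args)
#   for batch in val_img_loader:
#       ...  # per-batch structure is whatever ImageDataset.__getitem__ returns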