main_group_vit.py

# -------------------------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
#
# Written by Ze Liu, Zhenda Xie
# Modified by Jiarui Xu
# -------------------------------------------------------------------------
import argparse
import datetime
import os
import os.path as osp
import time
from collections import defaultdict

import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.multiprocessing as mp
from datasets import build_loader, build_text_transform, imagenet_classes
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, set_random_seed
from mmcv.utils import collect_env, get_git_hash
from mmseg.apis import multi_gpu_test
from models import build_model
from omegaconf import OmegaConf, read_write
from segmentation.evaluation import build_seg_dataloader, build_seg_dataset, build_seg_inference
from datasets.build import build_dataloader
from timm.utils import AverageMeter, accuracy
from utils import (auto_resume_helper, build_dataset_class_tokens, build_optimizer, build_scheduler, data2cuda,
                   get_config, get_grad_norm, get_logger, load_checkpoint, parse_losses, reduce_tensor,
                   save_checkpoint)
from tools.cfg2arg import cfg2arg

try:
    # noinspection PyUnresolvedReferences
    from apex import amp
except ImportError:
    amp = None
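
# NOTE (usage assumption): this script expects a distributed launch, e.g. something like
#   python -m torch.distributed.launch --nproc_per_node=<num_gpus> main_group_vit.py --cfg <config>
# since --local_rank is a required argument and main() calls init_dist('pytorch'). apex is
# optional; mixed precision is only used when cfg.train.amp_opt_level is not 'O0'.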

def parse_args():
    parser = argparse.ArgumentParser('GroupViT training and evaluation script')
    parser.add_argument('--cfg', type=str, required=True, help='path to config file')
    parser.add_argument('--opts', help="Modify config options by adding 'KEY=VALUE' list.", default=None, nargs='+')

    # easy config modification
    parser.add_argument('--batch-size', type=int, help='batch size for single GPU')
    parser.add_argument('--resume', help='resume from checkpoint')
    parser.add_argument(
        '--amp-opt-level',
        type=str,
        default='O1',
        choices=['O0', 'O1', 'O2'],
        help='mixed precision opt level, if O0, no amp is used')
    parser.add_argument(
        '--output', type=str, help='root of output folder, '
        'the full path is <output>/<model_name>/<tag>')
    parser.add_argument('--tag', type=str, help='tag of experiment')
    parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
    parser.add_argument('--wandb', action='store_true', help='Use W&B to log experiments')
    parser.add_argument('--keep', type=int, help='maximum number of checkpoints to keep')

    # distributed training
    parser.add_argument('--local_rank', type=int, required=True, help='local rank for DistributedDataParallel')

    args = parser.parse_args()

    return args
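
# train() drives the whole run: it builds the image-text train/val loaders and the segmentation
# evaluation loader, creates the GroupViT model, freezes every parameter except the image/text
# projectors, optionally auto-resumes from the latest checkpoint in cfg.output, and then loops
# over train_one_epoch() plus whichever evaluation tasks ('cls', 'seg', 'reid') are enabled in
# cfg.evaluate.task, checkpointing the best metric for each.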

def train(cfg):
    if cfg.wandb and dist.get_rank() == 0:
        import wandb
        wandb.init(
            project='group_vit',
            name=osp.join(cfg.model_name, cfg.tag),
            dir=cfg.output,
            config=OmegaConf.to_container(cfg, resolve=True),
            resume=cfg.checkpoint.auto_resume)
    else:
        wandb = None
    # wait for wandb init
    dist.barrier()

    dataset_train, dataset_val, \
        data_loader_train, data_loader_val = build_loader(cfg.data)
    data_loader_seg = build_seg_dataloader(build_seg_dataset(cfg.evaluate.seg))

    print("\n\n\n")
    print(cfg)
    print("\n\n\n")

    # get the image-text pair dataloaders
    # train_loader, val_img_loader, val_txt_loader, num_classes = build_dataloader(cfg)
    val_img_loader, val_txt_loader, num_classes = build_dataloader(cfg)

    logger = get_logger()

    logger.info(f'Creating model:{cfg.model.type}/{cfg.model_name}')
    model = build_model(cfg.model)
    # load_checkpoint(cfg, model, None, None)

    # Freeze all layers ...
    for param in model.parameters():
        param.requires_grad = False
    # ... then unfreeze only the layers to be fine-tuned.
    # For example, unfreeze the img_projector parameters:
    for param in model.img_projector.parameters():
        param.requires_grad = True
    # and the text_projector parameters:
    for param in model.text_projector.parameters():
        param.requires_grad = True

    model.cuda()
    logger.info(str(model))

    optimizer = build_optimizer(cfg.train, model)
    if cfg.train.amp_opt_level != 'O0':
        model, optimizer = amp.initialize(model, optimizer, opt_level=cfg.train.amp_opt_level)
    model = MMDistributedDataParallel(model, device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
    model_without_ddp = model.module

    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    logger.info(f'number of params: {n_parameters}')

    lr_scheduler = build_scheduler(cfg.train, optimizer, len(data_loader_train))

    if cfg.checkpoint.auto_resume:
        resume_file = auto_resume_helper(cfg.output)
        if resume_file:
            if cfg.checkpoint.resume:
                logger.warning(f'auto-resume changing resume file from {cfg.checkpoint.resume} to {resume_file}')
            with read_write(cfg):
                cfg.checkpoint.resume = resume_file
            logger.info(f'auto resuming from {resume_file}')
        else:
            logger.info(f'no checkpoint found in {cfg.output}, ignoring auto resume')

    max_accuracy = max_miou = max_rank1 = 0.0
    max_metrics = {'max_accuracy': max_accuracy, 'max_miou': max_miou, 'max_rank1': max_rank1}

    if cfg.checkpoint.resume:
        max_metrics = load_checkpoint(cfg, model_without_ddp, optimizer, lr_scheduler)
        max_accuracy, max_miou = max_metrics['max_accuracy'], max_metrics['max_miou']

    if 'cls' in cfg.evaluate.task:
        acc1, acc5, loss = validate_cls(cfg, data_loader_val, model)
        logger.info(f'Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%')
    if 'seg' in cfg.evaluate.task:
        miou = validate_seg(cfg, data_loader_seg, model)
        logger.info(f'mIoU of the network on the {len(data_loader_seg.dataset)} test images: {miou:.2f}%')
    if 'reid' in cfg.evaluate.task:
        # NOTE: data_loader_reid is never built in this script; this branch will raise a
        # NameError unless a ReID dataloader is provided.
        mrank1 = validate_reid(cfg, data_loader_reid, model)
        logger.info(f'Rank1 of the network on the {len(data_loader_reid)} test images: {mrank1:.2f}%')
    if cfg.evaluate.eval_only:
        return

    logger.info('Start training')
    start_time = time.time()
    for epoch in range(cfg.train.start_epoch, cfg.train.epochs):
        loss_train_dict = train_one_epoch(cfg, model, data_loader_train, optimizer, epoch, lr_scheduler)
        if dist.get_rank() == 0 and (epoch % cfg.checkpoint.save_freq == 0 or epoch == (cfg.train.epochs - 1)):
            save_checkpoint(cfg, epoch, model_without_ddp, {
                'max_accuracy': max_accuracy,
                'max_miou': max_miou,
                'max_rank1': max_rank1
            }, optimizer, lr_scheduler)
        dist.barrier()
        loss_train = loss_train_dict['total_loss']
        logger.info(f'Avg loss of the network on the {len(dataset_train)} train images: {loss_train:.2f}')

        # evaluate
        if (epoch % cfg.evaluate.eval_freq == 0 or epoch == (cfg.train.epochs - 1)):
            if 'cls' in cfg.evaluate.task:
                acc1, acc5, loss = validate_cls(cfg, data_loader_val, model)
                logger.info(f'Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%')
                max_metrics['max_accuracy'] = max(max_metrics['max_accuracy'], acc1)
                if cfg.evaluate.cls.save_best and dist.get_rank() == 0 and acc1 > max_accuracy:
                    save_checkpoint(
                        cfg, epoch, model_without_ddp, max_metrics, optimizer, lr_scheduler, suffix='best_acc1')
                dist.barrier()
                max_accuracy = max_metrics['max_accuracy']
                logger.info(f'Max accuracy: {max_accuracy:.2f}%')
            if 'seg' in cfg.evaluate.task:
                miou = validate_seg(cfg, data_loader_seg, model)
                logger.info(f'mIoU of the network on the {len(data_loader_seg.dataset)} test images: {miou:.2f}%')
                max_metrics['max_miou'] = max(max_metrics['max_miou'], miou)
                if cfg.evaluate.seg.save_best and dist.get_rank() == 0 and miou > max_miou:
                    save_checkpoint(
                        cfg, epoch, model_without_ddp, max_metrics, optimizer, lr_scheduler, suffix='best_miou')
                dist.barrier()
                max_miou = max_metrics['max_miou']
                logger.info(f'Max mIoU: {max_miou:.2f}%')
            if 'reid' in cfg.evaluate.task:
                # NOTE: see the comment above; data_loader_reid is not defined in this script.
                mrank1 = validate_reid(cfg, data_loader_reid, model)
                logger.info(f'mRank1 of the network on the {len(data_loader_reid)} test images: {mrank1:.2f}%')
                max_metrics['max_rank1'] = max(max_metrics['max_rank1'], mrank1)
                if cfg.evaluate.reid.save_best and dist.get_rank() == 0 and mrank1 > max_rank1:
                    save_checkpoint(
                        cfg, epoch, model_without_ddp, max_metrics, optimizer, lr_scheduler, suffix='best_rank1')
                dist.barrier()
                max_rank1 = max_metrics['max_rank1']
                logger.info(f'Max mRank1: {max_rank1:.2f}%')

        if wandb is not None:
            log_stat = {f'epoch/train_{k}': v for k, v in loss_train_dict.items()}
            log_stat.update({
                'epoch/val_acc1': acc1,
                'epoch/val_acc5': acc5,
                'epoch/val_loss': loss,
                'epoch/val_miou': miou,
                'epoch/val_rank1': mrank1,
                'epoch/epoch': epoch,
                'epoch/n_parameters': n_parameters
            })
            wandb.log(log_stat)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    logger.info('Training time {}'.format(total_time_str))
    dist.barrier()
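
# train_one_epoch() is a standard forward/backward/step loop with two variants: when
# cfg.train.accumulation_steps > 1 the loss is divided by that factor and optimizer.step() /
# lr_scheduler.step_update() only fire every accumulation_steps iterations; otherwise the
# optimizer steps every iteration. With apex amp enabled the backward pass runs through
# amp.scale_loss and gradient clipping/norms use amp.master_params. Stats are logged every
# cfg.print_freq iterations and the per-loss averages are returned in result_dict.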

def train_one_epoch(config, model, data_loader, optimizer, epoch, lr_scheduler):
    logger = get_logger()
    dist.barrier()
    model.train()
    optimizer.zero_grad()
    if config.wandb and dist.get_rank() == 0:
        import wandb
    else:
        wandb = None

    num_steps = len(data_loader)
    batch_time = AverageMeter()
    loss_meter = AverageMeter()
    norm_meter = AverageMeter()
    log_vars_meters = defaultdict(AverageMeter)

    start = time.time()
    end = time.time()
    for idx, samples in enumerate(data_loader):

        batch_size = config.data.batch_size

        losses = model(**samples)

        loss, log_vars = parse_losses(losses)

        if config.train.accumulation_steps > 1:
            loss = loss / config.train.accumulation_steps
            if config.train.amp_opt_level != 'O0':
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                if config.train.clip_grad:
                    grad_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.train.clip_grad)
                else:
                    grad_norm = get_grad_norm(amp.master_params(optimizer))
            else:
                loss.backward()
                if config.train.clip_grad:
                    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.train.clip_grad)
                else:
                    grad_norm = get_grad_norm(model.parameters())
            if (idx + 1) % config.train.accumulation_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
                lr_scheduler.step_update(epoch * num_steps + idx)
        else:
            optimizer.zero_grad()
            if config.train.amp_opt_level != 'O0':
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                if config.train.clip_grad:
                    grad_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.train.clip_grad)
                else:
                    grad_norm = get_grad_norm(amp.master_params(optimizer))
            else:
                loss.backward()
                if config.train.clip_grad:
                    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.train.clip_grad)
                else:
                    grad_norm = get_grad_norm(model.parameters())
            optimizer.step()
            lr_scheduler.step_update(epoch * num_steps + idx)

        torch.cuda.synchronize()

        loss_meter.update(loss.item(), batch_size)
        for loss_name in log_vars:
            log_vars_meters[loss_name].update(log_vars[loss_name], batch_size)
        norm_meter.update(grad_norm)
        batch_time.update(time.time() - end)
        end = time.time()

        if idx % config.print_freq == 0:
            lr = optimizer.param_groups[0]['lr']
            memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
            etas = batch_time.avg * (num_steps - idx)
            log_vars_str = '\t'.join(f'{n} {m.val:.4f} ({m.avg:.4f})' for n, m in log_vars_meters.items())
            logger.info(f'Train: [{epoch}/{config.train.epochs}][{idx}/{num_steps}]\t'
                        f'eta {datetime.timedelta(seconds=int(etas))} lr {lr:.6f}\t'
                        f'time {batch_time.val:.4f} ({batch_time.avg:.4f})\t'
                        f'total_loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
                        f'{log_vars_str}\t'
                        f'grad_norm {norm_meter.val:.4f} ({norm_meter.avg:.4f})\t'
                        f'mem {memory_used:.0f}MB')
            if wandb is not None:
                log_stat = {f'iter/train_{n}': m.avg for n, m in log_vars_meters.items()}
                log_stat['iter/train_total_loss'] = loss_meter.avg
                log_stat['iter/learning_rate'] = lr
                wandb.log(log_stat)

    epoch_time = time.time() - start
    logger.info(f'EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}')
    result_dict = dict(total_loss=loss_meter.avg)
    for n, m in log_vars_meters.items():
        result_dict[n] = m.avg
    dist.barrier()
    return result_dict
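
# validate_cls() performs zero-shot ImageNet classification: class names are expanded with the
# prompt templates in cfg.evaluate.cls.template, encoded once into a text-embedding bank, and
# each image batch is scored against that bank. Loss and top-1/top-5 accuracy are all-reduced
# across ranks with reduce_tensor before being accumulated.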

@torch.no_grad()
def validate_cls(config, data_loader, model):
    logger = get_logger()
    dist.barrier()
    criterion = torch.nn.CrossEntropyLoss()
    model.eval()

    batch_time = AverageMeter()
    loss_meter = AverageMeter()
    acc1_meter = AverageMeter()
    acc5_meter = AverageMeter()

    text_transform = build_text_transform(False, config.data.text_aug, with_dc=False)

    end = time.time()
    logger.info('Building zero shot classifier')
    text_embedding = data2cuda(
        model.module.build_text_embedding(
            build_dataset_class_tokens(text_transform, config.evaluate.cls.template, imagenet_classes)))
    logger.info('Zero shot classifier built')
    for idx, samples in enumerate(data_loader):
        target = samples.pop('target').data[0].cuda()
        target = data2cuda(target)

        # compute output
        output = model(**samples, text=text_embedding)

        # measure accuracy and record loss
        loss = criterion(output, target)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))

        acc1 = reduce_tensor(acc1)
        acc5 = reduce_tensor(acc5)
        loss = reduce_tensor(loss)

        loss_meter.update(loss.item(), target.size(0))
        acc1_meter.update(acc1.item(), target.size(0))
        acc5_meter.update(acc5.item(), target.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if idx % config.print_freq == 0:
            memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
            logger.info(f'Test: [{idx}/{len(data_loader)}]\t'
                        f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
                        f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
                        f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t'
                        f'Mem {memory_used:.0f}MB')
    logger.info('Clearing zero shot classifier')
    torch.cuda.empty_cache()
    logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
    dist.barrier()
    return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
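
# validate_seg() builds a text-conditioned segmentation wrapper around the model with
# build_seg_inference, runs mmseg's multi_gpu_test (gpu_collect=True), evaluates mIoU on rank 0
# only, and broadcasts the metric dict so every rank returns the same score.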

@torch.no_grad()
def validate_seg(config, data_loader, model):
    logger = get_logger()
    dist.barrier()
    model.eval()

    if hasattr(model, 'module'):
        model_without_ddp = model.module
    else:
        model_without_ddp = model

    text_transform = build_text_transform(False, config.data.text_aug, with_dc=False)
    seg_model = build_seg_inference(model_without_ddp, data_loader.dataset, text_transform, config.evaluate.seg)

    mmddp_model = MMDistributedDataParallel(
        seg_model, device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
    mmddp_model.eval()
    results = multi_gpu_test(
        model=mmddp_model,
        data_loader=data_loader,
        tmpdir=None,
        gpu_collect=True,
        efficient_test=False,
        pre_eval=True,
        format_only=False)

    if dist.get_rank() == 0:
        metric = [data_loader.dataset.evaluate(results, metric='mIoU')]
    else:
        metric = [None]
    dist.broadcast_object_list(metric)
    miou_result = metric[0]['mIoU'] * 100

    torch.cuda.empty_cache()
    logger.info(f'Eval Seg mIoU {miou_result:.2f}')
    dist.barrier()
    return miou_result
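
# validate_reid() is currently only a stub: it contains no evaluation logic and returns None,
# while the 'reid' branch in train() expects a Rank-1 score and a data_loader_reid that this
# script never constructs. Keep 'reid' out of cfg.evaluate.task until this is implemented.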

@torch.no_grad()
def validate_reid(config, data_loader, model):
    print()
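
# main() initializes distributed training and linearly scales the configured learning rates by
# the total batch size relative to a reference of 4096:
#   scaled_lr = base_lr * batch_size_per_gpu * world_size / 4096
# multiplied by accumulation_steps when gradient accumulation is used. For example, a per-GPU
# batch size of 256 on 8 GPUs gives a factor of 256 * 8 / 4096 = 0.5.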

def main():
    args = parse_args()
    cfg = get_config(args)

    if cfg.train.amp_opt_level != 'O0':
        assert amp is not None, 'amp not installed!'

    # start faster ref: https://github.com/open-mmlab/mmdetection/pull/7036
    mp.set_start_method('fork', force=True)
    init_dist('pytorch')
    rank, world_size = get_dist_info()
    print(f'RANK and WORLD_SIZE in environ: {rank}/{world_size}')
    dist.barrier()

    set_random_seed(cfg.seed, use_rank_shift=True)
    cudnn.benchmark = True

    os.makedirs(cfg.output, exist_ok=True)
    logger = get_logger(cfg)

    # linearly scale the learning rate with the total batch size; this may not be optimal
    linear_scaled_lr = cfg.train.base_lr * cfg.data.batch_size * world_size / 4096.0
    linear_scaled_warmup_lr = cfg.train.warmup_lr * cfg.data.batch_size * world_size / 4096.0
    linear_scaled_min_lr = cfg.train.min_lr * cfg.data.batch_size * world_size / 4096.0

    # gradient accumulation also needs to scale the learning rate
    if cfg.train.accumulation_steps > 1:
        linear_scaled_lr = linear_scaled_lr * cfg.train.accumulation_steps
        linear_scaled_warmup_lr = linear_scaled_warmup_lr * cfg.train.accumulation_steps
        linear_scaled_min_lr = linear_scaled_min_lr * cfg.train.accumulation_steps

    with read_write(cfg):
        logger.info(f'Scale base_lr from {cfg.train.base_lr} to {linear_scaled_lr}')
        logger.info(f'Scale warmup_lr from {cfg.train.warmup_lr} to {linear_scaled_warmup_lr}')
        logger.info(f'Scale min_lr from {cfg.train.min_lr} to {linear_scaled_min_lr}')
        cfg.train.base_lr = linear_scaled_lr
        cfg.train.warmup_lr = linear_scaled_warmup_lr
        cfg.train.min_lr = linear_scaled_min_lr

    if dist.get_rank() == 0:
        path = os.path.join(cfg.output, 'config.json')
        OmegaConf.save(cfg, path)
        logger.info(f'Full config saved to {path}')

    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line)
    logger.info(f'Git hash: {get_git_hash(digits=7)}')

    # print config
    logger.info(OmegaConf.to_yaml(cfg))

    train(cfg)
    dist.barrier()


if __name__ == '__main__':
    main()