metrics.py

from prettytable import PrettyTable
import torch
import torch.nn.functional as F
import logging


def rank(similarity, q_pids, g_pids, max_rank=10, get_mAP=True):
    """Compute CMC (and optionally mAP/mINP) from a query-gallery similarity matrix."""
    if get_mAP:
        indices = torch.argsort(similarity, dim=1, descending=True)
    else:
        # accelerate the sort with topk when only CMC is needed
        _, indices = torch.topk(
            similarity, k=max_rank, dim=1, largest=True, sorted=True
        )  # q * topk
    pred_labels = g_pids[indices.cpu()]  # q * k
    matches = pred_labels.eq(q_pids.view(-1, 1))  # q * k

    all_cmc = matches[:, :max_rank].cumsum(1)  # cumulative sum
    all_cmc[all_cmc > 1] = 1  # clamp so each query counts at most once per rank
    all_cmc = all_cmc.float().mean(0) * 100
    # all_cmc = all_cmc[topk - 1]

    if not get_mAP:
        return all_cmc, indices

    num_rel = matches.sum(1)  # q: number of relevant gallery items per query
    tmp_cmc = matches.cumsum(1)  # q * k

    # mINP: precision at the position of the hardest (last) correct match.
    # Note: nonzero()[-1] assumes every query has at least one gallery match.
    inp = [tmp_cmc[i][match_row.nonzero()[-1]] / (match_row.nonzero()[-1] + 1.)
           for i, match_row in enumerate(matches)]
    mINP = torch.cat(inp).mean() * 100

    tmp_cmc = [tmp_cmc[:, i] / (i + 1.0) for i in range(tmp_cmc.shape[1])]
    tmp_cmc = torch.stack(tmp_cmc, 1) * matches  # precision at each correct rank
    AP = tmp_cmc.sum(1) / num_rel  # q
    mAP = AP.mean() * 100

    return all_cmc, mAP, mINP, indices
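
# ---------------------------------------------------------------------------
# Hypothetical usage sketch (illustration only, not part of the original
# module): a minimal sanity check for `rank` on a toy 2-query x 4-gallery
# problem. The tensors below are made-up; shapes follow the comments in
# `rank` (similarity: q * g, q_pids: q, g_pids: g), and every query is
# assumed to have at least one gallery match.
def _rank_smoke_test():
    similarity = torch.tensor([[0.9, 0.1, 0.8, 0.2],
                               [0.3, 0.7, 0.2, 0.6]])
    q_pids = torch.tensor([0, 1])
    g_pids = torch.tensor([0, 1, 0, 2])
    cmc, mAP, mINP, _ = rank(similarity, q_pids, g_pids, max_rank=4)
    # Both queries rank a correct identity first, so R1 should be 100.0.
    print(f"R1={cmc[0].item():.1f} mAP={mAP.item():.1f} mINP={mINP.item():.1f}")
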

class Evaluator():
    def __init__(self, img_loader, txt_loader):
        self.img_loader = img_loader  # gallery
        self.txt_loader = txt_loader  # query
        self.logger = logging.getLogger("IRRA.eval")

    def _compute_embedding(self, model):
        model = model.eval()
        device = next(model.parameters()).device

        qids, gids, qfeats, gfeats = [], [], [], []
        # text (query side)
        for pid, caption in self.txt_loader:
            caption = caption.to(device)
            with torch.no_grad():
                text_feat = model.encode_text(caption)
            qids.append(pid.view(-1))  # flatten
            qfeats.append(text_feat)
        qids = torch.cat(qids, 0)
        qfeats = torch.cat(qfeats, 0)

        # image (gallery side)
        for pid, img in self.img_loader:
            img = img.to(device)
            with torch.no_grad():
                img_feat = model.encode_image(img)
            gids.append(pid.view(-1))  # flatten
            gfeats.append(img_feat)
        gids = torch.cat(gids, 0)
        gfeats = torch.cat(gfeats, 0)

        return qfeats, gfeats, qids, gids

    def eval(self, model, i2t_metric=False):
        qfeats, gfeats, qids, gids = self._compute_embedding(model)

        qfeats = F.normalize(qfeats, p=2, dim=1)  # text features
        gfeats = F.normalize(gfeats, p=2, dim=1)  # image features

        similarity = qfeats @ gfeats.t()

        t2i_cmc, t2i_mAP, t2i_mINP, _ = rank(similarity=similarity, q_pids=qids, g_pids=gids, max_rank=10, get_mAP=True)
        t2i_cmc, t2i_mAP, t2i_mINP = t2i_cmc.numpy(), t2i_mAP.numpy(), t2i_mINP.numpy()
        table = PrettyTable(["task", "R1", "R5", "R10", "mAP", "mINP"])
        table.add_row(['t2i', t2i_cmc[0], t2i_cmc[4], t2i_cmc[9], t2i_mAP, t2i_mINP])

        if i2t_metric:
            i2t_cmc, i2t_mAP, i2t_mINP, _ = rank(similarity=similarity.t(), q_pids=gids, g_pids=qids, max_rank=10, get_mAP=True)
            i2t_cmc, i2t_mAP, i2t_mINP = i2t_cmc.numpy(), i2t_mAP.numpy(), i2t_mINP.numpy()
            table.add_row(['i2t', i2t_cmc[0], i2t_cmc[4], i2t_cmc[9], i2t_mAP, i2t_mINP])

        # table.float_format = '.4'
        for col in ("R1", "R5", "R10", "mAP", "mINP"):
            table.custom_format[col] = lambda f, v: f"{v:.3f}"

        self.logger.info('\n' + str(table))

        return t2i_cmc[0]
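
# ---------------------------------------------------------------------------
# Hypothetical smoke test (illustration only): runs `_rank_smoke_test` and
# exercises the full `Evaluator.eval` pipeline with a toy encoder and
# in-memory "loaders". `_ToyEncoder`, the 4-d inputs, and the 16-d embedding
# size are assumptions for this sketch; `Evaluator` only requires that the
# loaders yield (pid, data) batches and that the model exposes
# encode_text/encode_image.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    _rank_smoke_test()

    class _ToyEncoder(torch.nn.Module):
        """Maps 4-d toy inputs to a shared 16-d embedding space."""
        def __init__(self):
            super().__init__()
            self.proj = torch.nn.Linear(4, 16)

        def encode_text(self, x):
            return self.proj(x)

        def encode_image(self, x):
            return self.proj(x)

    torch.manual_seed(0)
    pids = torch.arange(12)    # 12 distinct identities
    data = torch.randn(12, 4)  # shared toy inputs
    # One batch per loader; eval() reads CMC up to rank 10, so the gallery
    # must contain at least 10 items.
    img_loader = [(pids, data)]
    txt_loader = [(pids, data + 0.05 * torch.randn(12, 4))]
    Evaluator(img_loader, txt_loader).eval(_ToyEncoder())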