diff --git a/.gitignore b/.gitignore
index e34abed4fa..84a9dbbf2c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,5 +10,6 @@ aiServer/runs/detect/exp/2021-08-05 13-11-50 .jpg
/aiServer/TextReID/runs
aiServer/crowdhuman_yolov5m.pt
ai/.env
-frontend/.env
+frontend/.env
server/src/main/java/com/capstone/server/dataInitializer/CCTVInitializer.java
+
diff --git a/ai/TextReID/datasets/cuhkpedes/clip_vocab_vit.npy b/ai/TextReID/datasets/cuhkpedes/clip_vocab_vit.npy
deleted file mode 100644
index d45b29510d..0000000000
Binary files a/ai/TextReID/datasets/cuhkpedes/clip_vocab_vit.npy and /dev/null differ
diff --git a/ai/TextReID/lib/data/datasets/__init__.py b/ai/TextReID/lib/data/datasets/__init__.py
deleted file mode 100644
index e4ee7a6fc9..0000000000
--- a/ai/TextReID/lib/data/datasets/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .concat_dataset import ConcatDataset
-from .cuhkpedes import CUHKPEDESDataset
-
-__all__ = ["ConcatDataset", "CUHKPEDESDataset"]
diff --git a/ai/TextReID/lib/data/datasets/concat_dataset.py b/ai/TextReID/lib/data/datasets/concat_dataset.py
deleted file mode 100644
index 87e054a6bc..0000000000
--- a/ai/TextReID/lib/data/datasets/concat_dataset.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-import bisect
-
-from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
-
-
-class ConcatDataset(_ConcatDataset):
- """
- Same as torch.utils.data.dataset.ConcatDataset, but exposes an extra
- method for querying the sizes of the image
- """
-
- def get_idxs(self, idx):
- dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
- if dataset_idx == 0:
- sample_idx = idx
- else:
- sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
- return dataset_idx, sample_idx
-
- def get_id_info(self, idx):
- dataset_idx, sample_idx = self.get_idxs(idx)
- return self.datasets[dataset_idx].get_id_info(sample_idx)
diff --git a/ai/TextReID/lib/data/datasets/cuhkpedes.py b/ai/TextReID/lib/data/datasets/cuhkpedes.py
deleted file mode 100644
index 31707579d8..0000000000
--- a/ai/TextReID/lib/data/datasets/cuhkpedes.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import json
-import os
-
-import torch
-from PIL import Image
-
-from lib.utils.caption import Caption
-
-
-class CUHKPEDESDataset(torch.utils.data.Dataset):
- def __init__(
- self,
- root,
- ann_file,
- use_onehot=True,
- max_length=100,
- transforms=None,
- ):
- self.root = root
- self.use_onehot = use_onehot
- self.max_length = max_length
- self.transforms = transforms
-
- self.img_dir = os.path.join(self.root, "imgs")
- # self.img_dir = self.root
-
- print("loading annotations into memory...")
- dataset = json.load(open(ann_file, "r"))
- self.dataset = dataset["annotations"]
-
- def __getitem__(self, index):
- """
- Args:
- index(int): Index
- Returns:
- tuple: (images, labels, captions)
- """
- data = self.dataset[index]
-
- img_path = data["file_path"]
- img = Image.open(os.path.join(self.img_dir, img_path)).convert("RGB")
-
- if self.use_onehot:
- caption = data["onehot"]
- caption = torch.tensor(caption)
- caption = Caption([caption], max_length=self.max_length, padded=False)
- else:
- caption = data["sentence"]
- caption = Caption(caption)
-
- caption.add_field("img_path", img_path)
-
- label = int(data["id"])
- label = torch.tensor(label)
- caption.add_field("id", label)
-
- if self.transforms is not None:
- img = self.transforms(img)
-
- query = data["sentence"]
-
- return img, caption, index, query
-
- def __len__(self):
- return len(self.dataset)
-
- def get_id_info(self, index):
- image_id = self.dataset[index]["image_id"]
- pid = self.dataset[index]["id"]
- return image_id, pid
diff --git a/ai/TextReID/lib/data/encode/encoding.py b/ai/TextReID/lib/data/encode/encoding.py
index 05133ccd92..ffe109ffcb 100644
--- a/ai/TextReID/lib/data/encode/encoding.py
+++ b/ai/TextReID/lib/data/encode/encoding.py
@@ -1,75 +1,75 @@
-import json
-import re
-import os
-
-
-word_dict = {} # word (str) : encode (int)
-max_onehot = -1 # needed when appending an unseen word to the dictionary
-
-
-def load_word_dict(file_path):
- global word_dict
- global max_onehot
-
- if os.path.exists(file_path):
- with open(file_path, "r") as file:
- data = json.load(file)
- word_dict = data["word_dict"]
- max_onehot = data["max_onehot"]
- else:
- word_dict_path = os.path.dirname(os.path.abspath(__file__))+"/word_dict/test.json"
- update_word_dict(word_dict_path)
-
-
-def save_word_dict(file_path):
- global word_dict
- global max_onehot
-
- data = {"word_dict": word_dict, "max_onehot": max_onehot}
-
- with open(file_path, "w") as file:
- json.dump(data, file)
-
-
-def update_word_dict(file_path):
- global word_dict
- global max_onehot
-
- with open(file_path, "r") as file:
- data = json.load(file)
-
- for i in range(len(data["annotations"])):
- words = re.sub(r'[^a-zA-Z0-9\s]', '', data["annotations"][i]["sentence"])
- words = words.split()
- for word, onehot in zip(words, data["annotations"][i]["onehot"]):
- if onehot > max_onehot:
- max_onehot = onehot
- if word.lower() not in word_dict.keys():
- word_dict[word.lower()] = onehot
-
-
-def encode(query):
- global word_dict
- global max_onehot
-
- output = []
- query = re.sub(r'[^a-zA-Z0-9\s]', ' ', query)
-
- for w in query.split():
- try:
- output.append(word_dict[w.lower()])
- except KeyError:
- print("Key %s not found in the dictionary." % w)
- word_dict[w.lower()] = max_onehot + 1
- output.append(word_dict[w.lower()])
- max_onehot += 1
-
- return output
-
-
-def encoder(caption, file_path=os.path.dirname(os.path.abspath(__file__))+"/word_dict/annotations.json"):
- load_word_dict(file_path)
- caption = encode(caption)
- save_word_dict(file_path)
-
+import json
+import re
+import os
+
+
+word_dict = {} # word (str) : encode (int)
+max_onehot = -1 # needed when appending an unseen word to the dictionary
+
+
+def load_word_dict(file_path):
+ global word_dict
+ global max_onehot
+
+ if os.path.exists(file_path):
+ with open(file_path, "r") as file:
+ data = json.load(file)
+ word_dict = data["word_dict"]
+ max_onehot = data["max_onehot"]
+ else:
+ word_dict_path = os.path.dirname(os.path.abspath(__file__))+"/word_dict/test.json"
+ update_word_dict(word_dict_path)
+
+
+def save_word_dict(file_path):
+ global word_dict
+ global max_onehot
+
+ data = {"word_dict": word_dict, "max_onehot": max_onehot}
+
+ with open(file_path, "w") as file:
+ json.dump(data, file)
+
+
+def update_word_dict(file_path):
+ global word_dict
+ global max_onehot
+
+ with open(file_path, "r") as file:
+ data = json.load(file)
+
+ for i in range(len(data["annotations"])):
+ words = re.sub(r'[^a-zA-Z0-9\s]', '', data["annotations"][i]["sentence"])
+ words = words.split()
+ for word, onehot in zip(words, data["annotations"][i]["onehot"]):
+ if onehot > max_onehot:
+ max_onehot = onehot
+ if word.lower() not in word_dict.keys():
+ word_dict[word.lower()] = onehot
+
+
+def encode(query):
+ global word_dict
+ global max_onehot
+
+ output = []
+ query = re.sub(r'[^a-zA-Z0-9\s]', ' ', query)
+
+ for w in query.split():
+ try:
+ output.append(word_dict[w.lower()])
+ except KeyError:
+ print("Key %s not found in the dictionary." % w)
+ word_dict[w.lower()] = max_onehot + 1
+ output.append(word_dict[w.lower()])
+ max_onehot += 1
+
+ return output
+
+
+def encoder(caption, file_path=os.path.dirname(os.path.abspath(__file__))+"/word_dict/annotations.json"):
+ load_word_dict(file_path)
+ caption = encode(caption)
+ save_word_dict(file_path)
+
return caption
\ No newline at end of file
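
The rewritten encoding.py keeps a persistent word-to-id dictionary on disk and grows it whenever a query contains unseen words. A minimal usage sketch, assuming the module is importable and the default word_dict/annotations.json ships with the repo (the query string here is made up):

```python
# encoder() loads the word dict, maps the query to its onehot ids
# (appending fresh ids for unseen words), and writes the dict back to disk.
from lib.data.encode.encoding import encoder

ids = encoder("a woman wearing a red jacket and black pants")
print(ids)  # e.g. [12, 873, 44, ...] -- actual ids depend on annotations.json
```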
diff --git a/ai/TextReID/lib/data/metrics/evaluation.py b/ai/TextReID/lib/data/metrics/evaluation.py
index bde5d46db2..60c63f4378 100644
--- a/ai/TextReID/lib/data/metrics/evaluation.py
+++ b/ai/TextReID/lib/data/metrics/evaluation.py
@@ -1,151 +1,151 @@
-import logging
-import os
-
-import json
-import numpy as np
-import torch
-import torch.nn.functional as F
-from lib.config import cfg
-
-from lib.utils.logger import table_log
-
-
-def rank(similarity, q_pids, g_pids, topk=[1, 5, 10], get_mAP=True):
- max_rank = max(topk)
- if get_mAP:
- indices = torch.argsort(similarity, dim=1, descending=True)
- else:
-        # accelerate sort with topk
- _, indices = torch.topk(
- similarity, k=max_rank, dim=1, largest=True, sorted=True
- ) # q * topk
- indices = indices.to(g_pids.device)
- pred_labels = g_pids[indices] # q * k
- matches = pred_labels.eq(q_pids.view(-1, 1)) # q * k
-
- all_cmc = matches[:, :max_rank].cumsum(1)
- all_cmc[all_cmc > 1] = 1
- all_cmc = all_cmc.float().mean(0) * 100
- all_cmc = all_cmc[topk - 1]
-
- if not get_mAP:
- return all_cmc, indices
-
- num_rel = matches.sum(1) # q
- tmp_cmc = matches.cumsum(1) # q * k
- tmp_cmc = [tmp_cmc[:, i] / (i + 1.0) for i in range(tmp_cmc.shape[1])]
- tmp_cmc = torch.stack(tmp_cmc, 1) * matches
- AP = tmp_cmc.sum(1) / num_rel # q
- mAP = AP.mean() * 100
- return all_cmc, mAP, indices
-
-
-def jaccard(a_list, b_list):
- return float(len(set(a_list) & set(b_list))) / float(len(set(a_list) | set(b_list)))
-
-
-def jaccard_mat(row_nn, col_nn):
- jaccard_sim = np.zeros((row_nn.shape[0], col_nn.shape[0]))
- # FIXME: need optimization
- for i in range(row_nn.shape[0]):
- for j in range(col_nn.shape[0]):
- jaccard_sim[i, j] = jaccard(row_nn[i], col_nn[j])
- return torch.from_numpy(jaccard_sim)
-
-
-def k_reciprocal(q_feats, g_feats, neighbor_num=5, alpha=0.05):
- qg_sim = torch.matmul(q_feats, g_feats.t()) # q * g
- gg_sim = torch.matmul(g_feats, g_feats.t()) # g * g
-
- qg_indices = torch.argsort(qg_sim, dim=1, descending=True)
- gg_indices = torch.argsort(gg_sim, dim=1, descending=True)
-
- qg_nn = qg_indices[:, :neighbor_num] # q * n
- gg_nn = gg_indices[:, :neighbor_num] # g * n
-
- jaccard_sim = jaccard_mat(qg_nn.cpu().numpy(), gg_nn.cpu().numpy()) # q * g
- jaccard_sim = jaccard_sim.to(qg_sim.device)
- return alpha * jaccard_sim # q * g
-
-
-def get_unique(image_ids):
- keep_idx = {}
- for idx, image_id in enumerate(image_ids):
- if image_id not in keep_idx.keys():
- keep_idx[image_id] = idx
- return torch.tensor(list(keep_idx.values()))
-
-
-def evaluation(
- dataset,
- predictions,
- output_folder,
- topk,
- cap,
- save_data=True,
- rerank=True,
- search_num=0,
- save_folder = "./output/output.json"
-):
- logger = logging.getLogger("PersonSearch.inference")
- data_dir = os.path.join(output_folder, "inference_data.npz")
-
- if predictions is None:
- inference_data = np.load(data_dir)
- logger.info("Load inference data from {}".format(data_dir))
- image_pid = torch.tensor(inference_data["image_pid"])
- text_pid = torch.tensor(inference_data["text_pid"])
- similarity = torch.tensor(inference_data["similarity"])
- if rerank:
- rvn_mat = torch.tensor(inference_data["rvn_mat"])
- rtn_mat = torch.tensor(inference_data["rtn_mat"])
- else:
- image_ids, pids = [], []
- image_global, text_global = [], []
-
- # FIXME: need optimization
- for idx, prediction in predictions.items():
- image_id, pid = dataset.get_id_info(idx)
- image_ids.append(image_id)
- pids.append(pid)
- image_global.append(prediction[0])
- if len(prediction) == 2:
-            # since only one text query was given, the text embedding is attached
-            # to the first image of each batch only (similarity was computed per batch)
- text_global.append(prediction[1])
-
- pids = list(map(int, pids))
- image_pid = torch.tensor(pids)
- text_pid = torch.tensor(pids)
- image_global = torch.stack(image_global, dim=0)
- text_global = torch.stack(text_global, dim=0)
-
- keep_idx = get_unique(image_ids)
- image_global = image_global[keep_idx]
- image_pid = image_pid[keep_idx]
-
- image_global = F.normalize(image_global, p=2, dim=1)
- text_global = F.normalize(text_global, p=2, dim=1)
-
- similarity = torch.matmul(text_global, image_global.t())
-
-    # return the top-k results
- sorted_indices = torch.argsort(similarity[0], descending=True)
- sorted_values = similarity[0][sorted_indices]
- top_k = cfg.TEST.TOP_K
-    write = [cap[0]] # output to be saved
- for index, value in zip(sorted_indices[:top_k], sorted_values[:top_k]):
- # image_id, pid = dataset.get_id_info(idx)
- img, caption, idx, query = dataset.__getitem__(index)
- img_path = caption.get_field("img_path")
- # print(f"Index: {index}, Similarity: {value}, pid: {pid}")
- dict = {"img_path": img_path, "Similarity": value.item()}
- write.append(dict)
-
-
-    # create the save folder if it does not exist
- if not os.path.exists(os.path.dirname(save_folder)):
- os.makedirs(os.path.dirname(save_folder), exist_ok=True)
-
- with open(save_folder, "w", encoding='utf-8') as f:
+import logging
+import os
+
+import json
+import numpy as np
+import torch
+import torch.nn.functional as F
+from lib.config import cfg
+
+from lib.utils.logger import table_log
+
+
+def rank(similarity, q_pids, g_pids, topk=[1, 5, 10], get_mAP=True):
+ max_rank = max(topk)
+ if get_mAP:
+ indices = torch.argsort(similarity, dim=1, descending=True)
+ else:
+        # accelerate sort with topk
+ _, indices = torch.topk(
+ similarity, k=max_rank, dim=1, largest=True, sorted=True
+ ) # q * topk
+ indices = indices.to(g_pids.device)
+ pred_labels = g_pids[indices] # q * k
+ matches = pred_labels.eq(q_pids.view(-1, 1)) # q * k
+
+ all_cmc = matches[:, :max_rank].cumsum(1)
+ all_cmc[all_cmc > 1] = 1
+ all_cmc = all_cmc.float().mean(0) * 100
+ all_cmc = all_cmc[topk - 1]
+
+ if not get_mAP:
+ return all_cmc, indices
+
+ num_rel = matches.sum(1) # q
+ tmp_cmc = matches.cumsum(1) # q * k
+ tmp_cmc = [tmp_cmc[:, i] / (i + 1.0) for i in range(tmp_cmc.shape[1])]
+ tmp_cmc = torch.stack(tmp_cmc, 1) * matches
+ AP = tmp_cmc.sum(1) / num_rel # q
+ mAP = AP.mean() * 100
+ return all_cmc, mAP, indices
+
+
+def jaccard(a_list, b_list):
+ return float(len(set(a_list) & set(b_list))) / float(len(set(a_list) | set(b_list)))
+
+
+def jaccard_mat(row_nn, col_nn):
+ jaccard_sim = np.zeros((row_nn.shape[0], col_nn.shape[0]))
+ # FIXME: need optimization
+ for i in range(row_nn.shape[0]):
+ for j in range(col_nn.shape[0]):
+ jaccard_sim[i, j] = jaccard(row_nn[i], col_nn[j])
+ return torch.from_numpy(jaccard_sim)
+
+
+def k_reciprocal(q_feats, g_feats, neighbor_num=5, alpha=0.05):
+ qg_sim = torch.matmul(q_feats, g_feats.t()) # q * g
+ gg_sim = torch.matmul(g_feats, g_feats.t()) # g * g
+
+ qg_indices = torch.argsort(qg_sim, dim=1, descending=True)
+ gg_indices = torch.argsort(gg_sim, dim=1, descending=True)
+
+ qg_nn = qg_indices[:, :neighbor_num] # q * n
+ gg_nn = gg_indices[:, :neighbor_num] # g * n
+
+ jaccard_sim = jaccard_mat(qg_nn.cpu().numpy(), gg_nn.cpu().numpy()) # q * g
+ jaccard_sim = jaccard_sim.to(qg_sim.device)
+ return alpha * jaccard_sim # q * g
+
+
+def get_unique(image_ids):
+ keep_idx = {}
+ for idx, image_id in enumerate(image_ids):
+ if image_id not in keep_idx.keys():
+ keep_idx[image_id] = idx
+ return torch.tensor(list(keep_idx.values()))
+
+
+def evaluation(
+ dataset,
+ predictions,
+ output_folder,
+ topk,
+ cap,
+ save_data=True,
+ rerank=True,
+ search_num=0,
+ save_folder = "./output/output.json"
+):
+ logger = logging.getLogger("PersonSearch.inference")
+ data_dir = os.path.join(output_folder, "inference_data.npz")
+
+ if predictions is None:
+ inference_data = np.load(data_dir)
+ logger.info("Load inference data from {}".format(data_dir))
+ image_pid = torch.tensor(inference_data["image_pid"])
+ text_pid = torch.tensor(inference_data["text_pid"])
+ similarity = torch.tensor(inference_data["similarity"])
+ if rerank:
+ rvn_mat = torch.tensor(inference_data["rvn_mat"])
+ rtn_mat = torch.tensor(inference_data["rtn_mat"])
+ else:
+ image_ids, pids = [], []
+ image_global, text_global = [], []
+
+ # FIXME: need optimization
+ for idx, prediction in predictions.items():
+ image_id, pid = dataset.get_id_info(idx)
+ image_ids.append(image_id)
+ pids.append(pid)
+ image_global.append(prediction[0])
+ if len(prediction) == 2:
+            # since only one text query was given, the text embedding is attached
+            # to the first image of each batch only (similarity was computed per batch)
+ text_global.append(prediction[1])
+
+ pids = list(map(int, pids))
+ image_pid = torch.tensor(pids)
+ text_pid = torch.tensor(pids)
+ image_global = torch.stack(image_global, dim=0)
+ text_global = torch.stack(text_global, dim=0)
+
+ keep_idx = get_unique(image_ids)
+ image_global = image_global[keep_idx]
+ image_pid = image_pid[keep_idx]
+
+ image_global = F.normalize(image_global, p=2, dim=1)
+ text_global = F.normalize(text_global, p=2, dim=1)
+
+ similarity = torch.matmul(text_global, image_global.t())
+
+    # return the top-k results
+ sorted_indices = torch.argsort(similarity[0], descending=True)
+ sorted_values = similarity[0][sorted_indices]
+ top_k = cfg.TEST.TOP_K
+    write = [cap[0]] # output to be saved
+ for index, value in zip(sorted_indices[:top_k], sorted_values[:top_k]):
+ # image_id, pid = dataset.get_id_info(idx)
+ img, caption, idx, query = dataset.__getitem__(index)
+ img_path = caption.get_field("img_path")
+ # print(f"Index: {index}, Similarity: {value}, pid: {pid}")
+ dict = {"img_path": img_path, "Similarity": value.item()}
+ write.append(dict)
+
+
+    # create the save folder if it does not exist
+ if not os.path.exists(os.path.dirname(save_folder)):
+ os.makedirs(os.path.dirname(save_folder), exist_ok=True)
+
+ with open(save_folder, "w", encoding='utf-8') as f:
json.dump(write, f, ensure_ascii=False, indent=4)
\ No newline at end of file
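
rank() above turns a query-by-gallery similarity matrix into CMC and mAP scores; the rewritten evaluation() bypasses it and just dumps the top-k hits for the single query. A toy illustration of the CMC computation on synthetic tensors:

```python
# Two queries against a three-image gallery: sort similarities per query,
# compare gallery ids with the query id, and average the cumulative
# match curve over queries (mirrors the core of rank()).
import torch

similarity = torch.tensor([[0.9, 0.1, 0.4],
                           [0.2, 0.8, 0.3]])  # query x gallery
q_pids = torch.tensor([0, 1])
g_pids = torch.tensor([0, 1, 0])

indices = torch.argsort(similarity, dim=1, descending=True)
matches = g_pids[indices].eq(q_pids.view(-1, 1))  # query x gallery hits

cmc = matches.cumsum(1).clamp(max=1).float().mean(0) * 100
print(cmc)  # CMC at ranks 1..3 -> tensor([100., 100., 100.]) here
```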
diff --git a/ai/TextReID/lib/engine/inference.py b/ai/TextReID/lib/engine/inference.py
index cb775af6dd..d719e24b47 100644
--- a/ai/TextReID/lib/engine/inference.py
+++ b/ai/TextReID/lib/engine/inference.py
@@ -1,111 +1,111 @@
-import datetime
-import logging
-import os
-import time
-from collections import defaultdict
-
-import torch
-from tqdm import tqdm
-
-from lib.data.metrics import evaluation
-from lib.utils.comm import all_gather, is_main_process, synchronize
-
-from lib.utils.caption import Caption
-
-from lib.data.encode.encoding import encoder
-
-
-def compute_on_dataset(model, data_loader, cap, device, query):
- model.eval()
- results_dict = defaultdict(list)
- print("query: ", query)
- caption = query
- cap.append(caption)
- caption = encoder(caption)
- caption = Caption([torch.tensor(caption)])
- for batch in tqdm(data_loader):
- images, captions, image_ids = batch
- images = images.to(device)
- captions = [caption.to(device)]
- with torch.no_grad():
- output = model(images, captions)
- for result in output:
- for img_id, pred in zip(image_ids, result):
- results_dict[img_id].append(pred)
- return results_dict, cap
-
-
-def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
- all_predictions = all_gather(predictions_per_gpu)
- if not is_main_process():
- return
- # merge the list of dicts
- predictions = {}
- for p in all_predictions:
- predictions.update(p)
- # convert a dict where the key is the index in a list
- image_ids = list(sorted(predictions.keys()))
- if len(image_ids) != image_ids[-1] + 1:
- logger = logging.getLogger("PersonSearch.inference")
- logger.warning(
- "Number of images that were gathered from multiple processes is not "
- "a contiguous set. Some images might be missing from the evaluation"
- )
- return predictions
-
-
-def inference(
- model,
- data_loader,
- dataset_name="cuhkpedes-test",
- device="cuda",
- output_folder="",
- save_data=True,
- rerank=True,
- search_num=0,
- query ="",
- save_folder = "./output/output.json"
-):
- logger = logging.getLogger("PersonSearch.inference")
- dataset = data_loader.dataset
- logger.info(
- "Start evaluation on {} dataset({} images).".format(dataset_name, len(dataset))
- )
-
- predictions = None
- if not os.path.exists(os.path.join(output_folder, "inference_data.npz")):
- # convert to a torch.device for efficiency
- device = torch.device(device)
- num_devices = (
- torch.distributed.get_world_size()
- if torch.distributed.is_initialized()
- else 1
- )
- start_time = time.time()
-
- predictions, cap = compute_on_dataset(model, data_loader, [], device,query)
- # wait for all processes to complete before measuring the time
- synchronize()
- total_time = time.time() - start_time
- total_time_str = str(datetime.timedelta(seconds=total_time))
- logger.info(
- "Total inference time: {} ({} s / img per device, on {} devices)".format(
- total_time_str, total_time * num_devices / len(dataset), num_devices
- )
- )
- predictions = _accumulate_predictions_from_multiple_gpus(predictions)
-
- if not is_main_process():
- return
-
- return evaluation(
- dataset=dataset,
- predictions=predictions,
- output_folder=output_folder,
- save_data=save_data,
- rerank=rerank,
- topk=[1, 5, 10],
- cap=cap,
- search_num=search_num,
- save_folder = save_folder
- )
+import datetime
+import logging
+import os
+import time
+from collections import defaultdict
+
+import torch
+from tqdm import tqdm
+
+from lib.data.metrics import evaluation
+from lib.utils.comm import all_gather, is_main_process, synchronize
+
+from lib.utils.caption import Caption
+
+from lib.data.encode.encoding import encoder
+
+
+def compute_on_dataset(model, data_loader, cap, device, query):
+ model.eval()
+ results_dict = defaultdict(list)
+ print("query: ", query)
+ caption = query
+ cap.append(caption)
+ caption = encoder(caption)
+ caption = Caption([torch.tensor(caption)])
+ for batch in tqdm(data_loader):
+ images, captions, image_ids = batch
+ images = images.to(device)
+ captions = [caption.to(device)]
+ with torch.no_grad():
+ output = model(images, captions)
+ for result in output:
+ for img_id, pred in zip(image_ids, result):
+ results_dict[img_id].append(pred)
+ return results_dict, cap
+
+
+def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
+ all_predictions = all_gather(predictions_per_gpu)
+ if not is_main_process():
+ return
+ # merge the list of dicts
+ predictions = {}
+ for p in all_predictions:
+ predictions.update(p)
+ # convert a dict where the key is the index in a list
+ image_ids = list(sorted(predictions.keys()))
+ if len(image_ids) != image_ids[-1] + 1:
+ logger = logging.getLogger("PersonSearch.inference")
+ logger.warning(
+ "Number of images that were gathered from multiple processes is not "
+ "a contiguous set. Some images might be missing from the evaluation"
+ )
+ return predictions
+
+
+def inference(
+ model,
+ data_loader,
+ dataset_name="cuhkpedes-test",
+ device="cuda",
+ output_folder="",
+ save_data=True,
+ rerank=True,
+ search_num=0,
+ query ="",
+ save_folder = "./output/output.json"
+):
+ logger = logging.getLogger("PersonSearch.inference")
+ dataset = data_loader.dataset
+ logger.info(
+ "Start evaluation on {} dataset({} images).".format(dataset_name, len(dataset))
+ )
+
+ predictions = None
+ if not os.path.exists(os.path.join(output_folder, "inference_data.npz")):
+ # convert to a torch.device for efficiency
+ device = torch.device(device)
+ num_devices = (
+ torch.distributed.get_world_size()
+ if torch.distributed.is_initialized()
+ else 1
+ )
+ start_time = time.time()
+
+ predictions, cap = compute_on_dataset(model, data_loader, [], device,query)
+ # wait for all processes to complete before measuring the time
+ synchronize()
+ total_time = time.time() - start_time
+ total_time_str = str(datetime.timedelta(seconds=total_time))
+ logger.info(
+ "Total inference time: {} ({} s / img per device, on {} devices)".format(
+ total_time_str, total_time * num_devices / len(dataset), num_devices
+ )
+ )
+ predictions = _accumulate_predictions_from_multiple_gpus(predictions)
+
+ if not is_main_process():
+ return
+
+ return evaluation(
+ dataset=dataset,
+ predictions=predictions,
+ output_folder=output_folder,
+ save_data=save_data,
+ rerank=rerank,
+ topk=[1, 5, 10],
+ cap=cap,
+ search_num=search_num,
+ save_folder = save_folder
+ )
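
inference() encodes the lone text query once, scores it against every gallery batch, and hands the predictions to evaluation(), which writes save_folder as JSON: element 0 is the raw query string, the rest are {img_path, Similarity} entries in descending similarity. A sketch of consuming that file, assuming the default ./output/output.json:

```python
# Read back the search result written by evaluation().
import json

with open("./output/output.json", encoding="utf-8") as f:
    results = json.load(f)

query, hits = results[0], results[1:]
print("query:", query)
for hit in hits:
    print(f'{hit["Similarity"]:.4f}  {hit["img_path"]}')
```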
diff --git a/ai/requirements.txt b/ai/requirements.txt
old mode 100755
new mode 100644
diff --git a/ai/yolov5-crowdhuman/data/scripts/get_coco.sh b/ai/yolov5-crowdhuman/data/scripts/get_coco.sh
old mode 100755
new mode 100644
diff --git a/ai/yolov5-crowdhuman/test/20210905-162400.mp4 b/ai/yolov5-crowdhuman/test/20210905-162400.mp4
old mode 100755
new mode 100644
diff --git a/ai/yolov5_crowdhuman/.gitignore b/ai/yolov5_crowdhuman/.gitignore
old mode 100755
new mode 100644
diff --git a/ai/yolov5_crowdhuman/README.md b/ai/yolov5_crowdhuman/README.md
old mode 100755
new mode 100644
diff --git a/ai/yolov5_crowdhuman/utils/datasets.py b/ai/yolov5_crowdhuman/utils/datasets.py
old mode 100755
new mode 100644
diff --git a/ai/yolov5_crowdhuman/utils/general.py b/ai/yolov5_crowdhuman/utils/general.py
old mode 100755
new mode 100644
diff --git a/ai/yolov5_crowdhuman/weights/download_weights.sh b/ai/yolov5_crowdhuman/weights/download_weights.sh
old mode 100755
new mode 100644
diff --git "a/docs/\354\244\221\352\260\204\353\263\264\352\263\240\354\204\234-Pro_bee.pdf" "b/docs/\354\244\221\352\260\204\353\263\264\352\263\240\354\204\234-Pro_bee.pdf"
deleted file mode 100644
index 24a5b04962..0000000000
Binary files "a/docs/\354\244\221\352\260\204\353\263\264\352\263\240\354\204\234-Pro_bee.pdf" and /dev/null differ
diff --git "a/docs/\354\272\241\354\212\244\355\206\244_\354\244\221\352\260\204\353\260\234\355\221\234 14\355\214\200-\354\265\234\354\242\205.pdf" "b/docs/\354\272\241\354\212\244\355\206\244_\354\244\221\352\260\204\353\260\234\355\221\234 14\355\214\200-\354\265\234\354\242\205.pdf"
deleted file mode 100644
index 20463c5d89..0000000000
Binary files "a/docs/\354\272\241\354\212\244\355\206\244_\354\244\221\352\260\204\353\260\234\355\221\234 14\355\214\200-\354\265\234\354\242\205.pdf" and /dev/null differ
diff --git a/frontend/.env b/frontend/.env
deleted file mode 100644
index 8cf489dda2..0000000000
--- a/frontend/.env
+++ /dev/null
@@ -1 +0,0 @@
-REACT_APP_API_ROOT="http://3.38.64.92:8080"
diff --git a/frontend/.gitignore b/frontend/.gitignore
index 4d29575de8..f21726c760 100644
--- a/frontend/.gitignore
+++ b/frontend/.gitignore
@@ -17,6 +17,7 @@
.env.development.local
.env.test.local
.env.production.local
+.env
npm-debug.log*
yarn-debug.log*
diff --git a/frontend/public/index.html b/frontend/public/index.html
index 0c54fd77d1..23659f23c8 100644
--- a/frontend/public/index.html
+++ b/frontend/public/index.html
@@ -19,7 +19,7 @@
-->
+ src="//dapi.kakao.com/v2/maps/sdk.js?appkey=%REACT_APP_KAKAOMAP_KEY%&libraries=services,clusterer">
React App
diff --git a/frontend/src/assets/icons/Marker_d.svg b/frontend/src/assets/icons/Marker_d.svg
new file mode 100644
index 0000000000..1d6f3637dc
--- /dev/null
+++ b/frontend/src/assets/icons/Marker_d.svg
@@ -0,0 +1,10 @@
+
+
+
+
+
+
+
+
+
+
diff --git a/frontend/src/assets/icons/cctvMarker.svg b/frontend/src/assets/icons/cctvMarker.svg
new file mode 100644
index 0000000000..ddacc32a1a
--- /dev/null
+++ b/frontend/src/assets/icons/cctvMarker.svg
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/frontend/src/assets/icons/cctvMarker_b.svg b/frontend/src/assets/icons/cctvMarker_b.svg
new file mode 100644
index 0000000000..bfc5cc0b45
--- /dev/null
+++ b/frontend/src/assets/icons/cctvMarker_b.svg
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/frontend/src/assets/icons/cctvMarker_d.svg b/frontend/src/assets/icons/cctvMarker_d.svg
new file mode 100644
index 0000000000..698b831c4d
--- /dev/null
+++ b/frontend/src/assets/icons/cctvMarker_d.svg
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/frontend/src/assets/icons/cctvMarker_g.svg b/frontend/src/assets/icons/cctvMarker_g.svg
new file mode 100644
index 0000000000..87bc193203
--- /dev/null
+++ b/frontend/src/assets/icons/cctvMarker_g.svg
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/frontend/src/assets/icons/cctvMarker_y.svg b/frontend/src/assets/icons/cctvMarker_y.svg
new file mode 100644
index 0000000000..99464849e6
--- /dev/null
+++ b/frontend/src/assets/icons/cctvMarker_y.svg
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/frontend/src/assets/icons/cctvMarker_yy.svg b/frontend/src/assets/icons/cctvMarker_yy.svg
new file mode 100644
index 0000000000..7333f7788e
--- /dev/null
+++ b/frontend/src/assets/icons/cctvMarker_yy.svg
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/frontend/src/assets/icons/centerMarker.svg b/frontend/src/assets/icons/centerMarker.svg
new file mode 100644
index 0000000000..780d8f29cf
--- /dev/null
+++ b/frontend/src/assets/icons/centerMarker.svg
@@ -0,0 +1,4 @@
+
+
+
+
diff --git a/frontend/src/assets/icons/locationMarker.svg b/frontend/src/assets/icons/locationMarker.svg
new file mode 100644
index 0000000000..817bce8275
--- /dev/null
+++ b/frontend/src/assets/icons/locationMarker.svg
@@ -0,0 +1,3 @@
+
+
+
diff --git a/frontend/src/assets/icons/rangeMarker_activate.svg b/frontend/src/assets/icons/rangeMarker_activate.svg
new file mode 100644
index 0000000000..dfe91f7a74
--- /dev/null
+++ b/frontend/src/assets/icons/rangeMarker_activate.svg
@@ -0,0 +1,3 @@
+
+
+
diff --git a/frontend/src/assets/icons/rangeMarker_disabled.svg b/frontend/src/assets/icons/rangeMarker_disabled.svg
new file mode 100644
index 0000000000..7726f782f3
--- /dev/null
+++ b/frontend/src/assets/icons/rangeMarker_disabled.svg
@@ -0,0 +1,3 @@
+
+
+
diff --git a/frontend/src/components/addMisingPerson/IntelligentSearchInfo.js b/frontend/src/components/addMisingPerson/IntelligentSearchInfo.js
index d50169da8b..c83bd42eec 100644
--- a/frontend/src/components/addMisingPerson/IntelligentSearchInfo.js
+++ b/frontend/src/components/addMisingPerson/IntelligentSearchInfo.js
@@ -22,7 +22,7 @@ const rangeConfig = {
},
],
};
-export const IntelligentSearchInfo = ({ form }) => {
+export const IntelligentSearchInfo = ({ form, getLocation }) => {
return (
@@ -35,7 +35,7 @@ export const IntelligentSearchInfo = ({ form }) => {
/>
-
+
);
diff --git a/frontend/src/components/common/SearchBox.js b/frontend/src/components/common/SearchBox.js
index b017b043dd..81fc2264ee 100644
--- a/frontend/src/components/common/SearchBox.js
+++ b/frontend/src/components/common/SearchBox.js
@@ -1,14 +1,28 @@
+/*global kakao*/
import styled from "styled-components";
import { Input, Modal } from "antd";
import { SearchOutlined } from "@ant-design/icons";
import DaumPostcode from "react-daum-postcode";
-import { useState } from "react";
+import { useEffect, useRef, useState } from "react";
+import { debounce } from "lodash";
+
+import { Circle, Map, MapMarker } from "react-kakao-maps-sdk";
// road-name address search
-export const SeacrchBox = ({ title, form, name }) => {
+export const SeacrchBox = ({ title, form, name, getLocation }) => {
+ const mapRef = useRef();
const [openPostcode, setOpenPostcode] = useState(false);
const [location, setLocation] = useState("");
+ const [markerPosition, setMarkerPosition] = useState({});
+
+ useEffect(() => {
+ console.log("selectAddress", location);
+ if (location) {
+ handleGeocoder();
+ }
+ }, [location]);
+
const handle = {
    // button click event
clickButton: () => {
@@ -19,10 +33,36 @@ export const SeacrchBox = ({ title, form, name }) => {
selectAddress: (data) => {
setLocation(data.address);
      form.setFieldsValue({ [name]: data.address }); // set the address directly on the Form.Item
+ console.log("selectAddress", data.address);
+ },
+
+    // selection confirmed (OK button) event
+ clickOK: () => {
+ getLocation(markerPosition);
setOpenPostcode(false);
},
};
+ const handleGeocoder = () => {
+ const geocoder = new kakao.maps.services.Geocoder();
+ var callback = function (result, status) {
+ if (status === kakao.maps.services.Status.OK) {
+ console.log(result);
+ setMarkerPosition({
+ lat: result[0].y,
+ lng: result[0].x,
+ });
+ const map = mapRef.current;
+ if (map) {
+ console.log("markerPosition", markerPosition);
+ map.setCenter(new kakao.maps.LatLng(result[0].y, result[0].x));
+ map.setLevel(6);
+ }
+ }
+ };
+ geocoder.addressSearch(location, callback);
+ };
+
return (
@@ -31,19 +71,46 @@ export const SeacrchBox = ({ title, form, name }) => {
- setOpenPostcode(false)}
+ onOk={handle.clickOK}
onCancel={() => setOpenPostcode(false)}>
-
-
+
+
+
+
+
+
+
+ {markerPosition.lat && markerPosition.lng && (
+ <>
+
+
+ >
+ )}
+
+
+
+
);
};
@@ -74,3 +141,21 @@ const SearchIconWrapper = styled.div`
padding: 0.9rem;
border-left: 0.1rem solid #d9d9d9;
`;
+
+const ModalWrapper = styled(Modal)``;
+const ModalContent = styled.div`
+ display: flex;
+ flex-direction: row;
+ width: 100%;
+ height: 100%;
+`;
+
+const DaumPostcodeWrapper = styled.div`
+ width: 50%;
+ height: 100%;
+`;
+
+const MapWrapper = styled.div`
+ width: 50%;
+ height: 100%;
+`;
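
handleGeocoder() above resolves the chosen street address to coordinates with the Kakao JS SDK and recenters the map on the result. For reference, a rough server-side equivalent via Kakao's REST local-search API; the key is a placeholder and the endpoint/field names are assumptions based on Kakao's documented local API (note x is longitude and y is latitude, as in the JS callback):

```python
import requests

KAKAO_KEY = "REPLACE_ME"  # hypothetical REST API key
resp = requests.get(
    "https://dapi.kakao.com/v2/local/search/address.json",
    params={"query": "서울 성북구 정릉로 77"},  # default address from AddMissingPersonPage.js
    headers={"Authorization": f"KakaoAK {KAKAO_KEY}"},
)
doc = resp.json()["documents"][0]
marker = {"lat": float(doc["y"]), "lng": float(doc["x"])}
print(marker)
```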
diff --git a/frontend/src/components/missingPersonReport/ReportMain.js b/frontend/src/components/missingPersonReport/ReportMain.js
index 01ee21df4d..ec3368fe7b 100644
--- a/frontend/src/components/missingPersonReport/ReportMain.js
+++ b/frontend/src/components/missingPersonReport/ReportMain.js
@@ -7,12 +7,23 @@ import { IntelligentReportList } from "./IntelligentReportList";
import { ReportTabs } from "./ReportTabs";
import { ReportResultImages } from "./ReportResultImages";
-export const ReportMain = ({ data, step, history, step1data, onClick }) => {
+export const ReportMain = ({
+ data,
+ step,
+ history,
+ firstdata,
+ betweenData,
+ secondData,
+ onClick,
+ firstCCTVData,
+ betweenCCTVData,
+ secondCCTVData,
+}) => {
//console.log("ReportMain_id: " + id.id);
console.log("ReportMain_data: ", data);
console.log("ReportMain_step: ", step);
console.log("ReportMain_history: ", history);
- console.log("ReportMain_step1data: ", step1data);
+ console.log("ReportMain_step1data: ", firstdata);
return (
@@ -36,11 +47,19 @@ export const ReportMain = ({ data, step, history, step1data, onClick }) => {
-
+
-
+
diff --git a/frontend/src/components/missingPersonReport/ReportMap.js b/frontend/src/components/missingPersonReport/ReportMap.js
index 5625c3cffd..f509af37db 100644
--- a/frontend/src/components/missingPersonReport/ReportMap.js
+++ b/frontend/src/components/missingPersonReport/ReportMap.js
@@ -1,9 +1,343 @@
+/*global kakao*/
+
import styled from "styled-components";
-import { Map, MapMarker } from "react-kakao-maps-sdk";
-export const ReportMap = () => {
+import { useEffect, useRef, useState } from "react";
+import { Circle, CustomOverlayMap, Map, MapMarker, useMap } from "react-kakao-maps-sdk";
+import { Button, Divider, FloatButton, List, Segmented, Switch, Tooltip } from "antd";
+import Icon, { CloseOutlined, PlusOutlined, MinusOutlined } from "@ant-design/icons";
+import CenterMarker from "../../assets/icons/centerMarker.svg";
+import ActivateRangeMarker from "../../assets/icons/rangeMarker_activate.svg";
+import DisabledRangeMarker from "../../assets/icons/rangeMarker_disabled.svg";
+import LocationMarker from "../../assets/icons/locationMarker.svg";
+import CCTVMarker from "../../assets/icons/cctvMarker_yy.svg";
+import CCTVMarkerBlue from "../../assets/icons/cctvMarker_b.svg";
+import CCTVMarkerDisabled from "../../assets/icons/Marker_d.svg";
+
+/* Missing-person report map component */
+export const ReportMap = ({ start, end, searchRange, step, firstData, betweenData, secondData }) => {
+ const mapRef = useRef();
+ const [location, setLocation] = useState("");
+
+ /*Overlay filter*/
+ const [showStep, setShowStep] = useState("first");
+ const [showRange, setShowRange] = useState(true);
+
+ /*Marker State*/
+ const [firstState, setFirstState] = useState(true);
+ const [betweenState, setBetweenState] = useState(false);
+ const [secondState, setSecondState] = useState(false);
+
+ /*position */
+ const [rangePosition, setRangePosition] = useState({
+ lat: 37.410826,
+ lng: 126.894317,
+ });
+ const [firstPosition, setFirstPosition] = useState([]);
+ const [betweenPosition, setBetweenPosition] = useState([]);
+ const [secondPosition, setSecondPosition] = useState([]);
+ const [filterPosition, setFilterPosition] = useState([]);
+
+ useEffect(() => {
+ console.log("ReportMap", firstData);
+ if (searchRange) {
+ setRangePosition({
+ lat: searchRange.latitude,
+ lng: searchRange.longitude,
+ });
+ handleCenter();
+ }
+ if (firstData) {
+ setFirstPosition(firstData);
+ setFirstState(true);
+ }
+ if (betweenData) {
+ console.log("betweenData", betweenData);
+ setBetweenPosition(betweenData);
+ const excludedIds = new Set(betweenData.map((item) => item.id));
+ const filteredMarkers = firstData.filter((marker) => !excludedIds.has(marker.id));
+ setFilterPosition(filteredMarkers);
+ setBetweenState(true);
+ }
+ if (secondData) {
+ console.log("secondData", secondData);
+ setSecondPosition(secondData);
+ setSecondState(true);
+ }
+ }, [searchRange, firstData, betweenData, secondData]);
+
+ useEffect(() => {
+ console.log("step1Position", firstPosition);
+ handleLocation();
+ }, [rangePosition]);
+
+ const handleCenter = () => {
+ const map = mapRef.current;
+ if (map) {
+ console.log("markerPosition", rangePosition);
+ map.setCenter(new kakao.maps.LatLng(rangePosition.lat, rangePosition.lng));
+ map.setLevel(4);
+ }
+ console.log("handleCenter", rangePosition);
+ };
+
+ const handleLevel = (type) => {
+ const map = mapRef.current;
+ if (!map) return;
+
+ if (type === "increase") {
+ map.setLevel(map.getLevel() + 1);
+    } else if (type === "decrease") {
+      map.setLevel(map.getLevel() - 1);
+    }
+ };
+
+ const handleLocation = () => {
+ const geocoder = new kakao.maps.services.Geocoder();
+ var coord = new kakao.maps.LatLng(rangePosition.lat, rangePosition.lng);
+
+ console.log("handleLocation", rangePosition);
+ var callback = function (result, status) {
+ if (status === kakao.maps.services.Status.OK) {
+ console.log("handleLocation", result);
+ if (result[0].road_address) {
+ setLocation(result[0].road_address.address_name);
+ }
+ console.log("location", location);
+ }
+ };
+ geocoder.coord2Address(coord.getLng(), coord.getLat(), callback);
+ };
+
+ const Item = ({ item }) => {
+ return (
+
+
+
+ );
+ };
+
+ const EventMarkerContainer = ({ position, images, markerStyle }) => {
+ const map = useMap();
+ const [isVisible, setIsVisible] = useState(false);
+
+ return (
+ <>
+ {
+ map.panTo(marker.getPosition());
+ setIsVisible(true);
+ }}
+ image={{
+          src: markerStyle, // URL of the marker image
+ size: {
+ width: 30,
+ height: 40,
+          }, // size of the marker image
+ }}
+ />
+ {isVisible && (
+
+
+
+ 이미지 목록
+ } onClick={() => setIsVisible(false)}>
+
+ 탐색 결과는 최신순으로 정렬됩니다.
+
+ {
+ console.log(page);
+ },
+ pageSize: 3,
+ size: "small",
+ position: "bottom",
+ }}
+ dataSource={images}
+ renderItem={(item) => }
+ />
+
+
+ )}
+ >
+ );
+ };
+
+ const FilterMarkerContainer = ({ marker }) => {
+ const [isVisible, setIsVisible] = useState(false);
+ return (
+ <>
+ setIsVisible(true)}
+ // onMouseOut={() => setIsVisible(false)}
+ image={{
+ src: CCTVMarkerDisabled,
+ size: {
+ width: 18,
+ height: 24,
+ },
+ }}
+ />
+ {/* {isVisible && (
+
+
+ 1차 탐색 결과 중 선별된 이미지가 없는 cctv입니다.
+
+
+ )} */}
+ >
+ );
+ };
+
return (
-
+
+ {showRange && rangePosition.lat && rangePosition.lng && (
+ <>
+
+
+ >
+ )}
+ {showStep == "first" &&
+ firstState &&
+ firstPosition &&
+ firstPosition.map(
+ (position) => (
+ console.log("position", position),
+ (
+
+ )
+ ),
+ )}
+
+ {showStep == "between" && betweenState && betweenPosition && (
+ <>
+ {betweenPosition.map(
+ (position) => (
+ console.log("position", position),
+ (
+
+ )
+ ),
+ )}
+ {filterPosition &&
+ filterPosition.map((position) => )}
+ >
+ )}
+ {showStep == "second" &&
+ secondState &&
+ secondPosition &&
+ secondPosition.map(
+ (position) => (
+ console.log("position", position),
+ (
+
+ )
+ ),
+ )}
+
+
+ {
+ handleCenter();
+ }}>
+
+ {location}
+
+ {
+ setShowStep(value);
+ handleCenter();
+ }}
+ />
+
+
+
+ {
+ setShowRange(!showRange);
+ }}>
+ {showRange ? : }
+
+
+
+ handleLevel("decrease")}>
+
+ {/* */}
+
+
+ handleLevel("increase")}>
+ {/* */}
+
+
+
+
);
};
@@ -13,7 +347,196 @@ const StReportMap = styled.div`
flex-direction: column;
flex: 1;
flex-grow: 1;
+ position: relative;
+ width: 100%;
+ height: 100%;
+ /* max-width: 90rem; */
+`;
+
+const ContentsContainer = styled.div`
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+ width: 37.3rem;
+ height: 30rem;
+ padding: 1rem;
+ background-color: white;
+ border-radius: 1rem;
+
+ overflow-y: hidden;
+ cursor: pointer;
+`;
+
+const TopContainer = styled.div`
+ display: flex;
+ flex-direction: row;
+ justify-content: space-between;
+ align-items: center;
+ width: 100%;
+ p {
+ font-size: 1.5rem;
+ }
+`;
+const ExplainText = styled.div`
+ display: flex;
+ justify-content: start;
+ width: 100%;
+ font-size: 1.2rem;
+ color: #8b8b8b;
+`;
+
+const ImageList = styled(List)`
width: 100%;
height: 100%;
- max-width: 90rem;
+`;
+
+const ItemContainer = styled.div`
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ margin-top: 0.9rem;
+
+ p {
+ @media all and (max-width: 1537px) {
+ font-size: 1rem;
+ }
+ font-size: 1.3rem;
+ }
+`;
+
+const ItemImage = styled.img`
+ /* width: 14.4rem;
+ height: 23.8rem; */
+ width: 11.52rem;
+ height: 19.04rem;
+ margin-right: 0.5rem;
+ @media all and (max-width: 1537px) {
+ width: 7.2rem;
+ height: 11.9rem;
+ }
+`;
+
+const OverlayTopContainer = styled.div`
+ display: flex;
+ flex-direction: row;
+
+ position: absolute;
+ z-index: 1;
+ top: 1rem;
+ left: 1rem;
+`;
+
+const OverlaySideContainer = styled.div`
+ display: flex;
+ flex-direction: column;
+
+ position: absolute;
+ z-index: 1;
+ top: 10rem;
+ right: 1rem;
+`;
+
+const OverlayTooltip = styled(Tooltip)``;
+
+const OverlayButtonStyle = `
+ display: flex;
+ margin-bottom: 1rem;
+ border-radius: 0.4rem;
+ background-color: white;
+ box-shadow: 0px 0px 5px 0px rgba(0, 0, 0, 0.1);`;
+
+const SingleButton = styled.div`
+ ${OverlayButtonStyle}
+
+ width: 3.5rem;
+ height: 3.5rem;
+ justify-content: center;
+`;
+
+const GroupButton = styled.div`
+ ${OverlayButtonStyle}
+
+ width: 3.5rem;
+ height: 7rem;
+ flex-direction: column;
+ justify-content: space-between;
+`;
+
+const ButtonWrapper = styled.div`
+ display: flex;
+ flex-direction: row;
+ justify-content: center;
+ width: 100%;
+ height: 3.5rem;
+`;
+
+const OverlayLocationWrapper = styled.div`
+ ${OverlayButtonStyle}
+ flex-direction:row;
+ justify-content: start;
+ align-items: center;
+ /* width: 25rem; */
+ height: 4rem;
+ padding: 0 1.2rem 0 1rem;
+ margin-right: 1rem;
+ opacity: 0.9;
+
+ cursor: pointer;
+ &:hover {
+ color: rgba(0, 0, 0, 0.88);
+ opacity: 1;
+ }
+
+ img {
+ width: 2rem;
+ margin-right: 0.2rem;
+ }
+ p {
+ color: #555555;
+ }
+`;
+
+const OverlaySegmented = styled(Segmented)`
+ height: 4rem;
+ &.ant-segmented {
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ height: 4rem;
+ background-color: white;
+ }
+
+ &.ant-segmented .ant-segmented-item-label {
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ height: 3.4rem;
+ }
+
+ &.ant-segmented .ant-segmented-item-selected {
+ background-color: #1890ff;
+ color: white;
+ }
+`;
+
+const IconWrapper = styled(Icon)`
+ color: #555555;
+ &:focus {
+ color: rgba(0, 0, 0, 0.5);
+ }
+ &:active {
+ color: rgba(0, 0, 0, 0.5);
+ }
+`;
+
+const CustomInfoWindow = styled.div`
+ display: flex;
+ flex-wrap: wrap;
+ width: 20rem;
+ height: 4rem;
+ border-radius: 1rem;
+
+ background-color: white;
+ opacity: 0.9;
`;
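
In the between step, ReportMap.js dims the first-step CCTVs that produced no selected images: it collects the ids present in betweenData into a Set and filters them out of firstData. The same filtering, reduced to a few lines of Python on toy data:

```python
# Markers picked in the between step are excluded from the first-step
# layer; whatever remains is rendered with the disabled marker icon.
between = [{"id": 3}, {"id": 7}]
first = [{"id": 1}, {"id": 3}, {"id": 5}, {"id": 7}]

excluded = {m["id"] for m in between}
dimmed = [m for m in first if m["id"] not in excluded]
print(dimmed)  # [{'id': 1}, {'id': 5}]
```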
diff --git a/frontend/src/components/missingPersonReport/ReportTabs.js b/frontend/src/components/missingPersonReport/ReportTabs.js
index 07ae266002..3f01dc55cc 100644
--- a/frontend/src/components/missingPersonReport/ReportTabs.js
+++ b/frontend/src/components/missingPersonReport/ReportTabs.js
@@ -27,14 +27,14 @@ const operations = (
/>
);
-export const ReportTabs = ({ id, step1data }) => {
+export const ReportTabs = ({ id, firstdata, betweenData, secondData }) => {
console.log("ReportTabs_id: " + id);
const [data1, setData1] = useState([]);
  const [dataBetween, setdataBetween] = useState([]); /* TODO: fetch the selected images */
const [data2, setData2] = useState([]);
useEffect(() => {
- setData1(step1data);
- }, [step1data]);
+ setData1(firstdata);
+ }, [firstdata]);
console.log("data1", data1);
console.log("data2", data2);
const items = [
diff --git a/frontend/src/core/api/index.js b/frontend/src/core/api/index.js
index 16c048665f..b0b235ab9a 100644
--- a/frontend/src/core/api/index.js
+++ b/frontend/src/core/api/index.js
@@ -151,6 +151,24 @@ export const getBetweenResultImg = async (page, id) => {
return data;
};
+/* CCTV locations for the search results (GET) */
+export const getCCTVResult = async (id, step) => {
+ const data = axios
+ .get(`${process.env.REACT_APP_API_ROOT}/api/missing-people/${id}/mapposition?step=${step}`, {
+ headers: { "Content-Type": "application/json" },
+ })
+ .then(function (response) {
+ return response.data;
+ })
+ .catch(function (e) {
+      // handle failure
+ console.error(e);
+ console.log(e.response.data);
+ alert("cctv 위치 가져오기 실패. 재시도해주세요.");
+ });
+ return data;
+};
+
/* Guardian main - missing person info (GET) */
export const getGuardianMissingPerson = async () => {
const data = axios
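
getCCTVResult() introduces a GET against /api/missing-people/{id}/mapposition with a step query parameter ("first", "between", or "second", matching fetchCCTVData in MissingPersonReportPage.js). A hedged sketch of calling the endpoint directly; the base URL is a placeholder and the response schema is an assumption (the frontend only reads res.data as a list of CCTV marker positions):

```python
import requests

BASE = "http://localhost:8080"  # placeholder for REACT_APP_API_ROOT
resp = requests.get(
    f"{BASE}/api/missing-people/1/mapposition",
    params={"step": "first"},  # "first" | "between" | "second"
    headers={"Content-Type": "application/json"},
)
resp.raise_for_status()
print(resp.json())  # expected: CCTV positions for the requested step
```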
diff --git a/frontend/src/pages/AddMissingPersonPage.js b/frontend/src/pages/AddMissingPersonPage.js
index 0b10e795ff..b77b8d80e5 100644
--- a/frontend/src/pages/AddMissingPersonPage.js
+++ b/frontend/src/pages/AddMissingPersonPage.js
@@ -7,6 +7,7 @@ import { WearingInfo } from "../components/addMisingPerson/WearingInfo";
import { postMissingPerson } from "../core/api";
import { useNavigate } from "react-router-dom";
import { AnnotationInfo } from "../components/addMisingPerson/AnnotationInfo";
+import { useState } from "react";
const validateMessages = {
required: "필수 항목입니다!",
@@ -21,7 +22,15 @@ const validateMessages = {
function AddMissingPersonPage() {
const [form] = Form.useForm();
+ const [latlng, setLatlng] = useState({});
+
const navigate = useNavigate();
+
+ const getLocation = (latlng) => {
+ setLatlng(latlng);
+ console.log("latlng", latlng);
+ };
+
const onFinish = (fieldsValue) => {
console.log("gender", fieldsValue["user"]["gender"]);
const values = {
@@ -49,8 +58,8 @@ function AddMissingPersonPage() {
fieldsValue["searchPeriod"][0].format("YYYY-MM-DD") + "T" + fieldsValue["searchPeriod"][0].format("HH:mm"),
endTime:
fieldsValue["searchPeriod"][1].format("YYYY-MM-DD") + "T" + fieldsValue["searchPeriod"][1].format("HH:mm"),
- latitude: 37.610767,
- longitude: 126.996967,
+ latitude: latlng["lat"],
+ longitude: latlng["lng"],
locationAddress: fieldsValue["searchLocation"] || "서울 성북구 정릉로 77",
shoesColor: "빨강",
missingPeopleType: "아동",
@@ -92,7 +101,7 @@ function AddMissingPersonPage() {
지능형 탐색 초기 정보 등록
-
+
diff --git a/frontend/src/pages/MissingPersonReportPage.js b/frontend/src/pages/MissingPersonReportPage.js
index 15bf4e8ef1..bf02c0981c 100644
--- a/frontend/src/pages/MissingPersonReportPage.js
+++ b/frontend/src/pages/MissingPersonReportPage.js
@@ -11,7 +11,14 @@ import { IntelligentSearchOption } from "../components/reportIntelligent/Intelli
import { IntelligentBasicInfo } from "../components/reportIntelligent/IntelligentBasicInfo";
import { IntelligentMap } from "../components/reportIntelligent/IntelligentMap";
import { IntelligentSearchResult } from "../components/reportIntelligent/IntelligentSearchResult";
-import { getMissingPerson, getMissingPeopleStep, getSearchHistoryList, getSearchResultImg } from "../core/api";
+import {
+ getMissingPerson,
+ getMissingPeopleStep,
+ getSearchHistoryList,
+ getSearchResultImg,
+ getBetweenResultImg,
+ getCCTVResult,
+} from "../core/api";
import { useLocation } from "react-router-dom";
import { ReportMain } from "../components/missingPersonReport/ReportMain";
import { ReportIntelligent } from "../components/reportIntelligent/ReportIntelligent";
@@ -19,7 +26,12 @@ function MissingPersonReportPage() {
const [missingPerson, setMissingPerson] = useState([]);
const [step, setStep] = useState([]);
const [searchHistoryList, setSearchHistoryList] = useState([]);
- const [step1data, setStep1data] = useState([]);
+ const [firstdata, setFirstdata] = useState([]);
+ const [betweenData, setBetweenData] = useState([]);
+ const [secondData, setSecondData] = useState([]);
+ const [firstCCTVData, setFirstCCTVData] = useState([]);
+ const [betweenCCTVData, setBetweenCCTVData] = useState([]);
+ const [secondCCTVData, setSecondCCTVData] = useState([]);
const [loading, setLoading] = useState(false);
const location = useLocation();
console.log("sssdfsadfsad location", location);
@@ -39,6 +51,8 @@ function MissingPersonReportPage() {
useEffect(() => {
console.log("useEffect", userId);
fetchData();
+ fetchResultData();
+ fetchCCTVData();
}, []);
const fetchData = () => {
@@ -75,9 +89,35 @@ function MissingPersonReportPage() {
getSearchHistoryList(userId).then((res) => {
setSearchHistoryList(res.data);
});
+ };
+
+ const fetchResultData = () => {
getSearchResultImg(1, userId, "first").then((res) => {
- console.log("step1data", res.data);
- setStep1data(res.data);
+ console.log("firstData", res.data);
+ setFirstdata(res.data);
+ });
+ getBetweenResultImg(1, userId).then((res) => {
+ console.log("betweenData", res.data);
+ setBetweenData(res.data);
+ });
+ getSearchResultImg(1, userId, "second").then((res) => {
+ console.log("secondData", res.data);
+ setSecondData(res.data);
+ });
+ };
+
+ const fetchCCTVData = () => {
+ getCCTVResult(userId, "first").then((res) => {
+ console.log("firstCCTVData", res.data);
+ setFirstCCTVData(res.data);
+ });
+ getCCTVResult(userId, "between").then((res) => {
+ console.log("betweenCCTVData", res.data);
+ setBetweenCCTVData(res.data);
+ });
+ getCCTVResult(userId, "second").then((res) => {
+ console.log("secondCCTVData", res.data);
+ setSecondCCTVData(res.data);
});
};
@@ -93,77 +133,21 @@ function MissingPersonReportPage() {
);
};
-  /* Missing-person report - main */
- // const ReportMain = () => {
- // return (
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- // );
- // };
-  /* Missing-person report - intelligent search */
- // const ReportIntelligent = () => {
- // return (
- //
- //
- //
- //
- // 지능형 탐색
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- //
- // );
- // };
return (
- {/*
- */}
diff --git a/server/gradlew b/server/gradlew
old mode 100755
new mode 100644
diff --git a/server/gradlew.bat b/server/gradlew.bat
index 6689b85bee..93e3f59f13 100644
--- a/server/gradlew.bat
+++ b/server/gradlew.bat
@@ -1,92 +1,92 @@
-@rem
-@rem Copyright 2015 the original author or authors.
-@rem
-@rem Licensed under the Apache License, Version 2.0 (the "License");
-@rem you may not use this file except in compliance with the License.
-@rem You may obtain a copy of the License at
-@rem
-@rem https://www.apache.org/licenses/LICENSE-2.0
-@rem
-@rem Unless required by applicable law or agreed to in writing, software
-@rem distributed under the License is distributed on an "AS IS" BASIS,
-@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-@rem See the License for the specific language governing permissions and
-@rem limitations under the License.
-@rem
-
-@if "%DEBUG%"=="" @echo off
-@rem ##########################################################################
-@rem
-@rem Gradle startup script for Windows
-@rem
-@rem ##########################################################################
-
-@rem Set local scope for the variables with windows NT shell
-if "%OS%"=="Windows_NT" setlocal
-
-set DIRNAME=%~dp0
-if "%DIRNAME%"=="" set DIRNAME=.
-@rem This is normally unused
-set APP_BASE_NAME=%~n0
-set APP_HOME=%DIRNAME%
-
-@rem Resolve any "." and ".." in APP_HOME to make it shorter.
-for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
-
-@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
-set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
-
-@rem Find java.exe
-if defined JAVA_HOME goto findJavaFromJavaHome
-
-set JAVA_EXE=java.exe
-%JAVA_EXE% -version >NUL 2>&1
-if %ERRORLEVEL% equ 0 goto execute
-
-echo.
-echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
-echo.
-echo Please set the JAVA_HOME variable in your environment to match the
-echo location of your Java installation.
-
-goto fail
-
-:findJavaFromJavaHome
-set JAVA_HOME=%JAVA_HOME:"=%
-set JAVA_EXE=%JAVA_HOME%/bin/java.exe
-
-if exist "%JAVA_EXE%" goto execute
-
-echo.
-echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
-echo.
-echo Please set the JAVA_HOME variable in your environment to match the
-echo location of your Java installation.
-
-goto fail
-
-:execute
-@rem Setup the command line
-
-set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
-
-
-@rem Execute Gradle
-"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
-
-:end
-@rem End local scope for the variables with windows NT shell
-if %ERRORLEVEL% equ 0 goto mainEnd
-
-:fail
-rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
-rem the _cmd.exe /c_ return code!
-set EXIT_CODE=%ERRORLEVEL%
-if %EXIT_CODE% equ 0 set EXIT_CODE=1
-if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE%
-exit /b %EXIT_CODE%
-
-:mainEnd
-if "%OS%"=="Windows_NT" endlocal
-
-:omega
+@rem
+@rem Copyright 2015 the original author or authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem https://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+
+@if "%DEBUG%"=="" @echo off
+@rem ##########################################################################
+@rem
+@rem Gradle startup script for Windows
+@rem
+@rem ##########################################################################
+
+@rem Set local scope for the variables with windows NT shell
+if "%OS%"=="Windows_NT" setlocal
+
+set DIRNAME=%~dp0
+if "%DIRNAME%"=="" set DIRNAME=.
+@rem This is normally unused
+set APP_BASE_NAME=%~n0
+set APP_HOME=%DIRNAME%
+
+@rem Resolve any "." and ".." in APP_HOME to make it shorter.
+for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
+
+@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
+
+@rem Find java.exe
+if defined JAVA_HOME goto findJavaFromJavaHome
+
+set JAVA_EXE=java.exe
+%JAVA_EXE% -version >NUL 2>&1
+if %ERRORLEVEL% equ 0 goto execute
+
+echo.
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:findJavaFromJavaHome
+set JAVA_HOME=%JAVA_HOME:"=%
+set JAVA_EXE=%JAVA_HOME%/bin/java.exe
+
+if exist "%JAVA_EXE%" goto execute
+
+echo.
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:execute
+@rem Setup the command line
+
+set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+
+
+@rem Execute Gradle
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
+
+:end
+@rem End local scope for the variables with windows NT shell
+if %ERRORLEVEL% equ 0 goto mainEnd
+
+:fail
+rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
+rem the _cmd.exe /c_ return code!
+set EXIT_CODE=%ERRORLEVEL%
+if %EXIT_CODE% equ 0 set EXIT_CODE=1
+if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE%
+exit /b %EXIT_CODE%
+
+:mainEnd
+if "%OS%"=="Windows_NT" endlocal
+
+:omega