Deep Learning for Cross-Scene Crowd Counting

"这篇论文是关于跨场景人群计数的研究,由Cong Zhang、Hongsheng Li、Xiaogang Wang和Xiaokang Yang等人在2015年的CVPR会议上发表。研究主要解决了在无需大量数据注解的情况下,如何在新的目标监控人群场景中进行准确的人群计数问题。" 在《Cross-scene Crowd Counting via Deep Convolutional Neural Networks》这篇论文中,作者们提出了一个深度卷积神经网络(CNN)模型,专门用于解决跨场景人群计数的挑战。传统的拥挤场景人数统计方法在面对未见过的新场景时,其性能显著下降。为了解决这个问题,他们设计了一种可切换的学习策略,该策略同时训练两个相关的目标:人群密度估计和人群总数预测,并在两者之间交替优化,以获得更好的局部最优解。 人群密度估计是通过CNN模型来实现的,它可以学习到不同场景下的人群特征,并生成表示人群密集程度的地图。而人群总数预测则基于这些密度图来进行,通过对密度图的全局分析,得出场景中的人数总和。这种结合了密度估计和总数预测的训练方式,使得模型在处理未见过的场景时具有更强的泛化能力。 为了应对未知的目标人群场景,论文还提出了一种适应性方法,这可能涉及到对新场景的快速适应或迁移学习策略,以减少对大量标注数据的依赖。这种方法对于实际应用中的监控系统尤其有价值,因为它能提高在不断变化的环境中的计数精度。 此外,论文可能还探讨了评估指标和实验设置,包括在多个公开数据集上的表现对比,以及与其他现有方法的性能比较。通过这些实验,作者证明了所提出的深度学习模型在跨场景人群计数任务上的优越性,并可能对未来的研究提供了新的方向和启示。 这篇论文在深度学习和计算机视觉领域中,为解决跨场景人群计数的难题提供了一个创新的解决方案,推动了拥挤场景理解技术的发展。

import copy
import numpy as np

def generateOwnCarRoute(service_time, model, sol):
    # Demands 0-15 are pickup nodes; node i + 16 is the matching delivery node.
    pickup_node = copy.deepcopy(model.demand_id_list[0:16])
    own_pickup_node = []
    own_delivery_node = []
    route = []
    sol.route_list = []
    depot = model.depot_dict['d1']
    vehicle_number = depot.depot_capacity
    departure = 0
    arrival = 0
    # Keep only the demands that are not served by crowdsourced couriers.
    for i in pickup_node:
        if i not in model.crowd_pickup_node:
            own_pickup_node.append(i)
            own_delivery_node.append(i + 16)
    # Greedy nearest-neighbour construction: one route per own vehicle.
    while vehicle_number > 0 and len(own_pickup_node) > 0:
        route.append(depot.depot_id)
        minIndex = np.argmin([model.distance_matrix[depot.depot_id, own_pickup_node[i]]
                              for i in range(0, len(own_pickup_node))])
        minnode = own_pickup_node[minIndex]
        route.append(minnode)
        arrival = model.time_matrix[depot.depot_id, minnode]
        departure = arrival + service_time
        route.append(own_delivery_node[minIndex])
        arrival = departure + model.time_matrix[minnode, own_delivery_node[minIndex]]
        departure += arrival + service_time
        last_node = own_delivery_node[minIndex]
        own_pickup_node.remove(minnode)
        own_delivery_node.remove(own_delivery_node[minIndex])
        # Extend the current route with further pickup/delivery pairs
        # as long as the time windows allow it.
        for j in own_pickup_node:
            next_minIndex = np.argmin([model.distance_matrix[last_node, j]])
            next_minnode = own_pickup_node[next_minIndex]
            arrival = departure + model.time_matrix[last_node, next_minnode]
            if arrival <= model.demand_dict[next_minnode].end_time and arrival <= depot.dend_time:
                route.append(next_minnode)
                departure = arrival + service_time
                route.append(own_delivery_node[next_minIndex])
                arrival = departure + model.time_matrix[next_minnode, own_delivery_node[next_minIndex]]
                departure += arrival + service_time
                last_node = own_delivery_node[next_minIndex]
                own_pickup_node.remove(next_minnode)
                own_delivery_node.remove(own_delivery_node[next_minIndex])
            else:
                continue
        route.append(depot.depot_id)
        sol.route_list.append(route)
        vehicle_number = vehicle_number - 1
        route = []
    print(sol.route_list)
    return sol.route_list

The problem with this code: vehicle_number can drop to 0 while own_pickup_node is still not empty, so some demands are never routed. How should that case be handled?
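
One hedged way to deal with the leftover demands (a sketch under assumptions, not a fix taken from the original code): once the while loop exits, anything still in own_pickup_node cannot be served by the depot's own vehicles, so it has to be handled explicitly, for example by appending the remaining pairs to the last route despite the vehicle limit, by handing them over to crowdsourced couriers (adding them to model.crowd_pickup_node), or by recording them as unserved so a later repair step can deal with them. The helper below illustrates the last, simplest option; the name handle_leftover_nodes and the attribute sol.unserved_nodes are hypothetical.

    # Hedged sketch: record demands that remain after all own vehicles are used.
    # `own_pickup_node` / `own_delivery_node` are the lists from generateOwnCarRoute;
    # `sol.unserved_nodes` is a hypothetical attribute, not part of the original model.
    def handle_leftover_nodes(sol, own_pickup_node, own_delivery_node):
        if len(own_pickup_node) == 0:
            return sol  # every pickup was routed, nothing to do
        # Keep the unserved pickup/delivery pairs so a later step can outsource
        # them (e.g., to crowdsourced couriers) or repair the solution.
        sol.unserved_nodes = list(zip(own_pickup_node, own_delivery_node))
        print(f"{len(own_pickup_node)} pickup nodes could not be assigned to own vehicles:",
              own_pickup_node)
        return sol

Calling this just before the return in generateOwnCarRoute (i.e. after the while loop) keeps the routing logic unchanged; alternatively, if the problem allows multiple trips per vehicle, the loop condition can be relaxed instead of capping routes at depot.depot_capacity.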


# Lane-detection demo: load a parsingNet model, run it over the test splits of
# CULane or Tusimple, and write the predicted lane points into .avi videos.
import torch, os, cv2
from model.model import parsingNet
from utils.common import merge_config
from utils.dist_utils import dist_print
import scipy.special, tqdm
import numpy as np
import torchvision.transforms as transforms
from data.dataset import LaneTestDataset
from data.constant import culane_row_anchor, tusimple_row_anchor

if __name__ == "__main__":
    torch.backends.cudnn.benchmark = True

    args, cfg = merge_config()

    dist_print('start testing...')
    assert cfg.backbone in ['18', '34', '50', '101', '152', '50next', '101next', '50wide', '101wide']

    # Number of row anchors depends on the dataset.
    if cfg.dataset == 'CULane':
        cls_num_per_lane = 18
    elif cfg.dataset == 'Tusimple':
        cls_num_per_lane = 56
    else:
        raise NotImplementedError

    net = parsingNet(pretrained=False, backbone=cfg.backbone,
                     cls_dim=(cfg.griding_num + 1, cls_num_per_lane, 4),
                     use_aux=False).cuda()  # we don't need auxiliary segmentation in testing

    # Load the checkpoint, stripping a possible 'module.' prefix left by DataParallel.
    state_dict = torch.load(cfg.test_model, map_location='cpu')['model']
    compatible_state_dict = {}
    for k, v in state_dict.items():
        if 'module.' in k:
            compatible_state_dict[k[7:]] = v
        else:
            compatible_state_dict[k] = v
    net.load_state_dict(compatible_state_dict, strict=False)
    net.eval()

    img_transforms = transforms.Compose([
        transforms.Resize((288, 800)),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    if cfg.dataset == 'CULane':
        splits = ['test0_normal.txt', 'test1_crowd.txt', 'test2_hlight.txt', 'test3_shadow.txt',
                  'test4_noline.txt', 'test5_arrow.txt', 'test6_curve.txt', 'test7_cross.txt',
                  'test8_night.txt']
        datasets = [LaneTestDataset(cfg.data_root, os.path.join(cfg.data_root, 'list/test_split/' + split),
                                    img_transform=img_transforms) for split in splits]
        img_w, img_h = 1640, 590
        row_anchor = culane_row_anchor
    elif cfg.dataset == 'Tusimple':
        splits = ['test.txt']
        datasets = [LaneTestDataset(cfg.data_root, os.path.join(cfg.data_root, split),
                                    img_transform=img_transforms) for split in splits]
        img_w, img_h = 1280, 720
        row_anchor = tusimple_row_anchor
    else:
        raise NotImplementedError

    for split, dataset in zip(splits, datasets):
        loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1)
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        print(split[:-3] + 'avi')
        vout = cv2.VideoWriter(split[:-3] + 'avi', fourcc, 30.0, (img_w, img_h))
        for i, data in enumerate(tqdm.tqdm(loader)):
            imgs, names = data
            imgs = imgs.cuda()
            with torch.no_grad():
                out = net(imgs)

            col_sample = np.linspace(0, 800 - 1, cfg.griding_num)
            col_sample_w = col_sample[1] - col_sample[0]

            # Decode the per-row classification into continuous lane positions.
            out_j = out[0].data.cpu().numpy()
            out_j = out_j[:, ::-1, :]
            prob = scipy.special.softmax(out_j[:-1, :, :], axis=0)
            idx = np.arange(cfg.griding_num) + 1
            idx = idx.reshape(-1, 1, 1)
            loc = np.sum(prob * idx, axis=0)
            out_j = np.argmax(out_j, axis=0)
            loc[out_j == cfg.griding_num] = 0
            out_j = loc

            # import pdb; pdb.set_trace()

            # Draw the detected lane points on the original image and write the frame.
            vis = cv2.imread(os.path.join(cfg.data_root, names[0]))
            for i in range(out_j.shape[1]):
                if np.sum(out_j[:, i] != 0) > 2:
                    for k in range(out_j.shape[0]):
                        if out_j[k, i] > 0:
                            ppp = (int(out_j[k, i] * col_sample_w * img_w / 800) - 1,
                                   int(img_h * (row_anchor[cls_num_per_lane - 1 - k] / 288)) - 1)
                            cv2.circle(vis, ppp, 5, (0, 255, 0), -1)
            vout.write(vis)
        vout.release()
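
The decoding block inside the loop turns the network's per-row classification over cfg.griding_num column bins into a continuous lane x-coordinate: it takes the softmax over the bins, computes the expectation against the bin indices, and zeroes out rows whose argmax lands on the extra "no lane" class. Here is a small, self-contained sketch of that step on dummy data; the shapes (griding_num = 200, 18 row anchors, 4 lanes) are assumptions for illustration.

    # Minimal sketch of the classification-to-location decoding used above
    # (dummy shapes; griding_num = 200 and 4 lanes are assumptions).
    import numpy as np
    import scipy.special

    griding_num, num_rows, num_lanes = 200, 18, 4
    logits = np.random.randn(griding_num + 1, num_rows, num_lanes)  # last class = "no lane"

    prob = scipy.special.softmax(logits[:-1, :, :], axis=0)   # probabilities over the 200 bins
    idx = np.arange(griding_num).reshape(-1, 1, 1) + 1        # bin indices 1..200
    loc = np.sum(prob * idx, axis=0)                          # expected bin per row and lane

    no_lane = np.argmax(logits, axis=0) == griding_num        # rows classified as "no lane"
    loc[no_lane] = 0                                          # mark them as absent

    # loc[k, i] is now a sub-bin column estimate for lane i at row anchor k.
    print(loc.shape)  # (18, 4)

In the demo above, each non-zero loc[k, i] is then scaled by col_sample_w * img_w / 800 and paired with its row anchor to obtain the pixel coordinate that gets drawn with cv2.circle.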
