Cisco 1240AG Wireless AP Configuration Guide: Quick Start and Setup Walkthrough

The Cisco WLN00066 quick-configuration manual for the Aironet 1240AG wireless access point is a detailed guide, translated by 韩啸晨 and proofread by 叶俊, completed on November 30, 2008. It is based on the English original, "Quick Start Guide: Cisco Aironet 1240AG Series Access Point," and walks through the quick-configuration steps for the Cisco 1240AG series access points. The manual specifically notes that the 802.11a functionality does not apply to the 1242G model, so users of that AP should follow only the 802.11b and 802.11g material.

The configuration procedure covers the default login credentials (username "Cisco", password "Cisco") and recommends obtaining an IP address via DHCP. If DHCP is not available, the AP must be configured manually over the console port: set the SSID (Service Set Identifier), IP address, subnet mask, and default gateway. Before installation, have ready the AP's device name, the SSIDs for each radio band (802.11g and 802.11a), SNMP management information, and, if needed, the MAC address and an IP-address assignment tool.

Device safety is an important part of the manual. All units comply with FCC safety standards, and the RF emissions are considered harmless. During installation, the manual stresses keeping antennas away from the body and head and forbids placing the wireless device in hazardous locations. The AP supports the IEEE 802.3af Power over Ethernet standard, conforms to the IEC 60950 international electrical-safety standard, and has overload protection, so the power connection and the choice of mounting location must follow the manual's guidance strictly.

The appendix provides safety warnings in multiple languages so that users fully understand and observe the relevant regulations while working with the device. Overall, this configuration manual gives complete setup steps and safety notes and is a key reference for anyone configuring or maintaining a Cisco 1240AG wireless AP for the first time.
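Where DHCP is unavailable, the console-based setup described above comes down to assigning the AP a name, a static IP address, a gateway, and at least one SSID. The snippet below is only a rough sketch of what that can look like on an autonomous (IOS-based) Aironet AP; the hostname, addresses, and SSID are illustrative placeholders, and the exact commands can differ by IOS release, so the manual itself remains the authoritative reference.

! Illustrative values only
hostname AP1240-Example
!
! Static IP on the bridge-group virtual interface (used when DHCP is not available)
interface BVI1
 ip address 192.168.1.10 255.255.255.0
 exit
ip default-gateway 192.168.1.1
!
! Define an SSID and bind it to the 2.4 GHz radio (802.11b/g)
dot11 ssid Example-SSID
 authentication open
 guest-mode
 exit
interface Dot11Radio0
 ssid Example-SSID
 no shutdown
 end

On dual-radio models the 5 GHz (802.11a) radio is typically Dot11Radio1; on the 1242G it is absent, which is exactly the 802.11a caveat the manual calls out.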

Partial PyTorch code:

train_loss, train_acc = train(model_ft, DEVICE, train_loader, optimizer, epoch, model_ema)

if use_amp:
    with torch.cuda.amp.autocast():  # enable mixed precision
        loss = torch.nan_to_num(criterion_train(output, targets))  # compute the loss
    scaler.scale(loss).backward()  # scale the gradients
    torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP_GRAD)

# (fragment pasted from torch/nn/modules/module.py, _call_impl)
if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks
        or _global_backward_hooks or _global_forward_hooks or _global_forward_pre_hooks):
    return forward_call(*input, **kwargs)

class LDAMLoss(nn.Module):
    def __init__(self, cls_num_list, max_m=0.5, weight=None, s=30):
        super(LDAMLoss, self).__init__()
        m_list = 1.0 / np.sqrt(np.sqrt(cls_num_list))
        m_list = m_list * (max_m / np.max(m_list))
        m_list = torch.cuda.FloatTensor(m_list)
        self.m_list = m_list
        assert s > 0
        self.s = s
        self.weight = weight

    def forward(self, x, target):
        index = torch.zeros_like(x, dtype=torch.uint8)
        index.scatter_(1, target.data.view(-1, 1), 1)
        index_float = index.type(torch.cuda.FloatTensor)
        batch_m = torch.matmul(self.m_list[None, :], index_float.transpose(0, 1))
        batch_m = batch_m.view((-1, 1))
        x_m = x - batch_m
        output = torch.where(index, x_m, x)
        return F.cross_entropy(self.s * output, target, weight=self.weight)

Error:

Traceback (most recent call last):
  File "/home/adminis/hpy/ConvNextV2_Demo/train+ca.py", line 279, in <module>
    train_loss, train_acc = train(model_ft, DEVICE, train_loader, optimizer, epoch,model_ema)
  File "/home/adminis/hpy/ConvNextV2_Demo/train+ca.py", line 46, in train
    loss = torch.nan_to_num(criterion_train(output, targets))  # compute the loss
  File "/home/adminis/anaconda3/envs/wln/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/adminis/hpy/ConvNextV2_Demo/models/utils.py", line 621, in forward
    index.scatter_(1, target.data.view(-1, 1), 1)
IndexError: scatter_(): Expected dtype int64 for index.
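The traceback points at index.scatter_(1, target.data.view(-1, 1), 1): scatter_ requires its index argument to be an int64 tensor, so targets is evidently arriving with a different dtype (for example as float soft labels produced by a mixup pipeline). Below is a minimal sketch of one way to repair the forward pass under that assumption; it reuses the names from the snippet above and is not presented as the only possible fix.

    def forward(self, x, target):
        # If targets are soft labels of shape (batch, num_classes), e.g. from mixup,
        # reduce them to the dominant class per sample (assumption).
        if target.dim() > 1:
            target = target.argmax(dim=1)
        target = target.long()  # scatter_ insists on int64 indices

        index = torch.zeros_like(x, dtype=torch.bool)  # a bool mask also suits torch.where
        index.scatter_(1, target.view(-1, 1), True)

        index_float = index.float()
        batch_m = torch.matmul(self.m_list[None, :], index_float.t()).view(-1, 1)
        x_m = x - batch_m
        output = torch.where(index, x_m, x)
        return F.cross_entropy(self.s * output, target, weight=self.weight)

If the soft mixup targets are actually needed, LDAMLoss itself would have to be rewritten to accept them (for instance with a soft-target cross entropy); argmax-ing them away simply restores hard-label training.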


PyTorch code:

class LDAMLoss(nn.Module):
    def __init__(self, cls_num_list, max_m=0.5, weight=None, s=30):
        super(LDAMLoss, self).__init__()
        m_list = 1.0 / np.sqrt(np.sqrt(cls_num_list))
        m_list = m_list * (max_m / np.max(m_list))
        m_list = torch.cuda.FloatTensor(m_list)
        self.m_list = m_list
        assert s > 0
        self.s = s
        if weight is not None:
            weight = torch.FloatTensor(weight).cuda()
        self.weight = weight
        self.cls_num_list = cls_num_list

    def forward(self, x, target):
        index = torch.zeros_like(x, dtype=torch.uint8)
        index_float = index.type(torch.cuda.FloatTensor)
        batch_m = torch.matmul(self.m_list[None, :], index_float.transpose(1, 0))  # 0,1
        batch_m = batch_m.view((-1, 1))  # size=(batch_size, 1)
        x_m = x - batch_m
        output = torch.where(index, x_m, x)
        if self.weight is not None:
            output = output * self.weight[None, :]
        logit = output * self.s
        return F.cross_entropy(logit, target, weight=self.weight)

classes = 7
cls_num_list = np.zeros(classes)
for _, label in train_loader.dataset:
    cls_num_list[label] += 1
criterion_train = LDAMLoss(cls_num_list=cls_num_list, max_m=0.5, s=30)
criterion_val = LDAMLoss(cls_num_list=cls_num_list, max_m=0.5, s=30)

for batch_idx, (data, target) in enumerate(train_loader):
    data, target = data.to(device, non_blocking=True), Variable(target).to(device, non_blocking=True)
    # 3. feed the data through mixup_fn to generate mixup samples
    samples, targets = mixup_fn(data, target)
    targets = torch.tensor(targets).to(torch.long)
    # 4. feed the generated samples to the model, get predictions, then compute the loss
    output = model(samples)
    # 5. zero the gradients (reset d(loss)/d(weight) to 0)
    optimizer.zero_grad()
    # 6. when mixed precision is enabled
    if use_amp:
        with torch.cuda.amp.autocast():  # enable mixed precision
            loss = torch.nan_to_num(criterion_train(output, targets))  # compute the loss
        scaler.scale(loss).backward()  # scale the gradients
        torch.nn.utils.clip_grad_norm(model.parameters(), CLIP_GRAD)  # gradient clipping to prevent exploding gradients
        scaler.step(optimizer)
        scaler.update()  # update the scaler for the next iteration

Error:

File "/home/adminis/hpy/ConvNextV2_Demo/models/losses.py", line 53, in forward
    return F.cross_entropy(logit, target, weight=self.weight)
File "/home/adminis/anaconda3/envs/wln/lib/python3.9/site-packages/torch/nn/functional.py", line 2824, in cross_entropy
    return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
RuntimeError: multi-target not supported at /pytorch/aten/src/THCUNN/generic/ClassNLLCriterion.cu:15
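The "multi-target not supported" error comes from F.cross_entropy receiving a 2-D target: mixup_fn (presumably timm's Mixup) returns soft labels of shape (batch, num_classes), and torch.tensor(targets).to(torch.long) merely turns that matrix into integers rather than into per-sample class indices, which is the only target format this older cross_entropy accepts. A rough sketch of two ways out, assuming the names above:

    # Option A: keep LDAMLoss but feed it hard labels
    # (this discards the mixup effect on the loss itself).
    samples, soft_targets = mixup_fn(data, target)   # soft_targets: (batch, num_classes) floats
    hard_targets = soft_targets.argmax(dim=1)        # (batch,) int64 class indices
    output = model(samples)
    loss = criterion_train(output, hard_targets)

    # Option B: keep the soft targets and use a loss designed for them,
    # e.g. timm's SoftTargetCrossEntropy (which is not LDAM):
    # from timm.loss import SoftTargetCrossEntropy
    # criterion_train = SoftTargetCrossEntropy()
    # loss = criterion_train(output, soft_targets)

Note also that this version of forward never scatters target into index, so the class-dependent LDAM margin is effectively never applied; the scatter_ line from the earlier snippet probably needs to be restored (with int64 indices) for the loss to work as intended.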


After adding a CBAM module to the ConvNeXt V2 model in PyTorch, the following error is raised:

Traceback (most recent call last):
  File "/home/adminis/hpy/ConvNextV2_Demo/train+.py", line 234, in <module>
    model_ft = convnextv2_base(pretrained=True)
  File "/home/adminis/hpy/ConvNextV2_Demo/models/convnext_v2.py", line 201, in convnextv2_base
    model = ConvNeXtV2(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
  File "/home/adminis/hpy/ConvNextV2_Demo/models/convnext_v2.py", line 114, in __init__
    self.apply(self._init_weights)
  File "/home/adminis/anaconda3/envs/wln/lib/python3.9/site-packages/torch/nn/modules/module.py", line 616, in apply
    module.apply(fn)
  File "/home/adminis/anaconda3/envs/wln/lib/python3.9/site-packages/torch/nn/modules/module.py", line 616, in apply
    module.apply(fn)
  File "/home/adminis/anaconda3/envs/wln/lib/python3.9/site-packages/torch/nn/modules/module.py", line 616, in apply
    module.apply(fn)
  [Previous line repeated 4 more times]
  File "/home/adminis/anaconda3/envs/wln/lib/python3.9/site-packages/torch/nn/modules/module.py", line 617, in apply
    fn(self)
  File "/home/adminis/hpy/ConvNextV2_Demo/models/convnext_v2.py", line 121, in _init_weights
    nn.init.constant_(m.bias, 0)
  File "/home/adminis/anaconda3/envs/wln/lib/python3.9/site-packages/torch/nn/init.py", line 186, in constant_
    return _no_grad_fill_(tensor, val)
  File "/home/adminis/anaconda3/envs/wln/lib/python3.9/site-packages/torch/nn/init.py", line 59, in _no_grad_fill_
    return tensor.fill_(val)
AttributeError: 'NoneType' object has no attribute 'fill_'

Partial code:

for i in range(4):
    stage = nn.Sequential(
        *[Block(dim=dims[i], drop_path=dp_rates[cur + j]) for j in range(depths[i])],
        CBAM(gate_channels=dims[i])
    )
    self.stages.append(stage)
    cur += depths

def _init_weights(self, m):
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        trunc_normal_(m.weight, std=.02)
        nn.init.constant_(m.bias, 0)
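nn.init.constant_ ends up calling tensor.fill_ on m.bias, and that bias is None: the CBAM block almost certainly builds some of its Conv2d (or Linear) layers with bias=False, and _init_weights does not check for that. A minimal sketch of the usual guard, assuming the _init_weights shown above:

    def _init_weights(self, m):
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:  # e.g. CBAM conv layers created with bias=False
                nn.init.constant_(m.bias, 0)

Separately, cur += depths in the stage loop looks as if it should be cur += depths[i] (as in the reference ConvNeXt code) so the drop-path rates advance per stage, but that is unrelated to this traceback.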


INSERT INTO `QHDATA_THEME.DB_DTRK_CZRKDT` (`RID`, `LDBM`, `TJS`, `XZQHDM`, `XB0`, `XB1`, `MNL0`, `MNL1`, `MNL2`, `MNL3`, `MNL4`, `MNL5`, `MNL6`, `MNL7`, `MNL8`, `MNL9`, `MNL10`, `MNL11`, `MNL12`, `MNL13`, `MNL14`, `MNL15`, `MNL16`, `MNL17`, `MNL18`, `MNL19`, `MNL20`, `MNL21`, `WNL0`, `WNL1`, `WNL2`, `WNL3`, `WNL4`, `WNL5`, `WNL6`, `WNL7`, `WNL8`, `WNL9`, `WNL10`, `WNL11`, `WNL12`, `WNL13`, `WNL14`, `WNL15`, `WNL16`, `WNL17`, `WNL18`, `WNL19`, `WNL20`, `WNL21`, `MYE`, `WYE`, `MET`, `WET`, `MWCN`, `WWCN`, `MLN`, `WLN`, `LNWHQ`, `LNXQJY`, `LNXX`, `LNCZ`, `LNGZ`, `LNDXZK`, `LNDXBK`, `LNSSYJS`, `LNBSYJS`, `MLN2`, `WLN2`, `LNWHQ2`, `LNXQJY2`, `LNXX2`, `LNCZ2`, `LNGZ2`, `LNDXZK2`, `LNDXBK2`, `LNSSYJS2`, `LNBSYJS2`, `MLN3`, `WLN3`, `LNWHQ3`, `LNXQJY3`, `LNXX3`, `LNCZ3`, `LNGZ3`, `LNDXZK3`, `LNDXBK3`, `LNSSYJS3`, `LNBSYJS3`, `WHQ`, `XQJY`, `XX`, `CZ`, `GZ`, `DXZK`, `DXBK`, `SSYJS`, `BSYJS`, `SSH`, `FSH`, `lng`, `lat`, `is_qianhai`, `DISTRICT_NAME`, `DISTRICT_CODE`, `STREET_NAME`, `STREET_CODE`, `COMMUNITY_NAME`, `COMMUNITY_CODE`, `occur_period`, `occur_period_year`, `occur_period_month`, `org_id`, `org_name`, `area_code`, `data_time`, `TJNY`) VALUES ('933f35f92e5d4b19a7f9334452fe5a99', '4403060000000000000', 54, '440306000000', 23, 31, 0, 0, 0, 0, 0, 7, 10, 3, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 11, 14, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 5, 8, 36, 2, 0, 16, 38, '113.861892600000004', '22.580441539999999', '1', '宝安区', '440306000000', '西乡街道', '440306000000', '盐田社区', '440306000000', 202212, 2022, 12, NULL, NULL, NULL, '2023-06-01 00:00:00', '2022-12-01 00:00:00');
