AVPASS: Automatically Bypassing Android Malware Detection System

"藏经阁-AVPASS_Automatically Bypassing.pdf" 这篇文档主要讨论了“AVPASS: Automatically Bypassing Android Malware Detection System”,这是一个由Jinho Jung, Chanil Jeon, Max Wolotsky, Insu Yun, 和 Taesoo Kim在2017年在乔治亚理工学院进行的研究项目。研究团队属于SSLab(@GT)和ISTC-ARSA,这两个组织专注于系统安全研究和恶意软件检测分析的强化。 AVPASS项目的核心目标是通过自动化技术来绕过Android设备上的反病毒系统(AVs),从而对抗日益增长的Android恶意软件威胁。该技术通过推断反病毒软件的特征和规则,以及混淆Android二进制文件(APK)来实现这一目标。同时,AVPASS设计时考虑了防止代码泄漏,以确保在规避安全防护的同时,不会对用户数据造成风险。 随着Android操作系统在移动市场的主导地位不断巩固,其面临的恶意软件问题也日益严重。数据显示,Android每天新增约8,400种恶意软件,安全专家预测2017年可能有超过350万新的Android恶意软件应用出现。因此,AVPASS的出现对于对抗这种趋势具有重要的现实意义。 研究者们的工作不仅揭示了Android恶意软件检测系统的弱点,还提出了一种创新的解决方案,即自动地、智能地混淆恶意软件以避开检测。这可能会对未来的反恶意软件策略产生深远影响,推动安全研究人员开发更复杂的方法来检测和防御不断演变的Android恶意软件。 AVPASS项目是一项旨在提升Android设备安全性的技术,它通过理解和利用反病毒软件的工作原理,提供了一种自动化的方法来混淆恶意软件,使其能够逃避检测。这个研究为理解恶意软件的躲避策略和改进安全防御提供了宝贵的知识,同时也警示了Android用户和开发者应持续关注并提升移动设备的安全性。

Implement all of the following code in the Paddle framework (the reference code below is PyTorch-style; a Paddle sketch follows after the block):

import math  # needed by math.cos / math.pi below

class CosineAnnealingWarmbootingLR:
    # cawb learning rate scheduler: given the warm booting steps, calculate the learning rate automatically
    def __init__(self, optimizer, epochs=0, eta_min=0.05, steps=[], step_scale=0.8,
                 lf=None, batchs=0, warmup_epoch=0, epoch_scale=1.0):
        self.warmup_iters = batchs * warmup_epoch
        self.optimizer = optimizer
        self.eta_min = eta_min
        self.iters = -1
        self.iters_batch = -1
        self.base_lr = [group['lr'] for group in optimizer.param_groups]
        self.step_scale = step_scale
        steps.sort()
        self.steps = [warmup_epoch] + [i for i in steps if (i < epochs and i > warmup_epoch)] + [epochs]
        self.gap = 0
        self.last_epoch = 0
        self.lf = lf
        self.epoch_scale = epoch_scale

        # Initialize epochs and base learning rates
        for group in optimizer.param_groups:
            group.setdefault('initial_lr', group['lr'])

    def step(self, external_iter=None):
        self.iters += 1
        if external_iter is not None:
            self.iters = external_iter

        # cos warm boot policy
        iters = self.iters + self.last_epoch
        scale = 1.0
        for i in range(len(self.steps) - 1):
            if iters <= self.steps[i + 1]:
                self.gap = self.steps[i + 1] - self.steps[i]
                iters = iters - self.steps[i]
                if i != len(self.steps) - 2:
                    self.gap += self.epoch_scale
                break
            scale *= self.step_scale

        if self.lf is None:
            for group, lr in zip(self.optimizer.param_groups, self.base_lr):
                group['lr'] = scale * lr * ((((1 + math.cos(iters * math.pi / self.gap)) / 2) ** 1.0)
                                            * (1.0 - self.eta_min) + self.eta_min)
        else:
            for group, lr in zip(self.optimizer.param_groups, self.base_lr):
                group['lr'] = scale * lr * self.lf(iters, self.gap)

        return self.optimizer.param_groups[0]['lr']

    def step_batch(self):
        self.iters_batch += 1
        if self.iters_batch < self.warmup_iters:
            rate = self.iters_batch / self.warmup_iters
            for group, lr in zip(self.optimizer.param_groups, self.base_lr):
                group['lr'] = lr * rate
            return self.optimizer.param_groups[0]['lr']
        else:
            return None
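The code above manipulates PyTorch-style param_groups. Below is a minimal Paddle port sketch; it assumes the optimizer was created with a plain float learning_rate, since Paddle optimizers expose a single rate through optimizer.get_lr() / optimizer.set_lr() rather than per-group dictionaries. The schedule math is carried over unchanged; only the rate plumbing differs, and the class name and toy model are illustrative only.

import math

import paddle

class CosineAnnealingWarmbootingLRPaddle:
    # cawb scheduler ported to Paddle: drives one learning rate via optimizer.set_lr()
    def __init__(self, optimizer, epochs=0, eta_min=0.05, steps=None, step_scale=0.8,
                 lf=None, batchs=0, warmup_epoch=0, epoch_scale=1.0):
        steps = sorted(steps or [])                       # avoid mutating the caller's list
        self.optimizer = optimizer
        self.base_lr = optimizer.get_lr()                 # snapshot of the initial rate
        self.eta_min = eta_min
        self.step_scale = step_scale
        self.steps = [warmup_epoch] + [i for i in steps if warmup_epoch < i < epochs] + [epochs]
        self.warmup_iters = batchs * warmup_epoch
        self.epoch_scale = epoch_scale
        self.lf = lf
        self.iters = -1
        self.iters_batch = -1
        self.last_epoch = 0
        self.gap = 0

    def step(self, external_iter=None):
        self.iters += 1
        if external_iter is not None:
            self.iters = external_iter
        iters = self.iters + self.last_epoch
        scale = 1.0
        for i in range(len(self.steps) - 1):              # locate the current warm-boot segment
            if iters <= self.steps[i + 1]:
                self.gap = self.steps[i + 1] - self.steps[i]
                iters = iters - self.steps[i]
                if i != len(self.steps) - 2:
                    self.gap += self.epoch_scale
                break
            scale *= self.step_scale                      # each restart lowers the peak
        if self.lf is None:
            lr = scale * self.base_lr * (((1 + math.cos(iters * math.pi / self.gap)) / 2)
                                         * (1.0 - self.eta_min) + self.eta_min)
        else:
            lr = scale * self.base_lr * self.lf(iters, self.gap)
        self.optimizer.set_lr(lr)                         # Paddle's single-rate setter
        return lr

    def step_batch(self):
        self.iters_batch += 1
        if self.iters_batch < self.warmup_iters:          # linear warmup over the first batches
            lr = self.base_lr * self.iters_batch / self.warmup_iters
            self.optimizer.set_lr(lr)
            return lr
        return None

# toy usage: a Paddle optimizer created with a float learning rate
model = paddle.nn.Linear(10, 2)
optimizer = paddle.optimizer.SGD(learning_rate=0.1, parameters=model.parameters())
scheduler = CosineAnnealingWarmbootingLRPaddle(optimizer, epochs=100, steps=[30, 60],
                                               batchs=50, warmup_epoch=5)

Note that the group-wise 'initial_lr' bookkeeping of the original has no direct equivalent here, because set_lr applies one rate to all parameters.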


TypeError                                 Traceback (most recent call last)
/tmp/ipykernel_1045/245448921.py in <module>
      1 dataset_path = ABSADatasetList.Restaurant14
----> 2 sent_classifier = Trainer(config=apc_config_english,
      3                           dataset=dataset_path,  # train set and test set will be automatically detected
      4                           checkpoint_save_mode=1,  # =None to avoid save model
      5                           auto_device=True  # automatic choose CUDA or CPU

/tmp/ipykernel_1045/296492999.py in __init__(self, config, dataset, from_checkpoint, checkpoint_save_mode, auto_device)
     84             config.model_path_to_save = None
     85
---> 86         self.train()
     87
     88     def train(self):

/tmp/ipykernel_1045/296492999.py in train(self)
     96             config.seed = s
     97             if self.checkpoint_save_mode:
---> 98                 model_path.append(self.train_func(config, self.from_checkpoint, self.logger))
     99             else:
    100                 # always return the last trained model if dont save trained model

/tmp/ipykernel_1045/4269211813.py in train4apc(opt, from_checkpoint_path, logger)
    494         load_checkpoint(trainer, from_checkpoint_path)
    495
--> 496     return trainer.run()

/tmp/ipykernel_1045/4269211813.py in run(self)
    466         criterion = nn.CrossEntropyLoss()
    467         self._reset_params()
--> 468         return self._train(criterion)
    469
    470

/tmp/ipykernel_1045/4269211813.py in _train(self, criterion)
    153             return self._k_fold_train_and_evaluate(criterion)
    154         else:
--> 155             return self._train_and_evaluate(criterion)
    156
    157     def _train_and_evaluate(self, criterion):

/tmp/ipykernel_1045/4269211813.py in _train_and_evaluate(self, criterion)
    190
    191         for epoch in range(self.opt.num_epoch):
--> 192             iterator = tqdm(self.train_dataloaders[0])
    193             for i_batch, sample_batched in enumerate(iterator):
    194                 global_step += 1

TypeError: 'module' object is not callable
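The failing line is iterator = tqdm(self.train_dataloaders[0]). "TypeError: 'module' object is not callable" almost always means the code ran import tqdm (binding the module) where it needed the tqdm class that the package exports. A minimal sketch of the fix, assuming the standard tqdm package:

from tqdm import tqdm  # bind the progress-bar class, not the tqdm module

for batch in tqdm(range(100)):  # any iterable works, including a DataLoader
    pass                        # the training step would go here

Equivalently, keep import tqdm and call tqdm.tqdm(...) at the use site.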


Add a comment to each line of the following code, one sentence per line:

import math  # the scheduler relies on math.cos and math.pi below

class CosineAnnealingWarmbootingLR:  # cawb learning rate scheduler: given the warm booting steps, calculate the learning rate automatically
    def __init__(self, optimizer, epochs=0, eta_min=0.05, steps=[], step_scale=0.8, lf=None, batchs=0, warmup_epoch=0, epoch_scale=1.0):  # configure the schedule from the optimizer and hyperparameters
        self.warmup_iters = batchs * warmup_epoch  # total warmup length in batches = batches per epoch * warmup epochs
        self.optimizer = optimizer  # keep a handle on the wrapped optimizer
        self.eta_min = eta_min  # lower bound of the cosine curve, as a fraction of the base learning rate
        self.iters = -1  # epoch-level counter; the first step() call moves it to 0
        self.iters_batch = -1  # batch-level counter used by step_batch()
        self.base_lr = [group['lr'] for group in optimizer.param_groups]  # snapshot the starting learning rate of every param group
        self.step_scale = step_scale  # factor by which each warm-boot restart shrinks the peak learning rate
        steps.sort()  # sort the user-supplied restart epochs in ascending order (in place)
        self.steps = [warmup_epoch] + [i for i in steps if (i < epochs and i > warmup_epoch)] + [epochs]  # segment boundaries: warmup end, valid restarts, final epoch
        self.gap = 0  # length of the current cosine segment, computed in step()
        self.last_epoch = 0  # offset added when resuming from a checkpoint
        self.lf = lf  # optional custom lambda lf(iters, gap) replacing the cosine curve
        self.epoch_scale = epoch_scale  # extra length added to non-final segments so they do not decay all the way down

        for group in optimizer.param_groups:  # initialize epochs and base learning rates
            group.setdefault('initial_lr', group['lr'])  # record the initial learning rate if not already stored

    def step(self, external_iter=None):  # per-epoch update of the learning rate
        self.iters += 1  # advance the epoch counter
        if external_iter is not None:  # an explicit epoch index overrides the internal counter
            self.iters = external_iter  # jump to the externally supplied epoch

        iters = self.iters + self.last_epoch  # cos warm boot policy: absolute epoch position
        scale = 1.0  # cumulative peak-decay factor across past restarts
        for i in range(len(self.steps) - 1):  # find which segment the current epoch falls into
            if iters <= self.steps[i + 1]:  # the current epoch lies inside segment i
                self.gap = self.steps[i + 1] - self.steps[i]  # segment length in epochs
                iters = iters - self.steps[i]  # position within the segment
                if i != len(self.steps) - 2:  # every segment except the last one
                    self.gap += self.epoch_scale  # stretch the segment so the curve does not reach its minimum
                break  # segment found, stop searching
            scale *= self.step_scale  # passed a restart boundary, shrink the peak once more

        if self.lf is None:  # default cosine annealing curve
            for group, lr in zip(self.optimizer.param_groups, self.base_lr):  # update every param group
                group['lr'] = scale * lr * ((((1 + math.cos(iters * math.pi / self.gap)) / 2) ** 1.0) * (1.0 - self.eta_min) + self.eta_min)  # cosine from the peak down toward eta_min
        else:  # a custom curve was supplied
            for group, lr in zip(self.optimizer.param_groups, self.base_lr):  # update every param group
                group['lr'] = scale * lr * self.lf(iters, self.gap)  # delegate the shape of the curve to lf

        return self.optimizer.param_groups[0]['lr']  # report the learning rate of the first group

    def step_batch(self):  # per-batch update used only during warmup
        self.iters_batch += 1  # advance the batch counter
        if self.iters_batch < self.warmup_iters:  # still inside the warmup window
            rate = self.iters_batch / self.warmup_iters  # linear ramp from 0 to 1
            for group, lr in zip(self.optimizer.param_groups, self.base_lr):  # update every param group
                group['lr'] = lr * rate  # scale the base learning rate by the ramp
            return self.optimizer.param_groups[0]['lr']  # report the warmed-up learning rate
        else:  # warmup has finished
            return None  # signal that step() should take over
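A minimal sketch of how the two methods interleave in a training loop, assuming a PyTorch-style optimizer (the class reads optimizer.param_groups); the model and batch count are toy stand-ins:

import torch

model = torch.nn.Linear(10, 2)                             # toy model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
batches_per_epoch = 50                                     # stand-in for len(train_loader)
scheduler = CosineAnnealingWarmbootingLR(optimizer, epochs=100, steps=[30, 60, 90],
                                         batchs=batches_per_epoch, warmup_epoch=5)

for epoch in range(100):
    for _ in range(batches_per_epoch):
        scheduler.step_batch()        # linear warmup, once per batch; returns None after warmup
        # ... forward / backward / optimizer.step() would go here ...
    lr = scheduler.step()             # cosine warm-boot update, once per epoch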


Building prefix dict from the default dictionary ...
DEBUG:jieba:Building prefix dict from the default dictionary ...
Loading model from cache C:\Users\LY-AI\AppData\Local\Temp\jieba.cache
DEBUG:jieba:Loading model from cache C:\Users\LY-AI\AppData\Local\Temp\jieba.cache
Loading model cost 0.717 seconds.
DEBUG:jieba:Loading model cost 0.717 seconds.
Prefix dict has been built successfully.
DEBUG:jieba:Prefix dict has been built successfully.
C:\Users\LY-AI\Desktop\AI\vits-uma-genshin-honkai\python3.9.13\3.9.13\lib\site-packages\gradio\processing_utils.py:183: UserWarning: Trying to convert audio automatically from float32 to 16-bit int format.
  warnings.warn(warning.format(data.dtype))
Traceback (most recent call last):
  File "C:\Users\LY-AI\Desktop\AI\vits-uma-genshin-honkai\python3.9.13\3.9.13\lib\site-packages\gradio\routes.py", line 442, in run_predict
    output = await app.get_blocks().process_api(
  File "C:\Users\LY-AI\Desktop\AI\vits-uma-genshin-honkai\python3.9.13\3.9.13\lib\site-packages\gradio\blocks.py", line 1392, in process_api
    data = self.postprocess_data(fn_index, result["prediction"], state)
  File "C:\Users\LY-AI\Desktop\AI\vits-uma-genshin-honkai\python3.9.13\3.9.13\lib\site-packages\gradio\blocks.py", line 1326, in postprocess_data
    prediction_value = block.postprocess(prediction_value)
  File "C:\Users\LY-AI\Desktop\AI\vits-uma-genshin-honkai\app.py", line 42, in audio_postprocess
    return gr_processing_utils.encode_url_or_file_to_base64(data["name"])
AttributeError: module 'gradio.processing_utils' has no attribute 'encode_url_or_file_to_base64'
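encode_url_or_file_to_base64 is an internal Gradio helper that newer releases no longer export, so an app.py written against an older Gradio fails with this AttributeError. The straightforward remedy is to install the Gradio version the project pins in its requirements. Alternatively, the sketch below patches in a stand-in before app.py runs; it assumes only local files (not URLs) need encoding, and restores the helper name purely for compatibility:

import base64
import mimetypes

import gradio.processing_utils as gr_processing_utils

def _encode_file_to_base64(path):
    # minimal stand-in: encode a local file as a data URI (no URL support)
    mime = mimetypes.guess_type(path)[0] or "application/octet-stream"
    with open(path, "rb") as f:
        data = base64.b64encode(f.read()).decode("utf-8")
    return f"data:{mime};base64,{data}"

if not hasattr(gr_processing_utils, "encode_url_or_file_to_base64"):
    gr_processing_utils.encode_url_or_file_to_base64 = _encode_file_to_base64  # restore the removed name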
