Git基础:fork, clone与branch详解

需积分: 0 0 下载量 50 浏览量 更新于2024-08-04 收藏 412KB DOCX 举报
"这份文档包含了前端大厂面试中关于Git中的fork、clone和branch三个核心概念的解释和使用方法。" Git是版本控制系统,对于前端开发者来说,理解和熟练使用Git至关重要。下面将详细阐述这三个概念及其区别。 1. **fork** - **定义**:fork并非Git自带的命令,而是GitHub等代码托管平台提供的功能,用于创建一个仓库的副本,它不仅包含了源仓库的所有文件和提交历史,还包含了Tag等元数据。这个副本位于你的GitHub账户下,与原始仓库(上游仓库)独立。 - **用途**:当你对某个开源项目感兴趣,想要参与贡献或者基于该项目创建自己的版本时,可以fork。fork后的仓库允许你在本地进行修改,然后通过Pull Request将这些修改提交回原仓库,供项目维护者审查和合并。 - **操作流程**:在GitHub上找到感兴趣的项目,点击右上角的fork按钮,然后在本地使用`git clone`克隆你的fork副本。 2. **clone** - **定义**:clone是将远程仓库的全部内容下载到本地的过程,包括所有分支、提交历史和Tag。执行`git clone`命令后,会在本地创建一个与远程仓库同步的新目录,包含`.git`隐藏文件夹,用于存储仓库元数据。 - **应用场景**:当你需要获取远程仓库的代码以进行开发、学习或构建本地环境时,会用到`git clone`。 - **操作**:获取远程仓库URL,通常在GitHub页面的“Code”按钮下,然后在终端运行`git clone <仓库URL>`。 3. **branch** - **定义**:branch在Git中表示分支,是开发过程中的并行线路,允许你在不影响主分支(通常是`master`或`main`)的情况下进行开发。 - **功能**:分支提供了一种隔离开发环境的方式,可以创建新功能、修复bug而不影响主分支的稳定。 - **操作**:创建新分支使用`git branch <分支名>`,切换分支使用`git checkout <分支名>`。默认情况下,创建新分支后不会自动切换过去,需要手动`checkout`。创建并切换到新分支可以一步完成,使用`git checkout -b <分支名>`。 在实际开发中,`fork`常用于贡献开源项目,`clone`用于获取项目源码,而`branch`则用于管理开发的不同阶段和任务。掌握这三个基本操作是每个前端开发者必备的Git技能,它们在团队协作和项目管理中扮演着重要角色。在面试中,对这些概念的深入理解体现了候选人的Git熟练度和对版本控制的理解程度。

#ifndef CONFIG_HAVE_COPY_THREAD_TLS /* For compatibility with architectures that call do_fork directly rather than * using the syscall entry points below. */ long do_fork(unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { struct kernel_clone_args args = { .flags = (clone_flags & ~CSIGNAL), .pidfd = parent_tidptr, .child_tid = child_tidptr, .parent_tid = parent_tidptr, .exit_signal = (clone_flags & CSIGNAL), .stack = stack_start, .stack_size = stack_size, }; if (!legacy_clone_args_valid(&args)) //1.查找 pid 位图,为子进程分配新的 pid return -EINVAL; return _do_fork(&args); } long _do_fork(struct kernel_clone_args *args) { u64 clone_flags = args->flags; struct completion vfork; struct pid *pid; struct task_struct *p; int trace = 0; long nr; //2.关于进程追踪的设置 if (!(clone_flags & CLONE_UNTRACED)) { if (clone_flags & CLONE_VFORK) trace = PTRACE_EVENT_VFORK; else if (args->exit_signal != SIGCHLD) trace = PTRACE_EVENT_CLONE; else trace = PTRACE_EVENT_FORK; if (likely(!ptrace_event_enabled(current, trace))) trace = 0; } //3.复制进程描述符 p = copy_process(NULL, trace, NUMA_NO_NODE, args); add_latent_entropy(); if (IS_ERR(p)) return PTR_ERR(p); trace_sched_process_fork(current, p); pid = get_task_pid(p, PIDTYPE_PID); nr = pid_vnr(pid); if (clone_flags & CLONE_PARENT_SETTID) put_user(nr, args->parent_tid); if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); get_task_struct(p); } //4.将子进程放在运行队列中父进程的前面 wake_up_new_task(p); /* forking complete and child started to run, tell ptracer */ if (unlikely(trace)) ptrace_event_pid(trace, pid); if (clone_flags & CLONE_VFORK) { //5.如果是 vfork() 的话父进程插入等待队列,挂起父进程直到子进程释放自己的内存地址空间 //(直到子进程结束或者执行新的程序) if (!wait_for_vfork_done(p, &vfork)) ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); } put_pid(pid); return nr; }加上注释

2023-06-11 上传
2023-07-15 上传

Traceback (most recent call last): File "DT_001_X01_P01.py", line 150, in DT_001_X01_P01.Module.load_model File "/home/kejia/Server/tf/Bin_x64/DeepLearning/DL_Lib_02/mmdet/apis/inference.py", line 42, in init_detector checkpoint = load_checkpoint(model, checkpoint, map_location=map_loc) File "/home/kejia/Server/tf/Bin_x64/DeepLearning/DL_Lib_02/mmcv/runner/checkpoint.py", line 529, in load_checkpoint checkpoint = _load_checkpoint(filename, map_location, logger) File "/home/kejia/Server/tf/Bin_x64/DeepLearning/DL_Lib_02/mmcv/runner/checkpoint.py", line 467, in _load_checkpoint return CheckpointLoader.load_checkpoint(filename, map_location, logger) File "/home/kejia/Server/tf/Bin_x64/DeepLearning/DL_Lib_02/mmcv/runner/checkpoint.py", line 244, in load_checkpoint return checkpoint_loader(filename, map_location) File "/home/kejia/Server/tf/Bin_x64/DeepLearning/DL_Lib_02/mmcv/runner/checkpoint.py", line 261, in load_from_local checkpoint = torch.load(filename, map_location=map_location) File "torch/serialization.py", line 594, in load return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args) File "torch/serialization.py", line 853, in _load result = unpickler.load() File "torch/serialization.py", line 845, in persistent_load load_tensor(data_type, size, key, _maybe_decode_ascii(location)) File "torch/serialization.py", line 834, in load_tensor loaded_storages[key] = restore_location(storage, location) File "torch/serialization.py", line 175, in default_restore_location result = fn(storage, location) File "torch/serialization.py", line 157, in _cuda_deserialize return obj.cuda(device) File "torch/_utils.py", line 71, in _cuda with torch.cuda.device(device): File "torch/cuda/__init__.py", line 225, in __enter__ self.prev_idx = torch._C._cuda_getDevice() File "torch/cuda/__init__.py", line 164, in _lazy_init "Cannot re-initialize CUDA in forked subprocess. " + msg) RuntimeError: Cannot re-initialize CUDA in forked subprocess. 
To use CUDA with multiprocessing, you must use the 'spawn' start method ('异常抛出', None) DT_001_X01_P01 load_model ret=1, version=V1.0.0.0

2023-07-22 上传