使用Visual Basic绘制矩形与填充矩形教程

版权申诉
0 下载量 41 浏览量 更新于2024-11-15 收藏 3KB ZIP 举报
资源摘要信息:"该资源主要关注在Windows编程环境下使用Visual Basic语言绘制矩形和填充矩形的基本操作和示例。具体的知识点涵盖以下几个方面: 1. Visual Basic基础:Visual Basic(VB)是一种事件驱动的编程语言,是微软公司推出的一种开发环境,非常适合快速开发Windows应用程序。了解VB的基础语法、事件处理机制和开发环境是进行Windows编程的前提条件。 2. Windows绘图基础:在Windows系统中,绘图是通过GDI(图形设备接口)来进行的。GDI提供了大量的函数和对象来支持基本的图形绘制,如直线、矩形、圆等。学习如何使用GDI进行基本图形的绘制是进行更复杂图形绘制的基础。 3. 绘制矩形:在GDI中,绘制矩形通常使用MoveToEx和LineTo函数或者Rectangle函数。用户需要指定矩形的左上角和右下角坐标来定义矩形的位置和大小。绘制矩形是学习Windows编程的一个基础练习,有助于理解坐标系统和图形绘制流程。 4. 填充矩形:填充矩形与绘制矩形类似,但需要使用到填充算法。在GDI中,可以使用FillRect函数来填充一个矩形区域。通过指定矩形边界和填充颜色,可以实现矩形区域的填充效果。学习填充矩形对于理解Windows图形用户界面(GUI)设计中的样式和视觉效果有很大帮助。 5. 使用文档:压缩包中的文件rectangles.doc可能是一份教学文档,提供了关于绘制和填充矩形的具体实现代码、方法描述和示例。这份文档对于理解如何在Visual Basic环境中利用Windows API进行图形绘制至关重要。文档可能详细介绍了相关的函数、参数和使用场景,以及对于绘制和填充效果的预期和调试技巧。 总结以上知识点,该资源是针对Windows编程初学者或有一定基础的开发者提供的实用教程,旨在帮助他们快速掌握如何在Visual Basic环境下使用Windows API进行基本图形绘制和填充操作。通过具体的操作演示和代码示例,学习者可以更好地理解和实践绘图命令,从而为开发更复杂的图形界面打下坚实的基础。"

# Rectangle-cutting script (reformatted from a single scraped line that was
# prefixed with the prompt text "解释如下代码:").
#
# Cuts an input rectangle into pieces whose length/width fit the machine's
# allowed stock sizes (length_max x width_max).
import math  # BUG FIX: the original called math.ceil without importing math.

# Allowed stock lengths: 32..1024 step 16, then 1056..8192 step 16.
length_list = [list(range(32, 1025, 16)) + list(range(1056, 8193, 16))]
# Allowed stock widths: 16..144 step 1.
width_list = [list(range(16, 145, 1))]
length_max = max(length_list[0])  # 8192
width_max = max(width_list[0])    # 144


def cut_rectangle(length, width):
    """Cut a (length, width) rectangle into pieces no larger than
    (length_max, width_max).

    Only rectangles that exceed BOTH limits are cut; otherwise the input
    is returned unchanged as a single piece.

    Returns:
        (rectangles, []) — list of (length, width) tuples plus an always
        empty second list, kept for backward compatibility with the
        original two-value interface.
    """
    if not (length > length_max and width > width_max):
        # Within limits on at least one axis: no cut performed (original
        # behavior preserved).
        return [(length, width)], []

    rectangles = []
    a_length = length_max          # piece A: the max-length slab
    b_length = length - length_max # piece B: the length overflow

    # Piece B keeps the full width; recurse if it is still too long.
    if b_length > length_max:
        a_parts, b_parts = cut_rectangle(b_length, width)
        rectangles.extend(a_parts)
        rectangles.extend(b_parts)
    else:
        rectangles.append((b_length, width))

    # Piece A is length_max long but may still be too wide: halve the width
    # repeatedly until every strip fits.
    if a_length > width_max:
        pending = [(a_length, width)]
        while pending:
            current, pending = pending, []
            for rect in current:
                rect_width = rect[1]
                if rect_width <= width_max:
                    rectangles.append(rect)
                    continue
                half_width = math.ceil(rect_width / 2)
                if half_width > width_max:
                    # Half is still too wide: keep one half for further
                    # halving; the remainder is finished (or recursed on
                    # length — unreachable here since rect[0] == length_max,
                    # but kept to mirror the original's safety check).
                    rem = (rect[0], rect_width - half_width)
                    if rem[0] > length_max:
                        a_parts, b_parts = cut_rectangle(*rem)
                        rectangles.extend(a_parts)
                        rectangles.extend(b_parts)
                    else:
                        rectangles.append(rem)
                    pending.append((rect[0], half_width))
                else:
                    # BUG FIX: the original re-queued the uncut rectangle
                    # here, spinning forever (e.g. cut_rectangle(10000, 200)
                    # never returned). Both halves are now narrow enough, so
                    # emit them as finished pieces.
                    rectangles.append((rect[0], half_width))
                    rectangles.append((rect[0], rect_width - half_width))
    else:
        rectangles.append((a_length, width))
    return rectangles, []


# Guarded so importing this module does not block on stdin (the original ran
# input() at import time).
if __name__ == "__main__":
    length = int(input("请输入被切割矩形的长度值:"))
    width = int(input("请输入被切割矩形的宽度值:"))
    rectangles, _ = cut_rectangle(length, width)
    print("全部切割后的矩形尺寸的列表:")
    for rectangle in rectangles:
        print(f"{rectangle[0]} x {rectangle[1]}")

134 浏览量

请详细解释下这段代码void FaceTracker::OnNewFaceData( const std::vector<human_sensing::CrosFace>& faces) { // Given |f1| and |f2| from two different (usually consecutive) frames, treat // the two rectangles as the same face if their position delta is less than // kFaceDistanceThresholdSquare. // // This is just a heuristic and is not accurate in some corner cases, but we // don't have face tracking. auto is_same_face = [&](const Rect<float>& f1, const Rect<float>& f2) -> bool { const float center_f1_x = f1.left + f1.width / 2; const float center_f1_y = f1.top + f1.height / 2; const float center_f2_x = f2.left + f2.width / 2; const float center_f2_y = f2.top + f2.height / 2; constexpr float kFaceDistanceThresholdSquare = 0.1 * 0.1; const float dist_square = std::pow(center_f1_x - center_f2_x, 2.0f) + std::pow(center_f1_y - center_f2_y, 2.0f); return dist_square < kFaceDistanceThresholdSquare; }; for (const auto& f : faces) { FaceState s = { .normalized_bounding_box = Rect<float>( f.bounding_box.x1 / options_.active_array_dimension.width, f.bounding_box.y1 / options_.active_array_dimension.height, (f.bounding_box.x2 - f.bounding_box.x1) / options_.active_array_dimension.width, (f.bounding_box.y2 - f.bounding_box.y1) / options_.active_array_dimension.height), .last_detected_ticks = base::TimeTicks::Now(), .has_attention = std::fabs(f.pan_angle) < options_.pan_angle_range}; bool found_matching_face = false; for (auto& known_face : faces_) { if (is_same_face(s.normalized_bounding_box, known_face.normalized_bounding_box)) { found_matching_face = true; if (!s.has_attention) { // If the face isn't looking at the camera, reset the timer. s.first_detected_ticks = base::TimeTicks::Max(); } else if (!known_face.has_attention && s.has_attention) { // If the face starts looking at the camera, start the timer. 
s.first_detected_ticks = base::TimeTicks::Now(); } else { s.first_detected_ticks = known_face.first_detected_ticks; } known_face = s; break; } } if (!found_matching_face) { s.first_detected_ticks = base::TimeTicks::Now(); faces_.push_back(s); } } // Flush expired face states. for (auto it = faces_.begin(); it != faces_.end();) { if (ElapsedTimeMs(it->last_detected_ticks) > options_.face_phase_out_threshold_ms) { it = faces_.erase(it); } else { ++it; } } }

111 浏览量

% initializeAllAreas (reformatted from a single scraped line; the original was
% prefixed with the prompt text "解释一下这段代码").
%
% Derives, from the initial target size params.target_sz and the image im, the
% regions used by the tracker:
%   bg_area            - background patch around the target (feature region)
%   fg_area            - conservative foreground region inside the bbox
%   area_resize_factor - scale mapping bg_area to ~params.fixed_area pixels
% and stores the normalized sizes back into params.
function [params, bg_area, fg_area, area_resize_factor] = initializeAllAreas(im, params)
	% we want a regular frame surrounding the object
	avg_dim = sum(params.target_sz)/2;
	% size from which we extract features
	bg_area = round(params.target_sz + avg_dim);
	% pick a "safe" region smaller than bbox to avoid mislabeling
	fg_area = round(params.target_sz - avg_dim * params.inner_padding);
	% saturate to image size (note: only shrinks oversized areas; assumes the
	% initial bbox itself fits in the image)
	if(bg_area(2)>size(im,2)), bg_area(2)=size(im,2)-1; end
	if(bg_area(1)>size(im,1)), bg_area(1)=size(im,1)-1; end
	% make sure the differences are a multiple of 2 (makes things easier later in color histograms)
	bg_area = bg_area - mod(bg_area - params.target_sz, 2);
	fg_area = fg_area + mod(bg_area - fg_area, 2);
	% Compute the rectangle with (or close to) params.fixed_area and
	% same aspect ratio as the target bbox
	area_resize_factor = sqrt(params.fixed_area/prod(bg_area));
	params.norm_bg_area = round(bg_area * area_resize_factor);
	% Correlation Filter (HOG) feature space
	% It is smaller than the norm bg area if HOG cell size is > 1
	params.cf_response_size = floor(params.norm_bg_area / params.hog_cell_size);
	% given the norm BG area, which is the corresponding target w and h?
	% NOTE(review): this 0.75/0.25 linear combination differs from the direct
	% rescaling kept commented out below — presumably intentional, but confirm
	% against the reference implementation.
	norm_target_sz_w = 0.75*params.norm_bg_area(2) - 0.25*params.norm_bg_area(1);
	norm_target_sz_h = 0.75*params.norm_bg_area(1) - 0.25*params.norm_bg_area(2);
	% norm_target_sz_w = params.target_sz(2) * params.norm_bg_area(2) / bg_area(2);
	% norm_target_sz_h = params.target_sz(1) * params.norm_bg_area(1) / bg_area(1);
	params.norm_target_sz = round([norm_target_sz_h norm_target_sz_w]);
	% distance (on one side) between target and bg area
	norm_pad = floor((params.norm_bg_area - params.norm_target_sz) / 2);
	radius = min(norm_pad);
	% norm_delta_area is the number of rectangles that are considered.
	% it is the "sampling space" and the dimension of the final merged response
	% it is squared to not privilege any particular direction
	params.norm_delta_area = (2*radius+1) * [1, 1];
	% Rectangle in which the integral images are computed.
	% Grid of rectangles (each of size norm_target_sz) has size norm_delta_area.
	params.norm_pwp_search_area = params.norm_target_sz + params.norm_delta_area - 1;
end

137 浏览量