#include <iostream>
#include <thread>
#include <mutex>
#include <chrono>
#include <cstdlib>
using namespace std;

const int N = 5;     // number of philosophers (and forks)
mutex forks[N];      // one mutex per fork

// Philosopher i loops forever: announce hunger, acquire both adjacent
// forks, eat for a random interval, release the forks.
//
// Fix vs. original: the original called cv[i].wait(lock) on a
// unique_lock that was NOT held (its try_lock had just failed), which
// is undefined behavior, and its unconditional "left fork first"
// order permits a circular wait (deadlock).  std::lock() acquires
// both mutexes atomically with a deadlock-avoidance algorithm.
void philosopher(int i) {
    const int left = i;
    const int right = (i + 1) % N;
    while (true) {
        cout << "Philosopher " << i << " is hungry." << endl;
        // Acquire both forks together; adopt_lock hands ownership to
        // the guards so they are released automatically.
        lock(forks[left], forks[right]);
        lock_guard<mutex> hold_left(forks[left], adopt_lock);
        lock_guard<mutex> hold_right(forks[right], adopt_lock);
        cout << "Philosopher " << i << " picks up left fork." << endl;
        cout << "Philosopher " << i << " picks up right fork." << endl;
        cout << "Philosopher " << i << " starts eating." << endl;
        this_thread::sleep_for(chrono::milliseconds(rand() % 1000));
        cout << "Philosopher " << i << " finishes eating." << endl;
        cout << "Philosopher " << i << " puts down right fork." << endl;
        cout << "Philosopher " << i << " puts down left fork." << endl;
        // Both forks are released here by the lock_guard destructors.
    }
}

int main() {
    thread philosophers[N];
    for (int i = 0; i < N; i++) {
        philosophers[i] = thread(philosopher, i);
    }
    // Philosophers never return; the program runs until killed.
    for (int i = 0; i < N; i++) {
        philosophers[i].join();
    }
    return 0;
}
时间: 2024-01-06 08:07:11 浏览: 38
这是一个经典的哲学家就餐问题,使用了互斥锁和条件变量来实现线程同步。
在哲学家就餐问题中,有5个哲学家围坐在一张圆桌前,每个哲学家面前都有一只叉子。哲学家只有在同时拿到左右两只叉子时才能进餐,进餐完毕后需要放下叉子。
在代码实现中,每个哲学家对应一个线程,线程函数philosopher(i)模拟了哲学家的行为。当哲学家饥饿时,首先尝试拿起左手的叉子,如果成功则继续尝试拿起右手的叉子,如果失败则等待。当哲学家成功拿起左右两只叉子时,进餐一段时间后放下叉子,释放右手的叉子,并通知右边的哲学家可以拿起叉子了。
通过互斥锁保证了同一时刻只能有一个哲学家拿起同一只叉子,并试图用条件变量实现线程间的等待和通知。但需要注意:上面的代码在 try_lock 失败后对一个并未持有的 unique_lock 调用 wait(),这在 C++ 标准中是未定义行为;而且所有哲学家都先拿左手叉子,仍可能形成环形等待而死锁。
该实现方式在一定程度上演示了哲学家就餐问题的思路,但仍存在饥饿、死锁以及上述未定义行为等隐患;更稳妥的做法是使用 std::lock / std::scoped_lock 一次性原子地获取两把叉子。在实际应用中,需要根据具体情况进行进一步的优化和改进。
相关问题
C++设计实现模拟多个用户压力测试,并且获取网络性能指标(时延,速率,吞吐量,丢包率)和服务器性能指标(响应时间,并发用户数,QPS) 服务器端和客户端 代码
以下是一个简单的C++多线程客户端和服务器端代码,用于模拟多个用户进行压力测试并获取网络性能指标以及服务器性能指标:
服务器端代码:
```c++
#include <atomic>
#include <cerrno>
#include <chrono>
#include <condition_variable>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>
#include <arpa/inet.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>
using namespace std;
#define MAX_EVENTS 1024        // epoll_wait event batch size
#define MAX_MSG_LENGTH 1024    // per-request receive buffer size

// One queued unit of work: an accepted connection plus the time it
// was accepted (start_time is recorded for response-time measurement;
// it is not read anywhere in the visible code — confirm before use).
struct request {
    int fd;
    time_t start_time;
};

// Shared state between the accept thread and the worker thread.
std::mutex mtx;                        // guards req_queue
std::condition_variable cv;            // signals "queue non-empty" or shutdown
std::queue<request> req_queue;         // pending connections awaiting service
std::atomic<bool> stop_flag(false);    // raised by main() to stop all threads
std::atomic<int> conn_cnt(0), req_cnt(0), err_cnt(0);  // stats counters

// Reads one message from the client, echoes it back, and closes the
// connection.  Increments err_cnt on receive failure.
// Fix vs. original: on a recv() error the socket descriptor was
// leaked; it is now closed on every exit path.
void process_request(int fd) {
    char buffer[MAX_MSG_LENGTH];
    ssize_t n = recv(fd, buffer, MAX_MSG_LENGTH, 0);
    if (n < 0) {
        std::cerr << "Error receiving data from client" << std::endl;
        err_cnt++;
        close(fd);   // was leaked in the original
        return;
    }
    if (n == 0) {    // peer closed the connection without sending
        close(fd);
        return;
    }
    send(fd, buffer, n, 0);
    close(fd);
}
void worker_thread() {
int epoll_fd = epoll_create1(0);
if (epoll_fd < 0) {
cerr << "Error creating epoll file descriptor" << endl;
return;
}
struct epoll_event event;
event.data.fd = -1;
event.events = EPOLLIN | EPOLLET;
if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, -1, &event) < 0) {
cerr << "Error adding listen socket to epoll" << endl;
return;
}
while (!stop_flag.load()) {
vector<request> req_list;
{
unique_lock<mutex> lock(mtx);
cv.wait(lock, []{ return !req_queue.empty() || stop_flag.load(); });
if (stop_flag.load()) {
break;
}
while (!req_queue.empty()) {
req_list.push_back(req_queue.front());
req_queue.pop();
}
}
for (auto& req : req_list) {
process_request(req.fd);
req_cnt++;
}
}
close(epoll_fd);
}
// Accept thread: watches the listening socket with epoll and pushes
// each accepted connection onto the shared request queue.
//
// Fixes vs. original:
//  - epoll_wait used an infinite (-1) timeout, so the loop could
//    never observe stop_flag and main()'s join() would hang forever;
//    a 1 s timeout lets shutdown be noticed promptly.
//  - With EPOLLIN|EPOLLET on a non-blocking listen socket, accept()
//    must be called in a loop until EAGAIN, or simultaneous
//    connections are silently dropped (edge-triggered semantics).
void accept_thread(int listen_fd) {
    int epoll_fd = epoll_create1(0);
    if (epoll_fd < 0) {
        cerr << "Error creating epoll file descriptor" << endl;
        return;
    }
    struct epoll_event event;
    event.data.fd = listen_fd;
    event.events = EPOLLIN | EPOLLET;
    if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, listen_fd, &event) < 0) {
        cerr << "Error adding listen socket to epoll" << endl;
        close(epoll_fd);
        return;
    }
    struct epoll_event events[MAX_EVENTS];
    while (!stop_flag.load()) {
        // Finite timeout so stop_flag is re-checked at least once a second.
        int nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, 1000);
        if (nfds < 0) {
            if (errno == EINTR) {
                continue;   // interrupted by a signal; not an error
            }
            cerr << "Error waiting for events" << endl;
            continue;
        }
        for (int i = 0; i < nfds; i++) {
            if (events[i].data.fd != listen_fd) {
                continue;
            }
            // Edge-triggered: drain the accept backlog completely.
            while (true) {
                int conn_fd = accept(listen_fd, nullptr, nullptr);
                if (conn_fd < 0) {
                    if (errno == EAGAIN || errno == EWOULDBLOCK) {
                        break;   // backlog drained
                    }
                    cerr << "Error accepting connection" << endl;
                    err_cnt++;
                    break;
                }
                conn_cnt++;
                request req;
                req.fd = conn_fd;
                req.start_time = time(nullptr);
                {
                    lock_guard<mutex> lock(mtx);
                    req_queue.push(req);
                }
                cv.notify_one();
            }
        }
    }
    close(epoll_fd);
}
// Stats thread: once a second, dumps the connection / request / error
// counters to stdout until the stop flag is raised.
void print_stats() {
    for (;;) {
        if (stop_flag.load()) {
            break;
        }
        this_thread::sleep_for(chrono::seconds(1));
        const int conns = conn_cnt.load();
        const int reqs = req_cnt.load();
        const int errs = err_cnt.load();
        cout << "Connections: " << conns
             << ", Requests: " << reqs
             << ", Errors: " << errs
             << endl;
    }
}
int main() {
int listen_fd = socket(AF_INET, SOCK_STREAM, 0);
if (listen_fd < 0) {
cerr << "Error creating listen socket" << endl;
return 1;
}
int flags = fcntl(listen_fd, F_GETFL, 0);
if (flags < 0) {
cerr << "Error getting socket flags" << endl;
return 1;
}
if (fcntl(listen_fd, F_SETFL, flags | O_NONBLOCK) < 0) {
cerr << "Error setting socket to non-blocking mode" << endl;
return 1;
}
struct sockaddr_in addr;
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = htonl(INADDR_ANY);
addr.sin_port = htons(8888);
if (bind(listen_fd, (struct sockaddr*)&addr, sizeof(addr)) < 0) {
cerr << "Error binding listen socket" << endl;
return 1;
}
if (listen(listen_fd, SOMAXCONN) < 0) {
cerr << "Error listening on listen socket" << endl;
return 1;
}
thread accept_th(accept_thread, listen_fd);
thread worker_th(worker_thread);
thread stats_th(print_stats);
this_thread::sleep_for(chrono::seconds(10));
stop_flag.store(true);
accept_th.join();
worker_th.join();
stats_th.join();
close(listen_fd);
return 0;
}
```
客户端代码:
```c++
#include <iostream>
#include <thread>
#include <chrono>
#include <mutex>
#include <vector>
#include <queue>
#include <condition_variable>
#include <atomic>
#include <ctime>
#include <cstdlib>
#include <cstring>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <fcntl.h>
using namespace std;
#define MAX_MSG_LENGTH 1024
// Opens a TCP connection to ip:port, sends one request message tagged
// with the client id, waits for the echoed reply, then disconnects.
// Errors are reported to stderr; the socket is closed on every path.
//
// Fix vs. original: sockaddr_in was left uninitialized (sin_zero held
// garbage); it is now zeroed before use.
void send_request(int id, const char* ip, uint16_t port) {
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    if (fd < 0) {
        cerr << "Error creating socket" << endl;
        return;
    }
    struct sockaddr_in addr;
    memset(&addr, 0, sizeof(addr));   // was uninitialized
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = inet_addr(ip);
    addr.sin_port = htons(port);
    if (connect(fd, (struct sockaddr*)&addr, sizeof(addr)) < 0) {
        cerr << "Error connecting to server" << endl;
        close(fd);
        return;
    }
    char buffer[MAX_MSG_LENGTH];
    snprintf(buffer, MAX_MSG_LENGTH, "Request from client %d", id);
    ssize_t n = send(fd, buffer, strlen(buffer), 0);
    if (n < 0) {
        cerr << "Error sending data to server" << endl;
        close(fd);
        return;
    }
    // Wait for the server's echo; the payload itself is discarded.
    n = recv(fd, buffer, MAX_MSG_LENGTH, 0);
    if (n < 0) {
        cerr << "Error receiving data from server" << endl;
        close(fd);
        return;
    }
    close(fd);
}
// Entry point: parses <ip> <port> from the command line, then loops
// forever sending one request at a time with a random 0-999 ms pause
// between requests (terminate externally, e.g. with Ctrl-C).
//
// Fix vs. original: the port was parsed with atoi(), which silently
// yields 0 on garbage input; it is now parsed with strtol() and
// validated to be in 1..65535.
int main(int argc, char* argv[]) {
    if (argc != 3) {
        cerr << "Usage: " << argv[0] << " <ip> <port>" << endl;
        return 1;
    }
    const char* ip = argv[1];
    char* end = nullptr;
    long port_val = strtol(argv[2], &end, 10);
    if (end == argv[2] || *end != '\0' || port_val <= 0 || port_val > 65535) {
        cerr << "Invalid port: " << argv[2] << endl;
        return 1;
    }
    uint16_t port = static_cast<uint16_t>(port_val);
    srand(time(nullptr));
    int id = rand() % 100;   // random id embedded in each request payload
    while (true) {
        send_request(id, ip, port);
        this_thread::sleep_for(chrono::milliseconds(rand() % 1000));
    }
    return 0;   // unreachable; kept for form
}
```
这两段代码需要在 Linux 环境下编译运行(依赖 epoll 与 POSIX socket 接口)。启动服务器端后,它将等待来自客户端的连接;启动客户端后,它将以随机间隔向服务器端发送请求并等待响应。若要获取时延、吞吐量、响应时间等性能指标,可在服务器端利用 request::start_time 记录的接收时刻,结合处理完成时刻进行统计。
生成环形队列实现生产者和消费者的同步与互斥问题的代码
以下是生成环形队列实现生产者和消费者的同步与互斥问题的代码:
```
#include <iostream>
#include <queue>
#include <thread>
#include <chrono>
#include <mutex>
#include <condition_variable>
using namespace std;
// Shared state for the producer/consumer demo below.
const int buffer_size = 10;   // capacity of the bounded buffer
queue<int> buffer;            // shared buffer (std::queue used as a FIFO)
int product_count = 0, consume_count = 0;   // items produced so far / consumed counter (consume_count is never updated in the visible code)
mutex buffer_mutex;           // guards every access to `buffer` and the counters
condition_variable buffer_full_cond;    // producers wait here while the buffer is full
condition_variable buffer_empty_cond;   // consumers wait here while the buffer is empty
void producer_func()
{
while (true) {
unique_lock<mutex> lock(buffer_mutex);
buffer_full_cond.wait(lock, []{ return buffer.size() < buffer_size; });
buffer.push(product_count++);
cout << "Producer produces: " << product_count - 1 << endl;
lock.unlock();
buffer_empty_cond.notify_one();
this_thread::sleep_for(chrono::milliseconds(500)); // 生产者速度慢一些
}
}
void consumer_func()
{
while (true) {
unique_lock<mutex> lock(buffer_mutex);
buffer_empty_cond.wait(lock, []{ return !buffer.empty(); });
int val = buffer.front();
buffer.pop();
cout << "Consumer consumes: " << val << endl;
lock.unlock();
buffer_full_cond.notify_one();
this_thread::sleep_for(chrono::milliseconds(200)); // 消费者速度快一些
}
}
int main()
{
thread producer_thread(producer_func);
thread consumer_thread(consumer_func);
producer_thread.join();
consumer_thread.join();
return 0;
}
```
本段代码用 std::queue 实现了一个容量为 10 的有界缓冲区来解决生产者和消费者的同步与互斥问题(严格来说它并不是基于数组下标取模的环形队列,但同步逻辑与环形队列方案等价;如需真正的环形队列,可改用固定数组加读写下标取模实现)。在运行时,生产者线程在条件变量 `buffer_full_cond` 上等待,直到队列未满时再将产品添加到队列中;消费者线程在条件变量 `buffer_empty_cond` 上等待,直到队列非空时再取走一个产品进行消费。同时,使用互斥锁 `buffer_mutex` 来保证队列数据访问时的线程安全。