Implementing one million concurrent connections with a reactor-based server in C++
For the principles behind the reactor implementation, see:
https://www.jb51.net/article/253794.htm
The million-connection code in this article was produced by modifying the code from that article.
Concurrency and load
Concurrency: the number of clients a server can hold at the same time.
Load: a request the client sends to the server (HTTP, TCP, and so on) gets a correct response back within 200 ms.
1. Server code: implementation and walkthrough
The overall layout built by the struct definitions (shown as a diagram in the original post) is a linked list:
each eventblock node holds an array of ntyevent entries, and every fd maps to one slot in one of those arrays.
/* Struct definitions: a linked list of event arrays */
struct ntyevent {
    int fd;                  // file descriptor being monitored
    int events;              // monitored events: EPOLLIN / EPOLLOUT (different events dispatch to different callbacks)
    void *arg;               // pointer back to the owning reactor
    int (*callback)(int fd, int events, void *arg);

    int status;              // 1 -> on the epoll red-black tree (monitored), 0 -> not monitored
    char buffer[BUFFER_LENGTH];
    int length;
    long last_active;
};

struct eventblock {
    struct eventblock *next;
    struct ntyevent *events; // array of MAX_EPOLL_EVENTS entries
};

struct ntyreactor {
    int epfd;                // epoll handle
    int blkcnt;              // number of eventblock nodes
    struct eventblock *evblk;// fd --> 100w
};
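A quick size check (assuming 64-bit pointers and typical struct padding, so this is an estimate rather than a guarantee): with BUFFER_LENGTH = 4096 each struct ntyevent occupies a little over 4 KB (the 4096-byte buffer plus roughly 40 bytes of bookkeeping), so one eventblock of 1024 events is about 4 MB, and the roughly 977 blocks needed for 1,000,000 fds come to around 4 GB of event storage alone — worth keeping in mind alongside the memory tuning in section 2.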
Initializing an fd and adding it to / removing it from the epoll tree
// nty_event_set(event, sockfd, acceptor, reactor);
// initialize an ntyevent for a given fd
void nty_event_set(struct ntyevent *ev, int fd, NCALLBACK callback, void *arg) {
    ev->fd = fd;
    ev->callback = callback;
    ev->events = 0;
    ev->arg = arg;
    ev->last_active = time(NULL);
    return;
}

// nty_event_add(reactor->epfd, EPOLLIN, event);
// add or modify a node on the epoll red-black tree
int nty_event_add(int epfd, int events, struct ntyevent *ev) {
    struct epoll_event ep_ev = {0, {0}};
    ep_ev.data.ptr = ev;
    ep_ev.events = ev->events = events;

    int op;
    if (ev->status == 1) {
        op = EPOLL_CTL_MOD;
    } else {
        op = EPOLL_CTL_ADD;
        ev->status = 1;
    }

    if (epoll_ctl(epfd, op, ev->fd, &ep_ev) < 0) {
        printf("event add failed [fd=%d], events[%d]\n", ev->fd, events);
        return -1;
    }
    return 0;
}

// remove a node from the epoll red-black tree
int nty_event_del(int epfd, struct ntyevent *ev) {
    struct epoll_event ep_ev = {0, {0}};

    if (ev->status != 1) {
        return -1;
    }

    ep_ev.data.ptr = ev;
    ev->status = 0;
    epoll_ctl(epfd, EPOLL_CTL_DEL, ev->fd, &ep_ev);
    return 0;
}
Writing the callback functions
Note the recv_cb callback: right after recv(), the event for the fd is re-initialized and put back on the tree with send_cb and EPOLLOUT (send_cb does the mirror image, switching back to recv_cb and EPOLLIN). This is done because the logic here echoes the data back as soon as it is received, so after a read the fd's registered interest and callback have to change: the event is reset with new values and updated on the epoll tree.
int recv_cb(int fd, int events, void *arg) {
    struct ntyreactor *reactor = (struct ntyreactor *)arg;
    struct ntyevent *ev = ntyreactor_idx(reactor, fd);

    // leave room for the terminating '\0'
    int len = recv(fd, ev->buffer, BUFFER_LENGTH - 1, 0);
    if (len > 0) {
        ev->length = len;
        ev->buffer[len] = '\0';
        printf("C[%d]:%s\n", fd, ev->buffer);

        // switch the fd from read interest to write interest
        nty_event_set(ev, fd, send_cb, reactor);
        nty_event_add(reactor->epfd, EPOLLOUT, ev);
    } else if (len == 0) {
        // peer closed the connection
        nty_event_del(reactor->epfd, ev);
        close(ev->fd);
    } else {
        nty_event_del(reactor->epfd, ev);
        close(ev->fd);
        printf("recv[fd=%d] error[%d]:%s\n", fd, errno, strerror(errno));
    }
    return len;
}

int send_cb(int fd, int events, void *arg) {
    struct ntyreactor *reactor = (struct ntyreactor *)arg;
    struct ntyevent *ev = ntyreactor_idx(reactor, fd);

    int len = send(fd, ev->buffer, ev->length, 0);
    if (len > 0) {
        printf("send[fd=%d], [%d]%s\n", fd, len, ev->buffer);

        // echo sent: go back to waiting for the next read
        nty_event_del(reactor->epfd, ev);
        nty_event_set(ev, fd, recv_cb, reactor);
        nty_event_add(reactor->epfd, EPOLLIN, ev);
    } else {
        nty_event_del(reactor->epfd, ev);
        close(ev->fd);
        printf("send[fd=%d] error %s\n", fd, strerror(errno));
    }
    return len;
}

int accept_cb(int fd, int events, void *arg) {
    struct ntyreactor *reactor = (struct ntyreactor *)arg;
    if (reactor == NULL) return -1;

    struct sockaddr_in client_addr;
    socklen_t len = sizeof(client_addr);

    int clientfd;
    if ((clientfd = accept(fd, (struct sockaddr *)&client_addr, &len)) == -1) {
        if (errno != EAGAIN && errno != EINTR) {
            printf("accept: %s\n", strerror(errno));
        }
        return -1;
    }

    // make the new connection non-blocking
    int flag = 0;
    if ((flag = fcntl(clientfd, F_SETFL, O_NONBLOCK)) < 0) {
        printf("%s: fcntl nonblocking failed, %d\n", __func__, errno);
        return -1;
    }

    // find the slot for clientfd, initialize it and register it for reading
    struct ntyevent *event = ntyreactor_idx(reactor, clientfd);
    nty_event_set(event, clientfd, recv_cb, reactor);
    nty_event_add(reactor->epfd, EPOLLIN, event);

    printf("new connect [%s:%d], pos[%d]\n",
           inet_ntoa(client_addr.sin_addr), ntohs(client_addr.sin_port), clientfd);
    return 0;
}
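One thing worth noting: send_cb above assumes send() accepts the whole buffer in one call, which a non-blocking socket does not guarantee under load. Below is a minimal sketch of a more tolerant variant, assuming a hypothetical extra offset field were added to struct ntyevent (it is not in the article's code) and that recv_cb resets it to 0 for each new message:

/* Hypothetical variant of send_cb that tolerates short writes on a
 * non-blocking socket; ev->offset is an assumed extra field. */
int send_cb_partial(int fd, int events, void *arg) {
    struct ntyreactor *reactor = (struct ntyreactor *)arg;
    struct ntyevent *ev = ntyreactor_idx(reactor, fd);

    // try to send whatever is still pending
    int len = send(fd, ev->buffer + ev->offset, ev->length - ev->offset, 0);
    if (len > 0) {
        ev->offset += len;
        if (ev->offset < ev->length) {
            // short write: stay registered for EPOLLOUT and try again later
            return len;
        }
        // everything went out: switch back to waiting for the next read
        ev->offset = 0;
        nty_event_del(reactor->epfd, ev);
        nty_event_set(ev, fd, recv_cb, reactor);
        nty_event_add(reactor->epfd, EPOLLIN, ev);
    } else if (len < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
        // kernel send buffer full: wait for the next EPOLLOUT notification
        return 0;
    } else {
        nty_event_del(reactor->epfd, ev);
        close(ev->fd);
        printf("send[fd=%d] error %s\n", fd, strerror(errno));
    }
    return len;
}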
Initializing and destroying the list
// initialize the reactor: epoll handle plus the first eventblock node
int ntyreactor_init(struct ntyreactor *reactor) {
    if (reactor == NULL) return -1;
    memset(reactor, 0, sizeof(struct ntyreactor));

    reactor->epfd = epoll_create(1);
    if (reactor->epfd <= 0) {
        printf("create epfd in %s err %s\n", __func__, strerror(errno));
        return -2;
    }

    struct ntyevent *evs = (struct ntyevent *)malloc(MAX_EPOLL_EVENTS * sizeof(struct ntyevent));
    if (evs == NULL) {
        printf("ntyreactor_alloc ntyevents failed\n");
        return -2;
    }
    memset(evs, 0, MAX_EPOLL_EVENTS * sizeof(struct ntyevent));

    struct eventblock *block = (struct eventblock *)malloc(sizeof(struct eventblock));
    if (block == NULL) {
        printf("ntyreactor_alloc eventblock failed\n");
        return -2;
    }
    memset(block, 0, sizeof(struct eventblock));

    block->events = evs;
    block->next = NULL;

    reactor->evblk = block;
    reactor->blkcnt = 1;
    return 0;
}
Finding (and returning) the slot in the block list where an fd is stored
// append a new eventblock node when more fd slots are needed
// ntyreactor_alloc(reactor);
int ntyreactor_alloc(struct ntyreactor *reactor) {
    if (reactor == NULL) return -1;
    if (reactor->evblk == NULL) return -1;

    // walk to the tail of the list
    struct eventblock *blk = reactor->evblk;
    while (blk->next != NULL) {
        blk = blk->next;
    }

    struct ntyevent *evs = (struct ntyevent *)malloc(MAX_EPOLL_EVENTS * sizeof(struct ntyevent));
    if (evs == NULL) {
        printf("ntyreactor_alloc ntyevents failed\n");
        return -2;
    }
    memset(evs, 0, MAX_EPOLL_EVENTS * sizeof(struct ntyevent));

    struct eventblock *block = (struct eventblock *)malloc(sizeof(struct eventblock));
    if (block == NULL) {
        printf("ntyreactor_alloc eventblock failed\n");
        return -2;
    }
    memset(block, 0, sizeof(struct eventblock));

    block->events = evs;
    block->next = NULL;

    blk->next = block;
    reactor->blkcnt++;
    return 0;
}

// struct ntyevent *event = ntyreactor_idx(reactor, sockfd);
// find (and, if necessary, grow storage for) the slot that holds sockfd
struct ntyevent *ntyreactor_idx(struct ntyreactor *reactor, int sockfd) {
    int blkidx = sockfd / MAX_EPOLL_EVENTS;

    // grow the list until there are enough blocks to hold this sockfd
    while (blkidx >= reactor->blkcnt) {
        ntyreactor_alloc(reactor);
    }

    // walk to the block that holds this sockfd
    int i = 0;
    struct eventblock *blk = reactor->evblk;
    while (i++ < blkidx && blk != NULL) {
        blk = blk->next;
    }

    // return the exact slot for this sockfd inside that block's array
    return &blk->events[sockfd % MAX_EPOLL_EVENTS];
}
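As a concrete example of this mapping: with MAX_EPOLL_EVENTS = 1024, a new client fd of 2500 gives blkidx = 2500 / 1024 = 2, so ntyreactor_idx keeps calling ntyreactor_alloc until blkcnt is at least 3, walks two nodes forward from the head, and returns &blk->events[2500 % 1024], i.e. slot 452 of the third block.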
Registering a listening fd on the tree and initializing its slot in the block list
// ntyreactor_addlistener(reactor, sockfds[i], accept_cb);
// initialize the slot for a listening socket and register it on the epoll tree
int ntyreactor_addlistener(struct ntyreactor *reactor, int sockfd, NCALLBACK *acceptor) {
    if (reactor == NULL) return -1;
    if (reactor->evblk == NULL) return -1;

    // find the slot for sockfd
    struct ntyevent *event = ntyreactor_idx(reactor, sockfd);

    // initialize sockfd's event
    nty_event_set(event, sockfd, acceptor, reactor);

    // add the node to the epoll red-black tree
    nty_event_add(reactor->epfd, EPOLLIN, event);
    return 0;
}
The epoll_wait event loop
// ntyreactor_run(reactor);
int ntyreactor_run(struct ntyreactor *reactor) {
    if (reactor == NULL) return -1;
    if (reactor->epfd < 0) return -1;
    if (reactor->evblk == NULL) return -1;

    struct epoll_event events[MAX_EPOLL_EVENTS + 1];
    int i;

    while (1) {
        /* (the idle-timeout sweep from the earlier single-array reactor was
           commented out here; it walked a reactor->events array that no longer
           exists in this block-list version) */

        int nready = epoll_wait(reactor->epfd, events, MAX_EPOLL_EVENTS, 1000);
        if (nready < 0) {
            printf("epoll_wait error, exit\n");
            continue;
        }

        for (i = 0; i < nready; i++) {
            struct ntyevent *ev = (struct ntyevent *)events[i].data.ptr;

            // dispatch to the callback registered for the ready event
            if ((events[i].events & EPOLLIN) && (ev->events & EPOLLIN)) {
                ev->callback(ev->fd, events[i].events, ev->arg);
            }
            if ((events[i].events & EPOLLOUT) && (ev->events & EPOLLOUT)) {
                ev->callback(ev->fd, events[i].events, ev->arg);
            }
        }
    }
}
The main function. The server opens 100 listening ports (PORT_COUNT) because the test clients also run on Ubuntu VMs: three VMs act as clients. One machine has roughly 60,000 usable source ports, so three machines give about 180,000. Since a connection is identified by the 4-tuple <remote ip, remote port, local ip, local port>, a server listening on a single port could accept at most about 180,000 connections from those three clients. To reach one million concurrent connections, the server therefore listens on many ports, letting each client reuse its source ports against different destination ports.
// a connection is identified by the 4-tuple <remote ip, remote port, local ip, local port>
// 3 client machines x ~6w ports x 100 listening ports gives enough headroom for 100w connections
int main(int argc, char *argv[]) {
    unsigned short port = SERVER_PORT;   // first listening port, 8888
    if (argc == 2) {
        port = atoi(argv[1]);            // convert the argument string to an int
    }

    struct ntyreactor *reactor = (struct ntyreactor *)malloc(sizeof(struct ntyreactor));

    // initialize the reactor: epoll handle plus the first eventblock node
    ntyreactor_init(reactor);

    int i = 0;
    int sockfds[PORT_COUNT] = {0};
    for (i = 0; i < PORT_COUNT; i++) {
        // open one listening port per iteration
        sockfds[i] = init_sock(port + i);
        // register it on the epoll tree with accept_cb
        ntyreactor_addlistener(reactor, sockfds[i], accept_cb);
    }

    // run the epoll_wait loop (never returns)
    ntyreactor_run(reactor);

    // ntyreactor_destory(reactor);
    for (i = 0; i < PORT_COUNT; i++) {
        close(sockfds[i]);
    }
    free(reactor);
    return 0;
}
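The article only shows the server; for completeness, here is a minimal sketch of what one of the three client test programs might look like. Everything in it (the server IP, the connection count, the round-robin strategy) is an assumption, not the author's original client, and the client machine needs the same fd-limit changes described in section 2.

/* Minimal test-client sketch (not part of the original article). It spreads
 * connections round-robin across the server's PORT_COUNT listening ports so
 * that every <local port, remote ip, remote port> tuple stays unique. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>

#define SERVER_IP    "192.168.0.10"   /* hypothetical server address */
#define BASE_PORT    8888
#define PORT_COUNT   100
#define CONNECTIONS  340000           /* per client machine */

int main(void) {
    int i;
    for (i = 0; i < CONNECTIONS; i++) {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        if (fd < 0) { perror("socket"); break; }

        struct sockaddr_in addr;
        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_port = htons(BASE_PORT + i % PORT_COUNT);
        inet_pton(AF_INET, SERVER_IP, &addr.sin_addr);

        if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
            perror("connect");
            close(fd);
            break;
        }
        if (i % 1000 == 0) printf("connections: %d\n", i);
        /* keep fd open; a real tester would also send/recv periodically */
    }
    pause();   /* hold all established connections open */
    return 0;
}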
Complete server code
/* A linked list of event arrays turns epoll into event management: every fd
 * owns an ntyevent, and different events dispatch to different callbacks. */
/* recv logic: the server echoes data back as soon as it is received, which is
 * why recv_cb switches the fd to EPOLLOUT and send_cb switches it back. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <sys/socket.h>
#include <sys/epoll.h>
#include <arpa/inet.h>
#include <fcntl.h>

#include <unistd.h>
#include <errno.h>
#include <time.h>

#define BUFFER_LENGTH       4096
#define MAX_EPOLL_EVENTS    1024
#define SERVER_PORT         8888
#define PORT_COUNT          100

typedef int NCALLBACK(int, int, void *);

struct ntyevent {
    int fd;                  // file descriptor being monitored
    int events;              // monitored events: EPOLLIN / EPOLLOUT
    void *arg;               // pointer back to the owning reactor
    int (*callback)(int fd, int events, void *arg);

    int status;              // 1 -> on the epoll red-black tree (monitored), 0 -> not monitored
    char buffer[BUFFER_LENGTH];
    int length;
    long last_active;
};

struct eventblock {
    struct eventblock *next;
    struct ntyevent *events; // array of MAX_EPOLL_EVENTS entries
};

struct ntyreactor {
    int epfd;                // epoll handle
    int blkcnt;              // number of eventblock nodes
    struct eventblock *evblk;// fd --> 100w
};

int recv_cb(int fd, int events, void *arg);
int send_cb(int fd, int events, void *arg);
struct ntyevent *ntyreactor_idx(struct ntyreactor *reactor, int sockfd);

// nty_event_set(event, sockfd, acceptor, reactor);
// initialize an ntyevent for a given fd
void nty_event_set(struct ntyevent *ev, int fd, NCALLBACK callback, void *arg) {
    ev->fd = fd;
    ev->callback = callback;
    ev->events = 0;
    ev->arg = arg;
    ev->last_active = time(NULL);
    return;
}

// nty_event_add(reactor->epfd, EPOLLIN, event);
// add or modify a node on the epoll red-black tree
int nty_event_add(int epfd, int events, struct ntyevent *ev) {
    struct epoll_event ep_ev = {0, {0}};
    ep_ev.data.ptr = ev;
    ep_ev.events = ev->events = events;

    int op;
    if (ev->status == 1) {
        op = EPOLL_CTL_MOD;
    } else {
        op = EPOLL_CTL_ADD;
        ev->status = 1;
    }

    if (epoll_ctl(epfd, op, ev->fd, &ep_ev) < 0) {
        printf("event add failed [fd=%d], events[%d]\n", ev->fd, events);
        return -1;
    }
    return 0;
}

// remove a node from the epoll red-black tree
int nty_event_del(int epfd, struct ntyevent *ev) {
    struct epoll_event ep_ev = {0, {0}};

    if (ev->status != 1) {
        return -1;
    }

    ep_ev.data.ptr = ev;
    ev->status = 0;
    epoll_ctl(epfd, EPOLL_CTL_DEL, ev->fd, &ep_ev);
    return 0;
}

int recv_cb(int fd, int events, void *arg) {
    struct ntyreactor *reactor = (struct ntyreactor *)arg;
    struct ntyevent *ev = ntyreactor_idx(reactor, fd);

    // leave room for the terminating '\0'
    int len = recv(fd, ev->buffer, BUFFER_LENGTH - 1, 0);
    if (len > 0) {
        ev->length = len;
        ev->buffer[len] = '\0';
        printf("C[%d]:%s\n", fd, ev->buffer);

        // switch the fd from read interest to write interest
        nty_event_set(ev, fd, send_cb, reactor);
        nty_event_add(reactor->epfd, EPOLLOUT, ev);
    } else if (len == 0) {
        // peer closed the connection
        nty_event_del(reactor->epfd, ev);
        close(ev->fd);
    } else {
        nty_event_del(reactor->epfd, ev);
        close(ev->fd);
        printf("recv[fd=%d] error[%d]:%s\n", fd, errno, strerror(errno));
    }
    return len;
}

int send_cb(int fd, int events, void *arg) {
    struct ntyreactor *reactor = (struct ntyreactor *)arg;
    struct ntyevent *ev = ntyreactor_idx(reactor, fd);

    int len = send(fd, ev->buffer, ev->length, 0);
    if (len > 0) {
        printf("send[fd=%d], [%d]%s\n", fd, len, ev->buffer);

        // echo sent: go back to waiting for the next read
        nty_event_del(reactor->epfd, ev);
        nty_event_set(ev, fd, recv_cb, reactor);
        nty_event_add(reactor->epfd, EPOLLIN, ev);
    } else {
        nty_event_del(reactor->epfd, ev);
        close(ev->fd);
        printf("send[fd=%d] error %s\n", fd, strerror(errno));
    }
    return len;
}

int accept_cb(int fd, int events, void *arg) {
    struct ntyreactor *reactor = (struct ntyreactor *)arg;
    if (reactor == NULL) return -1;

    struct sockaddr_in client_addr;
    socklen_t len = sizeof(client_addr);

    int clientfd;
    if ((clientfd = accept(fd, (struct sockaddr *)&client_addr, &len)) == -1) {
        if (errno != EAGAIN && errno != EINTR) {
            printf("accept: %s\n", strerror(errno));
        }
        return -1;
    }

    // make the new connection non-blocking
    int flag = 0;
    if ((flag = fcntl(clientfd, F_SETFL, O_NONBLOCK)) < 0) {
        printf("%s: fcntl nonblocking failed, %d\n", __func__, errno);
        return -1;
    }

    // find the slot for clientfd, initialize it and register it for reading
    struct ntyevent *event = ntyreactor_idx(reactor, clientfd);
    nty_event_set(event, clientfd, recv_cb, reactor);
    nty_event_add(reactor->epfd, EPOLLIN, event);

    printf("new connect [%s:%d], pos[%d]\n",
           inet_ntoa(client_addr.sin_addr), ntohs(client_addr.sin_port), clientfd);
    return 0;
}

// create a non-blocking listening socket on the given port
int init_sock(short port) {
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    fcntl(fd, F_SETFL, O_NONBLOCK);

    struct sockaddr_in server_addr;
    memset(&server_addr, 0, sizeof(server_addr));
    server_addr.sin_family = AF_INET;
    server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
    server_addr.sin_port = htons(port);

    bind(fd, (struct sockaddr *)&server_addr, sizeof(server_addr));

    if (listen(fd, 20) < 0) {
        printf("listen failed : %s\n", strerror(errno));
    }
    return fd;
}

// append a new eventblock node when more fd slots are needed
// ntyreactor_alloc(reactor);
int ntyreactor_alloc(struct ntyreactor *reactor) {
    if (reactor == NULL) return -1;
    if (reactor->evblk == NULL) return -1;

    // walk to the tail of the list
    struct eventblock *blk = reactor->evblk;
    while (blk->next != NULL) {
        blk = blk->next;
    }

    struct ntyevent *evs = (struct ntyevent *)malloc(MAX_EPOLL_EVENTS * sizeof(struct ntyevent));
    if (evs == NULL) {
        printf("ntyreactor_alloc ntyevents failed\n");
        return -2;
    }
    memset(evs, 0, MAX_EPOLL_EVENTS * sizeof(struct ntyevent));

    struct eventblock *block = (struct eventblock *)malloc(sizeof(struct eventblock));
    if (block == NULL) {
        printf("ntyreactor_alloc eventblock failed\n");
        return -2;
    }
    memset(block, 0, sizeof(struct eventblock));

    block->events = evs;
    block->next = NULL;

    blk->next = block;
    reactor->blkcnt++;
    return 0;
}

// struct ntyevent *event = ntyreactor_idx(reactor, sockfd);
// find (and, if necessary, grow storage for) the slot that holds sockfd
struct ntyevent *ntyreactor_idx(struct ntyreactor *reactor, int sockfd) {
    int blkidx = sockfd / MAX_EPOLL_EVENTS;

    // grow the list until there are enough blocks to hold this sockfd
    while (blkidx >= reactor->blkcnt) {
        ntyreactor_alloc(reactor);
    }

    // walk to the block that holds this sockfd
    int i = 0;
    struct eventblock *blk = reactor->evblk;
    while (i++ < blkidx && blk != NULL) {
        blk = blk->next;
    }

    // return the exact slot for this sockfd inside that block's array
    return &blk->events[sockfd % MAX_EPOLL_EVENTS];
}

// initialize the reactor: epoll handle plus the first eventblock node
int ntyreactor_init(struct ntyreactor *reactor) {
    if (reactor == NULL) return -1;
    memset(reactor, 0, sizeof(struct ntyreactor));

    reactor->epfd = epoll_create(1);
    if (reactor->epfd <= 0) {
        printf("create epfd in %s err %s\n", __func__, strerror(errno));
        return -2;
    }

    struct ntyevent *evs = (struct ntyevent *)malloc(MAX_EPOLL_EVENTS * sizeof(struct ntyevent));
    if (evs == NULL) {
        printf("ntyreactor_alloc ntyevents failed\n");
        return -2;
    }
    memset(evs, 0, MAX_EPOLL_EVENTS * sizeof(struct ntyevent));

    struct eventblock *block = (struct eventblock *)malloc(sizeof(struct eventblock));
    if (block == NULL) {
        printf("ntyreactor_alloc eventblock failed\n");
        return -2;
    }
    memset(block, 0, sizeof(struct eventblock));

    block->events = evs;
    block->next = NULL;

    reactor->evblk = block;
    reactor->blkcnt = 1;
    return 0;
}

// free every eventblock and close the epoll handle
int ntyreactor_destory(struct ntyreactor *reactor) {
    close(reactor->epfd);

    struct eventblock *blk = reactor->evblk;
    struct eventblock *blk_next = NULL;
    while (blk != NULL) {
        blk_next = blk->next;
        free(blk->events);
        free(blk);
        blk = blk_next;
    }
    return 0;
}

// ntyreactor_addlistener(reactor, sockfds[i], accept_cb);
// initialize the slot for a listening socket and register it on the epoll tree
int ntyreactor_addlistener(struct ntyreactor *reactor, int sockfd, NCALLBACK *acceptor) {
    if (reactor == NULL) return -1;
    if (reactor->evblk == NULL) return -1;

    // find the slot for sockfd
    struct ntyevent *event = ntyreactor_idx(reactor, sockfd);

    // initialize sockfd's event
    nty_event_set(event, sockfd, acceptor, reactor);

    // add the node to the epoll red-black tree
    nty_event_add(reactor->epfd, EPOLLIN, event);
    return 0;
}

// ntyreactor_run(reactor);
int ntyreactor_run(struct ntyreactor *reactor) {
    if (reactor == NULL) return -1;
    if (reactor->epfd < 0) return -1;
    if (reactor->evblk == NULL) return -1;

    struct epoll_event events[MAX_EPOLL_EVENTS + 1];
    int i;

    while (1) {
        /* (the idle-timeout sweep from the earlier single-array reactor was
           commented out here; it walked a reactor->events array that no longer
           exists in this block-list version) */

        int nready = epoll_wait(reactor->epfd, events, MAX_EPOLL_EVENTS, 1000);
        if (nready < 0) {
            printf("epoll_wait error, exit\n");
            continue;
        }

        for (i = 0; i < nready; i++) {
            struct ntyevent *ev = (struct ntyevent *)events[i].data.ptr;

            // dispatch to the callback registered for the ready event
            if ((events[i].events & EPOLLIN) && (ev->events & EPOLLIN)) {
                ev->callback(ev->fd, events[i].events, ev->arg);
            }
            if ((events[i].events & EPOLLOUT) && (ev->events & EPOLLOUT)) {
                ev->callback(ev->fd, events[i].events, ev->arg);
            }
        }
    }
}

// a connection is identified by the 4-tuple <remote ip, remote port, local ip, local port>
// 3 client machines x ~6w ports x 100 listening ports gives enough headroom for 100w connections
int main(int argc, char *argv[]) {
    unsigned short port = SERVER_PORT;   // first listening port, 8888
    if (argc == 2) {
        port = atoi(argv[1]);            // convert the argument string to an int
    }

    struct ntyreactor *reactor = (struct ntyreactor *)malloc(sizeof(struct ntyreactor));

    // initialize the reactor: epoll handle plus the first eventblock node
    ntyreactor_init(reactor);

    int i = 0;
    int sockfds[PORT_COUNT] = {0};
    for (i = 0; i < PORT_COUNT; i++) {
        // open one listening port per iteration
        sockfds[i] = init_sock(port + i);
        // register it on the epoll tree with accept_cb
        ntyreactor_addlistener(reactor, sockfds[i], accept_cb);
    }

    // run the epoll_wait loop (never returns)
    ntyreactor_run(reactor);

    // ntyreactor_destory(reactor);
    for (i = 0; i < PORT_COUNT; i++) {
        close(sockfds[i]);
    }
    free(reactor);
    return 0;
}
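A build note for reference: despite the C++ in the title, the listing above is plain C; on a Linux machine it should compile with something like gcc server.c -o server (the file name is just an example, and g++ works as well), and it then listens on ports 8888 through 8987 by default.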
The reactor style can look almost identical to a plain epoll loop; the only visible difference is the extra callback, so does it really have an advantage?
epoll manages I/O; a reactor manages events.
Different events are dispatched to different callback functions.
Performance is roughly the same, but the callback structure improves code reuse. It takes time to get a feel for this; it is as much a programming mindset as a mechanism.
2. Environment setup
The first obstacle is the file-descriptor limit: by default a process may open at most 1024 fds, and with one fd per connection, a million connections need a million fds. There are two ways to raise the limit: ulimit -n, which only affects the current session and is lost after a reboot; or editing /etc/security/limits.conf, which is permanent and takes effect for new login sessions (or after a reboot), for example:
* hard nofile 1048576
* soft nofile 1048576
hard is the hard limit: it can never be exceeded, and only root can raise it. soft is the limit that is actually enforced on the process; the process itself may raise its soft limit, but never above the hard limit.
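Besides the config file, a process can also query its current limit and raise its own soft limit up to the hard limit at startup. A minimal sketch using getrlimit/setrlimit (raising the hard limit itself still requires root or limits.conf):

#include <stdio.h>
#include <sys/resource.h>

/* Sketch: raise this process's open-file soft limit to its hard limit. */
int raise_nofile_limit(void) {
    struct rlimit rl;
    if (getrlimit(RLIMIT_NOFILE, &rl) < 0) { perror("getrlimit"); return -1; }
    printf("nofile soft=%llu hard=%llu\n",
           (unsigned long long)rl.rlim_cur, (unsigned long long)rl.rlim_max);

    rl.rlim_cur = rl.rlim_max;   /* soft may be raised, but never above hard */
    if (setrlimit(RLIMIT_NOFILE, &rl) < 0) { perror("setrlimit"); return -1; }
    return 0;
}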
A related kernel parameter worth knowing is fs.file-max (a sysctl setting): it is the system-wide upper bound on the number of open file handles, which is distinct from the per-process maximum number of fds discussed above.
After getting past these limits, another problem appears: the client reports "connection timed out". A connect timeout means the client never received the server's reply to its connect(); either the client's packet never reached the server, or the server's reply never reached the client. The real cause here is netfilter, the packet-filtering framework that sits between the NIC and the protocol stack (the iptables firewall is built on top of it). netfilter tracks every connection, and once the number of tracked connections reaches its limit it starts dropping packets for new connections, so new handshakes cannot complete. The fix is to raise that limit, a kernel parameter set through sysctl (add it to /etc/sysctl.conf and apply with sysctl -p):
net.nf_conntrack_max=1048576
With these limits out of the way, one million concurrent connections become achievable.
A few more kernel parameters (these are also sysctl settings in /etc/sysctl.conf) are worth introducing:
net.ipv4.tcp_mem = 262144 524288 786432 controls how much memory the entire TCP stack may use, measured in pages of 4 KB (so these three values correspond to roughly 1 GB, 2 GB and 3 GB). They are low / pressure / high thresholds: below the first value the stack does no special memory management; once usage rises above the second, it enters memory-pressure mode and trims usage until it falls back below the first; the third value is the upper bound, beyond which no further allocation is allowed.
net.ipv4.tcp_wmem = 2048 2048 4096 is the per-socket write (send) buffer; the three values are the minimum, default and maximum, in bytes.
net.ipv4.tcp_rmem = 2048 2048 4096 is the per-socket read (receive) buffer; again the minimum, default and maximum, in bytes.
When attempting a million connections on a machine without much memory, these can be turned down accordingly. The same trade-off applies in real applications: for large file transfers, make the buffers bigger; if only short character messages are exchanged, make them smaller so the same amount of memory can hold more connections (fds).
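The same trade-off can also be made per socket instead of system-wide. A small sketch (the 2048-byte values are only illustrative, and the kernel may round or clamp them) that could, for example, be called on clientfd inside accept_cb:

#include <sys/socket.h>

/* Sketch: shrink a connection's kernel send/receive buffers so that more
 * concurrent connections fit into the same amount of memory. */
static void shrink_socket_buffers(int fd) {
    int sndbuf = 2048;
    int rcvbuf = 2048;
    setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof(sndbuf));
    setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(rcvbuf));
}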
This concludes the article on implementing one million concurrent connections with a reactor-based server in C++.