-
Version Platform Description native epoll

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
/* Register fd with the epoll instance for the given event mask.
 * Fix: the epoll_ctl return value was silently ignored; it is now
 * checked with assert(), matching the error style used by
 * set_non_blocking in this file. The event struct is also zeroed so
 * no uninitialized bytes are handed to the kernel. */
void epoll_add(int epoll_fd, int fd, uint32_t event) {
  struct epoll_event ev;
  memset(&ev, 0, sizeof(ev)); /* avoid passing uninitialized padding */
  ev.events = event;
  ev.data.fd = fd;
  int rc = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &ev);
  assert(rc != -1);
  (void)rc; /* keep -Wunused-variable quiet when NDEBUG strips the assert */
}
/* Unregister fd from the epoll instance. The fd must currently be
 * registered; a failing epoll_ctl (e.g. ENOENT) trips the assert.
 * Fix: the return value was previously ignored. */
void epoll_del(int epoll_fd, int fd) {
  int rc = epoll_ctl(epoll_fd, EPOLL_CTL_DEL, fd, NULL);
  assert(rc != -1);
  (void)rc; /* silence unused warning under NDEBUG */
}
/* Put fd into non-blocking mode, preserving its existing status flags.
 * Bug fix: the original wrapped the F_SETFL fcntl() call inside
 * assert(), so compiling with -DNDEBUG would strip the syscall entirely
 * and leave the fd in blocking mode. The syscall now always runs; only
 * the result check is assert-guarded. */
void set_non_blocking(int fd) {
  int previous = fcntl(fd, F_GETFL, 0);
  assert(previous != -1);
  int rc = fcntl(fd, F_SETFL, previous | O_NONBLOCK);
  assert(rc != -1);
  (void)rc; /* silence unused warning under NDEBUG */
}
/* Minimal single-threaded epoll-based HTTP server on port 8888.
   Accepts connections, reads (and discards) one chunk of the request,
   writes a fixed 200 response, and closes the connection. */
int main(int argc, char const *argv[]) {
/* NOTE(review): return values of socket/setsockopt/bind/listen/
   epoll_create1 are unchecked; a failure here surfaces later as a
   confusing error on an invalid fd. */
int listen_socket = socket(AF_INET, SOCK_STREAM, 0);
int enable = 1;
/* SO_REUSEADDR lets the server rebind the port quickly after restart. */
setsockopt(listen_socket, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(int));
struct sockaddr_in listen_addr;
listen_addr.sin_family = AF_INET;
listen_addr.sin_port = htons(8888);
/* Zeroed sin_addr == INADDR_ANY: listen on all interfaces. */
memset(&listen_addr.sin_addr, 0, sizeof(listen_addr.sin_addr));
bind(listen_socket, (const struct sockaddr *)&listen_addr, sizeof(listen_addr));
set_non_blocking(listen_socket);
listen(listen_socket, 512);
int epoll_fd = epoll_create1(0);
epoll_add(epoll_fd, listen_socket, EPOLLIN);
for (;;) {
struct epoll_event evs[512];
/* Block until at least one fd is ready (timeout -1 == wait forever). */
int event_num = epoll_wait(epoll_fd, evs, 512, -1);
for (int i = 0; i < event_num; ++i) {
if (evs[i].data.fd == listen_socket) {
/* Readiness on the listening socket means a new connection. */
struct sockaddr_in connect_addr;
socklen_t addr_len = sizeof(connect_addr);
int connect_socket = accept(listen_socket, (struct sockaddr *)&connect_addr, &addr_len);
/* NOTE(review): accept() can return -1 (e.g. EAGAIN on a non-blocking
   listener); that -1 would be passed to set_non_blocking/epoll_add
   unchecked — confirm this is acceptable for a benchmark toy. */
set_non_blocking(connect_socket);
epoll_add(epoll_fd, connect_socket, EPOLLIN);
} else if (evs[i].events & EPOLLIN) {
/* Client data ready: read once, reply, close (no HTTP keep-alive). */
char buf[512];
if (read(evs[i].data.fd, buf, sizeof(buf)) == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
perror("read");
abort();
}
/* 65 is the exact byte length of the response literal below. */
if (write(evs[i].data.fd, "HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 1\r\n\r\na", 65) <= 0) {
perror("write");
abort();
}
/* close() also drops the fd from the epoll interest list (no other
   references to the open file are held). */
close(evs[i].data.fd);
} else {
printf("unexpected\n");
abort();
}
}
}
return 0;
}

tokio

use tokio::net::TcpListener;
use tokio::prelude::*;
/// Tokio equivalent of the C epoll server above: accept on port 8887,
/// read once, write a fixed HTTP 200 response, then let the socket drop
/// (dropping closes it). `#[tokio::main]` runs this on tokio's default
/// multi-threaded runtime.
#[tokio::main]
async fn main() {
let listener = TcpListener::bind("0.0.0.0:8887").await.unwrap();
loop {
let (mut socket, _) = listener.accept().await.unwrap();
// One spawned task per connection; the task takes ownership of the socket.
tokio::spawn(async move {
let mut buf = [0; 512];
// Read once and discard the request bytes; bail out of the task on error.
match socket.read(&mut buf).await {
Ok(_) => (),
Err(e) => {
eprintln!("failed to read from socket; err = {:?}", e);
return;
}
};
// NOTE(review): "respone" is a typo for "response" (local name only,
// no behavioral impact).
let http_respone =
"HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 1\r\n\r\na".as_bytes();
if let Err(e) = socket.write_all(http_respone).await {
eprintln!("failed to write to socket; err = {:?}", e);
}
return;
});
}
}

compile
test command
|
Beta Was this translation helpful? Give feedback.
Replies: 1 comment
-
You are measuring different things here: your C example is single-threaded, your Rust example is multi-threaded, and your test uses just 1 connection, so it cannot take advantage of the threads. Also the work on each connection is so small that I don't think it will change anything with using the multi-threaded runtime. So you are basically measuring the synchronization overhead. Try this one:

use tokio::net::TcpListener;
use tokio::prelude::*;
use tokio::runtime;
async fn accept_loop() {
let listener = TcpListener::bind("0.0.0.0:8887").await.unwrap();
loop {
let (mut socket, _) = listener.accept().await.unwrap();
tokio::spawn(async move {
let mut buf = [0; 512];
match socket.read(&mut buf).await {
Ok(_) => (),
Err(e) => {
eprintln!("failed to read from socket; err = {:?}", e);
return;
}
};
let http_respone =
"HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 1\r\n\r\na" ‣&str
.as_bytes();
if let Err(e) = socket.write_all(http_respone).await {
eprintln!("failed to write to socket; err = {:?}", e);
}
return;
});
}
}
/// Drive the accept loop on a single-threaded (current-thread) tokio
/// runtime with only the I/O driver enabled — matching the
/// single-threaded C epoll example for an apples-to-apples comparison.
fn main() -> Result<(), Box<dyn std::error::Error>> {
let rt = runtime::Builder::new_current_thread().enable_io().build()?;
// accept_loop never returns, so block_on runs forever; Ok(()) below is
// unreachable in practice but keeps the signature tidy.
rt.block_on(accept_loop());
Ok(())
}

The results are quite comparable:
|
Beta Was this translation helpful? Give feedback.
You are measuring different things here: your C example is single-threaded, your rust example is multi-threaded, and your test uses just 1 connection, so it cannot take advantage of the threads. Also the work on each connection is so small that I don't think it will change anything with using the multi-threaded runtime. So you are basically measuring the synchronization overhead. Try this one: