/* NOTE(review): this chunk is a whitespace-mangled extraction of lwIP's netio
 * benchmark server; the small integers fused into each line (57, 58, 61, ...)
 * are the ORIGINAL file's line numbers, not code. Gaps in those numbers mean
 * lines are missing from this view — do not treat any block below as complete. */
/* Transfer-buffer size, session states, and on-wire command codes for the
 * netio TCP benchmark protocol. NETIO_USE_STATIC_BUF selects a static buffer
 * (==1) versus a heap-allocated one (!=1) — here it is 0, so heap is used. */
/* Forward declaration of the tcp_recv callback so netio_close can re-register it. */
57 #define NETIO_BUF_SIZE (4 * 1024) 58 #define NETIO_USE_STATIC_BUF 0 61 #define NETIO_STATE_WAIT_FOR_CMD 0 62 #define NETIO_STATE_RECV_DATA 1 63 #define NETIO_STATE_SEND_DATA 2 64 #define NETIO_STATE_SEND_DATA_LAST 3 65 #define NETIO_STATE_DONE 4 79 #define NETIO_CMD_QUIT 0 80 #define NETIO_CMD_C2S 1 81 #define NETIO_CMD_S2C 2 82 #define NETIO_CMD_RES 3 84 static err_t netio_recv(
void *arg,
struct tcp_pcb *pcb,
struct pbuf *p,
err_t err);
/* netio_close — tear down a benchmark session (FRAGMENT: original lines
 * 88-90, 93-97, 99-100, 103-106 and 108+ are missing from this extraction).
 * Visible behavior: mark the session DONE, keep netio_recv registered
 * (presumably so in-flight data can still be drained — TODO confirm against
 * the full source), free the heap transfer buffer when not using the static
 * one, and disable the periodic poll callback. The missing lines likely
 * contain the return type/signature, tcp_close and error handling — verify. */
87 netio_close(
void *arg,
struct tcp_pcb *pcb)
/* arg is the per-connection netio_state installed by netio_accept. */
91 struct netio_state *ns = arg;
92 ns->state = NETIO_STATE_DONE;
98 tcp_recv(pcb, netio_recv);
/* Heap-buffer build only: release the 4 KiB transfer buffer.
 * (The NULL guard before the free is visible here; the mem_free call itself
 * is on a missing line — presumably between original lines 102 and 107.) */
101 #if NETIO_USE_STATIC_BUF != 1 102 if(ns->buf_ptr !=
NULL){
/* Unregister the poll callback so the stack stops timing this pcb. */
107 tcp_poll(pcb,
NULL, 0);
/* netio_recv — tcp_recv callback: byte-wise state machine over the received
 * pbuf payload (FRAGMENT: many original lines are missing, e.g. 117, 119-132,
 * 136, 138-140, 142, 144-153, 158-161, 165-166, 168-170, 172-180, 182-188,
 * 190-195, 197-198, 200-201, 203-206, 208, 210-220, 223-243, 245+).
 * States visible below:
 *   WAIT_FOR_CMD  — accumulate a command byte, then (while cntr < 8) an
 *                   8-byte data_len, then dispatch on NETIO_CMD_C2S / _S2C;
 *   RECV_DATA     — copy payload bytes into buf_ptr, note the first byte of
 *                   each block, wrap at NETIO_BUF_SIZE, and return to
 *                   WAIT_FOR_CMD when data_len bytes arrived;
 *   SEND_DATA*    — receiving while sending is a protocol error: close;
 *   DONE          — session already closed: close again and bail.
 * The missing lines presumably include pbuf ack/free, cntr/buf_pos updates,
 * and the p == NULL (remote-close) path — TODO confirm against full source. */
116 netio_recv(
void *arg,
struct tcp_pcb *pcb,
struct pbuf *p,
err_t err)
118 struct netio_state *ns = arg;
/* Iterate over each byte of the (missing) payload pointer/counter setup. */
133 while (data_cntr--) {
134 if (ns->state == NETIO_STATE_DONE){
135 netio_close(ns, pcb);
137 }
else if (ns->state == NETIO_STATE_WAIT_FOR_CMD) {
/* Shift-accumulate the command code one byte at a time. */
141 ns->cmd |= *data_ptr++;
143 }
else if (ns->cntr < 8) {
/* Bytes 4..11 of the header: shift-accumulate the 64-bit(?) data_len
 * — field width not visible here, TODO confirm. */
146 ns->data_len |= *data_ptr++;
/* Header complete: dispatch on the command. */
154 if (ns->cmd == NETIO_CMD_C2S) {
155 ns->state = NETIO_STATE_RECV_DATA;
156 }
else if (ns->cmd == NETIO_CMD_S2C) {
157 ns->state = NETIO_STATE_SEND_DATA;
/* S2C: immediately start streaming. Clamp the chunk to the send
 * buffer space, the bytes remaining, and the transfer buffer tail. */
162 len = tcp_sndbuf(pcb);
163 len =
LWIP_MIN(len, ns->data_len - ns->cntr);
164 len =
LWIP_MIN(len, NETIO_BUF_SIZE - ns->buf_pos);
167 err = tcp_write(pcb, ns->buf_ptr + ns->buf_pos, len, TCP_WRITE_FLAG_COPY);
/* On ERR_MEM, retry with a halved(?) length — the len-adjustment line
 * is missing; loop exits once the write sticks or len drops to 1. */
171 }
while ((err ==
ERR_MEM) && (len > 1));
/* Unknown command (e.g. QUIT/RES path, context missing): close. */
181 netio_close(ns, pcb);
189 }
else if (ns->state == NETIO_STATE_RECV_DATA) {
/* Remember the first byte of the block (netio uses a zero first byte
 * to mark the final block — presumed, TODO confirm). */
196 ns->first_byte = *data_ptr;
199 ns->buf_ptr[ns->buf_pos++] = *data_ptr++;
/* Wrap the transfer buffer when full (reset code on missing lines). */
202 if (ns->buf_pos == NETIO_BUF_SIZE) {
207 if(ns->cntr == ns->data_len){
209 if (ns->first_byte != 0) {
/* Whole transfer received: go back to command parsing. */
212 ns->state = NETIO_STATE_WAIT_FOR_CMD;
221 }
else if (ns->state == NETIO_STATE_SEND_DATA
222 || ns->state == NETIO_STATE_SEND_DATA_LAST) {
/* Client must not send while we are in a send state: abort session. */
226 netio_close(ns, pcb);
/* Tail of function (missing context) — remote close / error path. */
244 netio_close(ns, pcb);
/* netio_sent — tcp_sent callback: refill the send window while a S2C
 * transfer is active (FRAGMENT: original lines 253, 255-256, 258-262, 264,
 * 266-270, 275, 277, 279-284, 286-292, 294, 296+ are missing).
 * Visible behavior: once data_len bytes are queued, wait up to ~600 ms
 * (sys_now() delta against ns->time_stamp) before switching to
 * SEND_DATA_LAST; in either send state, write the next chunk clamped by
 * sndbuf space, bytes remaining, and buffer tail, retrying on ERR_MEM;
 * wrap buf_pos at NETIO_BUF_SIZE; return to WAIT_FOR_CMD when the last
 * chunk of a SEND_DATA_LAST transfer has been queued. */
252 netio_sent(
void *arg,
struct tcp_pcb *pcb,
u16_t len)
254 struct netio_state *ns = arg;
/* All requested bytes queued while still in SEND_DATA: start the
 * linger window (timestamp setup is on a missing line — TODO confirm). */
257 if (ns->cntr >= ns->data_len && ns->state == NETIO_STATE_SEND_DATA) {
263 if (
sys_now() - ns->time_stamp > 600) {
265 ns->state = NETIO_STATE_SEND_DATA_LAST;
271 if(ns->state == NETIO_STATE_SEND_DATA_LAST || ns->state == NETIO_STATE_SEND_DATA){
/* Chunk size = min(free send buffer, bytes left, buffer tail). */
272 len = tcp_sndbuf(pcb);
273 len =
LWIP_MIN(len, ns->data_len - ns->cntr);
274 len =
LWIP_MIN(len, NETIO_BUF_SIZE - ns->buf_pos);
276 if(ns->cntr < ns->data_len){
278 err = tcp_write(pcb, ns->buf_ptr + ns->buf_pos, len, TCP_WRITE_FLAG_COPY);
/* ERR_MEM retry loop; the len-halving line is missing from this view. */
282 }
while ((err ==
ERR_MEM) && (len > 1));
/* Wrap the transfer buffer (reset statement on a missing line). */
285 if(ns->buf_pos >= NETIO_BUF_SIZE){
/* Final chunk of the last block queued: session returns to idle. */
293 if(ns->cntr >= ns->data_len && ns->state == NETIO_STATE_SEND_DATA_LAST){
295 ns->state = NETIO_STATE_WAIT_FOR_CMD;
/* netio_poll — periodic tcp_poll callback (FRAGMENT: original lines 305,
 * 308 and 311+ are missing). Visible behavior: while a S2C transfer is
 * in progress it presumably kicks the sender (body of the first branch is
 * on a missing line — likely a netio_sent(ns, pcb, 0) call, TODO confirm);
 * if the session is DONE it finishes teardown via netio_close. */
304 netio_poll(
void *arg,
struct tcp_pcb *pcb)
306 struct netio_state * ns = arg;
307 if(ns->state == NETIO_STATE_SEND_DATA){
309 }
else if(ns->state == NETIO_STATE_DONE){
310 netio_close(ns, pcb);
/* Static 4 KiB transfer buffer, compiled in only when NETIO_USE_STATIC_BUF
 * is 1 (it is 0 in this configuration, so this definition is inactive and
 * netio_accept heap-allocates the buffer instead). */
317 #if NETIO_USE_STATIC_BUF == 1 318 static u8_t netio_buf[NETIO_BUF_SIZE];
/* netio_accept — tcp_accept callback for new benchmark connections
 * (FRAGMENT: original lines 323, 325-327, 329-333, 335-338, 341-343,
 * 345-352, 356+ are missing). Visible behavior: heap-allocate a
 * netio_state, initialize it to WAIT_FOR_CMD, attach the transfer buffer
 * (static buffer when NETIO_USE_STATIC_BUF == 1, otherwise a heap
 * allocation whose call site is on a missing line), bail out when the
 * buffer allocation failed, then register the sent/recv/poll callbacks
 * (poll interval 4, i.e. every 4 coarse TCP timer ticks ≈ 2 s).
 * NOTE(review): no NULL check on the mem_malloc result is visible —
 * it may be on a missing line; confirm before assuming a bug. */
322 netio_accept(
void *arg,
struct tcp_pcb *pcb,
err_t err)
324 struct netio_state * ns;
328 ns = (
struct netio_state *)
mem_malloc(
sizeof(
struct netio_state));
334 ns->state = NETIO_STATE_WAIT_FOR_CMD;
339 #if NETIO_USE_STATIC_BUF == 1 340 ns->buf_ptr = netio_buf;
/* Heap-buffer build: allocation failed — error path on missing lines. */
344 if(ns->buf_ptr ==
NULL){
353 tcp_sent(pcb, netio_sent);
354 tcp_recv(pcb, netio_recv);
355 tcp_poll(pcb, netio_poll, 4);
/* Tail fragment of the server-setup function (its header — presumably
 * netio_init with tcp_new/tcp_bind — is entirely missing from this view):
 * convert the pcb to a listening pcb and install the accept callback. */
365 pcb = tcp_listen(pcb);
366 tcp_accept(pcb, netio_accept);
/* NOTE(review): the declarations below look like ESP8266-port shims appended
 * by the extraction (ICACHE_RODATA_ATTR/ICACHE_FLASH_ATTR placement
 * attributes on lwIP symbols, plus stubbed LWIP_UNUSED_ARG and
 * ICACHE_FLASH_ATTR macros). They are not part of the netio logic; verify
 * whether they belong in this translation unit or in a port header. */
const ip_addr_t ip_addr_any ICACHE_RODATA_ATTR
#define ICACHE_FLASH_ATTR
void * mem_malloc(mem_size_t size) ICACHE_FLASH_ATTR
static u32_t sys_now(void) ICACHE_FLASH_ATTR
u8_t pbuf_free(struct pbuf *p) ICACHE_FLASH_ATTR
#define LWIP_UNUSED_ARG(x)
void mem_free(void *mem) ICACHE_FLASH_ATTR