HopeHook's Blog 2018-07-13T04:34:06+00:00 achst@qq.com TCP UDP 区别 2018-04-18T00:00:00+00:00 stach.tan http://hopehook.com/2018/04/18/tcp_and_udp
TCP 和 UDP 的区别

TCP 服务端建立过程

UDP 服务端建立过程

三次握手

四次挥手

]]>
TCP/IP 协议栈图片展 2018-04-17T00:00:00+00:00 stach.tan http://hopehook.com/2018/04/17/internet_protocol_suite
协议栈工作流

报文层层封装

协议依赖关系图

UDP 报文格式

TCP 报文格式

ICMP 报文格式

]]>
epoll demo 2018-03-27T00:00:00+00:00 stach.tan http://hopehook.com/2018/03/27/epoll_demo 1 客户端

#include <stdio.h> 
#include <string.h> 
#include <sys/socket.h>
#include <netinet/in.h> 
#include <arpa/inet.h>   /* inet_addr */
#include <unistd.h>      /* close */

#define MAXDATASIZE 1024
#define SERVERIP "127.0.0.1"
#define SERVERPORT 8000

/* Minimal TCP echo client: connects to SERVERIP:SERVERPORT, sends one
 * greeting, prints the reply, exits.  Returns 0 on success, 1 on any error. */
int main( int argc, char * argv[] ) 
{
	char buf[MAXDATASIZE];
	int sockfd, numbytes;
	struct sockaddr_in server_addr;

	/* Create a TCP socket. */
	if ( ( sockfd = socket( AF_INET , SOCK_STREAM , 0) ) == -1) 
	{
		perror ( "socket error" );
		return 1;
	}

	/* Fill in the fixed demo server address. */
	memset ( &server_addr, 0, sizeof ( server_addr ) );
	server_addr.sin_family = AF_INET;
	server_addr.sin_port = htons(SERVERPORT);
	server_addr.sin_addr.s_addr = inet_addr(SERVERIP);

	if ( connect ( sockfd, ( struct sockaddr * ) &server_addr, sizeof ( server_addr ) ) == -1) 
	{
		perror ( "connect error" );
		return 1;
	}

	/* 14 == strlen("Hello, world!") + 1, so the trailing '\0' is sent too. */
	printf ( "send: Hello, world!\n" );
	if ( send ( sockfd, "Hello, world!" , 14, 0) == -1) 
	{
		perror ( "send error" );
		return 1;
	}

	/* BUG FIX: read at most MAXDATASIZE - 1 bytes so that buf[numbytes] = '\0'
	 * below can never write one past the end of the buffer (the original read
	 * MAXDATASIZE bytes, making buf[MAXDATASIZE] an out-of-bounds store). */
	if ( ( numbytes = recv ( sockfd, buf, MAXDATASIZE - 1, 0) ) == -1) 
	{
		perror ( "recv error" );
		return 1;
	}
	if (numbytes) 
	{
		buf[numbytes] = '\0';
		printf ( "received: %s\n" , buf);
	}
	close(sockfd);
	return 0;
}

2 服务端

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/epoll.h>
#include <errno.h>
#include <netinet/in.h>
#include <arpa/inet.h>


#define MAXDATASIZE 1024
#define MAXCONN_NUM 10
#define MAXEVENTS 64

struct sockaddr_in server_addr;
struct sockaddr_in client_addr;

/* Put sockfd into non-blocking mode by OR-ing O_NONBLOCK into its file
 * status flags.  Returns 0 on success, -1 (after perror) on failure. */
static int make_socket_non_blocking (int sockfd)
{
    int flags = fcntl (sockfd, F_GETFL, 0);
    if (flags == -1)
    {
        perror ("fcntl");
        return -1;
    }

    if (fcntl (sockfd, F_SETFL, flags | O_NONBLOCK) == -1)
    {
        perror ("fcntl");
        return -1;
    }

    return 0;
}

/* Create a TCP socket and bind it to the given port on all interfaces.
 * Returns the bound socket fd, or -1 on error (with perror already called). */
static int create_and_bind(int port)
{
	int sockfd;
	struct sockaddr_in addr;  /* local: the listen address is needed nowhere else */

	if ( ( sockfd = socket ( AF_INET , SOCK_STREAM , 0) ) == -1) 
	{
        perror ( "socket error" );
        return -1;
    }

	/* BUG FIX: the original zeroed the unrelated global client_addr here and
	 * only worked because the global server_addr was zero-initialized. */
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(port);
	addr.sin_addr.s_addr = INADDR_ANY;
	if ( bind( sockfd, ( struct sockaddr * ) &addr, sizeof(addr) ) == -1) 
	{
        perror ( "bind error" );
        close (sockfd);
        return -1;
	}
	return sockfd;
}


/* Edge-triggered epoll echo server demo: accepts connections on the port
 * given on the command line, reads a chunk, echoes it back, repeats.
 * NOTE(review): one shared buf/numbytes for all connections only behaves for
 * a single active client at a time — fine for a demo, not for production. */
int main (int argc, char *argv[])
{
    char buf[MAXDATASIZE];
    int numbytes = 0;   /* BUG FIX: was redeclared (uninitialized) on every
                           loop iteration, so the EPOLLOUT branch used an
                           indeterminate byte count — undefined behavior */

    if (argc != 2)
    {
        fprintf (stderr, "Usage: %s [port]\n", argv[0]);
        exit (EXIT_FAILURE);
    }
    int port = atoi (argv[1]);

    /* Create and bind the listening TCP socket. */
    int sockfd = create_and_bind (port);
    if (sockfd == -1)
        abort ();

    /* Edge-triggered epoll requires non-blocking sockets. */
    if (make_socket_non_blocking (sockfd) == -1)
        abort ();

    /* Start listening before registering with epoll. */
    if (listen (sockfd, MAXCONN_NUM) == -1)
    {
        perror ("listen error");
        abort ();
    }

    int epfd = epoll_create1 (0);
    if (epfd == -1)
    {
        perror ("epoll_create error");
        abort ();
    }

    /* Watch the listening socket for incoming connections. */
    struct epoll_event event;
    event.data.fd = sockfd;
    event.events = EPOLLIN | EPOLLET;
    if (epoll_ctl (epfd, EPOLL_CTL_ADD, sockfd, &event) == -1)
    {
        perror ("epoll_ctl error");
        abort ();
    }

    /* Buffer where events are returned */
    struct epoll_event *events = calloc (MAXEVENTS, sizeof event);
    if (events == NULL)
    {
        /* BUG FIX: the calloc result was never checked. */
        perror ("calloc error");
        abort ();
    }

    /* The event loop */
    while (1)
    {
        int n, i, new_fd;
        n = epoll_wait (epfd, events, MAXEVENTS, -1);
        for (i = 0; i < n; i++)
        {
            /* Check error conditions first so a broken fd is never serviced
               (the original tested EPOLLERR/EPOLLHUP last, after EPOLLIN). */
            if ((events[i].events & EPOLLERR) || (events[i].events & EPOLLHUP))
            {
                fprintf (stderr, "epoll error\n");
                new_fd = events[i].data.fd;
                close (new_fd);
                events[i].data.fd = -1;
                epoll_ctl (epfd, EPOLL_CTL_DEL, new_fd, &event);
                continue;
            }

            if (events[i].data.fd == sockfd)
            {
                /* One or more incoming connections on the listening socket. */
                socklen_t sin_size = sizeof (struct sockaddr_in);  /* BUG FIX:
                                       accept(2) takes socklen_t*, not int* */
                if ((new_fd = accept (sockfd, (struct sockaddr *) &client_addr, &sin_size)) == -1)
                {
                    perror ("accept error");
                    continue;
                }
                printf ("server: got connection from %s\n", inet_ntoa (client_addr.sin_addr));

                /* Watch the new connection for readable data. */
                event.data.fd = new_fd;
                event.events = EPOLLIN | EPOLLET;
                if (epoll_ctl (epfd, EPOLL_CTL_ADD, new_fd, &event) == -1)
                {
                    perror ("epoll_ctl error");
                    abort ();
                }
            }
            else if (events[i].events & EPOLLIN)
            {
                if ((new_fd = events[i].data.fd) < 0)
                    continue;

                /* BUG FIX: read one byte less than the buffer size so the
                   data can be NUL-terminated before printf("%s") — the
                   original printed an unterminated buffer. */
                if ((numbytes = read (new_fd, buf, MAXDATASIZE - 1)) < 0)
                {
                    if (errno == ECONNRESET)
                    {
                        close (new_fd);
                        events[i].data.fd = -1;
                        epoll_ctl (epfd, EPOLL_CTL_DEL, new_fd, &event);
                    }
                    else
                    {
                        perror ("read error");
                    }
                }
                else if (numbytes == 0)
                {
                    /* Peer closed the connection. */
                    close (new_fd);
                    events[i].data.fd = -1;
                    epoll_ctl (epfd, EPOLL_CTL_DEL, new_fd, &event);
                }
                else
                {
                    buf[numbytes] = '\0';
                    printf ("received data: %s\n", buf);

                    /* BUG FIX: only re-arm for writing while the fd is still
                       open; the original also issued EPOLL_CTL_MOD on fds it
                       had just closed above. */
                    event.data.fd = new_fd;
                    event.events = EPOLLOUT | EPOLLET;
                    epoll_ctl (epfd, EPOLL_CTL_MOD, new_fd, &event);
                }
            }
            else if (events[i].events & EPOLLOUT)
            {
                /* Echo the last chunk back, then go back to reading. */
                new_fd = events[i].data.fd;
                if (write (new_fd, buf, numbytes) == -1)
                    perror ("write error");

                printf ("written data: %s\n", buf);
                printf ("written numbytes: %d\n", numbytes);

                event.data.fd = new_fd;
                event.events = EPOLLIN | EPOLLET;
                epoll_ctl (epfd, EPOLL_CTL_MOD, new_fd, &event);
            }
        }
    }
}
]]>
nginx access_log request_body 中文字符解析方法 2017-12-18T00:00:00+00:00 stach.tan http://hopehook.com/2017/12/18/nginx_request_body_parse 问题

nginx 在获取 post 数据时候,request_body 如果是中文,日志内容是一堆乱码。

"{\\x22id\\x22:319,\\x22title\\x22:\\x22\\xE4\\xBD\\xB3\\xE6\\xB2\\x9B\\xE9\\x98\\xB3\\xE5\\x85\\x89\\xE9\\x87\\x91\\xE5\\xA5\\x87\\xE5\\xBC\\x82\\xE6\\x9E\\x9C\\xE7\\x8E\\x8B\\xEF\\xBC\\x8822\\xE6\\x9E\\x9A\\xEF\\xBC\\x89\\x22,\\x22intro\\x22:\\x22\\xE8\\xB6\\x85\\xE9\\xAB\\x98\\xE8\\x90\\xA5\\xE5\\x85\\xBB\\xE8\\x83\\xBD\\xE9\\x87\\x8F\\xE6\\x9E\\x9C\\xEF\\xBC\\x8C\\xE4\\xB8\\x80\\xE5\\x8F\\xA3\\xE4\\xB8\\x8B\\xE5\\x8E\\xBB\\xE6\\xBB\\xA1\\xE6\\x98\\xAF\\xE7\\xBB\\xB4C\\x22,\\x22supplier_id\\x22:23,\\x22skus\\x22:[{\\x22create_time\\x22:\\x222017-08-09 22:09:32\\x22,\\x22id\\x22:506,\\x22item_id\\x22:319,\\x22item_type\\x22:\\x22common\\x22,\\x22price\\x22:21800,\\x22project_type\\x22:\\x22find\\x22,\\x22sku_title\\x22:\\x22\\x22,\\x22update_time\\x22:\\x222017-08-09 22:09:32\\x22}],\\x22images\\x22:[\\x22GoodsCommodity/5b3b8558-7d0c-11e7-95d6-00163e0a37a7\\x22],\\x22project_type\\x22:\\x22find\\x22}"

解决思路

思路一: 在 nginx 层面解决,中文不进行转义,避免解析。

思路二: 在程序层面解决,想办法解析出来。

具体方法

思路一可以参考 http://www.jianshu.com/p/8f8c2b5ca2d1 ,可以知道 nginx 到底做了些什么, 这个不是本文重点,直接跳过,我们看看思路二。

从思路一得到启发,既然 nginx 遇到中文字符,会处理成 \x22 这样的16进制内容。 那么我们只要遇到 \x22 这种形式的内容,翻译回来即可。

  • nginx 转义处理的代码片段
/* (quoted from nginx) Escape bytes for the access log: every byte whose bit
 * is set in the bitmap below is rewritten as "\xHH"; all other bytes are
 * copied through unchanged.  dst receives the escaped output, src/size
 * describe the input; returns the end of the written output as a uintptr_t. */
static uintptr_t ngx_http_log_escape(u_char *dst, u_char *src, size_t size)
{
    ngx_uint_t      n;
    /* hex digit lookup table */
    static u_char   hex[] = "0123456789ABCDEF";

    /* A 256-bit map over all byte values, 32 bits per row: a 1 bit means
     * "escape this byte", a 0 bit means "copy it through unchanged". */
    static uint32_t   escape[] = {
        0xffffffff, /* 1111 1111 1111 1111  1111 1111 1111 1111 */

                    /* ?>=< ;:98 7654 3210  /.-, +*)( '&%$ #"!  */
        0x00000004, /* 0000 0000 0000 0000  0000 0000 0000 0100 */

                    /* _^]\ [ZYX WVUT SRQP  ONML KJIH GFED CBA@ */
        0x10000000, /* 0001 0000 0000 0000  0000 0000 0000 0000 */

                    /*  ~}| {zyx wvut srqp  onml kjih gfed cba` */
        0x80000000, /* 1000 0000 0000 0000  0000 0000 0000 0000 */

        0xffffffff, /* 1111 1111 1111 1111  1111 1111 1111 1111 */
        0xffffffff, /* 1111 1111 1111 1111  1111 1111 1111 1111 */
        0xffffffff, /* 1111 1111 1111 1111  1111 1111 1111 1111 */
        0xffffffff, /* 1111 1111 1111 1111  1111 1111 1111 1111 */
    };
    
    while (size) {
         /* escape[*src >> 5]: each row of escape covers 32 byte values, so
          * dividing by 32 selects the row; (1 << (*src & 0x1f)) selects this
          * byte's bit within the row.  A set bit means "needs escaping". */
        if (escape[*src >> 5] & (1 << (*src & 0x1f))) {
            *dst++ = '\\';
            *dst++ = 'x';
            /* each escaped byte becomes two hex digits */
            /* high nibble */
            *dst++ = hex[*src >> 4];
            /* low nibble */
            *dst++ = hex[*src & 0xf];
            src++;

        } else {
            /* pass-through byte */
            *dst++ = *src++;
        }
        size--;
    }

    return (uintptr_t) dst;
}

函数参数: dst 是存在转义后的字符串; src 是原字符串; size 是 sizeof(src); 返回值不用管。 程序逻辑: ngx_http_log_escape 函数拿到用户传过来的字符串 src,按照一个字节一个字节处理,遇到不是 ASCII 码表 中的字符,该字符的高4位和低4位分别转成两个16进制数(0123456789ABCDEF),并用 \x 开头表示。

  • 解析处理的 ruby 代码
request_body = "{\\x22id\\x22:319,\\x22title\\x22:\\x22\\xE4\\xBD\\xB3\\xE6\\xB2\\x9B\\xE9\\x98\\xB3\\xE5\\x85\\x89\\xE9\\x87\\x91\\xE5\\xA5\\x87\\xE5\\xBC\\x82\\xE6\\x9E\\x9C\\xE7\\x8E\\x8B\\xEF\\xBC\\x8822\\xE6\\x9E\\x9A\\xEF\\xBC\\x89\\x22,\\x22intro\\x22:\\x22\\xE8\\xB6\\x85\\xE9\\xAB\\x98\\xE8\\x90\\xA5\\xE5\\x85\\xBB\\xE8\\x83\\xBD\\xE9\\x87\\x8F\\xE6\\x9E\\x9C\\xEF\\xBC\\x8C\\xE4\\xB8\\x80\\xE5\\x8F\\xA3\\xE4\\xB8\\x8B\\xE5\\x8E\\xBB\\xE6\\xBB\\xA1\\xE6\\x98\\xAF\\xE7\\xBB\\xB4C\\x22,\\x22supplier_id\\x22:23,\\x22skus\\x22:[{\\x22create_time\\x22:\\x222017-08-09 22:09:32\\x22,\\x22id\\x22:506,\\x22item_id\\x22:319,\\x22item_type\\x22:\\x22common\\x22,\\x22price\\x22:21800,\\x22project_type\\x22:\\x22find\\x22,\\x22sku_title\\x22:\\x22\\x22,\\x22update_time\\x22:\\x222017-08-09 22:09:32\\x22}],\\x22images\\x22:[\\x22GoodsCommodity/5b3b8558-7d0c-11e7-95d6-00163e0a37a7\\x22],\\x22project_type\\x22:\\x22find\\x22}"

# Walk the escaped string and turn every "\xHH" sequence (nginx's escaping
# of non-ASCII bytes) back into the raw byte it encodes; plain characters
# are copied through untouched.
new_request_body = ''
pt = 0
while pt < request_body.length
  if request_body[pt] == '\\' && request_body[pt + 1] == 'x'
    # two hex digits -> one original byte
    new_request_body << (request_body[pt + 2] + request_body[pt + 3]).to_i(16).chr
    pt += 4
  else
    # ordinary character: keep as-is
    new_request_body << request_body[pt]
    pt += 1
  end
end
puts '翻译结果:'
puts new_request_body

上面的 ruby 代码可以直接运行,运行结果如下:

{
"id": 319,
"title": "佳沛阳光金奇异果王(22枚)",
"intro": "超高营养能量果,一口下去满是维C",
"supplier_id": 23,
"skus": [
    {
        "create_time": "2017-08-09 22:09:32",
        "id": 506,
        "item_id": 319,
        "item_type": "common",
        "price": 21800,
        "project_type": "find",
        "sku_title": "",
        "update_time": "2017-08-09 22:09:32"
    }
],
"images": [
    "GoodsCommodity/5b3b8558-7d0c-11e7-95d6-00163e0a37a7"
],
"project_type": "find"
}
  • 题外话

前面是针对性的解析了 request_body,使得中文可以正常显示出来。假如 nginx access_log 输出的是一个 json,要完整解析,它的日志怎么做呢?

nginx access_log 格式定义如下:

 log_format  logstash   '{ "@timestamp": "$time_local", '
                        '"@fields": { '
                        '"status": "$status", '
                        '"request_method": "$request_method", '
                        '"request": "$request", '
                        '"request_body": "$request_body", '
                        '"request_time": "$request_time", '
                        '"body_bytes_sent": "$body_bytes_sent", '
                        '"remote_addr": "$remote_addr", '
                        '"http_x_forwarded_for": "$http_x_forwarded_for", '
                        '"http_host": "$http_host", '
                        '"http_referrer": "$http_referer", '
                        '"http_user_agent": "$http_user_agent" } }';   

 access_log  /data/log/nginx/access.log  logstash;

完整解析的 ruby 代码实例:

#!/usr/bin/ruby
# -*- coding: UTF-8 -*-
require 'json'

# nginx access log 日志实例
event = {}
event['message'] = "{ \"@timestamp\": \"17/Dec/2017:00:07:58 +0800\", \"@fields\": { \"status\": \"200\", \"request\": \"POST /api/m/item/add_edit?time=1513440478479 HTTP/1.1\",  \"request_body\": \"{\\x22id\\x22:319,\\x22title\\x22:\\x22\\xE4\\xBD\\xB3\\xE6\\xB2\\x9B\\xE9\\x98\\xB3\\xE5\\x85\\x89\\xE9\\x87\\x91\\xE5\\xA5\\x87\\xE5\\xBC\\x82\\xE6\\x9E\\x9C\\xE7\\x8E\\x8B\\xEF\\xBC\\x8822\\xE6\\x9E\\x9A\\xEF\\xBC\\x89\\x22,\\x22intro\\x22:\\x22\\xE8\\xB6\\x85\\xE9\\xAB\\x98\\xE8\\x90\\xA5\\xE5\\x85\\xBB\\xE8\\x83\\xBD\\xE9\\x87\\x8F\\xE6\\x9E\\x9C\\xEF\\xBC\\x8C\\xE4\\xB8\\x80\\xE5\\x8F\\xA3\\xE4\\xB8\\x8B\\xE5\\x8E\\xBB\\xE6\\xBB\\xA1\\xE6\\x98\\xAF\\xE7\\xBB\\xB4C\\x22,\\x22supplier_id\\x22:23,\\x22skus\\x22:[{\\x22create_time\\x22:\\x222017-08-09 22:09:32\\x22,\\x22id\\x22:506,\\x22item_id\\x22:319,\\x22item_type\\x22:\\x22common\\x22,\\x22price\\x22:21800,\\x22project_type\\x22:\\x22find\\x22,\\x22sku_title\\x22:\\x22\\x22,\\x22update_time\\x22:\\x222017-08-09 22:09:32\\x22}],\\x22images\\x22:[\\x22GoodsCommodity/5b3b8558-7d0c-11e7-95d6-00163e0a37a7\\x22],\\x22project_type\\x22:\\x22find\\x22}\", \"request_time\": \"0.041\", \"body_bytes_sent\": \"702\", \"remote_addr\": \"100.120.141.124\", \"http_x_forwarded_for\": \"-\", \"http_host\": \"api.dev.domain.com\", \"http_referrer\": \"https://test.dev.domain.com/\", \"http_user_agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.4\" } }"

message = event['message']
# Double the backslash of every \xHH sequence so JSON.parse below does not
# try to interpret it as a JSON escape.
message = message.gsub('\\x', '\\\\\\x')
message_obj = JSON.parse(message)
request_body = message_obj['@fields']['request_body']
if request_body != '-'
  # request_body carries JSON: decode the \xHH escapes, then parse it too.
  new_request_body = ''
  pt = 0
  while pt < request_body.length
    if request_body[pt] == '\\' && request_body[pt + 1] == 'x'
      # two hex digits -> one original byte
      new_request_body << (request_body[pt + 2] + request_body[pt + 3]).to_i(16).chr
      pt += 4
    else
      # ordinary character: keep as-is
      new_request_body << request_body[pt]
      pt += 1
    end
  end
  message_obj['@fields']['request_body'] = JSON.parse(new_request_body)
end
    
event['message_json'] = JSON.generate(message_obj)

puts '翻译结果:'
puts event['message_json']

这个代码可以应用于 logstash indexer 的配置文件 filter 部分,实现 elk 对 nginx access_log 的解析。

]]>
golang transaction 事务使用的正确姿势 2017-08-21T00:00:00+00:00 stach.tan http://hopehook.com/2017/08/21/golang_transaction 第一种写法

这种写法非常朴实,程序流程也非常明确,但是事务处理与程序流程嵌入太深,容易遗漏,造成严重的问题

// DoSomething — style 1: plain and explicit, but every early error path has
// to call tx.Rollback() by hand; forgetting one leaks an open transaction.
// The deferred closure only guards against panics.  (tx.Exec(...) is a
// placeholder for real statements.)
func DoSomething() (err error) {
    tx, err := db.Begin()
    if err != nil {
        return
    }


    // Roll back and re-panic if anything below panics.
    defer func() {
        if p := recover(); p != nil {
            tx.Rollback()
            panic(p)  // re-throw panic after Rollback
        }
    }()


    if _, err = tx.Exec(...); err != nil {
        tx.Rollback()
        return
    }
    if _, err = tx.Exec(...); err != nil {
        tx.Rollback()
        return
    }
    // ...


    err = tx.Commit()
    return
}

第二种写法

下面这种写法把事务处理从程序流程抽离了出来,不容易遗漏,但是作用域是整个函数,程序流程不是很清晰

// DoSomething — style 2: the commit/rollback decision is made exactly once,
// in a deferred closure keyed off the named return err, so the body never
// calls Rollback itself.  (tx.Exec(...) is a placeholder for real statements.)
func DoSomething() (err error) {
    tx, err := db.Begin()
    if err != nil {
        return
    }


    // Single exit point: panic -> rollback + re-panic; err != nil -> rollback;
    // otherwise commit (its error becomes the function's return value).
    defer func() {
        if p := recover(); p != nil {
            tx.Rollback()
            panic(p) // re-throw panic after Rollback
        } else if err != nil {
            tx.Rollback()
        } else {
            err = tx.Commit()
        }
    }()


    if _, err = tx.Exec(...); err != nil {
        return
    }
    if _, err = tx.Exec(...); err != nil {
        return
    }
    // ...
    return
}

第三种写法

写法三是对写法二的进一步封装,写法高级一点,缺点同上

// Transact runs txFunc inside a transaction: a panic rolls back and is
// re-thrown, a non-nil error from txFunc rolls back, and success commits
// (a commit failure is reported through the named return err).
func Transact(db *sql.DB, txFunc func(*sql.Tx) error) (err error) {
    tx, err := db.Begin()
    if err != nil {
        return
    }


    defer func() {
        if p := recover(); p != nil {
            tx.Rollback()
            panic(p) // re-throw panic after Rollback
        } else if err != nil {
            tx.Rollback()
        } else {
            err = tx.Commit()
        }
    }()


    err = txFunc(tx)
    return err
}


// DoSomething — style 3: the transaction plumbing lives in Transact; the
// closure only contains the statements.  (tx.Exec(...) is a placeholder.)
func DoSomething() error {
    return Transact(db, func (tx *sql.Tx) error {
        if _, err := tx.Exec(...); err != nil {
            return err
        }
        if _, err := tx.Exec(...); err != nil {
            return err
        }
        // BUG FIX: the closure returns error, so it must return on every
        // path — the original was missing this and would not compile.
        return nil
    })
}

我的写法

经过总结和实验,我采用了下面这种写法,defer tx.Rollback() 使得事务回滚始终得到执行。 当 tx.Commit() 执行后,tx.Rollback() 起到关闭事务的作用, 当程序因为某个错误中止,tx.Rollback() 起到回滚事务,同时关闭事务的作用。

  • 普通场景
    // Deferred Rollback always runs: after a successful Commit it is a
    // harmless no-op that just closes the transaction; on any early return
    // it rolls back.  NOTE(review): the db.Begin() error is discarded here —
    // if Begin fails, tx may be unusable; confirm before copying the pattern.
    func DoSomething() (err error) {
      tx, _ := db.Begin()
      defer tx.Rollback()
    
      if _, err = tx.Exec(...); err != nil {
          return
      }
      if _, err = tx.Exec(...); err != nil {
          return
      }
      // ...
    
    
      err = tx.Commit()
      return
    }
    
  • 循环场景

(1) 小事务 每次循环提交一次 在循环内部使用这种写法的时候,defer 不能使用,所以要把事务部分抽离到独立的函数当中

// Small-transaction variant: each call runs one complete transaction, so the
// caller's loop commits once per iteration.  defer cannot be used inside a
// loop body, which is why the transaction lives in its own function.
// NOTE(review): the db.Begin() error is discarded — confirm before reuse.
func DoSomething() (err error) {
    tx, _ := db.Begin()
    defer tx.Rollback()

    if _, err = tx.Exec(...); err != nil {
        return
    }
    if _, err = tx.Exec(...); err != nil {
        return
    }
    // ...


    err = tx.Commit()
    return
}


// Illustrative caller (not valid at file scope as written):
for {
    if err := DoSomething(); err != nil{
         // ...
    }
}

(2) 大事务 批量提交 大事务的场景和普通场景是一样的,没有任何区别

// Big-transaction variant: the loop runs inside one transaction and a single
// Commit covers all iterations; identical in shape to the plain case because
// the defer sits outside the loop.  NOTE(review): the db.Begin() error is
// discarded here as well — confirm before copying the pattern.
func DoSomething() (err error) {
    tx, _ := db.Begin()
    defer tx.Rollback()

    for{
        if _, err = tx.Exec(...); err != nil {
            return
        }
        if _, err = tx.Exec(...); err != nil {
            return
        }
        // ...
    }

    err = tx.Commit()
    return
}

参考链接:

https://stackoverflow.com/questions/16184238/database-sql-tx-detecting-commit-or-rollback

]]>
redis mysql 连接池 之 golang 实现 2017-06-14T00:00:00+00:00 stach.tan http://hopehook.com/2017/06/14/golang_db_pool 分享一下 golang 实现的 redis 和 mysql 连接池,可以在项目中直接引用连接池句柄,调用对应的方法。

举个栗子:

1 mysql 连接池的使用

(1)在项目子目录放置 mysql.go

(2)在需要调用的地方导入连接池句柄 DB

(3)调用 DB.Query()

2 redis 连接池的使用

(1)在项目子目录放置 redis.go

(2)在需要调用的地方导入连接池句柄 Cache

(3)调用 Cache.SetString (“test_key”, “test_value”)

最新代码地址:

https://github.com/hopehook/golang-db

附件:

1 mysql 连接池代码

package lib

import (
	"database/sql"
	"fmt"
	"strconv"

	"github.com/arnehormann/sqlinternals/mysqlinternals"
	_ "github.com/go-sql-driver/mysql"
)

// Connection settings for the MySQL pool; fill these in before use.
var MYSQL map[string]string = map[string]string{
	"host":         "127.0.0.1:3306",
	"database":     "",
	"user":         "",
	"password":     "",
	"maxOpenConns": "0",
	"maxIdleConns": "0",
}

// SqlConnPool bundles a database/sql handle with its pool limits.
type SqlConnPool struct {
	DriverName     string
	DataSourceName string
	MaxOpenConns   int64
	MaxIdleConns   int64
	SqlDB          *sql.DB // connection pool handle
}

// DB is the package-wide pool singleton, ready after init().
var DB *SqlConnPool

// init builds the DSN from MYSQL and opens the pool; it panics on failure so
// the program never starts with a broken database handle.
func init() {
	dataSourceName := fmt.Sprintf("%s:%s@tcp(%s)/%s", MYSQL["user"], MYSQL["password"], MYSQL["host"], MYSQL["database"])
	maxOpenConns, _ := strconv.ParseInt(MYSQL["maxOpenConns"], 10, 64)
	maxIdleConns, _ := strconv.ParseInt(MYSQL["maxIdleConns"], 10, 64)

	DB = &SqlConnPool{
		DriverName:     "mysql",
		DataSourceName: dataSourceName,
		MaxOpenConns:   maxOpenConns,
		MaxIdleConns:   maxIdleConns,
	}
	if err := DB.open(); err != nil {
		panic("init db failed")
	}
}

// open creates the database/sql handle and applies the pool limits.
// Note sql.Open does not dial; connections are made lazily on first use.
func (p *SqlConnPool) open() error {
	var err error
	p.SqlDB, err = sql.Open(p.DriverName, p.DataSourceName)
	// BUG FIX: the original called SetMaxOpenConns/SetMaxIdleConns before
	// checking err, dereferencing a nil handle when sql.Open failed.
	if err != nil {
		return err
	}
	p.SqlDB.SetMaxOpenConns(int(p.MaxOpenConns))
	p.SqlDB.SetMaxIdleConns(int(p.MaxIdleConns))
	return nil
}

// Close shuts down the pool and releases every idle connection.
func (p *SqlConnPool) Close() error {
	err := p.SqlDB.Close()
	return err
}

// Query runs a SELECT-style statement on the pool and maps every row into a
// map[columnName]value (see bytes2RealType for the per-type conversion).
// On any failure it returns an empty slice plus the error.
func (p *SqlConnPool) Query(queryStr string, args ...interface{}) ([]map[string]interface{}, error) {
	rows, err := p.SqlDB.Query(queryStr, args...)
	if err != nil {
		return []map[string]interface{}{}, err
	}
	defer rows.Close()
	// Column metadata (name + MySQL type) for every selected field.
	columns, err := mysqlinternals.Columns(rows)
	// BUG FIX: this error was silently ignored; a failure here crashed below
	// when indexing columns.
	if err != nil {
		return []map[string]interface{}{}, err
	}
	// One RawBytes destination per column, scanned by reference.
	scanArgs := make([]interface{}, len(columns))
	values := make([]sql.RawBytes, len(columns))
	for i := range values {
		scanArgs[i] = &values[i]
	}
	rowsMap := make([]map[string]interface{}, 0, 10)
	for rows.Next() {
		// BUG FIX: the Scan error was silently ignored.
		if err = rows.Scan(scanArgs...); err != nil {
			return []map[string]interface{}{}, err
		}
		rowMap := make(map[string]interface{})
		for i, value := range values {
			rowMap[columns[i].Name()] = bytes2RealType(value, columns[i].MysqlType())
		}
		rowsMap = append(rowsMap, rowMap)
	}
	if err = rows.Err(); err != nil {
		return []map[string]interface{}{}, err
	}
	return rowsMap, nil
}

// execute runs any statement that does not return rows.
func (p *SqlConnPool) execute(sqlStr string, args ...interface{}) (sql.Result, error) {
	return p.SqlDB.Exec(sqlStr, args...)
}

// Update runs an UPDATE statement and reports the number of affected rows.
func (p *SqlConnPool) Update(updateStr string, args ...interface{}) (int64, error) {
	result, err := p.execute(updateStr, args...)
	if err != nil {
		return 0, err
	}
	return result.RowsAffected()
}

// Insert runs an INSERT statement and reports the auto-generated id.
func (p *SqlConnPool) Insert(insertStr string, args ...interface{}) (int64, error) {
	result, err := p.execute(insertStr, args...)
	if err != nil {
		return 0, err
	}
	return result.LastInsertId()
}

// Delete runs a DELETE statement and reports the number of affected rows.
func (p *SqlConnPool) Delete(deleteStr string, args ...interface{}) (int64, error) {
	result, err := p.execute(deleteStr, args...)
	if err != nil {
		return 0, err
	}
	return result.RowsAffected()
}

// SqlConnTransaction wraps a single running transaction.
type SqlConnTransaction struct {
	SqlTx *sql.Tx // the underlying transaction connection
}

// Begin verifies the database is reachable and starts a transaction.
// BUG FIX: the original discarded the Ping error, handing callers a
// transaction whose SqlTx was nil together with a nil error — a guaranteed
// nil dereference on first use.
func (p *SqlConnPool) Begin() (*SqlConnTransaction, error) {
	if err := p.SqlDB.Ping(); err != nil {
		return nil, err
	}
	oneSqlConnTransaction := &SqlConnTransaction{}
	var err error
	oneSqlConnTransaction.SqlTx, err = p.SqlDB.Begin()
	return oneSqlConnTransaction, err
}

// Rollback aborts the wrapped transaction.
func (t *SqlConnTransaction) Rollback() error {
	err := t.SqlTx.Rollback()
	return err
}

// Commit finalizes the wrapped transaction.
func (t *SqlConnTransaction) Commit() error {
	err := t.SqlTx.Commit()
	return err
}

// Query runs a SELECT-style statement inside the transaction and maps every
// row into a map[columnName]value, mirroring SqlConnPool.Query.
func (t *SqlConnTransaction) Query(queryStr string, args ...interface{}) ([]map[string]interface{}, error) {
	rows, err := t.SqlTx.Query(queryStr, args...)
	if err != nil {
		return []map[string]interface{}{}, err
	}
	defer rows.Close()
	// Column metadata (name + MySQL type) for every selected field.
	columns, err := mysqlinternals.Columns(rows)
	// BUG FIX: this error was silently ignored; a failure here crashed below
	// when indexing columns.
	if err != nil {
		return []map[string]interface{}{}, err
	}
	// One RawBytes destination per column, scanned by reference.
	scanArgs := make([]interface{}, len(columns))
	values := make([]sql.RawBytes, len(columns))
	for i := range values {
		scanArgs[i] = &values[i]
	}
	rowsMap := make([]map[string]interface{}, 0, 10)
	for rows.Next() {
		// BUG FIX: the Scan error was silently ignored.
		if err = rows.Scan(scanArgs...); err != nil {
			return []map[string]interface{}{}, err
		}
		rowMap := make(map[string]interface{})
		for i, value := range values {
			rowMap[columns[i].Name()] = bytes2RealType(value, columns[i].MysqlType())
		}
		rowsMap = append(rowsMap, rowMap)
	}
	if err = rows.Err(); err != nil {
		return []map[string]interface{}{}, err
	}
	return rowsMap, nil
}

// execute runs any statement that does not return rows, inside the transaction.
func (t *SqlConnTransaction) execute(sqlStr string, args ...interface{}) (sql.Result, error) {
	return t.SqlTx.Exec(sqlStr, args...)
}

// Update runs an UPDATE statement and reports the number of affected rows.
func (t *SqlConnTransaction) Update(updateStr string, args ...interface{}) (int64, error) {
	result, err := t.execute(updateStr, args...)
	if err != nil {
		return 0, err
	}
	return result.RowsAffected()
}

// Insert runs an INSERT statement and reports the auto-generated id.
func (t *SqlConnTransaction) Insert(insertStr string, args ...interface{}) (int64, error) {
	result, err := t.execute(insertStr, args...)
	if err != nil {
		return 0, err
	}
	return result.LastInsertId()
}

// Delete runs a DELETE statement and reports the number of affected rows.
func (t *SqlConnTransaction) Delete(deleteStr string, args ...interface{}) (int64, error) {
	result, err := t.execute(deleteStr, args...)
	if err != nil {
		return 0, err
	}
	return result.RowsAffected()
}

// others
func bytes2RealType(src []byte, columnType string) interface{} {
	srcStr := string(src)
	var result interface{}
	switch columnType {
	case "TINYINT":
		fallthrough
	case "SMALLINT":
		fallthrough
	case "INT":
		fallthrough
	case "BIGINT":
		result, _ = strconv.ParseInt(srcStr, 10, 64)
	case "CHAR":
		fallthrough
	case "VARCHAR":
		fallthrough
	case "BLOB":
		fallthrough
	case "TIMESTAMP":
		fallthrough
	case "DATETIME":
		result = srcStr
	case "FLOAT":
		fallthrough
	case "DOUBLE":
		fallthrough
	case "DECIMAL":
		result, _ = strconv.ParseFloat(srcStr, 64)
	default:
		result = nil
	}
	return result
}

2 redis 连接池代码

package lib

import (
	"strconv"
	"time"

	"github.com/garyburd/redigo/redis"
)

// Connection settings for the Redis pool; fill these in before use.
var REDIS map[string]string = map[string]string{
	"host":         "127.0.0.1:6379",
	"database":     "0",
	"password":     "",
	"maxOpenConns": "0",
	"maxIdleConns": "0",
}

// Cache is the package-wide Redis pool singleton, ready after init().
var Cache *RedisConnPool

// RedisConnPool wraps a redigo connection pool.
type RedisConnPool struct {
	redisPool *redis.Pool
}

// init parses the REDIS settings and builds the pool; panics on failure so
// the program never starts with a broken cache handle.
func init() {
	Cache = &RedisConnPool{}
	maxOpenConns, _ := strconv.ParseInt(REDIS["maxOpenConns"], 10, 64)
	maxIdleConns, _ := strconv.ParseInt(REDIS["maxIdleConns"], 10, 64)
	database, _ := strconv.ParseInt(REDIS["database"], 10, 64)

	Cache.redisPool = newPool(REDIS["host"], REDIS["password"], int(database), int(maxOpenConns), int(maxIdleConns))
	if Cache.redisPool == nil {
		panic("init redis failed!")
	}
}

// newPool builds a redigo pool that dials server, optionally authenticates,
// and selects the requested logical database; borrowed connections are
// health-checked with PING.
func newPool(server, password string, database, maxOpenConns, maxIdleConns int) *redis.Pool {
	return &redis.Pool{
		MaxActive:   maxOpenConns, // max number of connections
		MaxIdle:     maxIdleConns,
		IdleTimeout: 10 * time.Second,
		Dial: func() (redis.Conn, error) {
			c, err := redis.Dial("tcp", server)
			if err != nil {
				return nil, err
			}
			// BUG FIX: only authenticate when a password is configured —
			// sending AUTH to a password-less Redis server returns an error,
			// so the original could never connect without a password.
			if password != "" {
				if _, err := c.Do("AUTH", password); err != nil {
					c.Close()
					return nil, err
				}
			}
			if _, err := c.Do("select", database); err != nil {
				c.Close()
				return nil, err
			}
			return c, err
		},
		TestOnBorrow: func(c redis.Conn, t time.Time) error {
			_, err := c.Do("PING")
			return err
		},
	}
}

// Close shuts down the pool and every connection in it.
func (p *RedisConnPool) Close() error {
	err := p.redisPool.Close()
	return err
}

// Do runs an arbitrary command against the currently selected database.
func (p *RedisConnPool) Do(command string, args ...interface{}) (interface{}, error) {
	conn := p.redisPool.Get()
	defer conn.Close()
	return conn.Do(command, args...)
}

//// String commands
func (p *RedisConnPool) SetString(key string, value interface{}) (interface{}, error) {
	conn := p.redisPool.Get()
	defer conn.Close()
	return conn.Do("SET", key, value)
}

func (p *RedisConnPool) GetString(key string) (string, error) {
	// borrow a connection from the pool
	conn := p.redisPool.Get()
	// Close does not really close here: it returns the connection to the
	// pool's queue so the next caller can reuse it
	defer conn.Close()
	return redis.String(conn.Do("GET", key))
}

func (p *RedisConnPool) GetBytes(key string) ([]byte, error) {
	conn := p.redisPool.Get()
	defer conn.Close()
	return redis.Bytes(conn.Do("GET", key))
}

func (p *RedisConnPool) GetInt(key string) (int, error) {
	conn := p.redisPool.Get()
	defer conn.Close()
	return redis.Int(conn.Do("GET", key))
}

func (p *RedisConnPool) GetInt64(key string) (int64, error) {
	conn := p.redisPool.Get()
	defer conn.Close()
	return redis.Int64(conn.Do("GET", key))
}

//// Key commands
func (p *RedisConnPool) DelKey(key string) (interface{}, error) {
	conn := p.redisPool.Get()
	defer conn.Close()
	return conn.Do("DEL", key)
}

func (p *RedisConnPool) ExpireKey(key string, seconds int64) (interface{}, error) {
	conn := p.redisPool.Get()
	defer conn.Close()
	return conn.Do("EXPIRE", key, seconds)
}

func (p *RedisConnPool) Keys(pattern string) ([]string, error) {
	conn := p.redisPool.Get()
	defer conn.Close()
	return redis.Strings(conn.Do("KEYS", pattern))
}

func (p *RedisConnPool) KeysByteSlices(pattern string) ([][]byte, error) {
	conn := p.redisPool.Get()
	defer conn.Close()
	return redis.ByteSlices(conn.Do("KEYS", pattern))
}

//// Hash commands
func (p *RedisConnPool) SetHashMap(key string, fieldValue map[string]interface{}) (interface{}, error) {
	conn := p.redisPool.Get()
	defer conn.Close()
	return conn.Do("HMSET", redis.Args{}.Add(key).AddFlat(fieldValue)...)
}

func (p *RedisConnPool) GetHashMapString(key string) (map[string]string, error) {
	conn := p.redisPool.Get()
	defer conn.Close()
	return redis.StringMap(conn.Do("HGETALL", key))
}

func (p *RedisConnPool) GetHashMapInt(key string) (map[string]int, error) {
	conn := p.redisPool.Get()
	defer conn.Close()
	return redis.IntMap(conn.Do("HGETALL", key))
}

func (p *RedisConnPool) GetHashMapInt64(key string) (map[string]int64, error) {
	conn := p.redisPool.Get()
	defer conn.Close()
	return redis.Int64Map(conn.Do("HGETALL", key))
}
]]>
python 的 mysql 连接池(基于 mysql 官方的 mysql-connector-python) 2017-06-13T00:00:00+00:00 stach.tan http://hopehook.com/2017/06/13/python_mysql_pool 背景

网上看了一圈,没有发现顺手的 python mysql 连接池,甚至很多都是错误的实现。于是自己写了一个,经过了生产环境的考验,特地分享出来。

也许你有更好的实现方式,而我却不知道,欢迎在评论区留言交流 :)

使用方法

1 安装 mysql-connector-python 库

2 实例化一个 Pool 全局单例“数据库连接池句柄”

3 通过“数据库连接池句柄”调用对应的方法

代码片段

from mysql import Pool

# 配置信息
db_conf = {
    "user": "",
    "password": "",
    "database": "",
    "host": "",
    "port": 3306,
    "time_zone": "+8:00",
    "buffered": True,
    "autocommit": True,
    "charset": "utf8mb4",
}

# 实例化一个 Pool 全局单例“数据库连接池句柄”
db = Pool(pool_reset_session=False, **db_conf)

# 通过连接池操作
rows = db.query("select * from table")

# 事务操作
transaction = db.begin()
try:
    transaction.insert("insert into table...")
except:
    transaction.rollback()
else:
    transaction.commit()
finally:
    transaction.close()

# 事务操作语法糖
with db.begin() as transaction:
    transaction.insert("insert into table...")

附件 mysqllib.py: 连接池实现源码

# -*- coding:utf-8 -*-
import logging
from mysql.connector.pooling import CNX_POOL_MAXSIZE
from mysql.connector.pooling import MySQLConnectionPool, PooledMySQLConnection
from mysql.connector import errors
import threading
CONNECTION_POOL_LOCK = threading.RLock()


class Pool(MySQLConnectionPool):
    """MySQL connection pool with on-demand growth and auto-reconnect.

    Extends mysql-connector-python's MySQLConnectionPool with:
      * connect():  grows the pool up to CNX_POOL_MAXSIZE when exhausted,
        otherwise blocks for a returned connection and revives it if dead.
      * convenience helpers (query/get/insert/insert_many/execute/update/
        delete) that borrow a connection, run one statement, and return it.
      * begin():  starts a transaction and hands back a Transaction wrapper.
    """

    def connect(self):
        """Return a PooledMySQLConnection, expanding the pool when needed.

        Raises errors.InterfaceError if a stale connection cannot be
        reconnected (the connection is put back into the queue first).
        """
        try:
            return self.get_connection()
        except errors.PoolError:
            # Pool exhausted. Grow it while below CNX_POOL_MAXSIZE, otherwise
            # block until another borrower returns a connection.
            if self.pool_size < CNX_POOL_MAXSIZE:
                # BUGFIX: the original used `with threading.Lock():`, which
                # creates a brand-new lock on every call and therefore
                # synchronizes nothing.  Use the shared module-level lock
                # (an RLock, so the recursive connect() below is safe).
                with CONNECTION_POOL_LOCK:
                    new_pool_size = self.pool_size + 1
                    try:
                        self._set_pool_size(new_pool_size)
                        self._cnx_queue.maxsize = new_pool_size
                        self.add_connection()
                    except Exception as e:
                        logging.exception(e)
                    return self.connect()
            else:
                with CONNECTION_POOL_LOCK:
                    cnx = self._cnx_queue.get(block=True)
                    # Revive connections that died or whose pool config is
                    # stale before handing them out again.
                    if not cnx.is_connected() or self._config_version != cnx._pool_config_version:
                        cnx.config(**self._cnx_config)
                        try:
                            cnx.reconnect()
                        except errors.InterfaceError:
                            # Failed to reconnect, give connection back to pool
                            self._queue_connection(cnx)
                            raise
                        cnx._pool_config_version = self._config_version
                    return PooledMySQLConnection(self, cnx)

    def query(self, operation, params=None):
        """Run a SELECT and return all rows as a list of dicts."""
        cnx = cursor = None
        try:
            cnx = self.connect()
            cursor = cnx.cursor(buffered=True, dictionary=True)
            cursor.execute(operation, params)
            data_set = cursor.fetchall()
        finally:
            # Always return the cursor and connection to the pool.
            if cursor:
                cursor.close()
            if cnx:
                cnx.close()
        return data_set

    def get(self, operation, params=None):
        """Run a SELECT and return the first row as a dict (or None)."""
        cnx = cursor = None
        try:
            cnx = self.connect()
            cursor = cnx.cursor(buffered=True, dictionary=True)
            cursor.execute(operation, params)
            data_set = cursor.fetchone()
        finally:
            if cursor:
                cursor.close()
            if cnx:
                cnx.close()
        return data_set

    def insert(self, operation, params=None):
        """Run an INSERT and return the last auto-increment id."""
        cnx = cursor = None
        try:
            cnx = self.connect()
            cursor = cnx.cursor()
            cursor.execute(operation, params)
            last_id = cursor.lastrowid
        finally:
            if cursor:
                cursor.close()
            if cnx:
                cnx.close()
        return last_id

    def insert_many(self, operation, seq_params):
        """Run one INSERT per element of seq_params; return the row count."""
        cnx = cursor = None
        try:
            cnx = self.connect()
            cursor = cnx.cursor()
            cursor.executemany(operation, seq_params)
            row_count = cursor.rowcount
        finally:
            if cursor:
                cursor.close()
            if cnx:
                cnx.close()
        return row_count

    def execute(self, operation, params=None):
        """Run an arbitrary statement and return the affected row count."""
        cnx = cursor = None
        try:
            cnx = self.connect()
            cursor = cnx.cursor()
            cursor.execute(operation, params)
            row_count = cursor.rowcount
        finally:
            if cursor:
                cursor.close()
            if cnx:
                cnx.close()
        return row_count

    def update(self, operation, params=None):
        """Alias of execute() for readability at call sites."""
        return self.execute(operation, params)

    def delete(self, operation, params=None):
        """Alias of execute() for readability at call sites."""
        return self.execute(operation, params)

    def begin(self, consistent_snapshot=False, isolation_level=None, readonly=None):
        """Start a transaction on a fresh connection; return a Transaction."""
        cnx = self.connect()
        cnx.start_transaction(consistent_snapshot, isolation_level, readonly)
        return Transaction(cnx)


class Transaction(object):
    """One transaction on a pooled connection.

    Usable as a context manager: a clean ``with`` body commits, an exception
    rolls back (and is re-raised); the cursor and connection are closed
    either way.
    """

    def __init__(self, connection):
        # Keep cnx defined even on the error path below.
        self.cnx = None
        if not isinstance(connection, PooledMySQLConnection):
            raise AttributeError("connection should be a PooledMySQLConnection")
        self.cnx = connection
        self.cursor = connection.cursor(buffered=True, dictionary=True)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None and exc_val is None and exc_tb is None:
            self.commit()
        else:
            # The with-body's exception propagates; caller must handle it.
            self.rollback()
        self.close()

    def query(self, operation, params=None):
        """Run a SELECT inside the transaction; return all rows."""
        self.cursor.execute(operation, params)
        return self.cursor.fetchall()

    def get(self, operation, params=None):
        """Run a SELECT inside the transaction; return the first row."""
        self.cursor.execute(operation, params)
        return self.cursor.fetchone()

    def insert(self, operation, params=None):
        """Run an INSERT; return the last auto-increment id."""
        self.cursor.execute(operation, params)
        return self.cursor.lastrowid

    def insert_many(self, operation, seq_params):
        """Run one INSERT per element of seq_params; return the row count."""
        self.cursor.executemany(operation, seq_params)
        return self.cursor.rowcount

    def execute(self, operation, params=None):
        """Run an arbitrary statement; return the affected row count."""
        self.cursor.execute(operation, params)
        return self.cursor.rowcount

    def update(self, operation, params=None):
        """Alias of execute() for readability at call sites."""
        return self.execute(operation, params)

    def delete(self, operation, params=None):
        """Alias of execute() for readability at call sites."""
        return self.execute(operation, params)

    def commit(self):
        """Commit the transaction."""
        self.cnx.commit()

    def rollback(self):
        """Roll the transaction back."""
        self.cnx.rollback()

    def close(self):
        """Release the cursor and return the connection to the pool."""
        self.cursor.close()
        self.cnx.close()

]]>
torndb 常用操作和两种事务方式 2017-02-19T00:00:00+00:00 stach.tan http://hopehook.com/2017/02/19/torndb # coding:utf8 import torndb # 建立连接 # 东8区,默认字符集UTF8,没必要在加上 charset = "utf8" 。 db = torndb.Connection('127.0.0.1:3306', 'database', 'user', 'password', time_zone='+8:00') # 查询 ## query: 得到多行记录,单行为字典 sql = '''SELECT * FROM sms_task WHERE id > %s''' rows = db.query(sql, 10) ## get: 得到单行记录,一个字典 sql = '''SELECT * FROM sms_task WHERE id = %s''' info = db.get(sql, 10) # 更新 sql = '''UPDATE sms_task SET `status` = %s WHERE id = %s''' affected_row_count = db.update(sql, 0, 10) # 插入 sql = '''INSERT INTO sms_task_phone (phone, uid) VALUES (%s, %s)''' args = [0, 0] last_id = db.insert(sql, *args) # 删除 sql = '''DELETE FROM sms_task WHERE id = %s''' affected_row_count = db.execute_rowcount(sql, 8) # 事务 ## begin 的方式使用事务 def transacion_begin(): try: db._db.begin() sql = ''' SELECT `status`, is_deleted FROM sms_task WHERE id = %s FOR UPDATE ''' info = db.get(sql, 10) if not info: return False sql = ''' UPDATE sms_task SET is_deleted = %s WHERE id = %s ''' db.update(sql, 1, 10) db._db.commit() except Exception, e: db._db.rollback() print str(e) return False return True transacion_begin() ## autocommit 的方式使用事务 def transacion_autocommit(): try: db._db.autocommit(False) sql = ''' SELECT `status`, is_deleted FROM sms_task WHERE id = %s FOR UPDATE ''' info = db.get(sql, 10) if not info: return False sql = ''' UPDATE sms_task SET is_deleted = 1 WHERE id = %s ''' db.update(sql, 10) db._db.commit() except Exception, e: db._db.rollback() print str(e) return False finally: db._db.autocommit(True) return True transacion_autocommit() ]]> apscheduler 定时任务器的使用(tornado 场景) 2017-02-19T00:00:00+00:00 stach.tan http://hopehook.com/2017/02/19/apscheduler 1.七种scheduler

BlockingScheduler: 当应用程序中只有调度器时使用

BackgroundScheduler: 不使用任何以下框架:asyncio, gevent, Tornado, Twisted, Qt, 并且需要在你的应用程序后台运行调度程序

AsyncIOScheduler: 应用程序使用asyncio模块时使用

GeventScheduler: 应用程序使用gevent模块时使用程序

TornadoScheduler: Tornado应用程序使用

TwistedScheduler: Twisted应用程序使用

QtScheduler: Qt应用程序使用

2.Tornado中使用的demo

sched = TornadoScheduler() 可以作为全局变量,在项目任何引用的地方操作调度器

sched.start() 是调度器真正开始执行的入口

sched.start() 之前 sched.add_job缓存的任务会开始调度,之后 sched.add_job的任务立即调度(达到执行时间的时候执行任务)

scheduler.shutdown(wait=True) 默认情况下调度器会等待所有正在运行的作业完成后,关闭所有的调度器和作业存储。如果你不想等待,可以将wait选项设置为False。

scheduler.add_job() 第二个参数是trigger,它管理着作业的调度方式。它可以为date, interval或者cron。对于不同的trigger,对应的参数也相同。

trigger:

1) interval 间隔调度

循环执行,间隔固定的时间

# Schedule job_function to be called every two hours
sched.add_job(job_function, 'interval', hours=2)

2) date 定时调度

只会执行一次,指定时间执行后就结束

# The job will be executed on November 6th, 2009
sched.add_job(my_job, 'date', run_date=date(2009, 11, 6), args=['text'])
# The job will be executed on November 6th, 2009 at 16:30:05
sched.add_job(my_job, 'date', run_date=datetime(2009, 11, 6, 16, 30, 5), args=['text'])

3) cron 定时调度

闹钟似执行,设定计划时间表执行

# Schedules job_function to be run on the third Friday
# of June, July, August, November and December at 00:00, 01:00, 02:00 and 03:00
sched.add_job(job_function, 'cron', month='6-8,11-12', day='3rd fri', hour='0-3')
# Runs from Monday to Friday at 5:30 (am) until 2014-05-30 00:00:00
sched.add_job(job_function, 'cron', day_of_week='mon-fri', hour=5, minute=30, end_date='2014-05-30')

apscheduler_demo.py

# -*- coding:utf8 -*-

import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
from apscheduler.schedulers.tornado import TornadoScheduler
sched = TornadoScheduler()

def my_job():
    # Demo job: prints a marker so scheduler firing is visible on stdout.
    print 1

def my_job2():
    # Demo job: prints a marker plus the scheduler's current job list.
    print 2
    print sched.get_jobs()

class MainHandler(tornado.web.RequestHandler):
    # Home page: simple liveness check for the demo server.
    def get(self):
        self.write("Hello, guys")


class PauseJobHandler(tornado.web.RequestHandler):
    # Pauses the job registered with id '1' (ids must be strings).
    def get(self):
        sched.pause_job('1')
        self.write("pause")

class ResumeJobHandler(tornado.web.RequestHandler):
    # Resumes the previously paused job with id '1'.
    def get(self):
        sched.resume_job('1')
        self.write("resume")

class AddJobHandler(tornado.web.RequestHandler):
    # Registers two interval jobs (every second) with string ids '1' and '2'.
    def get(self):
        sched.add_job(my_job, 'interval', seconds=1, id='1')
        sched.add_job(my_job2, 'interval', seconds=1, id='2')
        self.write("add job")

class RemoveJobHandler(tornado.web.RequestHandler):
    # Removes the job with id '2' from the scheduler.
    def get(self):
        sched.remove_job('2')
        self.write("remove job: %s" % 2)

class RemoveAllJobsHandler(tornado.web.RequestHandler):
    # Clears every registered job from the scheduler.
    def get(self):
        sched.remove_all_jobs()
        self.write("remove all jobs")


class StartHandler(tornado.web.RequestHandler):
    # Starts the scheduler; jobs added before or after now begin firing.
    def get(self):
        sched.start()
        result = sched.running
        self.write("start scheduler: %s" % result)

class ShutdownSchedHandler(tornado.web.RequestHandler):
    # Shuts the scheduler down (waits for running jobs by default).
    def get(self):
        sched.shutdown()
        self.write("shutdown scheduler")

class PauseSchedHandler(tornado.web.RequestHandler):
    # Pauses all job processing without shutting the scheduler down.
    def get(self):
        sched.pause()
        self.write("pause scheduler")

class ResumeSchedHandler(tornado.web.RequestHandler):
    # Resumes job processing after a pause.
    def get(self):
        sched.resume()
        self.write("resume scheduler")


def main():
    # Parse command-line options (e.g. --port), wire every demo handler to
    # its URL, and run the Tornado IOLoop until interrupted.
    tornado.options.parse_command_line()
    application = tornado.web.Application([
        # home
        (r"/", MainHandler),
        # scheduler
        (r"/start_sched", StartHandler),
        (r"/shutdown_sched", ShutdownSchedHandler),
        (r"/pause_sched", PauseSchedHandler),
        (r"/resume_sched", ResumeSchedHandler),
        # job
        (r"/add_job", AddJobHandler),
        (r"/pause_job", PauseJobHandler),
        (r"/resume_job", ResumeJobHandler),
        (r"/remove_job", RemoveJobHandler),
        (r"/remove_all_jobs", RemoveAllJobsHandler),
    ])
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()

if __name__ == "__main__":
    main()

小坑

浏览器地址栏输入高频使用的url,没有等用户输入全部链接,也没等用户enter键确认,浏览器就会自动请求一次

任务id必须是字符串类型

其他种类的 scheduler 和 tornado版 使用方法不相同,需要区别对待

]]>
websocket 协议学习 2017-01-08T00:00:00+00:00 stach.tan http://hopehook.com/2017/01/08/websocket 一 websocket 报文格式

websocket 报文格式

二 实验代码

  • 客户端 client.html

    
        
    
    
    
        
        
        
    
    

  • 服务端 websocket_server.go
package main

import (
	"crypto/sha1"
	"encoding/base64"
	"encoding/binary"
	"io"
	"log"
	"net"
	"strings"
	"time"
)

// WsSocket wraps the raw TCP connection a WebSocket session runs over.
type WsSocket struct {
	Conn net.Conn
}

// Frame types (OPCODE). RFC 6455, section 11.8.
const (
	FRAME_CONTINUE = 0  // continuation frame
	FRAME_TEXT     = 1  // text frame
	FRAME_BINARY   = 2  // binary frame
	FRAME_CLOSE    = 8  // close frame
	FRAME_PING     = 9  // ping frame
	FRAME_PONG     = 10 // pong frame
)

func init() {
	// Configure log output: timestamp plus short file name and line number.
	log.SetFlags(log.Lshortfile | log.LstdFlags)
}

// main listens on :8000 and spawns one goroutine per accepted connection.
func main() {
	ln, err := net.Listen("tcp", ":8000")
	// BUGFIX: check the error BEFORE deferring Close — the original deferred
	// ln.Close() first, so a Listen failure panicked again on a nil listener.
	if err != nil {
		log.Panic(err)
	}
	defer ln.Close()

	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Println("accept err:", err)
			// BUGFIX: skip this iteration — the original fell through and
			// handed a nil conn to handleConnection.
			continue
		}
		go handleConnection(conn)
	}
}

// handleConnection performs the WebSocket opening handshake over the raw TCP
// connection, then loops forever pinging the client every 5 seconds and
// reading the reply frame.
// NOTE(review): the handshake request is read with a single Read into a
// 1024-byte buffer — assumes the whole HTTP upgrade request arrives in one
// segment and fits in 1024 bytes; confirm for production use.
func handleConnection(conn net.Conn) {
	// http request open websocket
	content := make([]byte, 1024)
	conn.Read(content)
	log.Printf("http request:\n%s\n", string(content))
	headers := parseHttpHeader(string(content))
	secWebsocketKey := headers["Sec-WebSocket-Key"]

	// http response open websocket
	response := "HTTP/1.1 101 Switching Protocols\r\n"
	response += "Sec-WebSocket-Accept: " + computeAcceptKey(secWebsocketKey) + "\r\n"
	response += "Connection: Upgrade\r\n"
	response += "Upgrade: websocket\r\n\r\n"
	log.Printf("http response:\n%s\n", response)
	if lenth, err := conn.Write([]byte(response)); err != nil {
		log.Println(err)
	} else {
		log.Println("send http response len:", lenth)
	}

	// websocket established
	wssocket := &WsSocket{Conn: conn}

	// begin test case
	for {
		time.Sleep(5 * time.Second)
		// frame ping
		wssocket.SendIframe(FRAME_PING, []byte("hello"))
		// frame read — the browser answers with a pong carrying the same payload
		log.Printf("server read data from client:\n%s\n", string(wssocket.ReadIframe()))
	}
	// end test case
}

// SendIframe writes one unfragmented frame to the client (server-side only,
// so no mask is applied; RFC 6455 forbids server-to-client masking).
// OPCODE selects the frame type (FRAME_TEXT, FRAME_PING, FRAME_CLOSE, ...).
func (this *WsSocket) SendIframe(OPCODE int, frameData []byte) {
	dataLen := len(frameData)
	var n int
	var err error

	// First byte b1: FIN=1 (high bit, unfragmented) | OPCODE (low 4 bits).
	b1 := 0x80 | byte(OPCODE)
	n, err = this.Conn.Write([]byte{b1})
	if err != nil {
		log.Printf("Conn.Write() error,length:%d;error:%s\n", n, err)
		if err == io.EOF {
			log.Println("客户端已经断开WsSocket!")
		} else if err.(*net.OpError).Err.Error() == "use of closed network connection" {
			log.Println("服务端已经断开WsSocket!")
		}
	}

	// Second byte: MASK=0 plus the 7-bit payload-length field
	// (126/127 signal that an extended length follows).
	var b2 byte
	var payloadLen int
	switch {
	case dataLen <= 125:
		b2 = byte(dataLen)
		payloadLen = dataLen
	case 126 <= dataLen && dataLen <= 65535:
		b2 = byte(126)
		payloadLen = 126
	case dataLen > 65535:
		b2 = byte(127)
		payloadLen = 127
	}
	this.Conn.Write([]byte{b2})

	// When the 7-bit field is too small, write the extended payload length.
	switch payloadLen {
	case 126:
		exPayloadLenByte := make([]byte, 2)
		exPayloadLenByte[0] = byte(dataLen >> 8) // high 8 bits
		exPayloadLenByte[1] = byte(dataLen)      // low 8 bits
		this.Conn.Write(exPayloadLenByte)        // 2 extra length bytes; all bits usable
	case 127:
		exPayloadLenByte := make([]byte, 8)
		exPayloadLenByte[0] = byte(dataLen >> 56) // byte 1
		exPayloadLenByte[1] = byte(dataLen >> 48) // byte 2
		exPayloadLenByte[2] = byte(dataLen >> 40) // byte 3
		exPayloadLenByte[3] = byte(dataLen >> 32) // byte 4
		exPayloadLenByte[4] = byte(dataLen >> 24) // byte 5
		exPayloadLenByte[5] = byte(dataLen >> 16) // byte 6
		exPayloadLenByte[6] = byte(dataLen >> 8)  // byte 7
		exPayloadLenByte[7] = byte(dataLen)       // byte 8
		this.Conn.Write(exPayloadLenByte)         // 8 extra length bytes; most significant bit must be 0
	}
	this.Conn.Write(frameData) // no mask: payload follows the length fields directly
	log.Printf("real payloadLen=%d:该数据帧的真实负载数据长度(bytes).\n", dataLen)
	log.Println("MASK=0:没有掩码.")
	log.Printf("server send data to client:\n%s\n", string(frameData))

}

// ReadIframe reads one frame from the client and, for fragmented messages,
// recurses until the final frame (FIN=1) so the caller receives the fully
// reassembled payload. A close frame is answered with a close frame and the
// connection is shut down (nil is returned).
func (this *WsSocket) ReadIframe() (frameData []byte) {
	// First header byte: FIN (bit 7) + OPCODE (low 4 bits).
	// BUGFIX: use io.ReadFull everywhere instead of bare Conn.Read — TCP may
	// deliver a frame in several segments, and the original kept parsing
	// even after a failed read.
	b1 := make([]byte, 1)
	if _, err := io.ReadFull(this.Conn, b1); err != nil {
		log.Printf("read frame byte 1 error: %s\n", err)
		if err == io.EOF {
			log.Println("客户端已经断开WsSocket!")
		}
		return
	}
	FIN := b1[0] >> 7
	OPCODE := b1[0] & 0x0F
	if OPCODE == FRAME_CLOSE {
		log.Println("OPCODE=8:连接关闭帧.")
		this.SendIframe(FRAME_CLOSE, formatCloseMessage(1000, "因为收到客户端的主动关闭请求,所以响应."))
		this.Conn.Close()
		return
	}

	// Second header byte: MASK (bit 7) + 7-bit payload length.
	// Client-to-server frames must be masked (MASK=1).
	b2 := make([]byte, 1)
	if _, err := io.ReadFull(this.Conn, b2); err != nil {
		log.Printf("read frame byte 2 error: %s\n", err)
		return
	}
	payloadLen := int64(b2[0] & 0x7F)
	MASK := b2[0] >> 7
	log.Printf("second byte:MASK=%d, raw payloadLen=%d\n", MASK, payloadLen)

	// Extended payload length: 126 → 2 extra bytes, 127 → 8 extra bytes.
	dataLen := payloadLen
	switch payloadLen {
	case 126:
		exPayloadLenByte := make([]byte, 2)
		if _, err := io.ReadFull(this.Conn, exPayloadLenByte); err != nil {
			log.Printf("read extended length error: %s\n", err)
			return
		}
		dataLen = int64(binary.BigEndian.Uint16(exPayloadLenByte))
	case 127:
		exPayloadLenByte := make([]byte, 8)
		if _, err := io.ReadFull(this.Conn, exPayloadLenByte); err != nil {
			log.Printf("read extended length error: %s\n", err)
			return
		}
		dataLen = int64(binary.BigEndian.Uint64(exPayloadLenByte))
	}
	log.Printf("real payloadLen=%d:该数据帧的真实负载数据长度(bytes).\n", dataLen)

	// Masking key: 4 bytes, present only when MASK=1.
	maskingByte := make([]byte, 4)
	if MASK == 1 {
		if _, err := io.ReadFull(this.Conn, maskingByte); err != nil {
			log.Printf("read masking key error: %s\n", err)
			return
		}
		log.Println("MASK=1:负载数据采用了掩码.")
	} else {
		log.Println("MASK=0:没有掩码.")
	}

	// Empty frame: nothing to read, and this also stops runaway recursion.
	if dataLen <= 0 {
		return
	}

	// Payload; unmask with key[i mod 4] when masked.
	payloadDataByte := make([]byte, dataLen)
	if _, err := io.ReadFull(this.Conn, payloadDataByte); err != nil {
		log.Printf("read payload error: %s\n", err)
		return
	}
	dataByte := make([]byte, dataLen)
	for i := int64(0); i < dataLen; i++ {
		if MASK == 1 {
			dataByte[i] = payloadDataByte[i] ^ maskingByte[i%4]
		} else {
			dataByte[i] = payloadDataByte[i]
		}
	}

	// Final (or only) frame: recursion ends here.
	if FIN == 1 {
		return dataByte
	}
	// Continuation expected: read the remaining fragments and append them.
	// BUGFIX: the original did append(frameData, nextData...) where frameData
	// was the nil named return — every non-final fragment's payload was lost.
	nextData := this.ReadIframe()
	return append(dataByte, nextData...)
}

// computeAcceptKey derives the Sec-WebSocket-Accept handshake value:
// base64(SHA-1(client key + fixed RFC 6455 GUID)).
func computeAcceptKey(secWebsocketKey string) string {
	const keyGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
	digest := sha1.Sum([]byte(secWebsocketKey + keyGUID))
	return base64.StdEncoding.EncodeToString(digest[:])
}

// parseHttpHeader parses the "Key: Value" lines of a raw HTTP request into a
// map of trimmed header names to trimmed values. Non-header lines (request
// line, blank lines) are skipped.
// BUGFIX: split on the FIRST ':' only (strings.SplitN) — the original split
// on every colon and required exactly 2 parts, silently dropping headers
// whose value contains a colon, e.g. "Host: 127.0.0.1:8000". It also tested
// len(line) >= 0, which is always true.
func parseHttpHeader(content string) map[string]string {
	headers := make(map[string]string, 10)
	for _, line := range strings.Split(content, "\r\n") {
		words := strings.SplitN(line, ":", 2)
		if len(words) == 2 {
			headers[strings.Trim(words[0], " ")] = strings.Trim(words[1], " ")
		}
	}
	return headers
}

// printBinary logs each byte of data bit by bit, most significant bit first,
// emitting a newline after the last bit of every byte.
func printBinary(data []byte) {
	for _, octet := range data {
		var bit uint
		for bit = 7; bit > 0; bit-- {
			log.Printf("%d", (octet>>bit)&0x01)
		}
		log.Printf("%d\n", (octet>>bit)&0x01)
	}
}

// formatCloseMessage builds a close-frame payload: a 2-byte big-endian status
// code followed by the UTF-8 reason text.
func formatCloseMessage(closeCode int, text string) []byte {
	payload := make([]byte, 2, 2+len(text))
	binary.BigEndian.PutUint16(payload, uint16(closeCode))
	return append(payload, text...)
}

说明:

// 可以通过Hijacker拿到http连接下的tcp连接
// Hijack()之后该连接完全由自己接管
conn, _, err := w.(http.Hijacker).Hijack()
  • 最新代码: 点我查看代码,仅供学习

    • websocket_server.go: websocket 基于 tcp socket 的粗糙实现, 只提供 websocket 服务
    • websocket_http_server.go: 把该实现移植到了 http socket 环境(也可以是某个 golang web 框架), 实现了 websocket http 利用同一个端口,同时对> 外服务。原理:

三 websocket协议阅读要点记录

1.客户端必须掩码(mask)它发送到服务器的所有帧(更多详细信息请参见 5.3 节)。

2.当收到一个没有掩码的帧时,服务器必须关闭连接。在这种情况下,服务器可能发送一个定义在 7.4.1 节的状态码 1002(协议错误)的 Close 帧。

3.服务器必须不掩码发送到客户端的所有帧。如果客户端检测到掩码的帧,它必须关闭连接。在这种情况下,它可能使用定义在 7.4.1节的状态码 1002(协议错误)。

4.一个没有分片的消息由单个带有 FIN 位设置(5.2 节)和一个非 0 操作码的帧组成。

5.一个分片的消息由单个带有 FIN 位清零(5.2 节)和一个非 0 操作码的帧组成,跟随零个或多个带有 FIN 位清零和操作码设置为 0 的帧,且终止于一个带有 FIN 位设置且 0 操作码的帧。一个分片的消息概念上是等价于单个大的消息,其负载是等价于按顺序串联片段的负载

6.控制帧(参见 5.5 节)可能被注入到一个分片消息的中间。控制帧本身必须不被分割。

7.消息分片必须按发送者发送顺序交付给收件人。

8.一个消息的所有分片是相同类型,以第一个片段的操作码设置。

9.关闭帧可以包含内容体(“帧的“应用数据”部分)指示一个关闭的原因,例如端点关闭了、端点收到的帧太大、或端点收到的帧不符合端点期望的格式。如果有内容体,内容体的头两个字节必须是 2 字节的无符号整数(按网络字节顺序)代表一个在 7.4 节的/code/值定义的状态码。跟着 2 字节的整数,内容体可以包含 UTF-8 编码的/reason/值,本规范没有定义它的解释。数据不必是人类可读的但可能对调试或传递打开连接的脚本相关的信息是有用的。由于数据不保证人类可读,客户端必须不把它显示给最终用户。

10.在应用发送关闭帧之后,必须不发送任何更多的数据帧。

11.发送并接收一个关闭消息后,一个端点认为 WebSocket 连接关闭了且必须关闭底层的 TCP 连接。服务器必须立即关闭底层 TCP 连接,客户端应该等待服务器关闭连接但可能在发送和接收一个关闭消息之后的任何时候关闭连接,例如,如果它没有在一个合理的时间周期内接收到服务器的 TCP 关闭。

12.一个端点可以在连接建立之后并在连接关闭之前的任何时候发送一个 Ping 帧。注意:一个 Ping 即可以充当一个 keepalive,也可以作为验证远程端点仍可响应

四 小经验

  • 浏览器目前没有提供js接口发送ping帧,浏览器可能单向的发送pong帧(可以利用文本帧当作ping帧来使用)

  • 服务端给浏览器发送ping帧,浏览器会尽快响应同样负载数据的pong帧

  • 浏览器发送的websocket负载数据太大的时候会分片

  • 不管是浏览器,还是服务器,收到close帧都回复同样内容的close帧,然后做后续的操作

五 连接断开情况分析

  • server: 简称 s
  • browser: 简称 b

情况 1

动作:b 发 s 连接关闭帧, s 无操作

现象:

  • b 过很久之后触发了 onclose
  • s 写入: *net.OpError: write tcp 127.0.0.1:8000->127.0.0.1:34508: write: broken pipe
  • s 读取: *errors.errorString: EOF

情况 2

动作:b 发 s 连接关闭帧,s 回应连接关闭帧

现象:

  • b 马上触发了 onclose
  • s 写入: *net.OpError: write tcp 127.0.0.1:8000->127.0.0.1:34482: write: broken pipe
  • s 读取: *errors.errorString: EOF

情况 3

动作:b 发 s 连接关闭帧,s 回应连接关闭帧,s 关闭 tcp socket

现象:

  • b 马上触发了 onclose
  • s 写入: *net.OpError: write tcp 127.0.0.1:8000->127.0.0.1:34502: use of closed network connection
  • s 读取: *net.OpError: read tcp 127.0.0.1:8000->127.0.0.1:34502: use of closed network connection

情况 4

动作:s 发 b 连接关闭帧,b 无操作

现象:

  • b 马上回应相同数据的关闭帧, 接着触发 onclose
  • s 写入: *net.OpError: write tcp 127.0.0.1:8000->127.0.0.1:34482: write: broken pipe
  • s 读取: *errors.errorString: EOF

情况 5

动作:s 发 b 连接关闭帧,s 关闭 tcp socket

现象:

  • b 马上触发了onclose
  • s 写入: *net.OpError: write tcp 127.0.0.1:8000->127.0.0.1:34542: use of closed network connection
  • s 读取: *net.OpError: tcp 127.0.0.1:8000->127.0.0.1:34542: use of closed network connection
]]>