Make sure the connection pool size you pass to set_keepalive is configured properly. Each NGINX worker maintains its own connection pool, so if your NGINX handles n concurrent requests and runs m workers, the pool size should be about n/m. For example, if your NGINX usually handles 1000 concurrent requests and you have 10 NGINX workers, the connection pool size should be 100.
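A minimal sketch of that pattern with lua-resty-redis (the host, port, and timeout values are illustrative; 100 is the n/m result from the example above):

local redis = require "resty.redis"

local red = redis:new()
red:set_timeout(1000)  -- 1s timeout for connect/read/write

local ok, err = red:connect("127.0.0.1", 6379)
if not ok then
    ngx.log(ngx.ERR, "failed to connect: ", err)
    return
end

-- ... run Redis commands here ...

-- Return the connection to this worker's pool instead of closing it:
-- keep idle connections up to 10s, at most 100 per worker
-- (1000 concurrent requests / 10 workers = 100).
local ok, err = red:set_keepalive(10000, 100)
if not ok then
    ngx.log(ngx.ERR, "failed to set keepalive: ", err)
end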
Load balancing and sharding are not supported out of the box, but you can implement that logic yourself.
It is straightforward to implement your own Redis load-balancing logic in Lua. Keep a Lua table of all available Redis backends (host names and port numbers) and pick one server per request according to some rule, such as round-robin or key-based hashing. The current rule state can live in your own Lua module's data; see http://wiki.nginx.org/HttpLuaModule#Data_Sharing_within_an_Nginx_Worker
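For example, here is a minimal round-robin sketch that keeps its state in module-level variables, which are shared by all requests within a single NGINX worker (the module name and backend list are hypothetical):

-- redis_balancer.lua (hypothetical module)
local _M = {}

-- Module-level state: shared across all requests within one worker,
-- but each worker process has its own independent copy.
local backends = {
    { host = "127.0.0.1", port = 6379 },
    { host = "127.0.0.1", port = 6380 },
}
local counter = 0

-- Round-robin: advance the counter and return the next backend.
function _M.pick()
    counter = counter + 1
    local b = backends[(counter % #backends) + 1]
    return b.host, b.port
end

-- Key-based hashing variant: map a key (e.g. a user id) to a
-- stable backend using ngx.crc32_short.
function _M.pick_by_key(key)
    local b = backends[(ngx.crc32_short(key) % #backends) + 1]
    return b.host, b.port
end

return _M

A request handler would then call local host, port = require("redis_balancer").pick() before redis:connect(host, port).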
local redisLib = require "resty.redis"
local cookieLib = require "resty.cookie"
local json = require "json"

local redis, err = redisLib:new()
if not redis then
    ngx.log(ngx.ERR, err)
    return ngx.exec("@defaultProxy")
end

local cookie, err = cookieLib:new()
if not cookie then
    ngx.log(ngx.ERR, err)
    return ngx.exec("@defaultProxy")
end

-- Set the cookie (for simulated testing only):
--[[
local ok, err = cookie:set({
    key = "uid",
    value = "100",
})
if not ok then
    ngx.log(ngx.ERR, err)
    return ngx.exec("@defaultProxy")
end
]]

-- Read the user's session id from the cookie.
local uid, err = cookie:get("uid")
if not uid then
    ngx.log(ngx.ERR, err)
    return ngx.exec("@defaultProxy")
end

redis:set_timeout(1000)

local ok, err = redis:connect("127.0.0.1", 6379)
if not ok then
    ngx.log(ngx.ERR, "failed to connect: ", err)
    return ngx.exec("@defaultProxy")
end

-- Fetch the user's attributes by session id.
-- Alternatively, the backend application can set the attributes directly
-- in the cookie and we parse them here, saving one Redis call.
-- e.g. {'tag1':'2','tag2':'1','tag3':'0'}
local tags, err = redis:get(uid)
if not tags then
    ngx.log(ngx.ERR, "failed to get uid: ", err)
    return ngx.exec("@defaultProxy")
end
if tags == ngx.null then
    ngx.log(ngx.ERR, "uid not found.")
    return ngx.exec("@defaultProxy")
end

-- Fetch the routing rule configuration; this deserves some caching strategy.
-- e.g. {'tag':'tag1','proxy':{'0':'@proxy_a','1':'@proxy_a','2':'@proxy_b'}}
local proxyConfig, err = redis:get("proxyConfig")
if not proxyConfig then
    ngx.log(ngx.ERR, "failed to get proxyConfig: ", err)
    return ngx.exec("@defaultProxy")
end
if proxyConfig == ngx.null then
    ngx.log(ngx.ERR, "proxyConfig not found.")
    return ngx.exec("@defaultProxy")
end

-- Put the connection into the connection pool of size 100,
-- with a 10 second max idle time.
local ok, err = redis:set_keepalive(10000, 100)
if not ok then
    ngx.log(ngx.ERR, "failed to set keepalive: ", err)
end

local proxyConfigData = json.decode(proxyConfig)
local tagsData = json.decode(tags)
local tag = proxyConfigData.tag

-- Rule matching: compare the tag configured in the rule against the user's
-- tags, and route the request to the matching server. Keying the rules by
-- user tag allows fairly flexible routing configuration; this can be
-- simplified if the business logic is simple.
local proxy = "@defaultProxy"
for k_tag, v_tag in pairs(tagsData) do
    if k_tag == tag then
        for k_proxy, v_proxy in pairs(proxyConfigData.proxy) do
            if v_tag == k_proxy then
                proxy = v_proxy
                break
            end
        end
    end
end

return ngx.exec(proxy)
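For context, the nginx.conf side that a script like this assumes might look roughly as follows (the upstream addresses and Lua file path are illustrative): the script runs in a content_by_lua_file handler, and @defaultProxy, @proxy_a, and @proxy_b are named locations it can ngx.exec into.

# illustrative nginx.conf sketch
http {
    upstream backend_default { server 10.0.0.1:8080; }  # illustrative
    upstream backend_a       { server 10.0.0.2:8080; }  # illustrative
    upstream backend_b       { server 10.0.0.3:8080; }  # illustrative

    server {
        listen 80;

        location / {
            content_by_lua_file conf/ab_router.lua;  # the script above
        }

        location @defaultProxy { proxy_pass http://backend_default; }
        location @proxy_a      { proxy_pass http://backend_a; }
        location @proxy_b      { proxy_pass http://backend_b; }
    }
}

Note that ngx.exec targets must be internal URIs or named locations, which is why the proxy values stored in Redis carry the leading @.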