GitHub代码地址:https://github.com/GuoZhaoran/spikeSystem
1
大型高并发系统架构
# Load balancing configuration: weighted round-robin across four local
# backends. Weights 1:2:3:4 give ports 3001-3004 10%/20%/30%/40% of traffic.
upstream load_rule {
    server 127.0.0.1:3001 weight=1;
    server 127.0.0.1:3002 weight=2;
    server 127.0.0.1:3003 weight=3;
    server 127.0.0.1:3004 weight=4;
}
...
server {
    listen 80;
    server_name load_balance.com www.load_balance.com;
    location / {
        # Forward every request to the weighted upstream pool above.
        proxy_pass http://load_rule;
    }
}
package main

import (
	"log"
	"net/http"
	"os"
)

// main registers the ticket-buying handler and serves on port 3001, one
// instance of the load-balanced backend pool (3001-3004).
func main() {
	http.HandleFunc("/buy/ticket", handleReq)
	// ListenAndServe blocks; it only returns on failure. The original
	// discarded that error.
	if err := http.ListenAndServe(":3001", nil); err != nil {
		log.Fatal(err)
	}
}

// handleReq handles one buy request by appending a marker line to the local
// stat log, so per-port request counts can be tallied after a benchmark run.
func handleReq(w http.ResponseWriter, r *http.Request) {
	failedMsg := "handle in port:"
	writeLog(failedMsg, "./stat.log")
}

// writeLog appends msg followed by this instance's port number ("3001") and
// a CRLF terminator to the file at logPath, creating the file if needed.
func writeLog(msg string, logPath string) {
	fd, err := os.OpenFile(logPath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		// The original ignored this error and silently dropped the line.
		log.Printf("open log %s: %v", logPath, err)
		return
	}
	defer fd.Close()
	// The original spliced the port in via strings.Join(..., "3001") — the
	// port as the Join *separator* — which is needlessly obscure. Plain
	// concatenation produces the identical output.
	content := msg + "3001" + "\r\n"
	if _, err := fd.Write([]byte(content)); err != nil {
		log.Printf("write log %s: %v", logPath, err)
	}
}
ab -n 1000 -c 100 http://www.load_balance.com/buy/ticket
统计日志中的结果,3001-3004端口分别获得了100、200、300、400的请求量,这和我在nginx中配置的权重占比很好的吻合在了一块儿,而且负载后的流量很是的均匀、随机。
具体的实现你们能够参考nginx的upsteam模块实现源码,这里推荐一篇文章:nginx
https://www.kancloud.cn/digest/understandingnginx/202607
2
秒杀抢购系统选型
从上面的介绍咱们知道用户秒杀流量经过层层的负载均衡,均匀到了不一样的服务器上,即便如此,集群中的单机所承受的QPS也是很是高的。如何将单机性能优化到极致呢?要解决这个问题,咱们就要想明白一件事:
一般订票系统要处理生成订单、减扣库存、用户支付这三个基本的阶段,咱们系统要作的事情是要保证火车票订单不超卖、很多卖,每张售卖的车票都必须支付才有效,还要保证系统承受极高的并发。
这三个阶段的前后顺序改怎么分配才更加合理呢?咱们来分析一下:
当用户并发请求到达服务端时,首先建立订单,而后扣除库存,等待用户支付。这种顺序是咱们通常人首先会想到的解决方案,这种状况下也能保证订单不会超卖,由于建立订单以后就会减库存,这是一个原子操做。
可是这样也会产生一些问题。
第一就是在极限并发状况下,任何一个内存操做的细节都相当影响性能,尤为像建立订单这种逻辑,通常都须要存储到磁盘数据库的,对数据库的压力是可想而知的;
第二是若是用户存在恶意下单的状况,只下单不支付这样库存就会变少,会少卖不少订单,虽然服务端能够限制IP和用户的购买订单数量,这也不算是一个好方法。
若是等待用户支付了订单在减库存,第一感受就是不会少卖。可是这是并发架构的大忌,由于在极限并发状况下,用户可能会建立不少订单,当库存减为零的时候不少用户发现抢到的订单支付不了了,这也就是所谓的“超卖”。也不能避免并发操做数据库磁盘IO
3
扣库存的艺术
在单机低并发状况下,咱们实现扣库存一般是这样的:
为了保证扣库存和生成订单的原子性,须要采用事务处理,而后取库存判断、减库存,最后提交事务,整个流程有不少IO,对数据库的操做又是阻塞的。这种方式根本不适合高并发的秒杀系统。
接下来咱们对单机扣库存的方案作优化:本地扣库存。咱们把必定的库存量分配到本地机器,直接在内存中减库存,而后按照以前的逻辑异步建立订单。改进过以后的单机系统是这样的:
4
代码演示
...
// localSpike package: per-machine stock bookkeeping.
package localSpike

// LocalSpike tracks the ticket allocation assigned to this machine and how
// many tickets have been sold locally so far.
type LocalSpike struct {
	LocalInStock     int64 // stock allocated to this instance
	LocalSalesVolume int64 // tickets sold so far on this instance
}
...
// remoteSpike package: redis hash-key layout and the redis connection pool.
package remoteSpike

// RemoteSpikeKeys names the redis hash and its fields that hold the shared,
// authoritative ticket inventory used by all instances.
type RemoteSpikeKeys struct {
	SpikeOrderHashKey  string // key of the spike-order hash structure in redis
	TotalInventoryKey  string // hash field holding the total ticket inventory
	QuantityOfOrderKey string // hash field holding the number of orders placed
}

// NewPool initializes the redis connection pool.
func NewPool() *redis.Pool {
	return &redis.Pool{
		MaxIdle:   10000,
		MaxActive: 12000, // max number of connections
		Dial: func() (redis.Conn, error) {
			c, err := redis.Dial("tcp", ":6379")
			if err != nil {
				panic(err.Error())
			}
			return c, err
		},
	}
}
...
// init seeds this instance with 150 local tickets, points the remote keys at
// the shared redis hash, builds the connection pool, and primes the one-slot
// `done` channel that the request handler uses as a mutex.
func init() {
	localSpike = localSpike2.LocalSpike{
		LocalInStock:     150,
		LocalSalesVolume: 0,
	}
	remoteSpike = remoteSpike2.RemoteSpikeKeys{
		SpikeOrderHashKey:  "ticket_hash_key",
		TotalInventoryKey:  "ticket_total_nums",
		QuantityOfOrderKey: "ticket_sold_nums",
	}
	redisPool = remoteSpike2.NewPool()
	done = make(chan int, 1)
	done <- 1
}
package localSpike //本地扣库存,返回bool值 func (spike *LocalSpike) LocalDeductionStock() bool{ spike.LocalSalesVolume = spike.LocalSalesVolume + 1 return spike.LocalSalesVolume < spike.LocalInStock }
package remoteSpike ...... const LuaScript = ` local ticket_key = KEYS[1] local ticket_total_key = ARGV[1] local ticket_sold_key = ARGV[2] local ticket_total_nums = tonumber(redis.call('HGET', ticket_key, ticket_total_key)) local ticket_sold_nums = tonumber(redis.call('HGET', ticket_key, ticket_sold_key)) -- 查看是否还有余票,增长订单数量,返回结果值 if(ticket_total_nums >= ticket_sold_nums) then return redis.call('HINCRBY', ticket_key, ticket_sold_key, 1) end return 0 ` //远端统一扣库存 func (RemoteSpikeKeys *RemoteSpikeKeys) RemoteDeductionStock(conn redis.Conn) bool { lua := redis.NewScript(1, LuaScript) result, err := redis.Int(lua.Do(conn, RemoteSpikeKeys.SpikeOrderHashKey, RemoteSpikeKeys.TotalInventoryKey, RemoteSpikeKeys.QuantityOfOrderKey)) if err != nil { return false } return result != 0 }
hmset ticket_hash_key "ticket_total_nums" 10000 "ticket_sold_nums" 0
package main

...

// main wires the spike handler and serves on port 3005.
func main() {
	http.HandleFunc("/buy/ticket", handleReq)
	// ListenAndServe only returns on failure; the original discarded the
	// error, so a bind failure would exit silently with status 0.
	if err := http.ListenAndServe(":3005", nil); err != nil {
		panic(err)
	}
}
package main

// handleReq processes one buy request: holding the one-slot `done` channel
// as a global lock, it first deducts local stock and — only if that
// succeeds — the shared redis stock, answers the client with a JSON result,
// and appends the outcome to the stat log.
//
// NOTE(review): if the local deduction succeeds but the remote one fails,
// the local counter stays incremented — local and remote stock can drift.
// Preserved as-is here; fixing it would change the accounting behavior.
func handleReq(w http.ResponseWriter, r *http.Request) {
	redisConn := redisPool.Get()
	// Return the connection to the pool; the original never closed it and
	// leaked one pooled connection per request.
	defer redisConn.Close()
	LogMsg := ""
	<-done // acquire the global lock (one-slot channel used as a mutex)
	if localSpike.LocalDeductionStock() && remoteSpike.RemoteDeductionStock(redisConn) {
		util.RespJson(w, 1, "抢票成功", nil)
		LogMsg = LogMsg + "result:1,localSales:" + strconv.FormatInt(localSpike.LocalSalesVolume, 10)
	} else {
		util.RespJson(w, -1, "已售罄", nil)
		LogMsg = LogMsg + "result:0,localSales:" + strconv.FormatInt(localSpike.LocalSalesVolume, 10)
	}
	done <- 1 // release the lock
	// Record the outcome for post-run analysis.
	writeLog(LogMsg, "./stat.log")
}

// writeLog appends msg plus a CRLF terminator to logPath, creating the file
// if needed.
func writeLog(msg string, logPath string) {
	fd, err := os.OpenFile(logPath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		// The original ignored this error; Write/Close on the nil *os.File
		// then failed silently and the stat line was dropped. Bail out
		// explicitly instead.
		return
	}
	defer fd.Close()
	content := strings.Join([]string{msg, "\r\n"}, "")
	fd.Write([]byte(content))
}
ab -n 10000 -c 100 http://127.0.0.1:3005/buy/ticket
This is ApacheBench, Version 2.3 <$Revision: 1826891 $> Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/ Licensed to The Apache Software Foundation, http://www.apache.org/ Benchmarking 127.0.0.1 (be patient) Completed 1000 requests Completed 2000 requests Completed 3000 requests Completed 4000 requests Completed 5000 requests Completed 6000 requests Completed 7000 requests Completed 8000 requests Completed 9000 requests Completed 10000 requests Finished 10000 requests Server Software: Server Hostname: 127.0.0.1 Server Port: 3005 Document Path: /buy/ticket Document Length: 29 bytes Concurrency Level: 100 Time taken for tests: 2.339 seconds Complete requests: 10000 Failed requests: 0 Total transferred: 1370000 bytes HTML transferred: 290000 bytes Requests per second: 4275.96 [#/sec] (mean) Time per request: 23.387 [ms] (mean) Time per request: 0.234 [ms] (mean, across all concurrent requests) Transfer rate: 572.08 [Kbytes/sec] received Connection Times (ms) min mean[+/-sd] median max Connect: 0 8 14.7 6 223 Processing: 2 15 17.6 11 232 Waiting: 1 11 13.5 8 225 Total: 7 23 22.8 18 239 Percentage of the requests served within a certain time (ms) 50% 18 66% 24 75% 26 80% 28 90% 33 95% 39 98% 45 99% 54 100% 239 (longest request)
//stat.log ... result:1,localSales:145 result:1,localSales:146 result:1,localSales:147 result:1,localSales:148 result:1,localSales:149 result:1,localSales:150 result:0,localSales:151 result:0,localSales:152 result:0,localSales:153 result:0,localSales:154 result:0,localSales:156 ...
5
总结回顾
整体来讲,秒杀系统是很是复杂的。咱们这里只是简单介绍模拟了一下单机如何优化到高性能,集群如何避免单点故障,保证订单不超卖、很多卖的一些策略,完整的订单系统还有订单进度的查看,每台服务器上都有一个任务,定时的从总库存同步余票和库存信息展现给用户,还有用户在订单有效期内不支付,释放订单,补充到库存等等。
做者:绘你一世倾城
来源:juejin.im/post/5d84e21f6fb9a06ac8248149
点击「阅读原文」和栈长学更多~