An In-Depth Analysis of the Golang Server Source Code
```go
func (srv *Server) Serve(l net.Listener) error {
	......
	for {
		rw, err := l.Accept()
		if err != nil {
			select {
			case <-srv.getDoneChan():
				return ErrServerClosed
			default:
			}
			if ne, ok := err.(net.Error); ok && ne.Temporary() {
				if tempDelay == 0 {
					tempDelay = 5 * time.Millisecond
				} else {
					tempDelay *= 2
				}
				if max := 1 * time.Second; tempDelay > max {
					tempDelay = max
				}
				srv.logf("http: Accept error: %v; retrying in %v", err, tempDelay)
				time.Sleep(tempDelay)
				continue
			}
			return err
		}
		connCtx := ctx
		if cc := srv.ConnContext; cc != nil {
			connCtx = cc(connCtx, rw)
			if connCtx == nil {
				panic("ConnContext returned nil")
			}
		}
		tempDelay = 0
		c := srv.newConn(rw)
		c.setState(c.rwc, StateNew, runHooks) // before Serve can return
		go c.serve(connCtx)
	}
}
```
```go
func (c *conn) serve(ctx context.Context) {
	......
	// HTTP/1.x from here on.

	ctx, cancelCtx := context.WithCancel(ctx)
	c.cancelCtx = cancelCtx
	defer cancelCtx()

	c.r = &connReader{conn: c}
	c.bufr = newBufioReader(c.r)
	c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)

	for {
		w, err := c.readRequest(ctx)
		if c.r.remain != c.server.initialReadLimitSize() {
			// If we read any bytes off the wire, we're active.
			c.setState(c.rwc, StateActive, runHooks)
		}
		if err != nil {
			const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n"

			switch {
			case err == errTooLarge:
				// Their HTTP client may or may not be
				// able to read this if we're
				// responding to them and hanging up
				// while they're still writing their
				// request. Undefined behavior.
				const publicErr = "431 Request Header Fields Too Large"
				fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
				c.closeWriteAndWait()
				return

			case isUnsupportedTEError(err):
				// Respond as per RFC 7230 Section 3.3.1 which says,
				//   A server that receives a request message with a
				//   transfer coding it does not understand SHOULD
				//   respond with 501 (Unimplemented).
				code := StatusNotImplemented

				// We purposefully aren't echoing back the transfer-encoding's value,
				// so as to mitigate the risk of cross side scripting by an attacker.
				fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s%sUnsupported transfer encoding", code, StatusText(code), errorHeaders)
				return

			case isCommonNetReadError(err):
				return // don't reply

			default:
				if v, ok := err.(statusError); ok {
					fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s: %s%s%d %s: %s", v.code, StatusText(v.code), v.text, errorHeaders, v.code, StatusText(v.code), v.text)
					return
				}
				publicErr := "400 Bad Request"
				fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
				return
			}
		}

		// Expect 100 Continue support
		req := w.req
		if req.expectsContinue() {
			if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
				// Wrap the Body reader with one that replies on the connection
				req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
				w.canWriteContinue.setTrue()
			}
		} else if req.Header.get("Expect") != "" {
			w.sendExpectationFailed()
			return
		}

		c.curReq.Store(w)

		if requestBodyRemains(req.Body) {
			registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead)
		} else {
			w.conn.r.startBackgroundRead()
		}

		// HTTP cannot have multiple simultaneous active requests.[*]
		// Until the server replies to this request, it can't read another,
		// so we might as well run the handler in this goroutine.
		// [*] Not strictly true: HTTP pipelining. We could let them all process
		// in parallel even if their responses need to be serialized.
		// But we're not going to implement HTTP pipelining because it
		// was never deployed in the wild and the answer is HTTP/2.
		inFlightResponse = w
		serverHandler{c.server}.ServeHTTP(w, w.req)
		inFlightResponse = nil
		w.cancelCtx()
		if c.hijacked() {
			return
		}
		w.finishRequest()
		if !w.shouldReuseConnection() {
			if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
				c.closeWriteAndWait()
			}
			return
		}
		c.setState(c.rwc, StateIdle, runHooks)
		c.curReq.Store((*response)(nil))

		if !w.conn.server.doKeepAlives() {
			// We're in shutdown mode. We might've replied
			// to the user without "Connection: close" and
			// they might think they can send another
			// request, but such is life with HTTP/1.1.
			return
		}

		if d := c.server.idleTimeout(); d != 0 {
			c.rwc.SetReadDeadline(time.Now().Add(d))
			if _, err := c.bufr.Peek(4); err != nil {
				return
			}
		}
		c.rwc.SetReadDeadline(time.Time{})
	}
}
```
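To ground the walkthrough, here is a minimal server (a sketch; the port and handler are placeholders, not part of the source above). http.ListenAndServe builds a Server and calls Serve on a listener; Serve runs the accept loop shown above and starts one conn.serve goroutine per accepted connection.

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})
	// ListenAndServe -> (*Server).Serve(listener) -> go c.serve(ctx) per connection.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		panic(err)
	}
}
```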
1. c.readRequest(ctx)
It sits inside the for loop to support HTTP Keep-Alive: the TCP connection can be reused, and requests are handled serially, so the next request is only read after the previous one has been processed. If the client closes the connection, c.readRequest(ctx) fails with a read error and the loop exits.
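A quick way to see this Keep-Alive reuse from the client side is the sketch below (the local URL is hypothetical), which uses net/http/httptrace; GotConnInfo.Reused reports whether the request went out over a pooled connection.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptrace"
)

func main() {
	trace := &httptrace.ClientTrace{
		GotConn: func(info httptrace.GotConnInfo) {
			fmt.Printf("connection reused: %v\n", info.Reused)
		},
	}
	for i := 0; i < 2; i++ {
		req, _ := http.NewRequest("GET", "http://127.0.0.1:8080/", nil)
		req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		io.Copy(io.Discard, resp.Body) // drain the body so the connection can be reused
		resp.Body.Close()
	}
}
```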
Tracing further into the source shows that readRequest only reads and validates the headers, which is why a setting such as readHeaderDeadline exists; it then decides what kind of Body reader to install. A blank line separates the headers from the Body and marks the end of the headers, and the Content-Length header tells the server whether a Body is present and how large it is.
```go
switch {
case t.Chunked:
	if noResponseBodyExpected(t.RequestMethod) || !bodyAllowedForStatus(t.StatusCode) {
		t.Body = NoBody
	} else {
		t.Body = &body{src: internal.NewChunkedReader(r), hdr: msg, r: r, closing: t.Close}
	}
case realLength == 0:
	t.Body = NoBody
case realLength > 0:
	t.Body = &body{src: io.LimitReader(r, realLength), closing: t.Close}
default:
	// realLength < 0, i.e. "Content-Length" not mentioned in header
	if t.Close {
		// Close semantics (i.e. HTTP/1.0)
		t.Body = &body{src: r, closing: t.Close}
	} else {
		// Persistent connection (i.e. HTTP/1.1)
		t.Body = NoBody
	}
}
```
```go
func (l *LimitedReader) Read(p []byte) (n int, err error) {
	if l.N <= 0 {
		return 0, EOF
	}
	if int64(len(p)) > l.N {
		p = p[0:l.N]
	}
	n, err = l.R.Read(p)
	l.N -= int64(n)
	return
}
```
io.LimitReader returns EOF once the specified number of bytes has been read, signalling that the Body has been fully consumed.
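A small demonstration of this behaviour (a sketch with made-up data): once the limit has been consumed, the next Read returns io.EOF, which is how a Content-Length-delimited Body is recognised as finished.

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("hello world") // stand-in for the TCP stream
	body := io.LimitReader(src, 5)          // as if Content-Length: 5

	buf := make([]byte, 16)
	n, err := body.Read(buf)
	fmt.Printf("read %q, err=%v\n", buf[:n], err) // read "hello", err=<nil>

	n, err = body.Read(buf)
	fmt.Printf("read %q, err=%v\n", buf[:n], err) // read "", err=EOF
}
```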
2. w.conn.r.startBackgroundRead
```go
if requestBodyRemains(req.Body) {
	registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead)
} else {
	w.conn.r.startBackgroundRead()
}
```
startBackgroundRead is only started once the Body has been fully read.
```go
func (cr *connReader) backgroundRead() {
	n, err := cr.conn.rwc.Read(cr.byteBuf[:])
	cr.lock()
	if n == 1 {
		cr.hasByte = true
		......
	}
	if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() {
		// Ignore this error. It's the expected error from
		// another goroutine calling abortPendingRead.
	} else if err != nil {
		cr.handleReadError(err)
	}
	cr.aborted = false
	cr.inRead = false
	cr.unlock()
	cr.cond.Broadcast()
}

func (cr *connReader) handleReadError(_ error) {
	cr.conn.cancelCtx()
	cr.closeNotify()
}

// may be called from multiple goroutines.
func (cr *connReader) closeNotify() {
	res, _ := cr.conn.curReq.Load().(*response)
	if res != nil && atomic.CompareAndSwapInt32(&res.didCloseNotify, 0, 1) {
		res.closeNotifyCh <- true
	}
}
```
In essence, startBackgroundRead exists to detect whether the client has closed the connection. It must not interfere with reading the request data, which is why it only starts after the Body has been fully read. It performs a token read of a single byte: if the client has closed the connection, the underlying fd becomes readable, the read returns an error, a notification is sent on a channel (closeNotifyCh), and the request context is cancelled. This goroutine's lifetime is the current request, not the current connection. Its purpose is to interrupt the Handler stage of the current request: the client is assumed to have abandoned the request, so the server need not spend more effort on it. In practice, though, this is hard to act on, or arguably redundant; in our view, once a request has arrived, the server is obliged to process it correctly rather than cut it short.
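For completeness, here is a sketch of how a handler can observe this cancellation if it chooses to (the handler name and the simulated work are made up): when the client disconnects, backgroundRead sees the read error, handleReadError cancels the request context, and r.Context().Done() fires. Whether to stop early is a policy decision; as argued above, one can also just finish the work regardless.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func slowHandler(w http.ResponseWriter, r *http.Request) {
	select {
	case <-time.After(5 * time.Second): // stand-in for real work
		fmt.Fprintln(w, "done")
	case <-r.Context().Done():
		// The client closed the connection: backgroundRead saw the read error,
		// handleReadError cancelled the request context, and Done() fired.
		log.Printf("client gone: %v", r.Context().Err())
	}
}

func main() {
	http.HandleFunc("/slow", slowHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```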
When the request finishes, abortPendingRead is called so that the startBackgroundRead goroutine exits. Why is the goroutine's lifetime tied to the request rather than to the connection? Because a Keep-Alive connection stays open for a while even when no request is in flight, and that would leave the startBackgroundRead goroutine running the whole time.
So when does the server close the connection? The client, after all, cannot be trusted. The server adjusts the timer by calling SetReadDeadline with ReadHeaderTimeout (if ReadHeaderTimeout is not set, ReadTimeout is used instead). If no request arrives before the deadline, the server can assume the client is no longer reusing the connection: the for loop exits, and the connection is closed in a deferred call.
In practice, a client only reuses a connection when it has several requests to send within a short window; for example, while a page is loading, a browser will reuse connections as it sees fit.
The ReadDeadline derived from ReadTimeout is an overall budget: a single cut-off time that covers reading both the headers and the Body.
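For reference, this is how those knobs look when configured on http.Server (a sketch; the values are arbitrary): ReadHeaderTimeout bounds reading the headers, ReadTimeout is the overall read deadline covering headers and Body, and IdleTimeout bounds how long a Keep-Alive connection may sit idle between requests (falling back to ReadTimeout when zero).

```go
package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr:              ":8080",
		ReadHeaderTimeout: 5 * time.Second,  // time allowed to read the request headers
		ReadTimeout:       15 * time.Second, // overall deadline for headers + body
		IdleTimeout:       60 * time.Second, // how long a Keep-Alive connection may idle
		WriteTimeout:      15 * time.Second, // deadline for writing the response
	}
	log.Fatal(srv.ListenAndServe())
}
```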
3. serverHandler{c.server}.ServeHTTP(w, w.req)
After that the Handler is invoked. If it needs information from the Body, it has to go on and read the Body itself, which shows that headers and Body are read separately. This second read normally does not block because the data is already sitting in the fd; of course, if an attacker maliciously sends only the request headers and never the Body, the read will block.
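A sketch of a handler that does read the Body (the route and the 1 MiB cap are arbitrary): the io.ReadAll call is the "second read" described above, pulled off the same connection after the headers have already been parsed, and http.MaxBytesReader is an optional guard against clients that declare a huge Content-Length.

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func echo(w http.ResponseWriter, r *http.Request) {
	r.Body = http.MaxBytesReader(w, r.Body, 1<<20) // cap the body at 1 MiB
	data, err := io.ReadAll(r.Body)                // this read pulls the body off the connection
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	fmt.Fprintf(w, "received %d bytes\n", len(data))
}

func main() {
	http.HandleFunc("/echo", echo)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```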