ComunicRTCProxy/relay.go

568 lines
14 KiB
Go
Raw Permalink Normal View History

2020-04-11 08:30:28 +00:00
/// RTC Relay
///
/// @author Pierre Hubert
package main
import (
"encoding/json"
2020-04-11 10:14:55 +00:00
"io"
2020-04-11 08:30:28 +00:00
"log"
2020-05-03 18:57:52 +00:00
"strings"
2020-04-12 12:51:06 +00:00
"sync"
2020-04-11 09:26:51 +00:00
"time"
2020-04-11 08:30:28 +00:00
2020-04-11 10:14:55 +00:00
"github.com/pion/rtcp"
2020-04-11 08:30:28 +00:00
"github.com/pion/webrtc/v2"
)
// Signal types exchanged with the API.
const (
	// SDP This is a SDP signal
	SDP = iota

	// CANDIDATE This is a candidate
	CANDIDATE

	// RequestOffer for a broadcast receiver
	RequestOffer

	// CloseConnection Requests the connection to be closed
	CloseConnection
)
// receivedSignal is the decoded form of one signaling message received
// from the API for a given peer of a given call.
type receivedSignal struct {
	// peerID identifies the peer inside the call ("0" is the main
	// broadcasting peer, any other value is a viewer)
	peerID string
	// callHash identifies the call this signal belongs to
	callHash string

	// sigType is one of SDP, CANDIDATE, RequestOffer, CloseConnection
	sigType uint

	// sdp is populated only when sigType == SDP
	sdp webrtc.SessionDescription

	// candidate is populated only when sigType == CANDIDATE
	candidate webrtc.ICECandidateInit
}
2020-04-11 11:14:27 +00:00
// activeRelay holds the state of one running call relay
// (served by a dedicated goroutine, see newCall).
type activeRelay struct {
	// channel carries the signals forwarded to the relay goroutine
	channel chan receivedSignal
	// callHash identifies the relayed call
	callHash string
	// id uniquely identifies this relay instance (assigned from currID),
	// used to detect whether a map entry was replaced by a newer relay
	id uint

	// closed is set once channel has been closed (see closeConnection),
	// preventing a double close
	closed bool
}
2020-04-11 08:30:28 +00:00
/// We keep for each connection its channel
2020-04-12 15:09:45 +00:00
var closeChan = make(chan *activeRelay)
var connections = make(map[string]*activeRelay)
2020-04-11 11:14:27 +00:00
var currID uint = 0
2020-04-11 08:30:28 +00:00
/// Process incoming messages
2021-10-17 18:01:42 +00:00
func onSignal(config *Config, callHash, peerID string, data map[string]interface{}) {
2020-04-11 08:30:28 +00:00
2020-04-11 09:26:51 +00:00
// Close all the channels that requested so
processCloseRequests()
2020-04-11 08:30:28 +00:00
// Decode received signal
2020-04-11 09:26:51 +00:00
newSignal := receivedSignal{
peerID: peerID,
callHash: callHash,
}
2020-04-11 08:30:28 +00:00
if data["type"] == "SDP" {
newSignal.sigType = SDP
2020-04-11 15:57:32 +00:00
// I have to re-encode data to initialize SDP
var enc []byte
enc, err := json.Marshal(data["data"])
if err != nil {
log.Printf("Could not re-encode candidate ! %s", err)
return
}
2020-04-11 16:20:46 +00:00
err = json.Unmarshal(enc, &newSignal.sdp)
2020-04-11 15:57:32 +00:00
if err != nil {
log.Printf("Discarding invalid candidate: %s", err)
return
}
2020-04-11 08:30:28 +00:00
} else if data["type"] == "CANDIDATE" {
newSignal.sigType = CANDIDATE
// I have to re-encode data to initialize ICECandidate
var enc []byte
enc, err := json.Marshal(data["data"])
if err != nil {
log.Printf("Could not re-encode candidate ! %s", err)
return
}
err = json.Unmarshal(enc, &newSignal.candidate)
if err != nil {
log.Printf("Discarding invalid candidate: %s", err)
return
}
2020-04-11 16:20:46 +00:00
} else if data["type"] == "REQUEST_OFFER" {
// Request an offer
newSignal.sigType = RequestOffer
2020-04-11 15:57:32 +00:00
} else if data["type"] == "CLOSE_CONN" {
// Close connection
newSignal.sigType = CloseConnection
2020-04-11 08:30:28 +00:00
} else {
log.Fatalf("Invalid signal type: %s !", data["type"])
}
2020-04-11 09:26:51 +00:00
// Check if we are attempting to connect as viewer to a non existing channel
if _, ok := connections[callHash]; !ok {
if peerID != "0" || newSignal.sigType != SDP {
2020-04-12 15:09:45 +00:00
log.Printf("Attempting to connect as viewer | send candidate to a non-ready broadcast! (callhash %s / peerid %s)", callHash, peerID)
2020-04-11 09:26:51 +00:00
return
}
}
// Handle new offers
if newSignal.sigType == SDP && peerID == "0" {
// Check if we are overwriting another connection
2020-04-11 11:14:27 +00:00
if val, ok := connections[callHash]; ok {
closeConnection(val)
2020-04-11 09:26:51 +00:00
}
2020-04-12 15:09:45 +00:00
newRelay := activeRelay{
2020-04-11 11:14:27 +00:00
id: currID,
callHash: callHash,
channel: make(chan receivedSignal, 10),
}
currID++
2020-04-12 15:09:45 +00:00
connections[callHash] = &newRelay
2021-10-17 18:01:42 +00:00
go newCall(config, newSignal, &newRelay)
2020-04-11 09:26:51 +00:00
} else {
// Forward the message to the channel
2020-04-11 11:14:27 +00:00
connections[callHash].channel <- newSignal
2020-04-11 09:26:51 +00:00
}
}
2020-04-11 16:20:46 +00:00
/// Request an offer for a client of a broadcast
2021-10-17 18:01:42 +00:00
func onRequestOffer(c *Config, callHash, peerID string) {
onSignal(c, callHash, peerID, map[string]interface{}{
2020-04-11 16:20:46 +00:00
"type": "REQUEST_OFFER",
})
}
2020-04-11 15:57:32 +00:00
/// Request connections to be closed
2021-10-17 18:01:42 +00:00
func onCloseConnection(c *Config, callHash, peerID string) {
onSignal(c, callHash, peerID, map[string]interface{}{
2020-04-11 15:57:32 +00:00
"type": "CLOSE_CONN",
})
}
2020-04-11 09:26:51 +00:00
/// Close a connection
2020-04-12 15:09:45 +00:00
func closeConnection(r *activeRelay) {
// Close the channel
if !r.closed {
close(r.channel)
r.closed = true
}
// Remove the channel from the list
2020-04-11 11:14:27 +00:00
if val, ok := connections[r.callHash]; ok && val.id == r.id {
delete(connections, r.callHash)
2020-04-11 09:26:51 +00:00
}
log.Printf("Closing call %s / id: %d (%d remaining open calls)", r.callHash, r.id, len(connections))
2020-04-11 09:26:51 +00:00
}
// Ask for a channel to be closed
//
// The relay is only pushed onto closeChan here; the actual close is
// performed later by processCloseRequests (per its comment, this keeps
// the close thread safe — relay goroutines never touch the map directly).
func askForClose(r *activeRelay) {
	closeChan <- r
}
// Process channel close requests (in thread safe way)
func processCloseRequests() {
for {
select {
2020-04-11 11:14:27 +00:00
case r := <-closeChan:
closeConnection(r)
2020-04-11 09:26:51 +00:00
case <-time.After(time.Millisecond * 10):
return
}
}
}
/// Start new call
2021-10-17 18:01:42 +00:00
func newCall(config *Config, mainOffer receivedSignal, r *activeRelay) {
2020-04-11 09:26:51 +00:00
2020-05-03 18:57:52 +00:00
// I am not sure this is a strong way to determine whether we are having
// a video call or not, but I have not found any better way yet...
isVideoCall := strings.Contains(mainOffer.sdp.SDP, "m=video") || strings.Contains(mainOffer.sdp.SDP, "mid:video")
2020-05-03 19:04:07 +00:00
numberExpectedTracks := 1
2020-05-03 18:57:52 +00:00
if isVideoCall {
2020-05-03 19:04:07 +00:00
numberExpectedTracks = 2
2020-05-03 18:57:52 +00:00
}
2020-05-03 19:04:07 +00:00
log.Printf("Starting new call: %s / id: %d / number of tracks: %d", r.callHash, r.id, numberExpectedTracks)
2020-04-11 10:14:55 +00:00
2020-04-12 12:51:06 +00:00
// Ice candidates mutex
var candidatesMux sync.Mutex
pendingCandidates := make([]*webrtc.ICECandidate, 0)
2020-04-11 09:26:51 +00:00
// Since we are answering use PayloadTypes declared by offerer
mediaEngine := webrtc.MediaEngine{}
2020-04-11 16:20:46 +00:00
err := mediaEngine.PopulateFromSDP(mainOffer.sdp)
2020-04-11 09:26:51 +00:00
if err != nil {
log.Println("Error: invalid data in offer!", err)
2020-04-11 11:14:27 +00:00
askForClose(r)
2020-04-11 09:26:51 +00:00
return
}
2020-04-12 12:51:06 +00:00
// Enable trickling
s := webrtc.SettingEngine{}
2020-04-12 13:20:52 +00:00
s.SetTrickle(false) // I did not manage to make the connection trickling, sorry...
2020-04-12 12:51:06 +00:00
2021-10-17 18:01:42 +00:00
// Optional : restrict ports
if config.PortStart > 0 && config.PortEnd > config.PortStart {
if err := s.SetEphemeralUDPPortRange(uint16(config.PortStart), uint16(config.PortEnd)); err != nil {
log.Println("Error: faild to set ephemeral UDP Port range!", err)
askForClose(r)
return
}
2021-10-17 16:45:20 +00:00
}
2021-10-17 18:01:42 +00:00
// Optional : set access IP
if len(config.AccessIP) > 0 {
ips := []string{config.AccessIP}
s.SetNAT1To1IPs(ips, webrtc.ICECandidateTypeHost)
}
2021-10-17 16:45:20 +00:00
2020-04-11 10:14:55 +00:00
// Create the API object with the MediaEngine
2020-04-12 12:51:06 +00:00
api := webrtc.NewAPI(webrtc.WithMediaEngine(mediaEngine),
webrtc.WithSettingEngine(s))
2020-04-11 10:14:55 +00:00
// Setup config
peerConnectionConfig := webrtc.Configuration{
2020-04-22 16:21:04 +00:00
ICEServers: callConf.iceServers,
SDPSemantics: webrtc.SDPSemanticsUnifiedPlanWithFallback,
2020-04-11 10:14:55 +00:00
}
// Create a new RTCPeerConnection
2020-04-11 14:32:34 +00:00
mainPeerConnection, err := api.NewPeerConnection(peerConnectionConfig)
2020-04-11 10:14:55 +00:00
if err != nil {
log.Println("Error: could not create peer connection!", err)
2020-04-11 11:14:27 +00:00
askForClose(r)
2020-04-11 10:14:55 +00:00
return
}
2020-04-14 07:18:12 +00:00
// Close peer connection
defer mainPeerConnection.Close()
2020-04-12 15:09:45 +00:00
// Check if the connection is closed
mainPeerConnection.OnConnectionStateChange(func(s webrtc.PeerConnectionState) {
if s == webrtc.PeerConnectionStateClosed {
askForClose(r)
return
}
})
2020-04-12 12:51:06 +00:00
// Forward ice candidates
mainPeerConnection.OnICECandidate(func(c *webrtc.ICECandidate) {
if c == nil || r.closed {
return
}
// Lock the list of candidates
candidatesMux.Lock()
defer candidatesMux.Unlock()
desc := mainPeerConnection.RemoteDescription()
if desc == nil {
// Add the signal to the pending list
pendingCandidates = append(pendingCandidates, c)
} else {
// Send the signal directly
sendSignal(mainOffer.callHash, mainOffer.peerID, c.ToJSON())
}
})
2020-04-11 10:14:55 +00:00
// Allow us to receive 1 audio & 1 video track
2020-04-12 15:36:27 +00:00
if callConf.allowVideo {
if _, err = mainPeerConnection.AddTransceiverFromKind(webrtc.RTPCodecTypeVideo,
webrtc.RtpTransceiverInit{Direction: webrtc.RTPTransceiverDirectionRecvonly}); err != nil {
log.Println("Error: could not prepare to receive video track!", err)
askForClose(r)
return
}
2020-04-11 10:14:55 +00:00
}
2020-04-11 14:32:34 +00:00
if _, err = mainPeerConnection.AddTransceiverFromKind(webrtc.RTPCodecTypeAudio,
2020-04-11 12:02:46 +00:00
webrtc.RtpTransceiverInit{Direction: webrtc.RTPTransceiverDirectionRecvonly}); err != nil {
2020-04-11 10:14:55 +00:00
log.Println("Error: could not prepare to receive audio track!", err)
2020-04-11 11:14:27 +00:00
askForClose(r)
2020-04-11 10:14:55 +00:00
return
}
2020-05-03 19:04:07 +00:00
localTrackChan := make(chan *webrtc.Track, numberExpectedTracks)
2020-04-11 10:14:55 +00:00
// Set a handler for when a new remote track starts, this just distributes all our packets
// to connected peers
2020-04-11 14:32:34 +00:00
mainPeerConnection.OnTrack(func(remoteTrack *webrtc.Track, receiver *webrtc.RTPReceiver) {
2020-04-11 10:14:55 +00:00
// Send a PLI on an interval so that the publisher is pushing a keyframe every rtcpPLIInterval
// This can be less wasteful by processing incoming RTCP events, then we would emit a NACK/PLI when a viewer requests it
go func() {
rtcpPLIInterval := time.Second * 3
ticker := time.NewTicker(rtcpPLIInterval)
for range ticker.C {
2020-04-11 14:32:34 +00:00
if r.closed {
return
}
if rtcpSendErr := mainPeerConnection.WriteRTCP([]rtcp.Packet{&rtcp.PictureLossIndication{MediaSSRC: remoteTrack.SSRC()}}); rtcpSendErr != nil {
2020-04-12 15:09:45 +00:00
log.Println("Write RTCP error:", rtcpSendErr)
2020-04-11 14:41:20 +00:00
askForClose(r)
return
2020-04-11 10:14:55 +00:00
}
}
}()
// Create a local track, all our SFU clients will be fed via this track
2020-04-11 16:59:35 +00:00
trackID := "audio"
trackLabel := "pion" // We need only one track label
if remoteTrack.Kind() == webrtc.RTPCodecTypeVideo {
2020-04-12 15:36:27 +00:00
// Check if video calls are allowed
if !callConf.allowVideo {
log.Printf("Blocked a video stream! Call hash: %s", r.callHash)
return
}
2020-04-11 16:59:35 +00:00
trackID = "video" // We need two different track ids
}
localTrack, newTrackErr := mainPeerConnection.NewTrack(remoteTrack.PayloadType(), remoteTrack.SSRC(), trackID, trackLabel)
2020-04-11 10:14:55 +00:00
if newTrackErr != nil {
log.Println("New track error!", err)
2020-04-11 11:14:27 +00:00
askForClose(r)
2020-04-11 10:14:55 +00:00
return
}
// Send the track to the main goroutine (in order to respond correctly to SDP requests & responses)
localTrackChan <- localTrack
rtpBuf := make([]byte, 1400)
for {
2020-04-13 07:43:56 +00:00
if r.closed {
return
}
2020-04-11 10:14:55 +00:00
i, readErr := remoteTrack.Read(rtpBuf)
if readErr != nil {
// Could not read from remote track
2020-04-11 14:32:34 +00:00
log.Println("Read error!", readErr)
2020-04-11 11:14:27 +00:00
askForClose(r)
2020-04-11 10:14:55 +00:00
break
}
// ErrClosedPipe means we don't have any subscribers, this is ok if no peers have connected yet
if _, err = localTrack.Write(rtpBuf[:i]); err != nil && err != io.ErrClosedPipe {
log.Println("Write error!", err)
2020-04-11 11:14:27 +00:00
askForClose(r)
2020-04-11 10:14:55 +00:00
break
}
}
})
// Set the remote SessionDescription
2020-04-11 16:20:46 +00:00
err = mainPeerConnection.SetRemoteDescription(mainOffer.sdp)
2020-04-11 10:14:55 +00:00
if err != nil {
log.Println("Set remote description error!", err)
2020-04-11 11:14:27 +00:00
askForClose(r)
2020-04-11 10:14:55 +00:00
return
}
// Create answer
2020-04-11 14:32:34 +00:00
answer, err := mainPeerConnection.CreateAnswer(nil)
2020-04-11 10:14:55 +00:00
if err != nil {
log.Println("Create answer error!", err)
2020-04-11 11:14:27 +00:00
askForClose(r)
2020-04-11 10:14:55 +00:00
return
}
// Sets the LocalDescription, and starts our UDP listeners
2020-04-11 14:32:34 +00:00
err = mainPeerConnection.SetLocalDescription(answer)
2020-04-11 10:14:55 +00:00
if err != nil {
log.Println("Set local description error!", err)
2020-04-11 11:14:27 +00:00
askForClose(r)
2020-04-11 10:14:55 +00:00
return
}
// Send anwser
2020-04-11 14:32:34 +00:00
if !r.closed {
sendSignal(mainOffer.callHash, mainOffer.peerID, answer)
2020-04-12 12:51:06 +00:00
// Warning ! Do not put this outside a specific block
candidatesMux.Lock()
// Send pending ice candidates
for _, val := range pendingCandidates {
sendSignal(mainOffer.callHash, mainOffer.peerID, val)
}
candidatesMux.Unlock()
2020-04-11 14:32:34 +00:00
}
// Keep a list of active tracks
localTracks := make([]*webrtc.Track, 0, 2)
clients := make(map[string]*webrtc.PeerConnection, 1)
2020-04-12 15:24:49 +00:00
// Enter messags loop
2020-04-11 14:32:34 +00:00
for {
newMessage, ok := <-r.channel
if !ok {
log.Printf("Channel closed: call hash: %s / call id: %d", r.callHash, r.id)
return
}
// Check if we are creating a new connection
2020-04-11 16:20:46 +00:00
if newMessage.sigType == RequestOffer {
2020-04-11 14:32:34 +00:00
// Close any previous connection of this client
if val, ok := clients[newMessage.peerID]; ok {
val.Close()
}
2020-05-03 18:57:52 +00:00
// First, we must make sure that we got all the tracks we need, before
// creating an offer (in order not to miss a track while building the offer)
2020-05-03 19:04:07 +00:00
stopCheck := len(localTracks) >= numberExpectedTracks // Stop check if we got all the channels
2020-05-03 18:57:52 +00:00
for !stopCheck {
select {
case t := <-localTrackChan:
localTracks = append(localTracks, t)
2020-05-03 19:04:07 +00:00
stopCheck = len(localTracks) >= numberExpectedTracks
2020-05-03 18:57:52 +00:00
case <-time.After(time.Millisecond * 1000):
stopCheck = true
}
}
2020-04-12 13:18:42 +00:00
// Ice candidates mutex
var newCandidatesMux sync.Mutex
newPendingCandidates := make([]*webrtc.ICECandidate, 0)
2020-04-11 14:32:34 +00:00
// Create new peer connection
newPeerConnection, err := api.NewPeerConnection(peerConnectionConfig)
if err != nil {
log.Printf("Error creating new remote peer connection: %s", err)
continue
}
clients[newMessage.peerID] = newPeerConnection
defer newPeerConnection.Close()
2020-04-12 13:18:42 +00:00
// Send ice candidates
newPeerConnection.OnICECandidate(func(c *webrtc.ICECandidate) {
if c == nil || r.closed {
return
}
newCandidatesMux.Lock()
defer newCandidatesMux.Unlock()
// Check if we have already determined remote descprition
desc := newPeerConnection.LocalDescription()
if desc == nil {
// Append signal to the queue
newPendingCandidates = append(newPendingCandidates, c)
} else {
// Send the signal immedialy
sendSignal(r.callHash, newMessage.peerID, c.ToJSON())
}
})
2020-04-11 14:32:34 +00:00
// Add tracks
for _, value := range localTracks {
_, err = newPeerConnection.AddTrack(value)
if err != nil {
log.Printf("Error adding a track: %s", err)
continue
}
}
2020-04-11 16:20:46 +00:00
// Create the offer
offer, err := newPeerConnection.CreateOffer(nil)
2020-04-11 14:32:34 +00:00
if err != nil {
log.Printf("Could not create answer: %s!", err)
continue
}
2020-04-11 16:20:46 +00:00
err = newPeerConnection.SetLocalDescription(offer)
2020-04-11 14:32:34 +00:00
if err != nil {
log.Printf("Could not set local description: %s!", err)
continue
}
2020-04-11 16:20:46 +00:00
// Send offer
sendSignal(r.callHash, newMessage.peerID, offer)
2020-04-12 13:18:42 +00:00
// Send pending ice candidates
newCandidatesMux.Lock()
for _, c := range newPendingCandidates {
sendSignal(r.callHash, newMessage.peerID, c.ToJSON())
}
newCandidatesMux.Unlock()
2020-04-11 16:20:46 +00:00
} else if newMessage.sigType == SDP {
// Got an answer from the client
if val, ok := clients[newMessage.peerID]; ok {
// Set remote description
if err := val.SetRemoteDescription(newMessage.sdp); err != nil {
log.Printf("Could not set remote description! %s", err)
}
} else {
log.Printf("Dropped un-usable answer: callHash %s / peer ID %s", newMessage.callHash, newMessage.peerID)
}
2020-04-11 14:32:34 +00:00
} else if newMessage.sigType == CANDIDATE {
if newMessage.peerID == "0" {
// Ice candidate for main peer
if err := mainPeerConnection.AddICECandidate(newMessage.candidate); err != nil {
log.Printf("Err Adding ICECandidate to main peer: %s", err)
}
} else if val, ok := clients[newMessage.peerID]; ok {
// Ice candidate for remote peer
if err := val.AddICECandidate(newMessage.candidate); err != nil {
log.Printf("Err adding ice candidate for remote peer: %s", err)
}
} else {
log.Printf("Err tried to add ICE Candidate for non ready peer connection!")
}
2020-04-11 16:02:50 +00:00
} else if newMessage.sigType == CloseConnection {
// Check if we have to close the whole connection or just a client
if newMessage.peerID == "0" {
askForClose(r)
return
} else if val, ok := clients[newMessage.peerID]; ok {
// Close a single client
val.Close()
delete(clients, newMessage.peerID)
}
2020-04-11 14:32:34 +00:00
}
}
2020-04-11 08:30:28 +00:00
}