package soju

import (
	"fmt"
	"time"

	"gopkg.in/irc.v3"
)
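
// event is a message processed by the user goroutine's event loop in
// (*user).run. Connection goroutines never mutate user state directly: they
// send events to user.events, and the loop handles them sequentially, so no
// locking is needed.
//
// A minimal producer sketch (hypothetical msg and uc variables, following
// the sends in (*network).run):
//
//	uc.network.user.events <- eventUpstreamMessage{msg: msg, uc: uc}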
type event interface{}

type eventUpstreamMessage struct {
	msg *irc.Message
	uc  *upstreamConn
}

type eventUpstreamConnectionError struct {
	net *network
	err error
}

type eventUpstreamConnected struct {
	uc *upstreamConn
}

type eventUpstreamDisconnected struct {
	uc *upstreamConn
}

type eventUpstreamError struct {
	uc  *upstreamConn
	err error
}

type eventDownstreamMessage struct {
	msg *irc.Message
	dc  *downstreamConn
}

type eventDownstreamConnected struct {
	dc *downstreamConn
}

type eventDownstreamDisconnected struct {
	dc *downstreamConn
}
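
// network is the state attached to one upstream network of a user: the
// stored Network record, the ring buffer of messages to replay to
// downstreams, and the current upstream connection (nil while disconnected).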
type network struct {
	Network
	user    *user
	ring    *Ring
	stopped chan struct{}

	conn      *upstreamConn
	history   map[string]uint64
	lastError error
}

func newNetwork(user *user, record *Network) *network {
	return &network{
		Network: *record,
		user:    user,
		ring:    NewRing(user.srv.RingCap),
		stopped: make(chan struct{}),
		history: make(map[string]uint64),
	}
}
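
// forEachDownstream calls f for each downstream connection relevant to this
// network: connections bound to this network, plus connections not bound to
// any particular network.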
func (net *network) forEachDownstream(f func(*downstreamConn)) {
	net.user.forEachDownstream(func(dc *downstreamConn) {
		if dc.network != nil && dc.network != net {
			return
		}
		f(dc)
	})
}
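
// run connects to the upstream network and reconnects whenever the
// connection drops, waiting at least retryConnectMinDelay between attempts.
// It returns once the network is stopped.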
func (net *network) run() {
	var lastTry time.Time
	for {
		select {
		case <-net.stopped:
			return
		default:
			// This space is intentionally left blank
		}

		if dur := time.Now().Sub(lastTry); dur < retryConnectMinDelay {
			delay := retryConnectMinDelay - dur
			net.user.srv.Logger.Printf("waiting %v before trying to reconnect to %q", delay.Truncate(time.Second), net.Addr)
			time.Sleep(delay)
		}
		lastTry = time.Now()

		uc, err := connectToUpstream(net)
		if err != nil {
			net.user.srv.Logger.Printf("failed to connect to upstream server %q: %v", net.Addr, err)
			net.user.events <- eventUpstreamConnectionError{net, fmt.Errorf("failed to connect: %v", err)}
			continue
		}

		uc.register()
		if err := uc.runUntilRegistered(); err != nil {
			uc.logger.Printf("failed to register: %v", err)
			net.user.events <- eventUpstreamConnectionError{net, fmt.Errorf("failed to register: %v", err)}
			uc.Close()
			continue
		}

		net.user.events <- eventUpstreamConnected{uc}
		if err := uc.readMessages(net.user.events); err != nil {
			uc.logger.Printf("failed to handle messages: %v", err)
			net.user.events <- eventUpstreamError{uc, fmt.Errorf("failed to handle messages: %v", err)}
		}
		uc.Close()
		net.user.events <- eventUpstreamDisconnected{uc}
	}
}
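
// upstream returns the current upstream connection, or nil if the network
// is disconnected.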
func (net *network) upstream() *upstreamConn {
	return net.conn
}
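
// Stop stops the network: the upstream connection is closed and the run
// loop exits. Stop is idempotent.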
func (net *network) Stop() {
	select {
	case <-net.stopped:
		return
	default:
		close(net.stopped)
	}

	if uc := net.upstream(); uc != nil {
		uc.Close()
	}
}
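
// createUpdateChannel upserts ch in the database: if a channel with the
// same name already exists for this network, it is updated in place.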
func (net *network) createUpdateChannel(ch *Channel) error {
	if dbCh, err := net.user.srv.db.GetChannel(net.ID, ch.Name); err == nil {
		ch.ID = dbCh.ID
	} else if err != ErrNoSuchChannel {
		return err
	}
	return net.user.srv.db.StoreChannel(net.ID, ch)
}

func (net *network) deleteChannel(name string) error {
	return net.user.srv.db.DeleteChannel(net.ID, name)
}
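
// user holds all state attached to a single soju user: the stored User
// record, the user's networks, and the currently connected downstream
// connections. All of it is owned by the user goroutine running (*user).run.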
type user struct {
	User
	srv *Server

	events chan event

	networks        []*network
	downstreamConns []*downstreamConn

	// LIST commands in progress, in the order the downstreams sent them.
	// Because a unique RPL_LISTEND must be sent back to the requesting
	// downstream while each upstream sends its own RPL_LISTEND, every
	// pending LIST tracks the set of upstreams that have not yet finished
	// replying. Only the first LIST from a given downstream is forwarded
	// to the upstreams immediately; its later requests are buffered until
	// the first completes. Replies are forwarded only to the requesting
	// downstream, since LIST replies can amount to several thousand
	// messages on large servers. An upstream is also removed from a
	// pending LIST when it disconnects or replies with ERR_UNKNOWNCOMMAND
	// or RPL_TRYAGAIN, so that a dead or restricted upstream cannot stall
	// the request forever.
	pendingLISTs []pendingLIST
}
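
// pendingLIST tracks a single downstream LIST request across all the
// upstreams it was forwarded to.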
type pendingLIST struct {
	downstreamID uint64
	// list of per-upstream LIST commands not yet sent or completed
	pendingCommands map[int64]*irc.Message
}
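
// newUser creates a user from its database record. The returned user is not
// running yet; call (*user).run to start its event loop.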
func newUser(srv *Server, record *User) *user {
	return &user{
		User:   *record,
		srv:    srv,
		events: make(chan event, 64),
	}
}

func (u *user) forEachNetwork(f func(*network)) {
	for _, network := range u.networks {
		f(network)
	}
}

func (u *user) forEachUpstream(f func(uc *upstreamConn)) {
	for _, network := range u.networks {
		uc := network.upstream()
		if uc == nil {
			continue
		}
		f(uc)
	}
}

func (u *user) forEachDownstream(f func(dc *downstreamConn)) {
	for _, dc := range u.downstreamConns {
		f(dc)
	}
}
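
// getNetwork returns the user's network matching name, which can be either
// the network address or its configured name. It returns nil if no network
// matches. For example (hypothetical address):
//
//	net := u.getNetwork("example.com:6697")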
func (u *user) getNetwork(name string) *network {
	for _, network := range u.networks {
		if network.Addr == name {
			return network
		}
		if network.Name != "" && network.Name == name {
			return network
		}
	}
	return nil
}
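
// run is the user's event loop. It starts one goroutine per stored network,
// then processes events sequentially until the events channel is closed.
// All user state is mutated from this goroutine only, which is why the
// event handlers below need no locking.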
func (u *user) run() {
	networks, err := u.srv.db.ListNetworks(u.Username)
	if err != nil {
		u.srv.Logger.Printf("failed to list networks for user %q: %v", u.Username, err)
		return
	}

	for _, record := range networks {
		network := newNetwork(u, &record)
		u.networks = append(u.networks, network)

		go network.run()
	}

	for e := range u.events {
		switch e := e.(type) {
		case eventUpstreamConnected:
			uc := e.uc

			uc.network.conn = uc

			uc.updateAway()

			uc.forEachDownstream(func(dc *downstreamConn) {
				sendServiceNOTICE(dc, fmt.Sprintf("connected to %s", uc.network.GetName()))
			})
			uc.network.lastError = nil
		case eventUpstreamDisconnected:
			uc := e.uc

			uc.network.conn = nil

			for _, ml := range uc.messageLoggers {
				if err := ml.Close(); err != nil {
					uc.logger.Printf("failed to close message logger: %v", err)
				}
			}

			uc.endPendingLISTs(true)

			if uc.network.lastError == nil {
				uc.forEachDownstream(func(dc *downstreamConn) {
					sendServiceNOTICE(dc, fmt.Sprintf("disconnected from %s", uc.network.GetName()))
				})
			}
		case eventUpstreamConnectionError:
			net := e.net

			if net.lastError == nil || net.lastError.Error() != e.err.Error() {
				net.forEachDownstream(func(dc *downstreamConn) {
					sendServiceNOTICE(dc, fmt.Sprintf("failed connecting/registering to %s: %v", net.GetName(), e.err))
				})
			}
			net.lastError = e.err
		case eventUpstreamError:
			uc := e.uc

			uc.forEachDownstream(func(dc *downstreamConn) {
				sendServiceNOTICE(dc, fmt.Sprintf("disconnected from %s: %v", uc.network.GetName(), e.err))
			})
			uc.network.lastError = e.err
		case eventUpstreamMessage:
			msg, uc := e.msg, e.uc
			if uc.isClosed() {
				uc.logger.Printf("ignoring message on closed connection: %v", msg)
				break
			}
			if err := uc.handleMessage(msg); err != nil {
				uc.logger.Printf("failed to handle message %q: %v", msg, err)
			}
		case eventDownstreamConnected:
			dc := e.dc

			if err := dc.welcome(); err != nil {
				dc.logger.Printf("failed to handle new registered connection: %v", err)
				break
			}

			u.downstreamConns = append(u.downstreamConns, dc)

			u.forEachUpstream(func(uc *upstreamConn) {
				uc.updateAway()
			})
		case eventDownstreamDisconnected:
			dc := e.dc

			dc.forEachNetwork(func(net *network) {
				seq := net.ring.Cur()
				net.history[dc.clientName] = seq
			})

			for i := range u.downstreamConns {
				if u.downstreamConns[i] == dc {
					u.downstreamConns = append(u.downstreamConns[:i], u.downstreamConns[i+1:]...)
					break
				}
			}

			u.forEachUpstream(func(uc *upstreamConn) {
				uc.updateAway()
			})
		case eventDownstreamMessage:
			msg, dc := e.msg, e.dc
			if dc.isClosed() {
				dc.logger.Printf("ignoring message on closed connection: %v", msg)
				break
			}
			err := dc.handleMessage(msg)
			if ircErr, ok := err.(ircError); ok {
				ircErr.Message.Prefix = dc.srv.prefix()
				dc.SendMessage(ircErr.Message)
			} else if err != nil {
				dc.logger.Printf("failed to handle message %q: %v", msg, err)
				dc.Close()
			}
		default:
			u.srv.Logger.Printf("received unknown event type: %T", e)
		}
	}
}
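
// createNetwork stores a new network in the database, registers it with the
// user and starts its run loop. The record must not already have an ID.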
func (u *user) createNetwork(net *Network) (*network, error) {
	if net.ID != 0 {
		panic("tried creating an already-existing network")
	}

	network := newNetwork(u, net)
	err := u.srv.db.StoreNetwork(u.Username, &network.Network)
	if err != nil {
		return nil, err
	}

	u.networks = append(u.networks, network)

	go network.run()
	return network, nil
}
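
// deleteNetwork removes the network with the given ID: it is deleted from
// the database, its bound downstream connections are closed and its run
// loop is stopped. It panics if no network has that ID.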
func (u *user) deleteNetwork(id int64) error {
	for i, net := range u.networks {
		if net.ID != id {
			continue
		}

		if err := u.srv.db.DeleteNetwork(net.ID); err != nil {
			return err
		}

		u.forEachDownstream(func(dc *downstreamConn) {
			if dc.network != nil && dc.network == net {
				dc.Close()
			}
		})

		net.Stop()
		net.ring.Close()
		u.networks = append(u.networks[:i], u.networks[i+1:]...)
		return nil
	}

	panic("tried deleting a non-existing network")
}