package soju

import (
	"crypto/tls"
	"encoding/base64"
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/emersion/go-sasl"
	"gopkg.in/irc.v3"
)

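// upstreamChannel stores the state soju tracks for a channel joined on the
// upstream network: topic, modes, creation time and members.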
type upstreamChannel struct {
	Name         string
	conn         *upstreamConn
	Topic        string
	TopicWho     string
	TopicTime    time.Time
	Status       channelStatus
	modes        channelModes
	creationTime string
	Members      map[string]*membership
	complete     bool
}

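// upstreamConn is a connection from soju to an upstream IRC server, together
// with the state negotiated on it: server features, IRCv3 capabilities, SASL
// state, joined channels and per-target logs.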
type upstreamConn struct {
	network  *network
	logger   Logger
	net      net.Conn
	irc      *irc.Conn
	srv      *Server
	user     *user
	outgoing chan<- *irc.Message
	closed   chan struct{}

	serverName            string
	availableUserModes    string
	availableChannelModes map[byte]channelModeType
	availableChannelTypes string
	availableMemberships  []membership

	registered bool
	nick       string
	username   string
	realname   string
	modes      userModes
	channels   map[string]*upstreamChannel
	caps       map[string]string
	batches    map[string]batch
	away       bool

	tagsSupported   bool
	labelsSupported bool
	nextLabelID     uint64

	saslClient  sasl.Client
	saslStarted bool

	// set of LIST commands in progress, per downstream
	pendingLISTDownstreamSet map[uint64]struct{}

	logs map[string]entityLog
}

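// entityLog is an open log file for a single logging target (a channel or a
// nick) on this upstream.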
type entityLog struct {
	name string
	file *os.File
}

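// connectToUpstream dials the network's address over TLS (appending the
// default port 6697 when none is specified) and starts a goroutine that
// writes queued outgoing messages until the connection is closed.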
func connectToUpstream(network *network) (*upstreamConn, error) {
	logger := &prefixLogger{network.user.srv.Logger, fmt.Sprintf("upstream %q: ", network.Addr)}

	addr := network.Addr
	if !strings.ContainsRune(addr, ':') {
		addr = addr + ":6697"
	}

	logger.Printf("connecting to TLS server at address %q", addr)
	netConn, err := tls.Dial("tcp", addr, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to dial %q: %v", addr, err)
	}

	setKeepAlive(netConn)

	outgoing := make(chan *irc.Message, 64)
	uc := &upstreamConn{
		network: network,
		logger: logger,
		net: netConn,
		irc: irc.NewConn(netConn),
		srv: network.user.srv,
		user: network.user,
		outgoing: outgoing,
		closed: make(chan struct{}),
		channels: make(map[string]*upstreamChannel),
		caps: make(map[string]string),
		batches: make(map[string]batch),
		availableChannelTypes: stdChannelTypes,
		availableChannelModes: stdChannelModes,
		availableMemberships: stdMemberships,
		pendingLISTDownstreamSet: make(map[uint64]struct{}),
		logs: make(map[string]entityLog),
	}

	go func() {
		for {
			var closed bool
			select {
			case msg := <-outgoing:
				if uc.srv.Debug {
					uc.logger.Printf("sent: %v", msg)
				}
				if err := uc.irc.WriteMessage(msg); err != nil {
					uc.logger.Printf("failed to write message: %v", err)
				}
			case <-uc.closed:
				closed = true
			}
			if closed {
				break
			}
		}
		if err := uc.net.Close(); err != nil {
			uc.logger.Printf("failed to close connection: %v", err)
		} else {
			uc.logger.Printf("connection closed")
		}
	}()

	return uc, nil
}

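// isClosed reports whether Close has already been called.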
func (uc *upstreamConn) isClosed() bool {
	select {
	case <-uc.closed:
		return true
	default:
		return false
	}
}

// Close closes the connection. It is safe to call from any goroutine.
func (uc *upstreamConn) Close() error {
	if uc.isClosed() {
		return fmt.Errorf("upstream connection already closed")
	}
	close(uc.closed)
	return nil
}

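// forEachDownstream calls f for each of the user's downstream connections
// that are attached to this upstream's network, or to no network in
// particular.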
func (uc *upstreamConn) forEachDownstream(f func(*downstreamConn)) {
	uc.user.forEachDownstream(func(dc *downstreamConn) {
		if dc.network != nil && dc.network != uc.network {
			return
		}
		f(dc)
	})
}

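// forEachDownstreamByID is like forEachDownstream, but when id is non-zero it
// only visits the downstream connection with that ID.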
func (uc *upstreamConn) forEachDownstreamByID(id uint64, f func(*downstreamConn)) {
	uc.forEachDownstream(func(dc *downstreamConn) {
		if id != 0 && id != dc.id {
			return
		}
		f(dc)
	})
}

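// getChannel returns the state of the given upstream channel, or an error if
// the channel is not currently tracked.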
func (uc *upstreamConn) getChannel(name string) (*upstreamChannel, error) {
	ch, ok := uc.channels[name]
	if !ok {
		return nil, fmt.Errorf("unknown channel %q", name)
	}
	return ch, nil
}

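// isChannel reports whether entity starts with one of the channel type
// prefixes advertised by the upstream server.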
func (uc *upstreamConn) isChannel(entity string) bool {
	if i := strings.IndexByte(uc.availableChannelTypes, entity[0]); i >= 0 {
		return true
	}
	return false
}

// Pending LIST requests from downstreams are stored in uc.user.pendingLISTs,
// ordered by the time the requesting downstreams sent them. Each pending LIST
// records the ID of the requesting downstream (so that replies, which can
// amount to several thousand messages on large servers, are only forwarded to
// that downstream) and the set of upstreams that still have to answer with
// RPL_LISTEND. Every RPL_LIST reply is forwarded to the downstream of the
// first matching pending LIST. When an upstream sends RPL_LISTEND, it is
// removed from the first matching pending LIST; once that set is empty, all
// upstreams have replied, so a single RPL_LISTEND is sent to the downstream
// and the pending LIST is dropped. An upstream is also removed from pending
// LISTs when it is closed, or when it answers LIST with ERR_UNKNOWNCOMMAND or
// RPL_TRYAGAIN. A downstream's subsequent LIST requests are buffered until
// its first one completes; distinct downstreams can run LIST concurrently.
func (uc *upstreamConn) getPendingLIST() *pendingLIST {
	for _, pl := range uc.user.pendingLISTs {
		if _, ok := pl.pendingCommands[uc.network.ID]; !ok {
			continue
		}
		return &pl
	}
	return nil
}

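// endPendingLISTs removes this upstream from the user's pending LIST
// requests. Whenever a request has been answered by every upstream,
// RPL_LISTEND is sent to the requesting downstream and the request is
// dropped. If all is false, only the first matching request is processed and
// that downstream's next buffered LIST command, if any, is then dispatched to
// the upstreams. found reports whether any matching request existed.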
func (uc *upstreamConn) endPendingLISTs(all bool) (found bool) {
	found = false
	for i := 0; i < len(uc.user.pendingLISTs); i++ {
		pl := uc.user.pendingLISTs[i]
		if _, ok := pl.pendingCommands[uc.network.ID]; !ok {
			continue
		}
		delete(pl.pendingCommands, uc.network.ID)
		if len(pl.pendingCommands) == 0 {
			uc.user.pendingLISTs = append(uc.user.pendingLISTs[:i], uc.user.pendingLISTs[i+1:]...)
			i--
			uc.forEachDownstreamByID(pl.downstreamID, func(dc *downstreamConn) {
				dc.SendMessage(&irc.Message{
					Prefix:  dc.srv.prefix(),
					Command: irc.RPL_LISTEND,
					Params:  []string{dc.nick, "End of /LIST"},
				})
			})
		}
		found = true
		if !all {
			delete(uc.pendingLISTDownstreamSet, pl.downstreamID)
			uc.user.forEachUpstream(func(uc *upstreamConn) {
				uc.trySendLIST(pl.downstreamID)
			})
			return
		}
	}
	return
}

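// trySendLIST sends the given downstream's oldest buffered LIST command to
// this upstream, unless a LIST from that downstream is already in flight on
// this connection.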
func (uc *upstreamConn) trySendLIST(downstreamID uint64) {
	// must be called with a lock in uc.user.pendingLISTsLock

	if _, ok := uc.pendingLISTDownstreamSet[downstreamID]; ok {
		// a LIST command is already pending
		// we will try again when that command is completed
		return
	}

	for _, pl := range uc.user.pendingLISTs {
		if pl.downstreamID != downstreamID {
			continue
		}
		// this is the downstream's oldest pending LIST set
		listCommand, ok := pl.pendingCommands[uc.network.ID]
		if !ok {
			// there is no command for this upstream in this LIST set
			// do not send anything
			continue
		}
		// there is a command for this upstream in this LIST set
		// send it now

		uc.SendMessageLabeled(downstreamID, listCommand)

		uc.pendingLISTDownstreamSet[downstreamID] = struct{}{}
		return
	}
}

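// parseMembershipPrefix splits a leading membership prefix (e.g. "@" or "+")
// from a nick. It returns the matching membership, or nil if the first
// character is not a known prefix, along with the remaining nick.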
func (uc *upstreamConn) parseMembershipPrefix(s string) (membership *membership, nick string) {
	for _, m := range uc.availableMemberships {
		if m.Prefix == s[0] {
			return &m, s[1:]
		}
	}
	return nil, s
}

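// handleMessage processes a single message received from the upstream
// server, updating connection state and forwarding events to the relevant
// downstream connections. Messages labeled "sd-<downstreamID>-<n>" are routed
// back to the downstream that originated the request.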
func (uc *upstreamConn) handleMessage(msg *irc.Message) error {
	var label string
	if l, ok := msg.GetTag("label"); ok {
		label = l
	}

	var msgBatch *batch
	if batchName, ok := msg.GetTag("batch"); ok {
		b, ok := uc.batches[batchName]
		if !ok {
			return fmt.Errorf("unexpected batch reference: batch was not defined: %q", batchName)
		}
		msgBatch = &b
		if label == "" {
			label = msgBatch.Label
		}
	}

	var downstreamID uint64 = 0
	if label != "" {
		var labelOffset uint64
		n, err := fmt.Sscanf(label, "sd-%d-%d", &downstreamID, &labelOffset)
		if err == nil && n < 2 {
			err = errors.New("not enough arguments")
		}
		if err != nil {
			return fmt.Errorf("unexpected message label: invalid downstream reference for label %q: %v", label, err)
		}
	}

	switch msg.Command {
	case "PING":
		uc.SendMessage(&irc.Message{
			Command: "PONG",
			Params:  msg.Params,
		})
		return nil
	case "NOTICE":
		uc.logger.Print(msg)

		if msg.Prefix.User == "" && msg.Prefix.Host == "" { // server message
			uc.forEachDownstream(func(dc *downstreamConn) {
				dc.SendMessage(&irc.Message{
					Command: "NOTICE",
					Params:  msg.Params,
				})
			})
		} else { // regular user NOTICE
			var nick, text string
			if err := parseMessageParams(msg, &nick, &text); err != nil {
				return err
			}

			target := nick
			if nick == uc.nick {
				target = msg.Prefix.Name
			}
			uc.appendLog(target, "<%s> %s", msg.Prefix.Name, text)

			uc.forEachDownstream(func(dc *downstreamConn) {
				dc.SendMessage(&irc.Message{
					Prefix:  dc.marshalUserPrefix(uc, msg.Prefix),
					Command: "NOTICE",
					Params:  []string{dc.marshalEntity(uc, nick), text},
				})
			})
		}
	case "CAP":
		var subCmd string
		if err := parseMessageParams(msg, nil, &subCmd); err != nil {
			return err
		}
		subCmd = strings.ToUpper(subCmd)
		subParams := msg.Params[2:]
		switch subCmd {
		case "LS":
			if len(subParams) < 1 {
				return newNeedMoreParamsError(msg.Command)
			}
			caps := strings.Fields(subParams[len(subParams)-1])
			more := len(subParams) >= 2 && subParams[len(subParams)-2] == "*"

			for _, s := range caps {
				kv := strings.SplitN(s, "=", 2)
				k := strings.ToLower(kv[0])
				var v string
				if len(kv) == 2 {
					v = kv[1]
				}
				uc.caps[k] = v
			}

			if more {
				break // wait to receive all capabilities
			}

			requestCaps := make([]string, 0, 16)
			for _, c := range []string{"message-tags", "batch", "labeled-response", "server-time"} {
				if _, ok := uc.caps[c]; ok {
					requestCaps = append(requestCaps, c)
				}
			}

			if uc.requestSASL() {
				requestCaps = append(requestCaps, "sasl")
			}

			if len(requestCaps) > 0 {
				uc.SendMessage(&irc.Message{
					Command: "CAP",
					Params:  []string{"REQ", strings.Join(requestCaps, " ")},
				})
			}

			if uc.requestSASL() {
				break // we'll send CAP END after authentication is completed
			}

			uc.SendMessage(&irc.Message{
				Command: "CAP",
				Params:  []string{"END"},
			})
		case "ACK", "NAK":
			if len(subParams) < 1 {
				return newNeedMoreParamsError(msg.Command)
			}
			caps := strings.Fields(subParams[0])

			for _, name := range caps {
				if err := uc.handleCapAck(strings.ToLower(name), subCmd == "ACK"); err != nil {
					return err
				}
			}

			if uc.saslClient == nil {
				uc.SendMessage(&irc.Message{
					Command: "CAP",
					Params:  []string{"END"},
				})
			}
		default:
			uc.logger.Printf("unhandled message: %v", msg)
		}
	case "AUTHENTICATE":
		if uc.saslClient == nil {
			return fmt.Errorf("received unexpected AUTHENTICATE message")
		}

		// TODO: if a challenge is 400 bytes long, buffer it
		var challengeStr string
		if err := parseMessageParams(msg, &challengeStr); err != nil {
			uc.SendMessage(&irc.Message{
				Command: "AUTHENTICATE",
				Params:  []string{"*"},
			})
			return err
		}

		var challenge []byte
		if challengeStr != "+" {
			var err error
			challenge, err = base64.StdEncoding.DecodeString(challengeStr)
			if err != nil {
				uc.SendMessage(&irc.Message{
					Command: "AUTHENTICATE",
					Params:  []string{"*"},
				})
				return err
			}
		}

		var resp []byte
		var err error
		if !uc.saslStarted {
			_, resp, err = uc.saslClient.Start()
			uc.saslStarted = true
		} else {
			resp, err = uc.saslClient.Next(challenge)
		}
		if err != nil {
			uc.SendMessage(&irc.Message{
				Command: "AUTHENTICATE",
				Params:  []string{"*"},
			})
			return err
		}

		// TODO: send response in multiple chunks if >= 400 bytes
		var respStr = "+"
		if resp != nil {
			respStr = base64.StdEncoding.EncodeToString(resp)
		}

		uc.SendMessage(&irc.Message{
			Command: "AUTHENTICATE",
			Params:  []string{respStr},
		})
	case irc.RPL_LOGGEDIN:
		var account string
		if err := parseMessageParams(msg, nil, nil, &account); err != nil {
			return err
		}
		uc.logger.Printf("logged in with account %q", account)
	case irc.RPL_LOGGEDOUT:
		uc.logger.Printf("logged out")
	case irc.ERR_NICKLOCKED, irc.RPL_SASLSUCCESS, irc.ERR_SASLFAIL, irc.ERR_SASLTOOLONG, irc.ERR_SASLABORTED:
		var info string
		if err := parseMessageParams(msg, nil, &info); err != nil {
			return err
		}
		switch msg.Command {
		case irc.ERR_NICKLOCKED:
			uc.logger.Printf("invalid nick used with SASL authentication: %v", info)
		case irc.ERR_SASLFAIL:
			uc.logger.Printf("SASL authentication failed: %v", info)
		case irc.ERR_SASLTOOLONG:
			uc.logger.Printf("SASL message too long: %v", info)
		}

		uc.saslClient = nil
		uc.saslStarted = false

		uc.SendMessage(&irc.Message{
			Command: "CAP",
			Params:  []string{"END"},
		})
	case irc.RPL_WELCOME:
		uc.registered = true
		uc.logger.Printf("connection registered")

		channels, err := uc.srv.db.ListChannels(uc.network.ID)
		if err != nil {
			uc.logger.Printf("failed to list channels from database: %v", err)
			break
		}

		for _, ch := range channels {
			params := []string{ch.Name}
			if ch.Key != "" {
				params = append(params, ch.Key)
			}
			uc.SendMessage(&irc.Message{
				Command: "JOIN",
				Params:  params,
			})
		}
	case irc.RPL_MYINFO:
		if err := parseMessageParams(msg, nil, &uc.serverName, nil, &uc.availableUserModes, nil); err != nil {
			return err
		}
	case irc.RPL_ISUPPORT:
		if err := parseMessageParams(msg, nil, nil); err != nil {
			return err
		}
		for _, token := range msg.Params[1 : len(msg.Params)-1] {
			negate := false
			parameter := token
			value := ""
			if strings.HasPrefix(token, "-") {
				negate = true
				token = token[1:]
			} else {
				if i := strings.IndexByte(token, '='); i >= 0 {
					parameter = token[:i]
					value = token[i+1:]
				}
			}
			if !negate {
				switch parameter {
				case "CHANMODES":
					parts := strings.SplitN(value, ",", 5)
					if len(parts) < 4 {
						return fmt.Errorf("malformed ISUPPORT CHANMODES value: %v", value)
					}
					modes := make(map[byte]channelModeType)
					for i, mt := range []channelModeType{modeTypeA, modeTypeB, modeTypeC, modeTypeD} {
						for j := 0; j < len(parts[i]); j++ {
							mode := parts[i][j]
							modes[mode] = mt
						}
					}
					uc.availableChannelModes = modes
				case "CHANTYPES":
					uc.availableChannelTypes = value
				case "PREFIX":
					if value == "" {
						uc.availableMemberships = nil
					} else {
						if value[0] != '(' {
							return fmt.Errorf("malformed ISUPPORT PREFIX value: %v", value)
						}
						sep := strings.IndexByte(value, ')')
						if sep < 0 || len(value) != sep*2 {
							return fmt.Errorf("malformed ISUPPORT PREFIX value: %v", value)
						}
						memberships := make([]membership, len(value)/2-1)
						for i := range memberships {
							memberships[i] = membership{
								Mode:   value[i+1],
								Prefix: value[sep+i+1],
							}
						}
						uc.availableMemberships = memberships
					}
				}
			} else {
				// TODO: handle ISUPPORT negations
			}
		}
	case "BATCH":
		var tag string
		if err := parseMessageParams(msg, &tag); err != nil {
			return err
		}

		if strings.HasPrefix(tag, "+") {
			tag = tag[1:]
			if _, ok := uc.batches[tag]; ok {
				return fmt.Errorf("unexpected BATCH reference tag: batch was already defined: %q", tag)
			}
			var batchType string
			if err := parseMessageParams(msg, nil, &batchType); err != nil {
				return err
			}
			label := label
			if label == "" && msgBatch != nil {
				label = msgBatch.Label
			}
			uc.batches[tag] = batch{
				Type:   batchType,
				Params: msg.Params[2:],
				Outer:  msgBatch,
				Label:  label,
			}
		} else if strings.HasPrefix(tag, "-") {
			tag = tag[1:]
			if _, ok := uc.batches[tag]; !ok {
				return fmt.Errorf("unknown BATCH reference tag: %q", tag)
			}
			delete(uc.batches, tag)
		} else {
			return fmt.Errorf("unexpected BATCH reference tag: missing +/- prefix: %q", tag)
		}
	case "NICK":
		if msg.Prefix == nil {
			return fmt.Errorf("expected a prefix")
		}

		var newNick string
		if err := parseMessageParams(msg, &newNick); err != nil {
			return err
		}

		if msg.Prefix.Name == uc.nick {
			uc.logger.Printf("changed nick from %q to %q", uc.nick, newNick)
			uc.nick = newNick
		}

		for _, ch := range uc.channels {
			if membership, ok := ch.Members[msg.Prefix.Name]; ok {
				delete(ch.Members, msg.Prefix.Name)
				ch.Members[newNick] = membership
				uc.appendLog(ch.Name, "*** %s is now known as %s", msg.Prefix.Name, newNick)
			}
		}

		if msg.Prefix.Name != uc.nick {
			uc.forEachDownstream(func(dc *downstreamConn) {
				dc.SendMessage(&irc.Message{
					Prefix:  dc.marshalUserPrefix(uc, msg.Prefix),
					Command: "NICK",
					Params:  []string{newNick},
				})
			})
		}
	case "JOIN":
		if msg.Prefix == nil {
			return fmt.Errorf("expected a prefix")
		}

		var channels string
		if err := parseMessageParams(msg, &channels); err != nil {
			return err
		}

		for _, ch := range strings.Split(channels, ",") {
			if msg.Prefix.Name == uc.nick {
				uc.logger.Printf("joined channel %q", ch)
				uc.channels[ch] = &upstreamChannel{
					Name:    ch,
					conn:    uc,
					Members: make(map[string]*membership),
				}

				uc.SendMessage(&irc.Message{
					Command: "MODE",
					Params:  []string{ch},
				})
			} else {
				ch, err := uc.getChannel(ch)
				if err != nil {
					return err
				}
				ch.Members[msg.Prefix.Name] = nil
			}

			uc.appendLog(ch, "*** Joins: %s (%s@%s)", msg.Prefix.Name, msg.Prefix.User, msg.Prefix.Host)

			uc.forEachDownstream(func(dc *downstreamConn) {
				dc.SendMessage(&irc.Message{
					Prefix:  dc.marshalUserPrefix(uc, msg.Prefix),
					Command: "JOIN",
					Params:  []string{dc.marshalChannel(uc, ch)},
				})
			})
		}
	case "PART":
		if msg.Prefix == nil {
			return fmt.Errorf("expected a prefix")
		}

		var channels string
		if err := parseMessageParams(msg, &channels); err != nil {
			return err
		}

		var reason string
		if len(msg.Params) > 1 {
			reason = msg.Params[1]
		}

		for _, ch := range strings.Split(channels, ",") {
			if msg.Prefix.Name == uc.nick {
				uc.logger.Printf("parted channel %q", ch)
				delete(uc.channels, ch)
			} else {
				ch, err := uc.getChannel(ch)
				if err != nil {
					return err
				}
				delete(ch.Members, msg.Prefix.Name)
			}

			uc.appendLog(ch, "*** Parts: %s (%s@%s) (%s)", msg.Prefix.Name, msg.Prefix.User, msg.Prefix.Host, reason)

			uc.forEachDownstream(func(dc *downstreamConn) {
				dc.SendMessage(&irc.Message{
					Prefix:  dc.marshalUserPrefix(uc, msg.Prefix),
					Command: "PART",
					Params:  []string{dc.marshalChannel(uc, ch)},
				})
			})
		}
	case "KICK":
		if msg.Prefix == nil {
			return fmt.Errorf("expected a prefix")
		}

		var channel, user string
		if err := parseMessageParams(msg, &channel, &user); err != nil {
			return err
		}

		var reason string
		if len(msg.Params) > 2 {
			reason = msg.Params[2]
		}

		if user == uc.nick {
			uc.logger.Printf("kicked from channel %q by %s", channel, msg.Prefix.Name)
			delete(uc.channels, channel)
		} else {
			ch, err := uc.getChannel(channel)
			if err != nil {
				return err
			}
			delete(ch.Members, user)
		}

		uc.appendLog(channel, "*** %s was kicked by %s (%s)", user, msg.Prefix.Name, reason)

		uc.forEachDownstream(func(dc *downstreamConn) {
			params := []string{dc.marshalChannel(uc, channel), dc.marshalNick(uc, user)}
			if reason != "" {
				params = append(params, reason)
			}
			dc.SendMessage(&irc.Message{
				Prefix:  dc.marshalUserPrefix(uc, msg.Prefix),
				Command: "KICK",
				Params:  params,
			})
		})
	case "QUIT":
		if msg.Prefix == nil {
			return fmt.Errorf("expected a prefix")
		}

		var reason string
		if len(msg.Params) > 0 {
			reason = msg.Params[0]
		}

		if msg.Prefix.Name == uc.nick {
			uc.logger.Printf("quit")
		}

		for _, ch := range uc.channels {
			if _, ok := ch.Members[msg.Prefix.Name]; ok {
				delete(ch.Members, msg.Prefix.Name)

				uc.appendLog(ch.Name, "*** Quits: %s (%s@%s) (%s)", msg.Prefix.Name, msg.Prefix.User, msg.Prefix.Host, reason)
			}
		}

		if msg.Prefix.Name != uc.nick {
			uc.forEachDownstream(func(dc *downstreamConn) {
				dc.SendMessage(&irc.Message{
					Prefix:  dc.marshalUserPrefix(uc, msg.Prefix),
					Command: "QUIT",
					Params:  msg.Params,
				})
			})
		}
	case irc.RPL_TOPIC, irc.RPL_NOTOPIC:
		var name, topic string
		if err := parseMessageParams(msg, nil, &name, &topic); err != nil {
			return err
		}
		ch, err := uc.getChannel(name)
		if err != nil {
			return err
		}
		if msg.Command == irc.RPL_TOPIC {
			ch.Topic = topic
		} else {
			ch.Topic = ""
		}
	case "TOPIC":
		var name string
		if err := parseMessageParams(msg, &name); err != nil {
			return err
		}
		ch, err := uc.getChannel(name)
		if err != nil {
			return err
		}
		if len(msg.Params) > 1 {
			ch.Topic = msg.Params[1]
		} else {
			ch.Topic = ""
		}
		uc.forEachDownstream(func(dc *downstreamConn) {
			params := []string{dc.marshalChannel(uc, name)}
			if ch.Topic != "" {
				params = append(params, ch.Topic)
			}
			dc.SendMessage(&irc.Message{
				Prefix:  dc.marshalUserPrefix(uc, msg.Prefix),
				Command: "TOPIC",
				Params:  params,
			})
		})
	case "MODE":
		var name, modeStr string
		if err := parseMessageParams(msg, &name, &modeStr); err != nil {
			return err
		}

		if !uc.isChannel(name) { // user mode change
			if name != uc.nick {
				return fmt.Errorf("received MODE message for unknown nick %q", name)
			}
			return uc.modes.Apply(modeStr)
			// TODO: notify downstreams about user mode change?
		} else { // channel mode change
			ch, err := uc.getChannel(name)
			if err != nil {
				return err
			}

			if ch.modes != nil {
				if err := ch.modes.Apply(uc.availableChannelModes, modeStr, msg.Params[2:]...); err != nil {
					return err
				}
			}

			modeMsg := modeStr
			for _, v := range msg.Params[2:] {
				modeMsg += " " + v
			}
			uc.appendLog(ch.Name, "*** %s sets mode: %s", msg.Prefix.Name, modeMsg)

			uc.forEachDownstream(func(dc *downstreamConn) {
				params := []string{dc.marshalChannel(uc, name), modeStr}
				params = append(params, msg.Params[2:]...)

				dc.SendMessage(&irc.Message{
					Prefix:  dc.marshalUserPrefix(uc, msg.Prefix),
					Command: "MODE",
					Params:  params,
				})
			})
		}
	case irc.RPL_UMODEIS:
		if err := parseMessageParams(msg, nil); err != nil {
			return err
		}
		modeStr := ""
		if len(msg.Params) > 1 {
			modeStr = msg.Params[1]
		}

		uc.modes = ""
		if err := uc.modes.Apply(modeStr); err != nil {
			return err
		}
		// TODO: send RPL_UMODEIS to downstream connections when applicable
	case irc.RPL_CHANNELMODEIS:
		var channel string
		if err := parseMessageParams(msg, nil, &channel); err != nil {
			return err
		}
		modeStr := ""
		if len(msg.Params) > 2 {
			modeStr = msg.Params[2]
		}

		ch, err := uc.getChannel(channel)
		if err != nil {
			return err
		}

		firstMode := ch.modes == nil
		ch.modes = make(map[byte]string)
		if err := ch.modes.Apply(uc.availableChannelModes, modeStr, msg.Params[3:]...); err != nil {
			return err
		}
		if firstMode {
			modeStr, modeParams := ch.modes.Format()

			uc.forEachDownstream(func(dc *downstreamConn) {
				params := []string{dc.nick, dc.marshalChannel(uc, channel), modeStr}
				params = append(params, modeParams...)

				dc.SendMessage(&irc.Message{
					Prefix:  dc.srv.prefix(),
					Command: irc.RPL_CHANNELMODEIS,
					Params:  params,
				})
			})
		}
	case rpl_creationtime:
		var channel, creationTime string
		if err := parseMessageParams(msg, nil, &channel, &creationTime); err != nil {
			return err
		}

		ch, err := uc.getChannel(channel)
		if err != nil {
			return err
		}

		firstCreationTime := ch.creationTime == ""
		ch.creationTime = creationTime
		if firstCreationTime {
			uc.forEachDownstream(func(dc *downstreamConn) {
				dc.SendMessage(&irc.Message{
					Prefix:  dc.srv.prefix(),
					Command: rpl_creationtime,
					Params:  []string{dc.nick, channel, creationTime},
				})
			})
		}
	case rpl_topicwhotime:
		var name, who, timeStr string
		if err := parseMessageParams(msg, nil, &name, &who, &timeStr); err != nil {
			return err
		}
		ch, err := uc.getChannel(name)
		if err != nil {
			return err
		}
		ch.TopicWho = who
		sec, err := strconv.ParseInt(timeStr, 10, 64)
		if err != nil {
			return fmt.Errorf("failed to parse topic time: %v", err)
		}
		ch.TopicTime = time.Unix(sec, 0)
	case irc.RPL_LIST:
		var channel, clients, topic string
		if err := parseMessageParams(msg, nil, &channel, &clients, &topic); err != nil {
			return err
		}

		pl := uc.getPendingLIST()
if pl == nil {
|
|
|
|
return fmt.Errorf("unexpected RPL_LIST: no matching pending LIST")
|
|
|
|
}
|
|
|
|
|
|
|
|
uc.forEachDownstreamByID(pl.downstreamID, func(dc *downstreamConn) {
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_LIST,
|
|
|
|
Params: []string{dc.nick, dc.marshalChannel(uc, channel), clients, topic},
|
|
|
|
})
|
|
|
|
})
|
|
|
|
case irc.RPL_LISTEND:
|
2020-03-28 00:03:00 +00:00
|
|
|
ok := uc.endPendingLISTs(false)
|
Add LIST support
2020-03-26 01:40:30 +00:00
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("unexpected RPL_LISTEND: no matching pending LIST")
|
|
|
|
}
|
2020-02-06 18:22:04 +00:00
|
|
|
case irc.RPL_NAMREPLY:
|
2020-02-07 11:36:02 +00:00
|
|
|
var name, statusStr, members string
|
|
|
|
if err := parseMessageParams(msg, nil, &statusStr, &name, &members); err != nil {
|
|
|
|
return err
|
2020-02-06 18:22:04 +00:00
|
|
|
}
|
2020-03-21 00:24:29 +00:00
|
|
|
|
|
|
|
ch, ok := uc.channels[name]
|
|
|
|
if !ok {
|
|
|
|
// NAMES on a channel we have not joined, forward to downstream
|
2020-03-26 01:39:04 +00:00
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
2020-03-21 00:24:29 +00:00
|
|
|
channel := dc.marshalChannel(uc, name)
|
2020-03-27 19:09:38 +00:00
|
|
|
members := splitSpace(members)
|
2020-03-21 00:24:29 +00:00
|
|
|
for i, member := range members {
|
|
|
|
membership, nick := uc.parseMembershipPrefix(member)
|
|
|
|
members[i] = membership.String() + dc.marshalNick(uc, nick)
|
|
|
|
}
|
|
|
|
memberStr := strings.Join(members, " ")
|
|
|
|
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_NAMREPLY,
|
|
|
|
Params: []string{dc.nick, statusStr, channel, memberStr},
|
|
|
|
})
|
|
|
|
})
|
|
|
|
return nil
|
2020-02-06 18:22:04 +00:00
|
|
|
}
|
|
|
|
|
2020-02-07 11:36:02 +00:00
|
|
|
status, err := parseChannelStatus(statusStr)
|
2020-02-06 18:22:04 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
ch.Status = status
|
|
|
|
|
2020-03-27 19:09:38 +00:00
|
|
|
for _, s := range splitSpace(members) {
|
2020-03-20 23:48:19 +00:00
|
|
|
membership, nick := uc.parseMembershipPrefix(s)
|
2020-02-06 18:22:04 +00:00
|
|
|
ch.Members[nick] = membership
|
|
|
|
}
|
|
|
|
case irc.RPL_ENDOFNAMES:
|
2020-02-07 11:36:02 +00:00
|
|
|
var name string
|
|
|
|
if err := parseMessageParams(msg, nil, &name); err != nil {
|
|
|
|
return err
|
2020-02-06 20:43:22 +00:00
|
|
|
}
|
2020-03-21 00:24:29 +00:00
|
|
|
|
|
|
|
ch, ok := uc.channels[name]
|
|
|
|
if !ok {
|
|
|
|
// NAMES on a channel we have not joined, forward to downstream
|
2020-03-26 01:39:04 +00:00
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
2020-03-21 00:24:29 +00:00
|
|
|
channel := dc.marshalChannel(uc, name)
|
|
|
|
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_ENDOFNAMES,
|
|
|
|
Params: []string{dc.nick, channel, "End of /NAMES list"},
|
|
|
|
})
|
|
|
|
})
|
|
|
|
return nil
|
2020-02-06 20:43:22 +00:00
|
|
|
}
|
|
|
|
|
2020-02-07 09:54:03 +00:00
|
|
|
if ch.complete {
|
|
|
|
return fmt.Errorf("received unexpected RPL_ENDOFNAMES")
|
|
|
|
}
|
2020-02-06 20:43:22 +00:00
|
|
|
ch.complete = true
|
2020-02-06 21:19:31 +00:00
|
|
|
|
2020-03-04 14:44:13 +00:00
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
2020-02-06 21:19:31 +00:00
|
|
|
forwardChannel(dc, ch)
|
2020-02-07 10:56:36 +00:00
|
|
|
})
|
2020-03-19 23:23:19 +00:00
|
|
|
case irc.RPL_WHOREPLY:
|
|
|
|
var channel, username, host, server, nick, mode, trailing string
|
|
|
|
if err := parseMessageParams(msg, nil, &channel, &username, &host, &server, &nick, &mode, &trailing); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
parts := strings.SplitN(trailing, " ", 2)
|
|
|
|
if len(parts) != 2 {
|
|
|
|
return fmt.Errorf("received malformed RPL_WHOREPLY: wrong trailing parameter: %s", trailing)
|
|
|
|
}
|
|
|
|
realname := parts[1]
|
|
|
|
hops, err := strconv.Atoi(parts[0])
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("received malformed RPL_WHOREPLY: wrong hop count: %s", parts[0])
|
|
|
|
}
|
|
|
|
hops++
|
|
|
|
|
|
|
|
trailing = strconv.Itoa(hops) + " " + realname
|
|
|
|
|
2020-03-26 01:39:04 +00:00
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
2020-03-19 23:23:19 +00:00
|
|
|
channel := channel
|
|
|
|
if channel != "*" {
|
|
|
|
channel = dc.marshalChannel(uc, channel)
|
|
|
|
}
|
|
|
|
nick := dc.marshalNick(uc, nick)
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_WHOREPLY,
|
|
|
|
Params: []string{dc.nick, channel, username, host, server, nick, mode, trailing},
|
|
|
|
})
|
|
|
|
})
|
|
|
|
case irc.RPL_ENDOFWHO:
|
|
|
|
var name string
|
|
|
|
if err := parseMessageParams(msg, nil, &name); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-03-26 01:39:04 +00:00
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
2020-03-19 23:23:19 +00:00
|
|
|
name := name
|
|
|
|
if name != "*" {
|
|
|
|
// TODO: support WHO masks
|
|
|
|
name = dc.marshalEntity(uc, name)
|
|
|
|
}
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_ENDOFWHO,
|
2020-03-21 23:46:56 +00:00
|
|
|
Params: []string{dc.nick, name, "End of /WHO list"},
|
2020-03-20 01:15:23 +00:00
|
|
|
})
|
|
|
|
})
|
|
|
|
case irc.RPL_WHOISUSER:
|
|
|
|
var nick, username, host, realname string
|
|
|
|
if err := parseMessageParams(msg, nil, &nick, &username, &host, nil, &realname); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-03-26 01:39:04 +00:00
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
2020-03-20 01:15:23 +00:00
|
|
|
nick := dc.marshalNick(uc, nick)
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_WHOISUSER,
|
|
|
|
Params: []string{dc.nick, nick, username, host, "*", realname},
|
|
|
|
})
|
|
|
|
})
|
|
|
|
case irc.RPL_WHOISSERVER:
|
|
|
|
var nick, server, serverInfo string
|
|
|
|
if err := parseMessageParams(msg, nil, &nick, &server, &serverInfo); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-03-26 01:39:04 +00:00
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
2020-03-20 01:15:23 +00:00
|
|
|
nick := dc.marshalNick(uc, nick)
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_WHOISSERVER,
|
|
|
|
Params: []string{dc.nick, nick, server, serverInfo},
|
|
|
|
})
|
|
|
|
})
|
|
|
|
case irc.RPL_WHOISOPERATOR:
|
|
|
|
var nick string
|
|
|
|
if err := parseMessageParams(msg, nil, &nick); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-03-26 01:39:04 +00:00
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
2020-03-20 01:15:23 +00:00
|
|
|
nick := dc.marshalNick(uc, nick)
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_WHOISOPERATOR,
|
|
|
|
Params: []string{dc.nick, nick, "is an IRC operator"},
|
|
|
|
})
|
|
|
|
})
|
|
|
|
case irc.RPL_WHOISIDLE:
|
|
|
|
var nick string
|
|
|
|
if err := parseMessageParams(msg, nil, &nick, nil); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-03-26 01:39:04 +00:00
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
2020-03-20 01:15:23 +00:00
|
|
|
nick := dc.marshalNick(uc, nick)
|
|
|
|
params := []string{dc.nick, nick}
|
|
|
|
params = append(params, msg.Params[2:]...)
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_WHOISIDLE,
|
|
|
|
Params: params,
|
|
|
|
})
|
|
|
|
})
|
|
|
|
case irc.RPL_WHOISCHANNELS:
|
|
|
|
var nick, channelList string
|
|
|
|
if err := parseMessageParams(msg, nil, &nick, &channelList); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-03-27 19:09:38 +00:00
|
|
|
channels := splitSpace(channelList)
|
2020-03-20 01:15:23 +00:00
|
|
|
|
2020-03-26 01:39:04 +00:00
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
2020-03-20 01:15:23 +00:00
|
|
|
nick := dc.marshalNick(uc, nick)
|
|
|
|
channelList := make([]string, len(channels))
|
|
|
|
for i, channel := range channels {
|
2020-03-20 23:48:19 +00:00
|
|
|
prefix, channel := uc.parseMembershipPrefix(channel)
|
2020-03-20 01:15:23 +00:00
|
|
|
channel = dc.marshalChannel(uc, channel)
|
|
|
|
channelList[i] = prefix.String() + channel
|
|
|
|
}
|
|
|
|
channels := strings.Join(channelList, " ")
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_WHOISCHANNELS,
|
|
|
|
Params: []string{dc.nick, nick, channels},
|
|
|
|
})
|
|
|
|
})
|
|
|
|
case irc.RPL_ENDOFWHOIS:
|
|
|
|
var nick string
|
|
|
|
if err := parseMessageParams(msg, nil, &nick); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-03-26 01:39:04 +00:00
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
2020-03-20 01:15:23 +00:00
|
|
|
nick := dc.marshalNick(uc, nick)
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_ENDOFWHOIS,
|
2020-03-21 23:46:56 +00:00
|
|
|
Params: []string{dc.nick, nick, "End of /WHOIS list"},
|
2020-03-19 23:23:19 +00:00
|
|
|
})
|
|
|
|
})
|
2020-02-07 10:07:01 +00:00
|
|
|
case "PRIVMSG":
|
2020-03-18 11:23:08 +00:00
|
|
|
if msg.Prefix == nil {
|
|
|
|
return fmt.Errorf("expected a prefix")
|
|
|
|
}
|
|
|
|
|
2020-03-25 22:51:28 +00:00
|
|
|
var nick, text string
|
|
|
|
if err := parseMessageParams(msg, &nick, &text); err != nil {
|
2020-02-19 17:25:19 +00:00
|
|
|
return err
|
|
|
|
}
|
2020-03-18 11:23:08 +00:00
|
|
|
|
|
|
|
if msg.Prefix.Name == serviceNick {
|
|
|
|
uc.logger.Printf("skipping PRIVMSG from soju's service: %v", msg)
|
|
|
|
break
|
|
|
|
}
|
|
|
|
if nick == serviceNick {
|
|
|
|
uc.logger.Printf("skipping PRIVMSG to soju's service: %v", msg)
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
2020-03-31 17:45:04 +00:00
|
|
|
if _, ok := msg.Tags["time"]; !ok {
|
|
|
|
msg.Tags["time"] = irc.TagValue(time.Now().Format(serverTimeLayout))
|
|
|
|
}
|
|
|
|
|
2020-03-25 22:51:28 +00:00
|
|
|
target := nick
|
|
|
|
if nick == uc.nick {
|
|
|
|
target = msg.Prefix.Name
|
|
|
|
}
|
2020-03-31 15:30:45 +00:00
|
|
|
uc.appendLog(target, "<%s> %s", msg.Prefix.Name, text)
|
2020-03-25 22:51:28 +00:00
|
|
|
|
2020-03-25 09:53:08 +00:00
|
|
|
uc.network.ring.Produce(msg)
|
2020-03-18 02:11:38 +00:00
|
|
|
case "INVITE":
|
|
|
|
var nick string
|
|
|
|
var channel string
|
|
|
|
if err := parseMessageParams(msg, &nick, &channel); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.marshalUserPrefix(uc, msg.Prefix),
|
|
|
|
Command: "INVITE",
|
|
|
|
Params: []string{dc.marshalNick(uc, nick), dc.marshalChannel(uc, channel)},
|
|
|
|
})
|
|
|
|
})
|
2020-03-26 05:03:07 +00:00
|
|
|
case irc.RPL_INVITING:
|
|
|
|
var nick string
|
|
|
|
var channel string
|
|
|
|
if err := parseMessageParams(msg, &nick, &channel); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_INVITING,
|
|
|
|
Params: []string{dc.nick, dc.marshalNick(uc, nick), dc.marshalChannel(uc, channel)},
|
|
|
|
})
|
|
|
|
})
|
Add LIST support
2020-03-26 01:40:30 +00:00
|
|
|
case irc.ERR_UNKNOWNCOMMAND, irc.RPL_TRYAGAIN:
|
|
|
|
var command, reason string
|
|
|
|
if err := parseMessageParams(msg, nil, &command, &reason); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if command == "LIST" {
|
2020-03-28 00:03:00 +00:00
|
|
|
ok := uc.endPendingLISTs(false)
|
Add LIST support
2020-03-26 01:40:30 +00:00
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("unexpected response for LIST: %q: no matching pending LIST", msg.Command)
|
|
|
|
}
|
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: uc.srv.prefix(),
|
|
|
|
Command: msg.Command,
|
|
|
|
Params: []string{dc.nick, "LIST", reason},
|
|
|
|
})
|
|
|
|
})
|
|
|
|
}
|
2020-03-23 00:34:34 +00:00
|
|
|
case "TAGMSG":
|
|
|
|
// TODO: relay to downstream connections that accept message-tags
|
2020-03-23 02:21:43 +00:00
|
|
|
case "ACK":
|
|
|
|
// Ignore
|
2020-04-01 10:16:32 +00:00
|
|
|
case irc.RPL_NOWAWAY, irc.RPL_UNAWAY:
|
|
|
|
// Ignore
|
2020-02-06 16:04:49 +00:00
|
|
|
case irc.RPL_YOURHOST, irc.RPL_CREATED:
|
2020-02-06 15:39:09 +00:00
|
|
|
// Ignore
|
|
|
|
case irc.RPL_LUSERCLIENT, irc.RPL_LUSEROP, irc.RPL_LUSERUNKNOWN, irc.RPL_LUSERCHANNELS, irc.RPL_LUSERME:
|
|
|
|
// Ignore
|
|
|
|
case irc.RPL_MOTDSTART, irc.RPL_MOTD, irc.RPL_ENDOFMOTD:
|
|
|
|
// Ignore
|
Add LIST support
2020-03-26 01:40:30 +00:00
|
|
|
case irc.RPL_LISTSTART:
|
|
|
|
// Ignore
|
2020-02-06 15:39:09 +00:00
|
|
|
case rpl_localusers, rpl_globalusers:
|
|
|
|
// Ignore
|
2020-03-13 15:18:53 +00:00
|
|
|
case irc.RPL_STATSVLINE, rpl_statsping, irc.RPL_STATSBLINE, irc.RPL_STATSDLINE:
|
2020-02-06 15:39:09 +00:00
|
|
|
// Ignore
|
2020-02-06 15:18:19 +00:00
|
|
|
default:
|
2020-03-13 14:12:44 +00:00
|
|
|
uc.logger.Printf("unhandled message: %v", msg)
|
2020-02-06 15:18:19 +00:00
|
|
|
}
|
2020-02-06 15:39:09 +00:00
|
|
|
return nil
|
2020-02-06 15:18:19 +00:00
|
|
|
}
|
|
|
|
|
2020-03-27 19:09:38 +00:00
|
|
|
func splitSpace(s string) []string {
|
|
|
|
return strings.FieldsFunc(s, func(r rune) bool {
|
|
|
|
return r == ' '
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2020-02-17 11:36:42 +00:00
|
|
|
func (uc *upstreamConn) register() {
|
2020-03-04 17:22:58 +00:00
|
|
|
uc.nick = uc.network.Nick
|
|
|
|
uc.username = uc.network.Username
|
|
|
|
if uc.username == "" {
|
|
|
|
uc.username = uc.nick
|
|
|
|
}
|
|
|
|
uc.realname = uc.network.Realname
|
|
|
|
if uc.realname == "" {
|
|
|
|
uc.realname = uc.nick
|
|
|
|
}
|
|
|
|
|
2020-03-13 10:26:43 +00:00
|
|
|
uc.SendMessage(&irc.Message{
|
|
|
|
Command: "CAP",
|
|
|
|
Params: []string{"LS", "302"},
|
|
|
|
})
|
|
|
|
|
2020-03-13 11:06:02 +00:00
|
|
|
if uc.network.Pass != "" {
|
|
|
|
uc.SendMessage(&irc.Message{
|
|
|
|
Command: "PASS",
|
|
|
|
Params: []string{uc.network.Pass},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2020-02-17 15:17:31 +00:00
|
|
|
uc.SendMessage(&irc.Message{
|
2020-02-06 15:18:19 +00:00
|
|
|
Command: "NICK",
|
2020-02-19 17:25:19 +00:00
|
|
|
Params: []string{uc.nick},
|
2020-02-17 15:17:31 +00:00
|
|
|
})
|
|
|
|
uc.SendMessage(&irc.Message{
|
2020-02-06 15:18:19 +00:00
|
|
|
Command: "USER",
|
2020-03-04 17:22:58 +00:00
|
|
|
Params: []string{uc.username, "0", "*", uc.realname},
|
2020-02-17 15:17:31 +00:00
|
|
|
})
|
2020-02-07 11:37:44 +00:00
|
|
|
}
|
|
|
|
|
2020-04-01 10:14:36 +00:00
|
|
|
func (uc *upstreamConn) runUntilRegistered() error {
|
|
|
|
for !uc.registered {
|
|
|
|
msg, err := uc.irc.ReadMessage()
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to read message: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if uc.srv.Debug {
|
|
|
|
uc.logger.Printf("received: %v", msg)
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := uc.handleMessage(msg); err != nil {
|
|
|
|
return fmt.Errorf("failed to handle message %q: %v", msg, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-03-13 14:12:44 +00:00
|
|
|
func (uc *upstreamConn) requestSASL() bool {
|
|
|
|
if uc.network.SASL.Mechanism == "" {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
v, ok := uc.caps["sasl"]
|
|
|
|
if !ok {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if v != "" {
|
|
|
|
mechanisms := strings.Split(v, ",")
|
|
|
|
found := false
|
|
|
|
for _, mech := range mechanisms {
|
|
|
|
if strings.EqualFold(mech, uc.network.SASL.Mechanism) {
|
|
|
|
found = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !found {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
func (uc *upstreamConn) handleCapAck(name string, ok bool) error {
|
|
|
|
auth := &uc.network.SASL
|
|
|
|
switch name {
|
|
|
|
case "sasl":
|
|
|
|
if !ok {
|
|
|
|
uc.logger.Printf("server refused to acknowledge the SASL capability")
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
switch auth.Mechanism {
|
|
|
|
case "PLAIN":
|
|
|
|
uc.logger.Printf("starting SASL PLAIN authentication with username %q", auth.Plain.Username)
|
|
|
|
uc.saslClient = sasl.NewPlainClient("", auth.Plain.Username, auth.Plain.Password)
|
|
|
|
default:
|
|
|
|
return fmt.Errorf("unsupported SASL mechanism %q", name)
|
|
|
|
}
|
|
|
|
|
|
|
|
uc.SendMessage(&irc.Message{
|
|
|
|
Command: "AUTHENTICATE",
|
|
|
|
Params: []string{auth.Mechanism},
|
|
|
|
})
|
2020-03-23 00:34:34 +00:00
|
|
|
case "message-tags":
|
|
|
|
uc.tagsSupported = ok
|
2020-03-23 02:21:43 +00:00
|
|
|
case "labeled-response":
|
|
|
|
uc.labelsSupported = ok
|
2020-03-31 17:45:04 +00:00
|
|
|
case "batch", "server-time":
|
|
|
|
// Nothing to do
|
|
|
|
default:
|
|
|
|
uc.logger.Printf("received CAP ACK/NAK for a cap we don't support: %v", name)
|
2020-03-13 14:12:44 +00:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-03-27 15:33:19 +00:00
|
|
|
func (uc *upstreamConn) readMessages(ch chan<- event) error {
|
2020-02-06 15:18:19 +00:00
|
|
|
for {
|
2020-02-17 11:36:42 +00:00
|
|
|
msg, err := uc.irc.ReadMessage()
|
2020-02-06 15:18:19 +00:00
|
|
|
if err == io.EOF {
|
|
|
|
break
|
|
|
|
} else if err != nil {
|
|
|
|
return fmt.Errorf("failed to read IRC command: %v", err)
|
|
|
|
}
|
|
|
|
|
2020-02-18 15:31:18 +00:00
|
|
|
if uc.srv.Debug {
|
|
|
|
uc.logger.Printf("received: %v", msg)
|
|
|
|
}
|
|
|
|
|
2020-03-27 15:33:19 +00:00
|
|
|
ch <- eventUpstreamMessage{msg, uc}
|
2020-02-06 15:18:19 +00:00
|
|
|
}
|
|
|
|
|
2020-02-07 11:42:24 +00:00
|
|
|
return nil
|
2020-02-06 15:18:19 +00:00
|
|
|
}
|
2020-02-17 15:17:31 +00:00
|
|
|
|
2020-03-27 23:54:42 +00:00
|
|
|
// SendMessage queues a new outgoing message. It is safe to call from any
|
|
|
|
// goroutine.
|
2020-02-17 15:17:31 +00:00
|
|
|
func (uc *upstreamConn) SendMessage(msg *irc.Message) {
|
2020-03-16 10:26:54 +00:00
|
|
|
uc.outgoing <- msg
|
2020-02-17 15:17:31 +00:00
|
|
|
}
|
2020-03-23 02:21:43 +00:00
|
|
|
|
2020-03-26 03:30:11 +00:00
|
|
|
func (uc *upstreamConn) SendMessageLabeled(downstreamID uint64, msg *irc.Message) {
|
2020-03-23 02:21:43 +00:00
|
|
|
if uc.labelsSupported {
|
|
|
|
if msg.Tags == nil {
|
|
|
|
msg.Tags = make(map[string]irc.TagValue)
|
|
|
|
}
|
2020-03-26 03:30:11 +00:00
|
|
|
msg.Tags["label"] = irc.TagValue(fmt.Sprintf("sd-%d-%d", downstreamID, uc.nextLabelID))
|
2020-03-26 01:39:04 +00:00
|
|
|
uc.nextLabelID++
|
2020-03-23 02:21:43 +00:00
|
|
|
}
|
|
|
|
uc.SendMessage(msg)
|
|
|
|
}
|
2020-03-25 22:51:28 +00:00
|
|
|
|
|
|
|
// TODO: handle moving logs when a network name changes, when support for this is added
|
2020-03-31 15:30:45 +00:00
|
|
|
func (uc *upstreamConn) appendLog(entity string, format string, a ...interface{}) {
|
2020-03-25 22:51:28 +00:00
|
|
|
if uc.srv.LogPath == "" {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
// TODO: enforce maximum open file handles (LRU cache of file handles)
|
|
|
|
// TODO: handle non-monotonic clock behaviour
|
|
|
|
now := time.Now()
|
|
|
|
year, month, day := now.Date()
|
|
|
|
name := fmt.Sprintf("%04d-%02d-%02d.log", year, month, day)
|
|
|
|
log, ok := uc.logs[entity]
|
|
|
|
if !ok || log.name != name {
|
|
|
|
if ok {
|
|
|
|
log.file.Close()
|
|
|
|
delete(uc.logs, entity)
|
|
|
|
}
|
|
|
|
// TODO: handle/forbid network/entity names with illegal path characters
|
|
|
|
dir := filepath.Join(uc.srv.LogPath, uc.user.Username, uc.network.Name, entity)
|
2020-03-31 20:21:49 +00:00
|
|
|
if err := os.MkdirAll(dir, 0700); err != nil {
|
2020-03-25 22:51:28 +00:00
|
|
|
uc.logger.Printf("failed to log message: could not create logs directory %q: %v", dir, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
path := filepath.Join(dir, name)
|
|
|
|
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)
|
|
|
|
if err != nil {
|
|
|
|
uc.logger.Printf("failed to log message: could not open or create log file %q: %v", path, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
log = entityLog{
|
|
|
|
name: name,
|
|
|
|
file: f,
|
|
|
|
}
|
|
|
|
uc.logs[entity] = log
|
|
|
|
}
|
|
|
|
|
|
|
|
format = "[%02d:%02d:%02d] " + format + "\n"
|
|
|
|
args := []interface{}{now.Hour(), now.Minute(), now.Second()}
|
|
|
|
args = append(args, a...)
|
|
|
|
|
|
|
|
if _, err := fmt.Fprintf(log.file, format, args...); err != nil {
|
|
|
|
uc.logger.Printf("failed to log message to %q: %v", log.name, err)
|
|
|
|
}
|
|
|
|
}
|
2020-04-01 10:16:32 +00:00
|
|
|
|
|
|
|
func (uc *upstreamConn) updateAway() {
|
|
|
|
away := true
|
|
|
|
uc.forEachDownstream(func(*downstreamConn) {
|
|
|
|
away = false
|
|
|
|
})
|
|
|
|
if away == uc.away {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if away {
|
|
|
|
uc.SendMessage(&irc.Message{
|
|
|
|
Command: "AWAY",
|
|
|
|
Params: []string{"Auto away"},
|
|
|
|
})
|
|
|
|
} else {
|
|
|
|
uc.SendMessage(&irc.Message{
|
|
|
|
Command: "AWAY",
|
|
|
|
})
|
|
|
|
}
|
|
|
|
uc.away = away
|
|
|
|
}
|